// Tags (scrape artifact): 每种 自定义 val goodsId session behavior sql id row
package SparkSQL.fun.project
import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
/**
* 统计每种行为的触发次数排名前三的商品id
*/
/**
 * For each behavior type, finds the top-3 goods ids ranked by trigger count.
 *
 * Pipeline: read the raw CSV -> map rows into [[UserBehaviorBean]] ->
 * aggregate trigger counts per (behavior, goodsId) -> rank goods within each
 * behavior with a window function -> keep ranks 1..3.
 */
object BehaviorCode2 {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("project01").setMaster("local[*]")
    val session = SparkSession.builder().config(sparkConf).getOrCreate()
    try {
      // dropMalformed: silently skip unparsable rows; inferSchema: detect column types.
      val map = Map("mode" -> "dropMalformed", "inferSchema" -> "true")
      // Input path may be overridden via the first CLI argument; defaults to the
      // original hard-coded path for backward compatibility.
      val inputPath =
        if (args.nonEmpty) args(0)
        else "G:\\shixunworkspace\\sparkcode\\src\\main\\java\\SparkSQL\\fun\\project\\b.csv"
      val frame = session.read.options(map).csv(inputPath)
      // Columns (by position): userId, goodsId, categoryId, behavior, time
      import session.implicits._
      val frame1: Dataset[UserBehaviorBean] = frame.map(row => {
        UserBehaviorBean(row.getInt(0), row.getInt(1),
          row.getInt(2), row.getString(3), row.getInt(4))
      })
      val frame3 = frame1.toDF("userId", "goodsId", "categoryId", "behavior", "time")
      frame3.createTempView("tmp")
      // Trigger count per (behavior, goodsId) pair.
      val frame2 = session.sql("select behavior, goodsId, count(*) count from tmp group by behavior, goodsId")
      frame2.show()
      frame2.createTempView("tmp1")
      // BUG FIX: rank goods WITHIN each behavior — partition by behavior only and
      // order by count DESC so rn = 1 is the most-triggered goods. The original
      // partitioned by (behavior, goodsId), which put every row alone in its own
      // partition and made every rn equal 1, so the rn <= 3 filter kept everything.
      val frame4 = session.sql("select behavior, goodsId, count, row_number() over(partition by behavior order by count desc) rn from tmp1")
      frame4.show()
      frame4.createTempView("temp2")
      // Keep only the top 3 goods per behavior.
      val frame5 = session.sql("select behavior, goodsId, count, rn from temp2 where rn <= 3")
      frame5.show()
    } finally {
      // Always release the SparkSession, even if the job fails mid-way.
      session.stop()
    }
  }
}
// Source: https://www.cnblogs.com/jsqup/p/16659672.html
// (Site tag/disclaimer boilerplate from the scrape commented out; content shared for learning purposes.)