package com.darrenchan.spark.sql

import org.apache.spark.sql.SparkSession

/**
  * Hands-on practice with DataFrame and RDD, plus several ad-hoc Spark SQL
  * reporting queries against warehouse tables.
  *
  * Input file ("student.data") is pipe-delimited, e.g.:
  * 1|chenchi|13278895582|darrenchan1992@163.com
  * 2|chenchi2|13278895582|darrenchan1992@164.com
  */
object DataFrameCaseShiZhan {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("DataFrameCaseShiZhan").getOrCreate()

    // Raw lines as an RDD.
    val rdd = spark.sparkContext.textFile("student.data")

    import spark.implicits._
    // Split each pipe-delimited line into a Student and convert to a DataFrame.
    val studentDF = rdd.map(_.split("\\|")).map(line => Student(line(0).toInt, line(1), line(2), line(3))).toDF()

    // show() defaults to 20 rows and truncates long values (e.g. "darrenchan1992@16...").
    // Pass an explicit row count, and false to disable truncation and print full values.
    studentDF.show(30, false)
    // First three rows.
    studentDF.take(3).foreach(println)
    // First two rows.
    studentDF.head(2).foreach(println)
    // First row; equivalent to head().
    println(studentDF.first())

    // Select a single column.
    studentDF.select("email").show(10, false)

    // Filter: rows whose name is empty or the literal string 'NULL'.
    studentDF.filter("name='' OR name='NULL'").show()

    // Names starting with 'm' (case-sensitive).
    studentDF.filter("SUBSTR(name,0,1)='m'").show()

    // Sorting.
    studentDF.sort(studentDF.col("name")).show() // ascending
    studentDF.sort(studentDF("name").desc).show() // descending

    studentDF.sort($"name".asc, studentDF("id").desc).show()

    // Renaming a column.
    //df.withColumnRenamed("count", "total")
    studentDF.select(studentDF.col("name").as("stu_name")).show()

    // Join: right outer join of two DataFrames on id.
    val studentDF2 = rdd.map(_.split("\\|")).map(line => Student(line(0).toInt, line(1), line(2), line(3))).toDF()
    studentDF.join(studentDF2, studentDF.col("id") === studentDF2.col("id"), "right_outer").show()

    // Partition date shared by the interaction-report queries below.
    // (Was a dead `var`; now actually interpolated into the queries.)
    val event_day = "20190722"

    spark.sql("""use udw_ns.default""")
    spark.sql(
      s"""
         |select T3.rid `文章ID`, case T4.action_id
         |when '2001' then '评论'
         |when '2002' then '点赞'
         |when '2004' then '分享'
         |when '2005' then '收藏'
         |when '2007' then '关注'
         |end `互动行为`,
         |count(1) `总量` from
         |(
         |	select T2.app_id, T1.rid from
         |	(
         |		select b_appid, rid from bjh_dim_essay_df
         |		where event_day='$event_day'
         |	)T1
         |	join
         |	(
         |		select distinct app_id from bjh_cms_server
         |		where event_day='$event_day'
         |		and (urlkey='Q88GNO2I8C' or urlkey='BH6SK7P7E6')
         |	)T2
         |	on
         |	T1.b_appid=T2.app_id
         |)T3
         |join
         |(
         |	select rid, action_id from baiduapp_dwd_log_interact_di
         |	where event_day='$event_day'
         |	and action_id in ('2001', '2002', '2004', '2005', '2007')
         |	and appid=1
         |)T4
         |on T3.rid=split(T4.rid, '_')[1]
         |group by T3.rid, T4.action_id
      """.stripMargin).repartition(1).write.mode("overwrite").option("delimiter", "\t").format("com.databricks.spark.csv").
      option("header", "true").save(s"afs://yinglong.afs.baidu.com:9902/user/feed-bjh/chenchi/appdata/result_group/$event_day/groupsum")


    /**
      * Final version of the report query.
      */
    spark.sql(
      s"""
         |select '7月22日' `日期`, T5.name `作者名称`, T5.app_id `百家号作者ID`, T5.rid `内容NID`, case T6.action_id
         |when '2001' then '评论'
         |when '2002' then '点赞'
         |when '2004' then '分享'
         |when '2005' then '收藏'
         |when '2007' then '关注'
         |end `互动行为`,
         |if(length(T6.opts)=10, from_unixtime(T6.opts), from_unixtime(cast(T6.opts/1000 as int))) `互动时间` from
         |(
         |	select T4.name, T3.app_id, T3.rid from
         |	(
         |		select T2.app_id, T1.rid from
         |		(
         |			select b_appid, rid from bjh_dim_essay_df
         |			where event_day='$event_day'
         |		)T1
         |		join
         |		(
         |			select distinct app_id from bjh_cms_server
         |			where event_day='$event_day'
         |			and (urlkey='Q88GNO2I8C' or urlkey='BH6SK7P7E6')
         |		)T2
         |		on
         |		T1.b_appid=T2.app_id
         |	)T3
         |	join
         |	(
         |		select id, name from bjh_dim_author_df
         |		where event_day='$event_day'
         |	)T4
         |	on T3.app_id=T4.id
         |)T5
         |join
         |(
         |	select rid, action_id, opts from baiduapp_dwd_log_interact_di
         |	where event_day='$event_day'
         |	and action_id in ('2001', '2002', '2004', '2005', '2007')
         |  and appid=1
         |)T6
         |on T5.rid=split(T6.rid, '_')[1]
         |group by T5.name, T5.app_id, T5.rid, T6.action_id, T6.opts
         |order by T5.name, T5.app_id, T5.rid, T6.action_id, if(length(T6.opts)=10, T6.opts * 1000, T6.opts) asc
      """.stripMargin).repartition(1).write.mode("overwrite").option("delimiter", "\t").format("com.databricks.spark.csv").
      option("header", "true").save(s"afs://yinglong.afs.baidu.com:9902/user/feed-bjh/chenchi/appdata/result_group/${event_day}_2")

    // Sample 4000 active authors (>= 1 article in the last 30 days) and rank
    // them by distinct article count.
    spark.sql(
      """
        |select row_number() over(order by T3.c desc), T3.id, T3.c from
        |(
        |	select T2.id, count(distinct T1.rid) c
        |	from
        |	(
        |		select distinct b_appid, rid
        |		from bjh_dim_essay_df
        |		where event_day='20190812'
        |		and status='publish'
        |		and unix_timestamp('20190812', 'yyyyMMdd') - unix_timestamp(publish_at, 'yyyy-MM-dd HH:mm:ss') >=0
        |		and unix_timestamp('20190812', 'yyyyMMdd') - unix_timestamp(publish_at, 'yyyy-MM-dd HH:mm:ss') <= 30 * 24 * 60 * 60
        |	) T1
        |	join
        |	(
        |		select distinct id
        |		from bjh_dim_author_df
        |		where event_day='20190812'
        |		and status='pass'
        |		and level=0
        |		and is_fake=0
        |	) T2
        |	on T1.b_appid=T2.id
        |	group by T2.id
        |	having count(distinct T1.rid) >= 1
        |	DISTRIBUTE BY RAND() SORT BY RAND() LIMIT 4000
        |) T3
      """.stripMargin).show(500)

    // Authors with exactly 22 distinct articles in the last 30 days.
    spark.sql(
      """
        |select T3.id, T3.c from
        |(
        |	select T2.id, count(distinct T1.rid) c
        |	from
        |	(
        |		select distinct b_appid, rid
        |		from bjh_dim_essay_df
        |		where event_day='20190812'
        |		and status='publish'
        |		and unix_timestamp('20190812', 'yyyyMMdd') - unix_timestamp(publish_at, 'yyyy-MM-dd HH:mm:ss') >=0
        |		and unix_timestamp('20190812', 'yyyyMMdd') - unix_timestamp(publish_at, 'yyyy-MM-dd HH:mm:ss') <= 30 * 24 * 60 * 60
        |	) T1
        |	join
        |	(
        |		select distinct id
        |		from bjh_dim_author_df
        |		where event_day='20190812'
        |		and status='pass'
        |		and level=0
        |		and is_fake=0
        |	) T2
        |	on T1.b_appid=T2.id
        |	group by T2.id
        |	having count(distinct T1.rid) = 22
        |) T3
        |order by T3.c
      """.stripMargin).show(500)

    // Peek at a few special-column article metadata rows.
    spark.sql(
      s"""
         |select article_meta_info from hpb_ods_bjh_article_meta_mola_df
         |where event_day = '20190908'
         |and type='news_special_column'
       """.stripMargin).show(5, false)

    // BUGFIX: stop() used to be called before the SQL statements above, which
    // would have failed every subsequent query on the stopped session. Stop
    // the session last.
    spark.stop()
  }
}

/** One student record parsed from a pipe-delimited line of student.data: id|name|phone|email. */
case class Student(id: Int, name: String, phone: String, email: String)
