package com.kylin

import java.util.Properties
import org.apache.spark.sql._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/**
  * @author tianfusheng
  * @e-mail linuxmorebetter@gmail.com
  * @date 2020/2/5
  */
object Core {

  // MySQL connection settings.
  // NOTE(review): credentials, URL and the CSV path below are hard-coded for a
  // local developer setup; externalize them to configuration before deploying.
  val mysql_user = "root"
  val mysql_pwd = "root"
  val mysql_driver = "com.mysql.jdbc.Driver"
  val mysql_url = "jdbc:mysql://127.0.0.1:3306/test?useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"

  // (1) Feature 1: build the movie catalogue from the collected box-office
  //     records for the front-end list view.
  // (2) Feature 2: per movie, derive the daily box-office trend (Spark on Hadoop).
  /**
    * Loads the raw box-office CSV, persists the full data set to MySQL
    * (table `data_all`) and derives aggregate views: total box office per
    * movie (top-10 best/worst), the single best day per movie, and the total
    * number of days on release per movie.
    *
    * @param spark active session used for all reads, SQL queries and writes
    */
  def step1(spark: SparkSession): Unit = {
    import spark.implicits._

    // Cached because the raw frame feeds several independent queries below.
    val df = spark.read
      .format("csv")
      .option("header", "true")
      .csv("/Users/kylin/Documents/workspace/FEBS-Vue/engine/src/main/resources/data.csv")
      .cache()
    df.show(10)
    df.printSchema()

    val connectionProperties = new Properties
    connectionProperties.setProperty("user", mysql_user)
    connectionProperties.setProperty("password", mysql_pwd)
    connectionProperties.setProperty("driver", mysql_driver)
    // With Overwrite mode, "truncate" keeps the existing table DDL instead of
    // dropping and recreating the table.
    connectionProperties.setProperty("truncate", "true")
    df.write.mode(SaveMode.Overwrite).jdbc(mysql_url, "data_all", connectionProperties)

    // (3) Feature 3: surface poorly performing movies and, conversely, a
    //     top-10 of high-grossing / well-attended ones.
    df.createOrReplaceTempView("data")
    // BUGFIX: alias the CAST expression. Without "as box_office" Spark names
    // the column "CAST(box_office AS DOUBLE)", and every later reference to
    // "box_office" (sum, max, join condition) fails to resolve.
    val df3 = spark.sql("select movie_title,CAST(box_office AS DOUBLE) as box_office from data")
    df3.show(10)

    // BUGFIX: aggregate box_office explicitly — a bare .sum() would sum every
    // numeric column, not just the box office.
    val df5 = df3
      .groupBy("movie_title")
      .agg(sum($"box_office").alias("sum_box_office"))
      .select("sum_box_office", "movie_title")

    println("--------------top10 desc-------------")
    df5.orderBy(desc("sum_box_office")).show(10)

    println("--------------top10------------------")
    df5.orderBy("sum_box_office").show(10)
    //df5.write.mode(SaveMode.Overwrite).jdbc(mysql_url,"data_top",connectionProperties)

    // (4) Feature 4: for every movie, find the single day with the highest
    //     box office, ordered from high to low.
    val df4 = spark.sql("select data_date,movie_title,CAST(box_office AS DOUBLE) as box_office from data")
    df4.show(10)
    df4.createOrReplaceTempView("data2")
    val df6 = spark.sql("select movie_title,max(box_office) as max_box_office from data2 group by movie_title")
    df6.createOrReplaceTempView("data3")

    // Best single-day record per movie: join each daily row against the
    // per-movie maximum. Ties (several days at the max) yield multiple rows.
    val max_df = spark.sql("select d2.* from data2 d2 join data3 d3 on d2.movie_title=d3.movie_title and d2.box_office=d3.max_box_office")
    //max_df.write.mode(SaveMode.Overwrite).jdbc(mysql_url,"max_data_one",connectionProperties)

    // (5) Feature 5: top-10 longest-running movies. Data cleansing first:
    //     drop preview ("点映") and exhibition ("展映") screenings and rows
    //     with no attendance figure ("--").
    val df7 = df
      .where($"release_days".notEqual("点映"))
      .where($"release_days".notEqual("展映"))
      .where($"attendances".notEqual("--"))
    df7.createOrReplaceTempView("data4")
    // One row per movie per day, so count(movie_title) = days on release.
    val df_total_day = spark.sql("select movie_title,count(movie_title) as total_day from data4 group by movie_title order by total_day desc")
    //df_total_day.write.mode(SaveMode.Overwrite).jdbc(mysql_url,"data_total_day",connectionProperties)
  }

}
