package com.kylin

import java.util.Properties
import org.apache.spark.sql._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

/**
  * @author tianfusheng
  * @e-mail linuxmorebetter@gmail.com
  * @date 2020/2/5
  */
object Core {

  // NOTE(review): credentials, host, and port are hard-coded in source. Move these
  // to external configuration (spark-submit --conf, environment variables, or a
  // properties file) before any non-local use.
  val mysql_user="root"
  val mysql_pwd ="root"
  val mysql_driver="com.mysql.jdbc.Driver"
  val mysql_url = "jdbc:mysql://106.13.32.254:3306/febs_spark_map?useUnicode=true&characterEncoding=UTF-8&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"

  // NOTE(review): the original comments here described a movie box-office analysis,
  // but the code below actually ingests passenger/freight transport-volume CSVs.
  // Comment replaced to match the visible behavior.

  /**
    * Loads one header-ed CSV file, previews it (first 10 rows + schema) for
    * operator feedback, and overwrites the target MySQL table with its contents.
    *
    * @param spark active SparkSession used to read the CSV
    * @param path  path to the CSV file (relative to the working directory)
    * @param table destination MySQL table name (replaced via SaveMode.Overwrite)
    * @param props JDBC connection properties (user/password/driver/truncate)
    */
  private def loadCsvToMysql(spark: SparkSession, path: String, table: String, props: Properties): Unit = {
    val df = spark.read.format("csv").option("header", "true").csv(path)
    df.show(10)
    df.printSchema()
    // "truncate" in props keeps the existing table definition and only clears rows,
    // rather than dropping and recreating the table on Overwrite.
    df.write.mode(SaveMode.Overwrite).jdbc(mysql_url, table, props)
  }

  /**
    * Ingests all transport-volume CSV datasets (regional/annual/monthly passenger
    * and freight figures) into their corresponding MySQL tables.
    *
    * @param spark active SparkSession
    */
  def step1(spark: SparkSession): Unit = {

    val connectionProperties = new Properties
    connectionProperties.setProperty("user", mysql_user)
    connectionProperties.setProperty("password", mysql_pwd)
    connectionProperties.setProperty("driver", mysql_driver)
    connectionProperties.setProperty("truncate", "true")

    // (CSV path, destination table) pairs; paths and table names are unchanged
    // from the original hand-unrolled version.
    val datasets: Seq[(String, String)] = Seq(
      ("engine/src/main/resources/2018地区客运量.csv", "freight_transport"),
      ("engine/src/main/resources/年客运量.csv",       "year_passenger"),
      ("engine/src/main/resources/月客运量2.csv",      "month_passenger"),
      ("engine/src/main/resources/年货运量.csv",       "year_freight"),
      ("engine/src/main/resources/月货运量2.csv",      "month_freight"),
      ("engine/src/main/resources/近五年客运量.csv",   "five_years_passenger"),
      ("engine/src/main/resources/近五年货运量.csv",   "five_years_freight")
    )

    datasets.foreach { case (path, table) =>
      loadCsvToMysql(spark, path, table, connectionProperties)
    }
  }

}
