package com.spark.cust.movie

import java.util.Properties

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}


/**
 * @description: Using dataset 3, count the number of movies released per year
 *               (result intended for bar-chart visualization downstream).
 * @time: 2020/12/3 15:45
 * @author: lhy
 */
object Code01_MovieNumber {
    def main(args: Array[String]): Unit = {
        val spark: SparkSession = SparkSession.builder()
          .appName("MovieNumber")
          .master("local")
          .getOrCreate()
        import spark.implicits._

        // Schema ("table header"): one row per release year with its movie count.
        val schema: StructType = StructType(Array(
            StructField("years", StringType, nullable = true),
            StructField("number", IntegerType, nullable = true)
        ))

        // movies.dat lines look like "id::Title (Year)::genres"; keep the title field.
        // NOTE(review): assumes field 1 always exists — TODO confirm dataset format.
        val titles: RDD[String] = spark.sparkContext
          .textFile("input/movie/movies.dat")
          .map(_.split("::")(1))

        // Extract the trailing "(Year)" from each title and count per year.
        // Titles without a well-formed "(...)" suffix are skipped via flatMap;
        // the original called substring(0, -1) on them and threw
        // StringIndexOutOfBoundsException, failing the whole job.
        val rowRDD: RDD[Row] = titles.flatMap { title =>
            val left = title.lastIndexOf('(')
            val right = title.lastIndexOf(')')
            if (left != -1 && right > left) Some((title.substring(left + 1, right), 1))
            else None
        }.reduceByKey(_ + _)
          .sortByKey()
          .map { case (year, count) => Row(year, count) }

        // Combine the "header" (schema) with the records.
        val movieDF: DataFrame = spark.createDataFrame(rowRDD, schema)

        // JDBC connection properties for the MySQL sink.
        val prop = new Properties()
        prop.put("user", "root")
        prop.put("password", "bigdata")
        prop.put("driver", "com.mysql.jdbc.Driver")
        // Append the per-year counts to the spark.movie_years table.
        movieDF.write.mode("append")
          .jdbc("jdbc:mysql://192.168.21.104:3306/spark", "spark.movie_years", prop)

        // Must be registered as a temp view before it can be queried with SQL.
        movieDF.createOrReplaceTempView("movie_years")
        val results: DataFrame = spark.sql(
            "SELECT years,number FROM movie_years where number > 50")
        // String interpolation instead of Any + String concatenation
        // (which relied on the deprecated any2stringadd implicit).
        results.map(row => s"${row(0)} ${row(1)}").show()

        // Release Spark resources; the original never stopped the session.
        spark.stop()
    }
}
