package test

import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object StatisticsRating {

  /**
   * Streaming job: watches a directory for newly arriving rating files,
   * computes the average rating per movie within each 20-second batch,
   * joins the result with a static movie catalogue, then prints and saves
   * tuples of (movieId, averageRating, movieName).
   *
   * Expected rating line format:  userId,movieId,rating[,timestamp]
   * Expected movie line format:   movieId,movieName[,genres]
   * NOTE(review): neither file is validated — a header row or malformed
   * line will throw on `toInt`/`toDouble`; confirm inputs are header-free.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("RatingStraming").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    // Streaming context on top of the SparkContext with a 20-second batch interval.
    val ssc = new StreamingContext(sc, Seconds(20))

    // Monitor the directory; each new file's lines enter the stream.
    val lines = ssc.textFileStream("F:\\ratingfile")

    // Parse each rating line into (movieId, rating).
    val ratings = lines.map { line =>
      val fields = line.split(",")
      (fields(1).toInt, fields(2).toDouble)
    }

    // Per-batch average rating per movie, e.g. (914, 3.5).
    val movieScores: DStream[(Int, Double)] = ratings.groupByKey().mapValues { rs =>
      rs.sum / rs.size
    }

    // Static movie catalogue loaded once as a plain RDD: (movieId, movieName).
    val line_movie = sc.textFile("F:\\spark\\movie\\movies.csv")
    val movieskey = line_movie.map { line =>
      val fields = line.split(",")
      (fields(0).toInt, fields(1)) // (MovieID, MovieName)
    }

    // Join each batch's averages with the catalogue:
    // (movieId, averageRating, movieName).
    val result: DStream[(Int, Double, String)] = movieScores.transform { rdd =>
      rdd.join(movieskey).map { case (id, (avg, name)) => (id, avg, name) }
    }

    result.print()

    // Coalesce to a single partition so each batch writes one output file.
    result.repartition(1).saveAsTextFiles("F:\\spark\\result\\")

    // Start the streaming computation and block until it terminates.
    ssc.start()
    ssc.awaitTermination()
  }
}
