package cn.oldsix.spark.core.moviegroom.etl

import cn.oldsix.spark.core.moviegroom.casebean.{Links, Movies, Ratings, Tags}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql._

/**
  * @ Author : Wu.D.J
  * @ Create : 2017.08.07
  */
object ETL {
    /**
      * MovieLens ETL job.
      *
      * Loads the raw text files (links / movies / tags / ratings), drops rows
      * with a trailing empty field, persists each data set as a Hive table,
      * and finally splits the ratings chronologically into a 60% training set
      * (oldest ratings) and a 40% test set (newest ratings).
      */
    def main(args: Array[String]): Unit = {
        val spark = SparkSession
                .builder()
                .appName("ETL")
                // Raise the block-manager heartbeat so that on large data sets a
                // busy worker is not mistaken for a dead one by the master.
                .config("spark.storage.blockManagerHeartBeatMs", "240000")
                .enableHiveSupport()
                .getOrCreate()
        
        import spark.implicits._
        
        // links: movieId, imdbId, tmdbId. A line ending with "," has a missing
        // last field and is filtered out before parsing.
        val linkDF = spark.sparkContext.textFile("/data/spark-example/movie/links.txt")
                .filter(!_.endsWith(","))
                .map(_.split(","))
                .map(x => Links(x(0).trim.toInt, x(1).trim.toInt, x(2).trim.toInt))
                .toDF
        saveAsHiveTable(spark, linkDF, "links", "movieId int, imdbId int, tmdbId int")
        
        // movies: movieId, title, genres.
        val movieDF = spark.sparkContext.textFile("/data/spark-example/movie/movies.txt")
                .filter(!_.endsWith(","))
                .map(_.split(","))
                .map(x => Movies(x(0).trim.toInt, x(1).trim, x(2).trim))
                .toDF
        saveAsHiveTable(spark, movieDF, "movies", "movieId int, title string, genres string")
        
        // tags: the free-text tag field may itself contain commas and quotes,
        // so every line is normalised with rebuild() before splitting.
        val tagDF = spark.sparkContext.textFile("/data/spark-example/movie/tags.txt")
                .filter(!_.endsWith(","))
                .map(x => rebuild(x))
                .map(_.split(","))
                .map(x => Tags(x(0).trim.toInt, x(1).trim.toInt, x(2).trim, x(3).trim.toInt))
                .toDF
        saveAsHiveTable(spark, tagDF, "tags", "userId int, movieId int, tag string, timestamp int")
        
        // ratings: userId, movieId, rating, timestamp.
        val ratingDF = spark.sparkContext.textFile("/data/spark-example/movie/ratings.txt")
                .filter(!_.endsWith(","))
                .map(_.split(","))
                .map(x => Ratings(x(0).trim.toInt, x(1).trim.toInt, x(2).trim.toDouble, x(3).trim.toInt))
                .toDF
        saveAsHiveTable(spark, ratingDF, "ratings", "userId int, movieId int, rating double, timestamp int")
        
        // Count the ratings and derive the 60/40 train/test split sizes.
        val totalCount = spark.sql("select * from ratings").count().toInt
        val percent = 0.6
        val trainDataCount = (totalCount * percent).toInt
        val testDataCount = totalCount - trainDataCount
        
        // Training data: oldest 60% by timestamp. The "order by" and "limit"
        // steps are materialised as two separate Hive tables instead of one
        // query to avoid an OOM on very large data sets; each intermediate is
        // dropped and recreated so re-running the job does not append
        // duplicate rows.
        val tempTrainData = spark.sql("select userId, movieId, rating from ratings order by timestamp asc")
        saveAsHiveTable(spark, tempTrainData, "orderby_train_data", "userId int, movieId int, rating double")
        
        val tempTrainDataLimit = spark.sql("select * from orderby_train_data limit " + trainDataCount)
        saveAsHiveTable(spark, tempTrainDataLimit, "limit_train_data", "userId int, movieId int, rating double")
        
        spark.sql("drop table if exists train_data")
        spark.sql("create table if not exists train_data (userId int, movieId int, rating double)")
        spark.sql("insert into table train_data select * from limit_train_data")
        
        // Test data: newest 40% by timestamp, same two-step approach.
        val tempTestData = spark.sql("select userId, movieId, rating from ratings order by timestamp desc")
        saveAsHiveTable(spark, tempTestData, "orderby_test_data", "userId int, movieId int, rating double")
        
        val tempTestDataLimit = spark.sql("select * from orderby_test_data limit " + testDataCount)
        // BUG FIX: the original registered tempTrainDataLimit here, which
        // silently filled limit_test_data (and therefore test_data) with
        // training rows instead of test rows.
        saveAsHiveTable(spark, tempTestDataLimit, "limit_test_data", "userId int, movieId int, rating double")
        
        spark.sql("drop table if exists test_data")
        spark.sql("create table if not exists test_data (userId int, movieId int, rating double)")
        spark.sql("insert into table test_data select * from limit_test_data")
        
        spark.stop()
    }
    
    /**
      * Persists `df` as the Hive table `table` with the given column
      * definition list. Any previous version of the table is dropped first so
      * the operation is idempotent across job re-runs. Uses
      * createOrReplaceTempView (registerTempTable is deprecated since
      * Spark 2.0).
      *
      * @param spark  active SparkSession with Hive support enabled
      * @param df     the DataFrame to persist
      * @param table  target Hive table name
      * @param schema column definitions, e.g. "movieId int, title string"
      */
    private def saveAsHiveTable(spark: SparkSession, df: DataFrame, table: String, schema: String): Unit = {
        df.createOrReplaceTempView("temp_" + table)
        spark.sql(s"drop table if exists $table")
        spark.sql(s"create table if not exists $table ($schema)")
        spark.sql(s"insert into table $table select * from temp_$table")
    }
    
    /**
      * Normalises one raw tags line (userId,movieId,tag,timestamp) whose tag
      * field may itself contain commas and double quotes: the inner pieces of
      * the tag are concatenated with embedded commas and quotes removed, so
      * the result always splits into exactly four comma-separated fields.
      */
    private def rebuild(input : String) : String = {
        val arr = input.split(",")
        arr.take(2).mkString(",") + "," +
                arr.drop(2).dropRight(1).mkString.replace("\"", "") + "," +
                arr.takeRight(1).mkString
    }
}
