package com.ocean.recommend

import java.sql.Connection

import com.alibaba.druid.pool.DruidDataSourceFactory
import javax.sql.DataSource
import org.apache.http.HttpHost
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest
import org.elasticsearch.client.indices.{CreateIndexRequest, GetIndexRequest}
import org.elasticsearch.client.{RequestOptions, RestClient, RestHighLevelClient}
import redis.clients.jedis.Jedis

/**
  * A movie record.
  *
  * @param mid    movie id
  * @param name   movie title
  * @param genres genres the movie belongs to, separated by "|"
  */
final case class Movie(mid: Int, name: String, genres: String)

/**
  * A user's rating of a movie.
  *
  * @param uid       user id
  * @param mid       movie id
  * @param score     rating score
  * @param timestamp time the rating was made (epoch timestamp)
  */
final case class Rating(uid: Int, mid: Int, score: Double, timestamp: Long)

/**
  * A tag a user attached to a movie.
  *
  * @param uid       user id
  * @param mid       movie id
  * @param tag       tag text
  * @param timestamp time the tag was applied (epoch timestamp)
  */
final case class Tag(uid: Int, mid: Int, tag: String, timestamp: Long)

/**
  * MySQL connection settings.
  *
  * @param uri      JDBC connection URI
  * @param user     database user name
  * @param password database password
  */
final case class MysqlConfig(uri: String, user: String, password: String)

/**
  * Elasticsearch connection settings.
  *
  * @param httpHosts HTTP host
  * @param port      HTTP port
  * @param index     index to operate on
  */
final case class ESConfig(httpHosts: String, port: Int, index: String)

/**
  * Loads the raw movie/rating/tag CSV files and persists them to
  * MySQL, Elasticsearch and Redis.
  */
object DataLoader {

  // Local paths of the raw CSV files.
  val MOVIE_DATA_PATH = "D:\\myproject\\recommendations\\recommend\\dataloader\\src\\main\\resources\\movies.csv"
  val RATING_DATA_PATH = "D:\\myproject\\recommendations\\recommend\\dataloader\\src\\main\\resources\\ratings.csv"
  val TAG_DATA_PATH = "D:\\myproject\\recommendations\\recommend\\dataloader\\src\\main\\resources\\tags.csv"

  // Number of most recent ratings kept per user in Redis.
  val RECENTLY_RATING_MOVIE_NUM = 20

  // Only records with mid below this bound are loaded (keeps the data set small).
  val MAX_MOVIE_ID = 2000

  val MySql_MOVIE_Table = "movie"
  val MySql_RATING_Table = "rating"
  val MySql_TAG_Table = "tag"
  val ES_MOVIE_INDEX = "movie"

  /**
    * Entry point: reads the CSV files, filters to mid < MAX_MOVIE_ID, and
    * writes the results to the configured sinks.
    */
  def main(args: Array[String]): Unit = {
    // NOTE(review): credentials are hard-coded; externalize them for production use.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.uri" -> "jdbc:mysql://192.168.10.105:3306/recommend?useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true&useSSL=false",
      "mysql.user" -> "root",
      "mysql.password" -> "cde32wsxzaq1",
      "es.httpHosts" -> "192.168.10.105",
      "es.port" -> "9200",
      "es.index" -> "recommend"
    )

    val sparkConf: SparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("DataLoader")
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    // ---- Movies ----
    // A line with a quoted title contains commas inside the title, so it is
    // sliced by the first and last comma instead of split(",").
    val movieRDD: RDD[String] = spark.sparkContext.textFile(MOVIE_DATA_PATH)
    val movieDF: DataFrame = movieRDD.map { item =>
      if (item.contains("\"")) {
        val firstComma = item.indexOf(",")
        val lastComma = item.lastIndexOf(",")
        val midField = item.substring(0, firstComma).trim
        val nameField = item.substring(firstComma + 1, lastComma)
        val genresField = item.substring(lastComma + 1).trim
        // Strip the surrounding quotes from the title.
        // (Bug fix: the old code used item.replace(mid, ""), which removed every
        // occurrence of the mid's digits anywhere in the line, corrupting titles
        // and genres that happened to contain those digits.)
        val name = nameField.substring(nameField.indexOf("\"") + 1, nameField.lastIndexOf("\"")).trim
        Movie(midField.toInt, name, genresField)
      } else {
        val attr = item.split(",")
        Movie(attr(0).toInt, attr(1).trim, attr(2).trim)
      }
    }.filter(_.mid < MAX_MOVIE_ID)
      .toDF()

    // ---- Ratings ----
    val ratingRDD: RDD[String] = spark.sparkContext.textFile(RATING_DATA_PATH)
    // Cached because it is reused below to build the per-user recent ratings.
    val ratingObjectRDD: RDD[Rating] = ratingRDD.map { item =>
      val attr = item.split(",")
      // Bug fix: parse the timestamp as Long (the field is Long; toInt overflows after 2038).
      Rating(attr(0).toInt, attr(1).toInt, attr(2).toDouble, attr(3).toLong)
    }.filter(_.mid < MAX_MOVIE_ID).cache()
    val ratingDF: DataFrame = ratingObjectRDD.toDF()

    // ---- Tags ----
    val tagRDD: RDD[String] = spark.sparkContext.textFile(TAG_DATA_PATH)
    val tagDF: DataFrame = tagRDD.map { item =>
      val attr = item.split(",")
      Tag(attr(0).toInt, attr(1).toInt, attr(2).trim, attr(3).toLong)
    }.filter(_.mid < MAX_MOVIE_ID).toDF()

    implicit val mysqlConfig = MysqlConfig(config("mysql.uri"), config("mysql.user"), config("mysql.password"))
    // Persist the raw tables to MySQL (disabled; enable when reloading).
    //    saveDataToMySql(movieDF, ratingDF, tagDF)

    import org.apache.spark.sql.functions._

    // Aggregate each movie's tags into one "tag1|tag2|tag3..." column and
    // left-join it onto the movie table: (mid, tags).
    val newTag: DataFrame = tagDF.groupBy($"mid")
      .agg(concat_ws("|", collect_set($"tag")).as("tags"))
      .select("mid", "tags")
    val movieWithTagDF: DataFrame = movieDF.join(newTag, Seq("mid"), "left")

    implicit val esConfig = ESConfig(config("es.httpHosts"), config("es.port").toInt, config("es.index"))
    // Persist movies (with aggregated tags) to Elasticsearch (disabled; enable when reloading).
    //    saveDataToEs(movieWithTagDF)

    // For every user keep the RECENTLY_RATING_MOVIE_NUM most recent ratings,
    // sorted newest first: uid -> List[(mid, score)].
    val recentlyRatingRDD: RDD[(Int, List[(Int, Double)])] = ratingObjectRDD
      .map(r => (r.uid, (r.mid, r.score, r.timestamp)))
      .groupByKey()
      .map { case (uid, ratings) =>
        val recent = ratings.toList
          .sortWith(_._3 > _._3)
          .take(RECENTLY_RATING_MOVIE_NUM)
          .map(r => (r._1, r._2))
        (uid, recent)
      }

    saveDataToRedis(recentlyRatingRDD)

    spark.stop()
  }

  /**
    * Writes the three DataFrames to MySQL (overwriting the existing tables)
    * and then recreates lookup indexes on the mid/uid columns.
    *
    * @param movieDF     movies table
    * @param ratingDF    ratings table
    * @param tagDF       tags table
    * @param mysqlConfig JDBC connection settings (implicit)
    */
  def saveDataToMySql(movieDF: DataFrame, ratingDF: DataFrame, tagDF: DataFrame)(implicit mysqlConfig: MysqlConfig): Unit = {
    val properties = new java.util.Properties()
    properties.put("user", mysqlConfig.user)
    properties.put("password", mysqlConfig.password)

    // Overwrite mode drops and recreates each table (which also drops any
    // indexes), so the indexes are (re)created afterwards.
    movieDF.write.mode(SaveMode.Overwrite).jdbc(mysqlConfig.uri, MySql_MOVIE_Table, properties)
    ratingDF.write.mode(SaveMode.Overwrite).jdbc(mysqlConfig.uri, MySql_RATING_Table, properties)
    tagDF.write.mode(SaveMode.Overwrite).jdbc(mysqlConfig.uri, MySql_TAG_Table, properties)

    // Druid reuses the same Properties object but additionally needs the
    // url / username / driverClassName keys.
    properties.setProperty("url", mysqlConfig.uri)
    properties.setProperty("username", mysqlConfig.user)
    properties.setProperty("driverClassName", "com.mysql.jdbc.Driver")
    val dataSource: DataSource = DruidDataSourceFactory.createDataSource(properties)

    val indexStatements = Seq(
      "CREATE INDEX movie_mid_index ON movie (mid)",
      "CREATE INDEX rating_uid_index ON rating (uid)",
      "CREATE INDEX rating_mid_index ON rating (mid)",
      "CREATE INDEX tag_mid_index ON tag (mid)",
      "CREATE INDEX tag_uid_index ON tag (uid)"
    )
    val connection: Connection = dataSource.getConnection
    // Bug fix: close the connection and each statement even when a statement
    // fails; previously a failure leaked both.
    try {
      indexStatements.foreach { sql =>
        val statement = connection.prepareStatement(sql)
        try statement.execute()
        finally statement.close()
      }
    } finally {
      connection.close()
    }
  }

  /**
    * Recreates the Elasticsearch index and bulk-writes the movie data
    * (including the aggregated tags column) into it, using mid as document id.
    *
    * @param movieWithTagDF movies joined with their aggregated tags
    * @param eSConfig       ES host/port/index settings (implicit)
    */
  def saveDataToEs(movieWithTagDF: DataFrame)(implicit eSConfig: ESConfig): Unit = {
    val esClient = new RestHighLevelClient(
      RestClient.builder(new HttpHost(eSConfig.httpHosts, eSConfig.port, "http")))
    // Bug fix: the client is closed even when an index operation throws. It is
    // only needed for index management; the bulk write below goes through the
    // elasticsearch-spark connector, not this client.
    try {
      // Drop the index if it already exists, then create it fresh.
      val indexExists = esClient.indices().exists(new GetIndexRequest(eSConfig.index), RequestOptions.DEFAULT)
      if (indexExists) {
        esClient.indices().delete(new DeleteIndexRequest(eSConfig.index), RequestOptions.DEFAULT)
      }
      esClient.indices().create(new CreateIndexRequest(eSConfig.index), RequestOptions.DEFAULT)
    } finally {
      esClient.close()
    }

    movieWithTagDF.write
      .option("es.nodes", eSConfig.httpHosts + ":" + eSConfig.port)
      .option("es.nodes.wan.only", "true")
      .option("es.http.timeout", "100m")
      .option("es.mapping.id", "mid")
      .mode("overwrite")
      .format("org.elasticsearch.spark.sql")
      .save(eSConfig.index)
  }

  /**
    * Stores each user's recent ratings in Redis as a list under the key
    * "uid:&lt;uid&gt;", one "&lt;mid&gt;:&lt;score&gt;" element per rating.
    * Any existing list for the user is replaced.
    *
    * @param recentlyRatingRDD uid -> list of (mid, score)
    * @param redisHost         Redis server host (defaults to the original hard-coded host)
    */
  def saveDataToRedis(recentlyRatingRDD: RDD[(Int, List[(Int, Double)])],
                      redisHost: String = "192.168.10.105"): Unit = {
    val jedis = new Jedis(redisHost)
    // Bug fix: close the Redis connection even if a command fails.
    try {
      recentlyRatingRDD.collect().foreach { case (uid, ratings) =>
        val key = "uid:" + uid
        // DEL on a missing key is a no-op, so the former EXISTS round-trip is unnecessary.
        jedis.del(key)
        for ((mid, score) <- ratings) {
          jedis.lpush(key, s"$mid:$score")
        }
      }
    } finally {
      jedis.close()
    }
  }
}
