package com.etc

import java.net.InetAddress
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoClientURI}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.InetSocketTransportAddress
import org.elasticsearch.transport.client.PreBuiltTransportClient



/**
  * Movie 的数据集 数据集字段进行分割
  *
  * 1^                                                    电影ID
  * Toy Story (1995)^                                     电影的名字
  * ^                                                     电影的描述
  *  81 minutes^                                          电影的时长
  *  March 20, 2001^                                      电影的发行时间
  *  1995^                                                电影的拍摄时间
  *  English^                                             电影的语言
  *  Adventure|Animation|Children|Comedy|Fantasy^         电影的类型
  *  Tom |Wallace Shawn.....^                             电影的演员
  *  John Lasseter^                                       电影的导演
  * tag1|tag2|tag3|...                                    电影的tag
  *
  */

/** One movie record parsed from the `^`-separated movies data set (see the header comment above). */
case class Movie(mid: Int,
                 name: String,
                 descri: String,
                 timelong: String,
                 issue: String,
                 shoot: String,
                 language: String,
                 genres: String,
                 actors: String,
                 directives: String)

/**
  * Rating数据集：用户对电影的评分数据集
  * 1                    用户ID
  * 31                   电影ID
  * 2.5                  用户对于电影的评分
  * 1260759144           用户对于电影的评分的时间
  */
case class Rating(val uid:Int,val mid:Int,val score:Double,val timestamp:Int)

/**
  * Tag数据集： 用户对电影的标签数据集
  *
  * 15,                         用户ID
  * 339,                        电影ID
  * sandra 'boring' bullock,    标签的具体内容
  * 1138537770                  用户对于电影的打标签的时间
  *
  */
case class Tag(val uid:Int,val mid:Int,val tag:String,val timestamp:Int)


/**
  *MongoDB连接配置
  * @param url
  * @param db
  */
case class MongodbConfig(val url:String,val db:String)

/**
  *ElasticSearch
  *
  * @param httpHosts                 http的主机列表，以 , 分隔
  * @param transportHosts            transport 主机列表
  * @param index                     所需要操作的索引
  * @param clustername               es集群的名字
  */
case class  EsConfig(val httpHosts: String,val transportHosts: String,val index:String,val clustername:String)

  //数据的主加载服务
//Main data-loading service: reads the raw movie / rating / tag files, writes them
//into MongoDB, and indexes the movies (enriched with their tags) into Elasticsearch.
object DataLoader {
    // Local paths of the raw data files.
    val MOVIE_DATA_PATH = "F:\\demo\\MovieRecommendSystem\\recommender\\dataloader\\src\\main\\resources\\movies.csv\\"
    val RATING_DATA_PATH = "F:\\demo\\MovieRecommendSystem\\recommender\\dataloader\\src\\main\\resources\\ratings.csv\\"
    val TAG_DATA_PATH = "F:\\demo\\MovieRecommendSystem\\recommender\\dataloader\\src\\main\\resources\\tags.csv\\"

    // Target MongoDB collection names and the ES type name used in the save path.
    val MONGODB_MOVIE_COLLECTION = "Movie"
    val MONGODB_RATING_COLLECTION = "Rating"
    val MONGODB_TAG_COLLECTION = "Tag"
    val ES_MOVIE_INDEX = "movies"

    def main(args: Array[String]): Unit = {

      // All runtime settings in one place; in a real deployment these would come from args/config.
      val config = Map(
        "spark.cores" -> "local[*]",
        "spark.name" -> "DataLoader",
        "mongodb.url" -> "mongodb://localhost:27017/movies",
        "mongodb.db" -> "movies",
        "es.httpHosts" -> "localhost:9200",
        "es.transportHosts" -> "localhost:9300",
        "es.cluster.name" -> "my-application",
        "es.index" -> "movies"
      )

      // Build the Spark configuration and session.
      val conf = new SparkConf().setMaster(config("spark.cores")).setAppName(config("spark.name"))
      val spark = SparkSession.builder().config(conf).getOrCreate()

      import spark.implicits._

      // Load the Movie data set. Each line is `^`-separated:
      //   mid ^ name ^ descri ^ timelong ^ issue ^ shoot ^ language ^ genres ^ actors ^ directives
      val movieRDD = spark.sparkContext.textFile(MOVIE_DATA_PATH)
      val movieDF = movieRDD.map(item => {
        val attr = item.split("\\^")
        Movie(attr(0).toInt, attr(1).trim, attr(2).trim, attr(3).trim, attr(4).trim,
              attr(5).trim, attr(6).trim, attr(7).trim, attr(8).trim, attr(9).trim)
      }).toDF()

      // Load the Rating data set: `uid,mid,score,timestamp`.
      val ratingRDD = spark.sparkContext.textFile(RATING_DATA_PATH)
      val ratingDF = ratingRDD.map(item => {
        val attr = item.split(",")
        Rating(attr(0).toInt, attr(1).toInt, attr(2).toDouble, attr(3).toInt)
      }).toDF()

      // Load the Tag data set: `uid,mid,tag,timestamp`.
      val tagRDD = spark.sparkContext.textFile(TAG_DATA_PATH)
      val tagDF = tagRDD.map(item => {
        val attr = item.split(",")
        Tag(attr(0).toInt, attr(1).toInt, attr(2).trim, attr(3).toInt)
      }).toDF()

      implicit val mongodbConfig = MongodbConfig(config("mongodb.url"), config("mongodb.db"))

      // Persist the three raw data sets into MongoDB.
      storeDataInMongodb(movieDF, ratingDF, tagDF)

      import org.apache.spark.sql.functions._

      // Collapse all tags of one movie into a single pipe-separated column:
      //   mid, "tag1|tag2|tag3"
      val newTag = tagDF.groupBy($"mid")
        .agg(concat_ws("|", collect_set($"tag")).as("tags"))
        .select("mid", "tags")

      // Left-join the aggregated tags onto the movies on the shared `mid` column.
      // BUGFIX: the original passed Seq("mid","mid"), listing the same join column twice;
      // the column is named once per Dataset.join's usingColumns contract.
      val frame = movieDF.join(newTag, Seq("mid"), "left")

      implicit val esConfig = EsConfig(config("es.httpHosts"), config("es.transportHosts"),
        config("es.index"), config("es.cluster.name"))
      storeDataInEs(frame)

      spark.stop()
    }

    //Write the three DataFrames into MongoDB, recreating the collections and their indexes.
    def storeDataInMongodb(movieDF:DataFrame,ratingDF:DataFrame,tagDF:DataFrame)(implicit mongoconfig:MongodbConfig):Unit ={

      // Open a client connection to MongoDB.
      val mongoclient = MongoClient(MongoClientURI(mongoconfig.url))

      // Drop any pre-existing collections so re-running the loader is idempotent.
      // BUGFIX: the original passed the constant NAMES as string literals
      // ("MONGODB_MOVIE_COLLECTION" etc.), so the real collections were never dropped.
      mongoclient(mongoconfig.db)(MONGODB_MOVIE_COLLECTION).dropCollection()
      mongoclient(mongoconfig.db)(MONGODB_RATING_COLLECTION).dropCollection()
      mongoclient(mongoconfig.db)(MONGODB_TAG_COLLECTION).dropCollection()

      // Write each DataFrame through the MongoDB Spark connector.
      movieDF
        .write
        .option("uri", mongoconfig.url)
        .option("collection", MONGODB_MOVIE_COLLECTION)
        .mode("overwrite")
        .format("com.mongodb.spark.sql")
        .save()

      ratingDF
        .write
        .option("uri", mongoconfig.url)
        .option("collection", MONGODB_RATING_COLLECTION)
        .mode("overwrite")
        .format("com.mongodb.spark.sql")
        .save()

      tagDF
        .write
        .option("uri", mongoconfig.url)
        .option("collection", MONGODB_TAG_COLLECTION)
        .mode("overwrite")
        .format("com.mongodb.spark.sql")
        .save()

      // Create the query indexes.
      // BUGFIX: the original created the identical `mid` index twice on Rating and Tag;
      // ratings and tags are also looked up per user, so index `uid` as well.
      mongoclient(mongoconfig.db)(MONGODB_MOVIE_COLLECTION).createIndex(MongoDBObject("mid" -> 1))

      mongoclient(mongoconfig.db)(MONGODB_RATING_COLLECTION).createIndex(MongoDBObject("uid" -> 1))
      mongoclient(mongoconfig.db)(MONGODB_RATING_COLLECTION).createIndex(MongoDBObject("mid" -> 1))

      mongoclient(mongoconfig.db)(MONGODB_TAG_COLLECTION).createIndex(MongoDBObject("uid" -> 1))
      mongoclient(mongoconfig.db)(MONGODB_TAG_COLLECTION).createIndex(MongoDBObject("mid" -> 1))

      // Release the MongoDB connection.
      mongoclient.close()
    }

    //Index the tag-enriched movie DataFrame into Elasticsearch, recreating the index first.
    def storeDataInEs(movieDF:DataFrame)(implicit esConfig: EsConfig):Unit ={

      // Build the client settings with the target cluster name.
      val settings = Settings.builder.put("cluster.name", esConfig.clustername).build()

      // Create the transport client.
      val esclient = new PreBuiltTransportClient(settings)

      // Register every configured transport host ("host:port") with the client.
      val REGEX_HOST_PORT = "(.+):(\\d+)".r
      esConfig.transportHosts.split(",").foreach {
        case REGEX_HOST_PORT(host: String, port: String) =>
          esclient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host), port.toInt))
        case other =>
          // Fail fast with a clear message instead of the original's bare MatchError.
          throw new IllegalArgumentException(s"Malformed transport host (expected host:port): $other")
      }

      // Drop any stale index so re-running the loader starts from a clean slate.
      if (esclient.admin().indices().exists(new IndicesExistsRequest(esConfig.index)).actionGet().isExists) {
        esclient.admin().indices().delete(new DeleteIndexRequest(esConfig.index))
      }
      esclient.admin().indices().create(new CreateIndexRequest(esConfig.index))

      // Write through the elasticsearch-spark connector, using `mid` as the document id.
      // BUGFIX: the option key was misspelled "es,mapping.id" (comma instead of dot),
      // so the connector silently ignored it and generated random document ids.
      movieDF
        .write
        .option("es.nodes", esConfig.httpHosts)
        .option("es.http.timeout", "100m")
        .option("es.mapping.id", "mid")
        .mode("overwrite")
        .format("org.elasticsearch.spark.sql")
        .save(esConfig.index + "/" + ES_MOVIE_INDEX)
    }
}
