package com.lvmama.monkey.buy

import com.lvmama.monkey.common.logging.LazyLogging
import com.lvmama.monkey.common.utils.JDBCUtils.JDBCTemplate
import com.lvmama.monkey.common.utils.spark.SparkApplication
import com.lvmama.monkey.config.JobConfig
import com.lvmama.monkey.common.utils.Conversion._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
import org.apache.spark.storage.StorageLevel
import com.lvmama.monkey.common.utils.DateUtils._
import com.lvmama.monkey.common.utils.Utils._
import com.lvmama.monkey.common.utils.MathUtils._
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.types.DoubleType

/**
  * Created by hejing on 2017/11/15.
  *
  * Builds the prediction input set for the purchasing model: selects the
  * required fields from the MySQL `comment_detail` table, derives per-product
  * sentiment averages and per-weekday comment counts over the last 7 days,
  * and stores the final rows on HDFS for the downstream predictor.
  */
class CreatePurchasingPredictData(config: JobConfig) extends SparkApplication with LazyLogging{
  override var appName: String = "CreatePurchasingPredictData"
  override var sparkConfig: Map[String, String] = config.spark
  // Default JDBC settings / connection properties for the MySQL source.
  val JDBCDefault = JDBCTemplate.JDBCDefaultSet
  val connP = JDBCTemplate.getConProperties

  def execute(): Unit = {
    sparkConfig += ("spark.app.name" -> appName)
    // NOTE(review): master is hard-coded to local[*]; this should come from
    // JobConfig before this job runs on a cluster — confirm intent.
    sparkConfig += ("spark.master" -> "local[*]")
    //    sparkConfig+=("spark.default.parallelism" -> "40")
    //    sparkConfig +=("spark.sql.shuffle.partitions" -> "300" )
    withSparkContext {
      sc =>
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._

        // step1: load comment_detail and keep only the fields we need
        // (product id, comment creation date as yyyyMMdd, sentiment type).
        val mysqlDF: DataFrame = sqlContext.LoadFromMysql("comment_detail", "monkey").select(col("product_id").as("productId"),
          date_format(col("create_date"), "yyyyMMdd").as("date"),col("sense_type").as("senseType"))
          .coalesce(12).na.drop().orderBy(col("productId").desc, col("date").desc)
        mysqlDF.persist(StorageLevel.MEMORY_AND_DISK_SER)

        // step2: average senseType per product id (mean over the 0/1/2 comment ratings).
        val senseType = mysqlDF.groupBy("productId").agg("senseType" -> "avg").withColumn("senseType", decimal2(col("avg(senseType)").cast(DoubleType))).drop("avg(senseType)")
        // step3: date1-date7 create, for each product id, one row per day for the past
        // 7 days; "week" maps each date to its day-of-week; DateTable unions them all.
        // NOTE(review): col("date").as("String") only renames the column — presumably
        // a cast to string was intended; left unchanged because dayOfWeek's input
        // contract is not visible here. TODO confirm.
        val sense = mysqlDF.dropDuplicates(Seq("productId")).drop("senseType")
        val date1: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(0)))
        val date2: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(1)))
        val date3: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(2)))
        val date4: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(3)))
        val date5: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(4)))
        val date6: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(5)))
        val date7: RDD[(String, String)] = sense.rdd.map(x => (x(0).toString, getPastDate(6)))
        val DateTable = date1.toDF("id","date").unionAll(date2.toDF()).unionAll(date3.toDF()).unionAll(date4.toDF()).unionAll(date5.toDF())
          .unionAll(date6.toDF()).unionAll(date7.toDF()).withColumn("week", dayOfWeek(col("date").as("String"))).orderBy(col("id").desc,col("date"))

        // step4: join senseType onto DateTable so each (id, date, week) row carries
        // the product's average sentiment.
        val joinDate_senseType = DateTable.join(senseType,DateTable("id") === senseType("productId"), "inner").drop("productId").repartition(sc.defaultParallelism)
        joinDate_senseType.persist(StorageLevel.MEMORY_AND_DISK_SER)

        // step5: total comment count (good/neutral/bad together) per product per
        // day-of-week: |productIds|week_a|good|
        val amount = mysqlDF.withColumn("week_a", dayOfWeek(col("date").as("String"))).groupBy("productId","week_a").agg("senseType" -> "count")
          .orderBy(col("productId").desc).withColumnRenamed("count(senseType)","good").withColumnRenamed("date","dates")
          .withColumnRenamed("productId", "productIds").repartition(sc.defaultParallelism)
        mysqlDF.unpersist()
        amount.persist(StorageLevel.MEMORY_AND_DISK_SER)
        // step6: join the counts onto joinDate_senseType
        // (|id|date|week|senseType| -> |id|date|week|senseType|good_comment|).
        // Rows where week_a != week are redundant join fan-out, so their count is
        // zeroed and summed away in step7.
        // NOTE(review): otherwise("0") injects a *string* zero into a numeric
        // column (Spark widens the column type) — presumably otherwise(0) was
        // intended; left unchanged to avoid altering the output schema. TODO confirm.
        val joinLastTable = joinDate_senseType.join(amount,joinDate_senseType("id") === amount("productIds"), "inner")
          .withColumn("good_comment",when(col("week")===col("week_a"),col("good")).otherwise("0"))
          .drop("productId").drop("productIds").drop("week_a").drop("good")
        amount.unpersist()
        joinLastTable.persist(StorageLevel.MEMORY_AND_DISK_SER)

        // step7: group joinLastTable by (id, date); each week value has 7 rows after
        // the fan-out, so the sum recovers the true count: |ids|dates|sum(good_comment)|
        val good_comment = joinLastTable.groupBy("id","date").agg("good_comment" -> "sum").withColumnRenamed("id","ids").withColumnRenamed("date","dates").repartition(sc.defaultParallelism)
        joinLastTable.unpersist()
        good_comment.persist(StorageLevel.MEMORY_AND_DISK_SER)

        // step8: final dataset handed to the predictor: |id|date|week|senseType|good_comment|
        val fin = joinDate_senseType.join(good_comment,joinDate_senseType("id") === good_comment("ids") && joinDate_senseType("date") === good_comment("dates"), "inner")
          .select(col("id").cast("long"),col("date"),col("week"),col("senseType"),col("sum(good_comment)").as("good_comment"))
          .repartition(sc.defaultParallelism).orderBy(col("id").desc,col("week").asc)

        // Clear previous outputs, write the raw Row text, then strip the
        // surrounding "[...]" that Row.toString adds and write the cleaned result.
        val output = new Path("hdfs://hadoop/user/monkey/Purchasing/predictData")
        val output2 = new Path("hdfs://hadoop/user/monkey/Purchasing/predictDataResult")
        val hdfs = org.apache.hadoop.fs.FileSystem.get(new java.net.URI("hdfs://hadoop"), new org.apache.hadoop.conf.Configuration())
        if (hdfs.exists(output)) hdfs.delete(output, true)
        if (hdfs.exists(output2)) hdfs.delete(output2, true)
        fin.rdd.saveAsTextFile("hdfs://hadoop/user/monkey/Purchasing/predictData")
        // Fixed: this statement returns Unit; the old `val selectDate = ...` binding
        // was dead and misleadingly suggested a dataset was captured.
        sc.textFile("hdfs://hadoop/user/monkey/Purchasing/predictData/part-*").flatMap(line => line.split("\\[")).flatMap(line => line.split("\\]"))
          .filter(!_.trim.equals("")).saveAsTextFile("hdfs://hadoop/user/monkey/Purchasing/predictDataResult")

        joinDate_senseType.unpersist()
        good_comment.unpersist()
    }

  }
}

object CreatePurchasingPredictData {
  // Job configuration, resolved once when the companion object is first touched.
  val config = JobConfig()

  /** Convenience entry point: constructs the job with the shared config and runs it. */
  def apply(): Unit = {
    val job = new CreatePurchasingPredictData(config)
    job.execute()
  }
}
