package com.lvmama.monkey.analysis

import com.lvmama.monkey.common.logging.LazyLogging
import com.lvmama.monkey.common.utils.JDBCUtils.JDBCTemplate
import com.lvmama.monkey.common.utils.spark.SparkApplication
import com.lvmama.monkey.config.JobConfig
import com.lvmama.monkey.common.utils.Conversion._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
import org.apache.spark.storage.StorageLevel
import com.lvmama.monkey.common.utils.MathUtils._
import org.apache.hadoop.fs.Path
import com.lvmama.monkey.common.utils.DateUtils.getPastDate

/**
  * Created by hejing on 2017/11/9.
  * Selects the columns to persist, building the prediction set used to
  * forecast comment sentiment per product for the next seven days.
  */
class CreateCommentPredictData(config: JobConfig) extends SparkApplication with LazyLogging{
  override var appName: String = "CreateCommentPredictData"
  override var sparkConfig: Map[String, String] = config.spark
  val JDBCDefault = JDBCTemplate.JDBCDefaultSet
  val connP = JDBCTemplate.getConProperties

  def execute(): Unit = {
    sparkConfig += ("spark.app.name" -> appName)
    // NOTE(review): master is hard-coded to local[*]; confirm this job is meant
    // to run single-node and not on the cluster.
    sparkConfig += ("spark.master" -> "local[*]")
    //    sparkConfig+=("spark.default.parallelism" -> "40")
    //    sparkConfig +=("spark.sql.shuffle.partitions" -> "300" )
    withSparkContext {
      sc =>
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._

        // step1: load the comment table from MySQL and derive the base columns.
        // sense_type2 flags "good" comments (sense_type == 2) so summing it yields
        // the good-comment count; sense_type3 duplicates sense_type so it can be
        // counted while the original column is averaged.
        val mysqlDF: DataFrame = sqlContext.LoadFromMysql("comment_detail", "monkey").select(col("product_id"),
          date_format(col("oper_time"),"yyyyMMdd").as("oper_time"),col("date_num"),decimal2(col("positive_prob")).as("positive"),decimal2(col("negative_prob")).as("negative"),col("sense_type"))
          .withColumn("sense_type2", when(col("sense_type")===2,1).otherwise(0)).withColumn("sense_type3", col("sense_type"))
          .coalesce(sc.defaultParallelism).na.drop().orderBy(col("product_id").desc,col("oper_time").desc)
        // Reused by both the aggregate and the date generation below.
        mysqlDF.persist(StorageLevel.MEMORY_AND_DISK_SER)

        // step2: per product_id compute avg(date_num/positive/negative/sense_type),
        // the good-comment count ("goods") and the total comment count ("amounts").
        val average = mysqlDF.groupBy("product_id").agg("date_num" -> "avg","positive" -> "avg","negative" -> "avg","sense_type" -> "avg","sense_type2" -> "sum","sense_type3" -> "count")
          .withColumnRenamed("sum(sense_type2)","goods").withColumnRenamed("count(sense_type3)","amounts")
          .withColumn("date_num", decimal2(col("avg(date_num)"))).withColumn("positive", decimal2(col("avg(positive)")))
          .withColumn("negative", decimal2(col("avg(negative)"))).withColumn("sense_type", decimal2(col("avg(sense_type)")))
          .drop("avg(positive)").drop("avg(negative)").drop("avg(sense_type)").drop("avg(date_num)")
          .orderBy(col("product_id").desc)

        // step3: generate the next 7 dates per product in a single pass.
        // (Previously this mapped over sense.rdd seven separate times and chained
        // the results with unionAll — seven scans of the same lineage. The final
        // row order is still fixed by the orderBy(id, date) after the join, since
        // (id, date) pairs are unique.)
        val sense = mysqlDF.select("product_id", "oper_time").dropDuplicates(Seq("product_id")).orderBy(col("product_id").desc)
        val date = sense.rdd
          .flatMap { row =>
            val id = row(0).asInstanceOf[Long]
            (0 to 6).map(offset => (id, getPastDate(offset)))
          }
          .toDF("id", "date")
          .orderBy(col("id").desc)

        // step4: join the per-product dates with the aggregates to obtain the
        // final prediction-set columns (good/amount renamed from goods/amounts).
        val join  = date.join(average, date("id") === average("product_id"), "inner").drop("product_id").withColumn("good",col("goods"))
          .withColumn("amount",col("amounts")).drop("goods").drop("amounts").orderBy(col("id").desc, col("date").desc).na.drop()

        // step5: write the joined data to HDFS, then strip the "[" / "]" Row
        // delimiters to produce the plain-text training-format result.
        // Paths are defined once so the delete / write / read locations cannot drift.
        val outputDir = "hdfs://hadoop/user/monkey/Comment/commentPredictData"
        val resultDir = "hdfs://hadoop/user/monkey/Comment/commentPredictDataResult"
        val output = new Path(outputDir)
        val output2 = new Path(resultDir)
        val hdfs = org.apache.hadoop.fs.FileSystem.get(new java.net.URI("hdfs://hadoop"), new org.apache.hadoop.conf.Configuration())
        if (hdfs.exists(output)) hdfs.delete(output, true)
        if (hdfs.exists(output2)) hdfs.delete(output2, true)
        join.rdd.saveAsTextFile(outputDir)
        // saveAsTextFile returns Unit — previously its result was assigned to an
        // unused val tempData; just run it for its side effect.
        sc.textFile(outputDir + "/part-*")
          .flatMap(line => line.split("\\["))
          .flatMap(line => line.split("\\]"))
          .filter(!_.trim.equals(""))
          .saveAsTextFile(resultDir)

        mysqlDF.unpersist()
    }
  }
}

/** Companion entry point: builds the job from the default config and runs it. */
object CreateCommentPredictData {
  val config: JobConfig = JobConfig()

  /** Constructs the job and executes it immediately. */
  def apply(): Unit = {
    val job = new CreateCommentPredictData(config)
    job.execute()
  }
}
