package com.lvmama.monkey.analysis

/**
  * Created by hejing on 2017/11/9.
  * 选取训练集
  */

import com.lvmama.monkey.common.logging.LazyLogging
import com.lvmama.monkey.common.utils.JDBCUtils.JDBCTemplate
import com.lvmama.monkey.common.utils.spark.SparkApplication
import com.lvmama.monkey.config.JobConfig
import com.lvmama.monkey.common.utils.Conversion._
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
import com.lvmama.monkey.common.utils.MathUtils._
import org.apache.hadoop.fs.Path
import org.apache.spark.storage.StorageLevel

/**
  * Builds the comment training data set.
  *
  * Pipeline: load `monkey.comment_detail` from MySQL, compute per
  * (product_id, oper_time) the good-comment rate bucket (1..10) and the
  * averages of date_num / positive / negative / sense_type, join the two
  * aggregates, and write the result to HDFS both as raw Row text
  * (ChooseCommentTrainData) and in bracket-stripped CSV-ish training format
  * (ChooseCommentTrainDataResult).
  */
class ChooseCommentTrainData(config: JobConfig) extends SparkApplication with LazyLogging {
  override var appName: String = "ChooseCommentTrainData"
  override var sparkConfig: Map[String, String] = config.spark
  val JDBCDefault = JDBCTemplate.JDBCDefaultSet
  val connP = JDBCTemplate.getConProperties

  // HDFS output locations; both directories are wiped before each run.
  private val trainDataPath = "hdfs://hadoop/user/monkey/Comment/ChooseCommentTrainData"
  private val resultPath = "hdfs://hadoop/user/monkey/Comment/ChooseCommentTrainDataResult"

  def execute(): Unit = {
    sparkConfig += ("spark.app.name" -> appName)
    // NOTE(review): master is hard-coded to local[*]; confirm this is intended
    // outside of local development before deploying.
    sparkConfig += ("spark.master" -> "local[*]")
    sparkConfig += ("spark.default.parallelism" -> "40")
    sparkConfig += ("spark.sql.shuffle.partitions" -> "300")
    withSparkContext { sc =>
      val sqlContext = new SQLContext(sc)

      // step1: select the needed columns, derive sense_type2 (1 when the
      // comment is a good comment, i.e. sense_type == 2, else 0), drop rows
      // with nulls, and order newest-first.
      val mysqlDF: DataFrame = sqlContext.LoadFromMysql("comment_detail", "monkey")
        .select(
          col("product_id"),
          date_format(col("oper_time"), "yyyyMMdd").as("oper_time"),
          col("date_num"),
          decimal2(col("positive_prob")).as("positive"),
          decimal2(col("negative_prob")).as("negative"),
          col("sense_type"))
        .withColumn("sense_type2", when(col("sense_type") === 2, 1).otherwise(0))
        .coalesce(sc.defaultParallelism)
        .na.drop()
        .orderBy(col("product_id").desc, col("oper_time").desc)
      // Reused by two aggregations below, so keep it cached.
      mysqlDF.persist(StorageLevel.MEMORY_AND_DISK_SER)

      // step2: good rate per (product_id, oper_time):
      //   good_rate2 = good comments / total comments * 100 (2 decimals),
      //   good_rate  = decile bucket 1..10.
      // BUG FIX: the original used between(0,10), between(11,20), ... which
      // left gaps — a fractional rate such as 10.37 matched no bucket,
      // produced a null good_rate, and the row was silently removed by the
      // final na.drop(). Chained <= thresholds cover the full [0,100] range.
      val good_rate = mysqlDF.groupBy("product_id", "oper_time")
        .agg("sense_type2" -> "sum", "sense_type" -> "count")
        .withColumnRenamed("sum(sense_type2)", "good")
        .withColumnRenamed("count(sense_type)", "amount")
        .withColumn("good_rate2", decimal2(col("good") / col("amount") * 100))
        .withColumn("good_rate",
          when(col("good_rate2") <= 10, 1)
            .when(col("good_rate2") <= 20, 2)
            .when(col("good_rate2") <= 30, 3)
            .when(col("good_rate2") <= 40, 4)
            .when(col("good_rate2") <= 50, 5)
            .when(col("good_rate2") <= 60, 6)
            .when(col("good_rate2") <= 70, 7)
            .when(col("good_rate2") <= 80, 8)
            .when(col("good_rate2") <= 90, 9)
            .when(col("good_rate2") <= 100, 10))
        // Rename the join keys so they do not collide with `average`'s columns.
        .withColumnRenamed("product_id", "product_ids")
        .withColumnRenamed("oper_time", "oper_times")
        .drop("good_rate2")

      // step3: per (product_id, oper_time) averages of date_num, positive,
      // negative and sense_type, rounded to 2 decimals.
      val average = mysqlDF.groupBy("product_id", "oper_time")
        .agg("date_num" -> "avg", "positive" -> "avg", "negative" -> "avg", "sense_type" -> "avg")
        .withColumn("date_num", decimal2(col("avg(date_num)")))
        .withColumn("positive", decimal2(col("avg(positive)")))
        .withColumn("negative", decimal2(col("avg(negative)")))
        .withColumn("sense_type", decimal2(col("avg(sense_type)")))
        .drop("avg(date_num)").drop("avg(positive)").drop("avg(negative)").drop("avg(sense_type)")

      // step4: join the two aggregates into the final training table.
      val table = average.join(
          good_rate,
          good_rate("product_ids") === average("product_id") &&
            good_rate("oper_times") === average("oper_time"),
          "inner")
        .drop("product_ids").drop("oper_times")
        .orderBy(col("product_id").desc, col("oper_time").desc)
        .na.drop()

      val output = new Path(trainDataPath)
      val output2 = new Path(resultPath)
      val hdfs = org.apache.hadoop.fs.FileSystem.get(
        new java.net.URI("hdfs://hadoop"), new org.apache.hadoop.conf.Configuration())
      // Remove existing output directories so saveAsTextFile does not fail.
      if (hdfs.exists(output)) hdfs.delete(output, true)
      if (hdfs.exists(output2)) hdfs.delete(output2, true)

      // step5: write both outputs from one cached Row-string RDD instead of
      // the original save-then-reread round trip (which recomputed the whole
      // pipeline and re-read the part-* files from HDFS). Row.toString yields
      // the same "[v1,v2,...]" lines saveAsTextFile produced before, so both
      // directories keep their original contents.
      val rowStrings = table.rdd.map(_.toString).persist(StorageLevel.MEMORY_AND_DISK_SER)
      rowStrings.saveAsTextFile(trainDataPath)
      rowStrings
        .flatMap(_.split("\\["))
        .flatMap(_.split("\\]"))
        .filter(_.trim.nonEmpty)
        .saveAsTextFile(resultPath)
      rowStrings.unpersist()

      mysqlDF.unpersist()
    }
  }
}

/** Companion entry point: builds the job with the default config and runs it. */
object ChooseCommentTrainData {
  val config = JobConfig()

  def apply(): Unit = {
    val job = new ChooseCommentTrainData(config)
    job.execute()
  }
}
