package Sun


import java.text.SimpleDateFormat
import java.util
import java.util.{GregorianCalendar, Calendar}

import breeze.linalg.max
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SQLContext, DataFrame, Row}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types.{StructType, StringType, StructField}
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.sql.functions._
import redis.clients.jedis.Response
import scala.math
import scala.collection.mutable.ArrayBuffer


object ETL {

  // Maximum number of action rows (impressions) retained per user after trimming.
  val numberOfAction = 20

  // Expected inputs (per the original author's notes):
  //   - location of the large "monitor" log file
  //   - location of the small "conversion" file
  //   - output location for the result (tentative)

  // The pipeline converts the staged files into vectors usable for training.


  /** Entry point: runs the full ETL pipeline.
    *
    * Steps: clean the monitor log, join conversions, join ad-site metadata,
    * persist the joined result, compute feature-C statistics, join them back
    * on the user id, and vectorize.
    *
    * FIX: the pooled Jedis connection was checked out and never returned
    * (resource leak); it is now closed in a finally block. The large dead
    * commented-out Redis pipeline experiment was removed.
    */
  def main(args: Array[String]) {

    // Touch Redis up front (selects the working DB index; also fails fast if
    // Redis is unreachable) and always return the connection to the pool.
    val jedis = RedisClient.pool.getResource
    try {
      jedis.select(BasicFunction.dbIndex)
    } finally {
      jedis.close()
    }

    val sparkConf = new SparkConf().setAppName("ETL").set("spark.hadoop.validateOutputSpecs", "false")
    val sc = new SparkContext(sparkConf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // CLI layout: args(0) = base input path, args(1) = run mode.
    val path = sc.broadcast(args(0))
    val mode = sc.broadcast(args(1))

    BasicFunction.basicpath = path.value
    BasicFunction.mode = mode.value

    // ETL chain: clean monitor log -> join conversions -> join ad metadata.
    val df_AdBasedata = admonitorData(sc, args)
    val conversion = conversionJoin(sc, args, df_AdBasedata)
    val finalResult = admonitorJoin(sc, args, conversion)

    finalResult.write.format("json").save(BasicFunction.result_compete_path)

    // Compute feature-C statistics, then join them back on the user id.
    Separate.compute_statistics_FeatureC(finalResult, sc)

    val featureCvalue = sqlContext.read.format("json").load(BasicFunction.basicloadpath(BasicFunction.featureC_path))

    val df_result = finalResult.join(featureCvalue, finalResult.col("mzid") === featureCvalue.col("mzid2")).drop("mzid2")

    Separate.toVecotr(df_result)

    sc.stop()
  }



  //给出一个可以直接读取特征的函数
  //读取的格式是（userid,data:array[Double]）
  /** Loads a pre-computed feature file into (userId, featureVector) pairs.
    *
    * Each text line has the form "userid,f1,f2,...": the first comma-separated
    * field is the user id, the remaining fields are parsed as doubles.
    *
    * FIX: removed an unused SQLContext, replaced the manual ArrayBuffer index
    * loop with `drop(1).map(_.toDouble)`, and dropped the explicit `return`.
    *
    * @param sc   active Spark context
    * @param path input path understood by SparkContext.textFile
    * @return RDD of (userId, feature array)
    */
  def loadSampleData(sc: SparkContext, path: String): RDD[(String, Array[Double])] = {
    sc.textFile(path)
      .map(_.split(","))
      .map(fields => (fields(0), fields.drop(1).map(_.toDouble)))
  }




  //一个分别处理正例和负例的函数将正例和负例做区分  分配向量化
  //通过正例和负例的区分来进行相关操作，分别向量化
  /** Splits the joined data into positive (converted) and negative
    * (non-converted) samples and vectorizes each side separately.
    *
    * BUG FIX: the original used `filter("conversion_time = null")` and
    * `filter("conversion_time != null")` — in Spark SQL any comparison with
    * NULL evaluates to NULL, so BOTH filters selected zero rows. The correct
    * predicates are `IS NOT NULL` (converted) and `IS NULL` (not converted).
    * BUG FIX: `DataFrame.sort` returns a new DataFrame; the original discarded
    * the sorted result, so `limit` ran on unsorted data.
    *
    * NOTE(review): the original comment says "use the most recent" negatives,
    * so the sort is descending by exposure_time — confirm this matches the
    * intended sampling strategy.
    */
  def prepareAllData(df_result: DataFrame, sc: SparkContext): Unit = {

    // Positive examples: rows whose conversion actually happened.
    val df_positive_result = df_result.filter("conversion_time is not null")

    val positive_count = df_positive_result.count().toInt

    // Negatives vastly outnumber positives; keep the most recent
    // positive_count * 100 rows by exposure time.
    val df_negative_data = df_result.filter("conversion_time is null")
    val df_negative_result = df_negative_data
      .sort(df_negative_data.col("exposure_time").desc)
      .limit(positive_count * 100)

    val df_negative_final = Separate.toVecotr(df_negative_result)
    val df_positive_final = Separate.toVecotr(df_positive_result)
  }



  //ETL代码作为主程序，来控制数据的各个步骤，最终的输出结果为算法模型需要的vector_ label集合
  /** Development helper: runs the ETL cleaning steps and then reloads a
    * previously computed join result from disk.
    *
    * NOTE(review): the load path is a hard-coded local Windows path and the
    * two cleaned frames are discarded — this looks like leftover debug code;
    * production callers should use the pipeline in `main` instead. The path
    * is left unchanged here to preserve behavior.
    */
  def ETLdata(args: Array[String], sc: SparkContext): DataFrame = {
    // Run the cleaning steps (their results are intentionally unused here).
    val df_AdBasedata = admonitorData(sc, args)
    val df_Conversion = conversionJoin(sc, args, df_AdBasedata)

    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    sqlContext.read.format("json").load("H:/MutiTouch/result_compete.json")
  }


  //给出处理admotier的函数

  /** Cleans the raw ad-monitor log into a DataFrame.
    *
    * Each input line is '^'-separated. A row is kept only when it has more
    * than 18 fields AND the last character of its first field (the user id)
    * is in '0'..'3' (the original deterministic sampling rule); the raw
    * timestamp is then decomposed by `separatTime`.
    *
    * FIX: replaced the `var`-built mutable field list and the four-way char
    * comparison with equivalent idiomatic forms; removed explicit `return`.
    */
  def admonitorData(sc: SparkContext, args: Array[String]): DataFrame = {
    val viewFile = sc.textFile(BasicFunction.log_path)

    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // Output schema; every column is kept as a string at this stage.
    val schemaString = "mzid,eid,caid,spid,br,os,java,action,exposure_time,moring,afternoon,night,weekday"
    val fields = schemaString.split(",").toList.map(columnName => StructField(columnName, StringType, true))
    val structType_AdMonitor = StructType.apply(fields)

    val newFile = viewFile
      .map(x => x.split("\\^"))
      // Drop malformed rows; keep only ids whose trailing character is '0'-'3'
      // (same predicate as the original chained equality checks).
      .filter(x => {
        if (x.size > 18) {
          val lastChar = x(0).charAt(x(0).size - 1)
          lastChar >= '0' && lastChar <= '3'
        } else false
      })
      // Decompose the raw timestamp into epoch millis + time-of-day flags.
      .map(x => separatTime(x))

    sqlContext.createDataFrame(newFile, structType_AdMonitor)
  }

  /** Joins the cleaned monitor DataFrame with the conversion file, keeps at
    * most one (earliest) conversion per user, drops rows whose conversion
    * precedes the exposure, and trims each user's history to at most
    * `numberOfAction` most-recent rows.
    *
    * Column indices used after the Row-to-string round trip:
    *   0 = mzid (user id), 8 = exposure_time, 13 = conversion_time.
    *
    * NOTE(review): `count1` forces a full pass over the conversion file and
    * its value is never used — looks like leftover debugging.
    * NOTE(review): `sc.broadcast` on a DataFrame only serializes the handle;
    * it does NOT produce a broadcast join (that would need
    * org.apache.spark.sql.functions.broadcast) — confirm intent.
    * NOTE(review): rows are round-tripped through Row.toString + split(","),
    * which silently corrupts data if any field contains ',', '[' or ']' —
    * presumably fields never do; verify upstream.
    */
  def conversionJoin(sc: SparkContext, args: Array[String], df_AdBasedata: DataFrame): DataFrame = {
    // Schema for the conversion side: user id + conversion timestamp.
    var fields2 = List[StructField]()
    fields2 = fields2 :+ StructField("mzid2", StringType, true)
    fields2 = fields2 :+ StructField("conversion_time", StringType, true)
    val structType_conversion = StructType.apply(fields2)

    // Read the conversion file ("mzid yyyyMMddHHmmss" per line).
    var conversionFile = sc.textFile(BasicFunction.conversion_path)

    val count1 = conversionFile.count()

    var conversionRdd = conversionFile.map(x => x.split(" ")).map(x => {
      // Convert the raw timestamp to epoch millis (kept as a string).
      val sdf = new SimpleDateFormat("yyyyMMddHHmmss")
      val date = sdf.parse(x(1)).getTime.toString()
      Row(x(0), date)
    })

    // Group by user id (the original author also intended this as a first
    // step against data skew).
    .groupBy(x => x(0))


      // Keep only each user's earliest conversion.
      .map(x => (x._1,x._2.toList.sortWith((y,z) => (y.apply(1).toString.toDouble < z(1).toString.toDouble)).take(1)))

    // Flatten back to one Row per user.
    .flatMap(x => x._2)

    // Build the conversion DataFrame.
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    var df_Conversion = sqlContext.createDataFrame(conversionRdd, structType_conversion)


    // Left-outer join so non-converting users are kept (null conversion_time);
    // drop the duplicate join key afterwards.
    val broadcast_Conversion = sc.broadcast(df_Conversion).value
    var df_result = df_AdBasedata.join(broadcast_Conversion, df_AdBasedata.col("mzid") === broadcast_Conversion.col("mzid2"),"left_outer").drop("mzid2")

   // df_result.rdd.map(x => x.toString().replace("[","").replace("]","").split(",")).filter(x => x(13) != "null").foreach(x => print(x(13)))
   //rdd.filter(x => x.apply(13).toString != "null" )

    // Remember the schema so the cleaned RDD can be rebuilt into a DataFrame.
    val tempSchema = df_result.schema
    // Row -> "a,b,c" string -> fields; see NOTE(review) about fragility.

    val df_rdd = df_result.rdd.map(x => x.toString().replace("[","").replace("]","").split(","))
    // Keep rows that either did not convert at all or whose conversion
    // happened strictly after the exposure; an exposure after its conversion
    // violates the model's assumptions and is dropped.
      .filter(x =>
      {
        var result_bool:Boolean = true
        // No conversion recorded: keep.
        if (x(13) == "null")
          result_bool = true

        // Conversion strictly after exposure: keep.
        else if(x(13).toLong > x(8).toLong)
          result_bool = true
        else
          result_bool =  false

        result_bool
      }
      )


    // Salt the user id with "$<0-9>" so one heavy user is spread across up to
    // ten group keys (data-skew mitigation); the salt is stripped later.
    .map(x => {

         var result = x
         val randomnum = (math.random * 100).toInt % 10
         result(0) = result(0) + "$" + randomnum
         result

     })


   .groupBy(x => x(0))

         // Drop single-row groups unless that single row is a conversion.
   .filter(x => { x._2.size> 1 || x._2.take(1).toList.apply(0).apply(13) != "null" })

        // First-stage trim: keep each salted group's 5 most recent rows by
        // exposure time (descending sort, take 5).
    .map(x =>

        (x._1,x._2.toList.sortWith((y,z) => (y.apply(8).toString.toDouble > z(8).toString.toDouble)).take(5)))

        // Strip the skew salt from the key.
    .map(x => (x._1.toString.split("\\$")(0),x._2))

        // Merge the salted partitions per real user, keeping only the
        // numberOfAction most recent rows overall.
    .reduceByKey((x,y) => {
          var temp = x:::y
          // After concatenation, re-sort and trim to the global cap.
          temp = temp.sortWith((y,z) => (y.apply(8).toString.toDouble > z(8).toString.toDouble)).take(numberOfAction)
          temp

        })

         // Drop short histories (10 rows or fewer) unless the user converted.
    .filter(x => x._2.size> 10  ||  x._2.apply(0).apply(13) != "null" )

        // Flatten back to one element per row.
    .flatMap(x => x._2)


         // Strip any remaining salt from the user-id column and rebuild a
         // standard 14-column Row.
    .map(x =>
        {
          val row1 = x(0).split("\\$")(0)
          Row(row1,x(1),x(2),x(3),x(4),x(5),x(6),x(7),x(8),x(9),x(10),x(11),x(12),x(13))
     })

        // Deduplicate once cleaning is done.
    .distinct()

    //val count123 = df_rdd.filter(x => x(13) != "null").count()
    //  .map(x => Row(x(0),x(1),x(2),x(3),x(4),x(5),x(6),x(7),x(8),x(9),x(10),x(11),x(12),x(13)))


    df_result = sqlContext.createDataFrame(df_rdd, tempSchema)

    return df_result
  }

  //转化函数与监测函数相join

  /** Joins the per-impression data with the ad-site appendix metadata
    * (spid -> adweb, adtype) read from a tab-separated file.
    *
    * FIX: the original wrapped the DataFrame in `sc.broadcast`, which only
    * serializes the DataFrame handle and does NOT trigger a broadcast join.
    * The supported hint for broadcasting a small table is
    * `org.apache.spark.sql.functions.broadcast` (already imported at the top
    * of this file). Also replaced `var` with `val` and removed `return`.
    */
  def admonitorJoin(sc: SparkContext, args: Array[String], df_Conversion: DataFrame): DataFrame = {
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // Appendix file: one tab-separated line per site: spid, adweb, adtype.
    val appenddix = sc.textFile(BasicFunction.appendix_path)
    val rdd_admonitor = appenddix.map(x => x.split("\t")).map(x => Row(x(0), x(1), x(2)))

    val fields = List(
      StructField("spid", StringType, true),
      StructField("adweb", StringType, true),
      StructField("adtype", StringType, true))
    val structType_admonitor = StructType.apply(fields)

    val df_Admonitor = sqlContext.createDataFrame(rdd_admonitor, structType_admonitor)

    // Hint Spark to ship the small metadata table to every executor.
    df_Conversion.join(broadcast(df_Admonitor), "spid")
  }


  //对于时间进行分解
  /** Decomposes a raw monitor-log timestamp into epoch millis plus one-hot
    * time-of-day flags and a weekday indicator, and projects the kept columns
    * into a 13-field Row matching the admonitorData schema.
    *
    * `row(17)` holds the raw time; `Transfertime.Transtime` renders it as
    * "year,month,day,hour,minute,second".
    *
    * BUG FIXES vs the original:
    *  - Hours 6, 12 and 19 all fell into "night" because both range checks
    *    were strict (`< 12 && > 6`, `< 19 && > 12`). Buckets are now
    *    morning [6,12), afternoon [12,19), night otherwise.
    *  - The weekday flag used `calendar.isWeekDateSupported`, which reports
    *    whether the calendar SUPPORTS week dates (always true for
    *    GregorianCalendar), so every row was marked weekday=1. It now checks
    *    Calendar.DAY_OF_WEEK for Monday..Friday.
    *
    * NOTE(review): GregorianCalendar months are 0-based; nowtime(1) is passed
    * through unchanged, matching the original — confirm that Transtime
    * already emits a 0-based month.
    */
  def separatTime(row: Array[String]): Row = {
    val loadTime = Transfertime.Transtime(row(17).toString)

    // year, month, day, hour, minute, second
    val nowtime = loadTime.split(",")
    val hour = nowtime(3).toInt

    // One-hot time-of-day buckets (exactly one flag is 1).
    val morning = if (hour >= 6 && hour < 12) 1 else 0
    val afernoon = if (hour >= 12 && hour < 19) 1 else 0
    val night = if (morning == 0 && afernoon == 0) 1 else 0

    // Build a calendar for the parsed timestamp (also yields epoch millis).
    val calendar = new GregorianCalendar(nowtime(0).toInt, nowtime(1).toInt, nowtime(2).toInt, hour, nowtime(4).toInt, nowtime(5).toInt)

    // 1 when the date falls on Monday..Friday, 0 on weekends.
    val dayOfWeek = calendar.get(Calendar.DAY_OF_WEEK)
    val weekday = if (dayOfWeek != Calendar.SATURDAY && dayOfWeek != Calendar.SUNDAY) 1 else 0

    // Kept source columns: mzid, eid, caid, spid, br, os, row(9), row(18);
    // dropped ones (sci/dpi/fla/lan/ip/loc/ref/sek/mind/kwid) are omitted,
    // as in the original field-index notes.
    Row(row(0), row(1), row(2), row(3), row(4), row(5), row(9), row(18),
      calendar.getTimeInMillis.toString(), morning.toString(), afernoon.toString(),
      night.toString(), weekday.toString())
  }

  //给一个基于文本的更新redis的方式
  def channelUpdate(): Unit =
  {
    //书写一个更新redis的channel
    val jedis = RedisClient.pool.getResource
    jedis.select(BasicFunction.dbIndex)

    BasicFunction.channelDic.keys.foreach {
      key => {
        //修改redis keyvalue的值
        val value = BasicFunction.channelDic(key)

        //获得当前的key值
        val indexkey = "adchannel" + key

        //如果key存在
        jedis.set(indexkey,value.toString)
      }
    }

    //设置数据量
    jedis.set("channelcount",BasicFunction.channelDic.size.toString)
    jedis.close()
  }


}
