package org.jxkj.util

import org.apache.hadoop.hbase.client.Put
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jxkj.app.{ColumnType2, MysqlDml}
import org.jxkj.bean.{CmPoint, FormaluPoint, PointData}
import org.jxkj.data.IDaoProvider
import org.jxkj.util.DataInsert.properties
import java.text.SimpleDateFormat
import java.util
import java.util.{ArrayList, Date, LinkedHashMap, List, Map, UUID}

import org.jxkj.client.NodeJsClient
import org.jxkj.dto.PointsReqResDTO

import scala.collection.JavaConversions._
import scala.collection.immutable.ListMap
import scala.collection.mutable

object DftoMap {
  // Smart meter reading (LYL): copy MySQL stat data for one org/stat_type to the point store.
  def insertZNCB(ss: SparkSession, sc: SparkContext, time: Array[String], stat_type: String, org: String): Unit = {
    val startTime = time(0)
    val endTime = time(1)

    // Register a MySQL table as a Spark temp view with the same name.
    def registerMysqlView(table: String): Unit = {
      ss.read
        .format("jdbc")
        .option("url", MysqlDml.url2)
        .option("dbtable", table)
        .option("user", MysqlDml.user2)
        .option("password", MysqlDml.password2)
        .option("driver", MysqlDml.driver2)
        .load()
        .createOrReplaceTempView(table)
    }

    registerMysqlView("hms_stat_caldata")
    registerMysqlView("hms_cm_point")

    // Join stat rows to their points; stat_date becomes unix seconds, calvalue a string.
    val query =
      s"select a.point_code,cast(unix_timestamp(stat_date,'yyyy/MM/dd HH:mm:ss') as int) stat_date,cast(calvalue as string) calvalue,point_hid" +
        s" from hms_stat_caldata a,hms_cm_point b" +
        s" where a.point_code=b.point_code and stat_type='$stat_type' and a.sub_org_id='$org' and stat_date between '$startTime' and '$endTime'"
    val df = ss.sql(query)
    df.show()

    insertDF2H(ss, sc, time, df)
  }

  // Non-formula converted oil level: MySQL data to the point store (retired / no longer scheduled).
  def calDataHSYW(ss: SparkSession, sc: SparkContext, hTable: IDaoProvider, time: Array[String], step: Int): Unit = {
    val startTime = time(0)
    val endTime = time(1)
    val dimension = "day" // kept for parity with sibling loaders; not referenced below

    // Register a MySQL table as a Spark temp view with the same name.
    def registerMysqlView(table: String): Unit = {
      ss.read
        .format("jdbc")
        .option("url", MysqlDml.url2)
        .option("dbtable", table)
        .option("user", MysqlDml.user2)
        .option("password", MysqlDml.password2)
        .option("driver", MysqlDml.driver2)
        .load()
        .createOrReplaceTempView(table)
    }

    registerMysqlView("hms_oil_level_convert")
    registerMysqlView("hms_cm_point")

    // Map oil-trap levels onto the matching "converted oil level" points.
    val query =
      s"select b.point_code,cast(unix_timestamp(stat_time,'yyyy/MM/dd HH:mm:ss') as int) stat_date,cast(oil_trap_level as string) calvalue,point_hid" +
        s" from hms_oil_level_convert a join hms_cm_point b" +
        s" on substr(b.point_name,3,6)='F 换算油位' and a.asset_pid=b.assetid " +
        s" where stat_time between  '$startTime'  and  '$endTime'" +
        s" and oil_trap_level is not null"
    val df = ss.sql(query)
    df.printSchema()
    df.show()

    insertDF2H(ss, sc, time, df)
  }

  /**
   * Converts one point's time/value map into the DTO consumed by the Node.js write API.
   * Entries are emitted in ascending timestamp order.
   *
   * @param pointCode    code of the point the series belongs to
   * @param timeValueMap unix-seconds -> value; may be null
   * @return the populated DTO, or null when the input is null/empty (callers must guard)
   */
  def toPointsReqResDTO(pointCode: String, timeValueMap: mutable.Map[Int, String]): PointsReqResDTO = {
    if (timeValueMap == null || timeValueMap.isEmpty) {
      return null
    }
    // A TreeMap keeps the series sorted by timestamp.
    val sorted = mutable.TreeMap.empty[Int, String]
    sorted ++= timeValueMap

    // Build the [timestamp, value] pair list for this single point.
    val series = new util.ArrayList[util.List[AnyRef]]
    for ((time, value) <- sorted) {
      val pair = new util.ArrayList[AnyRef](2)
      pair.add(time.toString)
      pair.add(value)
      series.add(pair)
    }

    val data = new util.LinkedHashMap[String, util.List[util.List[AnyRef]]]
    data.put(pointCode, series)

    val dto = new PointsReqResDTO
    dto.setData(data)
    dto
  }

  /**
   * Saves per-point calculation results.
   * (Changed 2021-07-13: data is written to the Node.js service, HBase is no longer used.)
   *
   * @param ss   active SparkSession (kept for signature compatibility)
   * @param sc   SparkContext (kept for signature compatibility)
   * @param time start/end time pair (kept for signature compatibility)
   * @param df   rows with columns point_code, point_hid, stat_date (unix seconds), calvalue
   */
  def insertDF2H(ss: SparkSession, sc: SparkContext, time: Array[String], df: DataFrame): Unit = {
    // Distinct points present in the result set: point_code -> point_hid.
    val point_set = df.select("point_code", "point_hid").dropDuplicates()
      .rdd.map(x => (x.getAs[String]("point_code"), x.getAs[Int]("point_hid"))).collect().toMap[String, Int]
    println(point_set)

    // Push each point's series to the Node.js service.
    for (p <- point_set) {
      println(p)
      // stat_date (unix seconds) -> calvalue for this point only.
      val collected = df.filter(s"point_code='${p._1}'")
        .rdd.map(row => (row.getAs[Int]("stat_date"), row.getAs[String]("calvalue"))).collect().toMap[Int, String]
      println(collected)

      // `: _*` expands the sequence into varargs for the mutable Map constructor.
      val timeValueMap: mutable.Map[Int, String] = scala.collection.mutable.Map(collected.toSeq: _*)
      println("begin insert " + 1)

      // toPointsReqResDTO returns null for a null/empty map; guard so a null DTO is
      // never handed to the client (previously risked an NPE inside write()).
      val pointsReqResDTO = toPointsReqResDTO(p._1, timeValueMap)
      if (pointsReqResDTO != null) {
        NodeJsClient.write(pointsReqResDTO)
      }
    }
  }

  //公式计算存hbase 按300秒循环
  /*
  def insertFormula2H(ss: SparkSession, sc: SparkContext, time: Array[String]): Unit = {
    val startTime = time(0)
    val endTime = time(1)
    val year: String = startTime.substring(0, 4)
    //使用class的static sdfs会线程安全报错 java.lang.NumberFormatException: For input string: ""
    val sdfs = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss")
    val step=300

    //线程池
    //val threadPool: ExecutorService = Executors.newFixedThreadPool(10)
    //线程计数器，阻塞主线程
    //val cdl = new CountDownLatch(10)

    DataInsert.initialTable()
    //val properties = PropertiesUtils.initial("hbase.properties")
    val formula = new CmPointServiceImpl
    val tableName = properties.getProperty("table_" + year)
    val mutator= DataInsert.getMutator(startTime)

    val df=ss.sql("select distinct point_code,point_hid from hms_cm_point where substr(point_code,1,4)='HMS_' AND formula is not null and formula<>'' and calc_flag='FORMULA'")

    /*
    for (i <- 0 to 9) {
      threadPool.execute(new Runnable {
       override def run(): Unit = {
          try{
     */
    val point_set=df.rdd.map(x=>(x.getAs[String]("point_code"),x.getAs[Int]("point_hid"))).collect().toMap[String,Int]

    for(p <- point_set){
      println(p)
      var start = startTime
      var starttimestamp = sdfs.parse(startTime).getTime/1000
      var timeValueMap = new mutable.HashMap[Int, String]()

      while(start<=endTime){
        println("start--"+start)
        try{
        val values = formula.formulaPoint(p._1, start, "day").toString
         timeValueMap.put(starttimestamp.toInt,values)
        start = sdfs.format(new Date(starttimestamp*1000 + step * 1000))
        starttimestamp= starttimestamp+step
        }catch{
          case ex: Exception => println("error"+p._1+":"+start)
        }
       }
      println(timeValueMap)
      val puts: util.ArrayList[Put] = DataInsert.addList(timeValueMap, p._1, p._2)
      println("begin insert "+ 1)

      if(puts!=null) {
        mutator.mutate(puts)
        mutator.flush()
      }

    }

       /*
          }catch {
            case ex:Exception =>ex.printStackTrace()
          }
         cdl.countDown()
       }
      })
    }
    threadPool.shutdown()
    cdl.await()
    */
    //会根据某些因素（比如接收的Put数据的总量）启发式地执行Batch Put操作，且会异步的提交Batch Put请求，这样MapReduce作业的执行也不会被打断
    // BufferedMutator通过mutate方法提交数据，flush方法可以强制刷新缓冲区提交数据，在执行close方法之前也会刷新缓冲区。
    mutator.close

  }
 */
  /**
   * Formula calculation to HBase over a whole time range; recursive points are not handled.
   * Computes the 'HSYW' (converted oil level) formula points and writes each point's
   * series through a BufferedMutator.
   *
   * @param ss   SparkSession with the hms_cm_point temp view registered
   * @param sc   SparkContext (unused, kept for signature compatibility)
   * @param time two-element array: time(0) = start, time(1) = end ('yyyy/MM/dd HH:mm:ss')
   */
  def insertFormula2Hbase(ss: SparkSession, sc: SparkContext, time: Array[String]): Unit = {
    val startTime = time(0)
    val endTime = time(1)
    // Seconds between consecutive computed samples.
    val step = 300

    DataInsert.initialTable()
    val formula = new FormaluPoint
    val mutator = DataInsert.getMutator(startTime)

    try {
      // Formula-driven points whose code ends in 'HSYW'.
      val df = ss.sql("select distinct point_code,point_hid from hms_cm_point where substr(point_code,1,4)='HMS_' and substr(point_code,-4)='HSYW' AND formula is not null and formula<>'' and calc_flag='FORMULA'")
      val point_set = df.rdd.map(x => (x.getAs[String]("point_code"), x.getAs[Int]("point_hid"))).collect().toMap[String, Int]

      for (p <- point_set) {
        println(p)
        val timeValueMap = new mutable.HashMap[Integer, String]()
        try {
          // Compute the whole [startTime, endTime] series for this point in one call.
          timeValueMap.putAll(formula.formulaPoint(p._1, startTime, endTime, "day", step))
          val puts: util.ArrayList[Put] = DataInsert.addList2(timeValueMap, p._1, p._2)
          println("begin insert " + 1)
          if (puts != null) {
            mutator.mutate(puts)
            mutator.flush()
          }
        } catch {
          // One failing point must not abort the whole batch; keep the cause in the log
          // (previously the exception itself was discarded).
          case ex: Exception =>
            println("error" + p._1 + ":" + startTime + " - " + ex)
        }
      }
    } finally {
      // BufferedMutator batches Puts asynchronously; close() flushes anything pending.
      // finally guarantees the mutator is released even if the Spark job above throws.
      mutator.close()
    }
  }


  //公式计算存MYSQL 单时点 可递归
  /*
  def insertFormula2M(ss: SparkSession, sc: SparkContext, time: Array[String],stat_type:String): Unit = {
    val startTime = time(0)
    val endTime = time(1)
    val year: String = startTime.substring(0, 4)
    val sdfs = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss")
    val step=3600*24
    import ss.implicits._
    //val properties = PropertiesUtils.initial("hbase.properties")
    val formula = new CmPointServiceImpl
    val df=ss.sql("select distinct point_code,multi_ratio,create_time from hms_cm_point where substr(point_code,1,4)='HMS_' AND formula is not null and formula<>'' and calc_flag='STATISTIC' order by create_time")
//.orderBy("create_time")
    val point_set=df.rdd.map(x=>(x.getAs[String]("point_code"),x.getAs[Double]("multi_ratio"))).collect().toMap[String,Double]
    println(point_set)
    val list: util.ArrayList[PointData] = new util.ArrayList[PointData]()
    for(p <- point_set){
      println(p)
      var start = startTime
      var starttimestamp = sdfs.parse(startTime).getTime/1000
      //var timeValueMap = new mutable.HashMap[Int, String]()

      while(start<=endTime){
        println("start--"+start)
        try{
          val values = formula.formulaPoint(p._1, start, stat_type).toString
          //timeValueMap.put(starttimestamp.toInt,values)
          list.add(new PointData(p._1,start,values))

          start = sdfs.format(new Date(starttimestamp*1000 + step * 1000))
          starttimestamp= starttimestamp+step
        }catch{
          case ex: Exception => println("error"+p._1+":"+start)
        }
      }
    }
    val rdd = sc.parallelize(list,10)
    val edosDF = rdd.map(x => {
      ColumnType2(x.getName, x.getTime, x.getValue)
    }).toDF()
    edosDF.show()
    edosDF.createOrReplaceTempView("t_ods_formula")

    //生成udf函数 可以在withcolumn里使用
    val generateUUID = udf(() => UUID.randomUUID().toString.replace("-",""))
    //将自定义函数注册到SparkSQL里
    ss.udf.register("uuID", generateUUID)

    val resultDf =ss.sql(s"select uuID() data_id,checkpoint point_code,'$stat_type' stat_type,createtime stat_date,'' end_date,round(pvalue,4) calvalue" +
      ",'GZB' project_id,'10000' org_id,'formalu' sub_org_id,'' create_person,now() create_time,'' modify_person,now() modify_time"+
      " from t_ods_formula a where pvalue<>'NaN'")
    //resultDf.show()
    val tablename = "hms_stat_caldata"
    val delString = s"delete from $tablename where sub_org_id='formalu' and stat_date between '$startTime' and '$endTime' and stat_type='$stat_type'"

    MysqlDml.delete2(delString)
    //写mysql
    resultDf.write
      .format("jdbc")
      .mode("append")
      .option("url", MysqlDml.url2)
      .option("dbtable", tablename)
      .option("user", MysqlDml.user2)
      .option("password", MysqlDml.password2)
      .option("driver", MysqlDml.driver2)
      .save()
  }

*/
  // Formula calculation stored to MySQL over a time range; a formula may not reference
  // its own point recursively, but it may use results of earlier-created points: each
  // point's output is appended to `reset` so later points in the loop can read it.
  // Smart meter-reading algorithm.
  /**
   * Computes formula-based statistics for all 'HMS_DLFX_' points over [time(0), time(1)]
   * and rewrites the matching sub_org_id='formalu' rows in hms_stat_caldata.
   *
   * @param ss        SparkSession; the hms_cm_point temp view must already be registered
   * @param sc        SparkContext used to parallelize the computed rows
   * @param time      two-element array: time(0) = start, time(1) = end ('yyyy/MM/dd HH:mm:ss')
   * @param stat_type statistic type written to hms_stat_caldata.stat_type
   * @param step      interval in seconds between computed samples
   */
  def insertFormula2Mysql(ss: SparkSession, sc: SparkContext, time: Array[String], stat_type: String, step: Int): Unit = {
    val startTime = time(0)
    val endTime = time(1)
    val sdfs = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss")

    import ss.implicits._


    // Fetch the formula points to compute (statistic formulas on 'HMS_DLFX_' codes).
    val df = ss.sql(
      s"""
         | select distinct
         |   point_code,
         |   cast(create_time as string) as create_time
         | from
         |   hms_cm_point
         | where
         |   substr(point_code, 1, 9) = 'HMS_DLFX_'
         |   and formula is not null
         |   and formula <> ''
         |   and calc_flag = 'STATISTIC'
         | order by
         |   create_time
         |""".stripMargin)

    // ListMap sorted by create_time preserves creation order: later-created points may
    // depend (via `reset`) on the results of earlier ones computed in the same run.
    val point_set2 = df.map(x => (x.getAs[String]("point_code"), x.getAs[String]("create_time"))).collect().toMap[String, String]
    val point_set = ListMap(point_set2.toSeq.sortBy(_._2): _*)
    println(point_set)


    // Load the source point data that feeds the formulas.
    // NOTE(review): SQL built by string concatenation — inputs here are internal
    // (scheduler-supplied times/type), but parameterization would be safer.
    val sql = "SELECT point_code, stat_date, calvalue " +
      " FROM hms_stat_caldata " +
      " WHERE stat_date between '" + startTime + "' and '" + endTime + "' and stat_type='" + stat_type + "' and sub_org_id in ('zncb','formalu')";
    println(sql)
    val reset = CmPoint.queryToRs(sql)


    // Run the formula calculation for each point, in creation order.
    val formula = new FormaluPoint
    val list: util.ArrayList[PointData] = new util.ArrayList[PointData]()
    for (p <- point_set) {
      println("p+++" + p)
      val timeValueMap: util.Map[Integer, String] = formula.formulaPoint(reset, p._1, startTime, endTime, stat_type, step)
      println("timeValueMap计算完毕.")
      for (x <- timeValueMap) {
        println(x._1 + ":" + x._2)
        val start: String = sdfs.format(new Date(x._1 * 1000.toLong)) // 1000.toLong forces Long math, avoiding Int overflow on ms timestamps
        list.add(new PointData(p._1, start, x._2))
        reset.add(new PointData(p._1, start, x._2)) // feed result back so later points' formulas can reference it
      }
    }
    val rdd = sc.parallelize(list, 1)
    val edosDF = rdd.map(x => {
      ColumnType2(x.getName, x.getTime, x.getValue)
    }).toDF()
    edosDF.createOrReplaceTempView("t_ods_formula")

    // Register a UUID generator as a Spark SQL UDF (used for data_id below).
    val generateUUID = () => UUID.randomUUID().toString.replace("-", "")
    ss.udf.register("uuID", generateUUID)

    // Shape the computed rows into the hms_stat_caldata schema,
    // dropping NaN/Infinity results.
    val resultDf = ss.sql(
      s"""
         | select
         |   uuID() as data_id,
         |   checkpoint as point_code,
         |   '$stat_type' as stat_type,
         |   createtime as stat_date,
         |   '' as end_date,
         |   round(pvalue,4) as calvalue,
         |   'GZB' as project_id,
         |   '10000' as org_id,
         |   'formalu' as sub_org_id,
         |   '' as create_person,
         |   now() as create_time,
         |   '' as modify_person,
         |   now() as modify_time
         | from
         |   t_ods_formula a
         | where
         |   pvalue <> 'NaN'
         |   and pvalue <> '-Infinity'
         |   and pvalue <> 'Infinity'
         |""".stripMargin)


    // Delete the previous 'formalu' rows for this range/type before re-inserting.
    val tablename = "hms_stat_caldata"
    val delString = s"delete from $tablename where sub_org_id='formalu' and stat_date between '$startTime' and '$endTime' and stat_type='$stat_type'"
    MysqlDml.delete2(delString)

    // Append the recomputed results to MySQL.
    resultDf.write
      .format("jdbc")
      .mode("append")
      .option("url", MysqlDml.url2)
      .option("dbtable", tablename)
      .option("user", MysqlDml.user2)
      .option("password", MysqlDml.password2)
      .option("driver", MysqlDml.driver2)
      .save()
  }

}
