package org.jxkj.util

import java.util
import org.apache.hadoop.hbase.client.Put
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.jxkj.app.MysqlDml
import org.jxkj.data.IDaoProvider
import org.jxkj.util.DataInsert.properties

import scala.collection.mutable

/**
 * Not in use (kept for reference). One-off scripts that load MySQL tables via
 * JDBC into Spark, run a SQL extraction, and bulk-write the result to HBase.
 */
@Deprecated
object ScriptTool {

  /**
   * Loads a MySQL table over JDBC and registers it as a temp view of the same name.
   *
   * @param ss    active SparkSession
   * @param table MySQL table name; also used as the temp-view name
   */
  private def registerMysqlView(ss: SparkSession, table: String): Unit = {
    val frame = ss.read
      .format("jdbc")
      .option("url", MysqlDml.url2)
      .option("dbtable", table)
      .option("user", MysqlDml.user2)
      .option("password", MysqlDml.password2)
      .option("driver", MysqlDml.driver2)
      .load()
    frame.createOrReplaceTempView(table)
  }

  /**
   * Extracts day-level calculated values from hms_stat_caldata joined with
   * hms_cm_point at the given start time and writes them to HBase.
   *
   * @param ss     active SparkSession
   * @param sc     SparkContext (passed through to insertDF2H)
   * @param hTable DAO provider (currently unused here)
   * @param time   [startTime, endTime]; only startTime is used by this query
   * @param step   step size (currently unused here)
   */
  @Deprecated
  def calData(ss: SparkSession, sc: SparkContext, hTable: IDaoProvider, time: Array[String], step: Int): Unit = {
    val startTime = time(0)
    val dimension = "day"

    registerMysqlView(ss, "hms_stat_caldata")
    registerMysqlView(ss, "hms_cm_point")

    // FIX: the original query had three defects:
    //  - missing `and` between the join condition and stat_type (SQL syntax error)
    //  - unqualified `point_code` in the SELECT is ambiguous (both tables have it)
    //  - unix_timestamp yields bigint, but insertDF2H reads stat_date via
    //    getAs[Int]; cast to int for consistency with calDataHSYW.
    val df = ss.sql(
      s"select b.point_code,cast(unix_timestamp(stat_date,'yyyy/MM/dd HH:mm:ss') as int) stat_date,calvalue,point_hid" +
        s" from hms_stat_caldata a,hms_cm_point b" +
        s" where a.point_code=b.point_code and stat_type='$dimension' and stat_date='$startTime'")
    df.show()
    insertDF2H(ss, sc, time, df)
  }

  /**
   * Extracts converted oil-trap levels ("F 换算油位" points) between the given
   * start and end times and writes them to HBase.
   *
   * @param ss     active SparkSession
   * @param sc     SparkContext (passed through to insertDF2H)
   * @param hTable DAO provider (currently unused here)
   * @param time   [startTime, endTime] bounds for stat_time
   * @param step   step size (currently unused here)
   */
  @Deprecated
  def calDataHSYW(ss: SparkSession, sc: SparkContext, hTable: IDaoProvider, time: Array[String], step: Int): Unit = {
    val startTime = time(0)
    val endTime = time(1)

    registerMysqlView(ss, "hms_oil_level_convert")
    registerMysqlView(ss, "hms_cm_point")

    val df = ss.sql(
      s"select b.point_code,cast(unix_timestamp(stat_time,'yyyy/MM/dd HH:mm:ss') as int) stat_date,cast(oil_trap_level as string) calvalue,point_hid" +
        s" from hms_oil_level_convert a join hms_cm_point b" +
        s" on substr(b.point_name,3,6)='F 换算油位' and a.asset_pid=b.assetid " +
        s" where stat_time between  '$startTime'  and  '$endTime'" +
        s" and oil_trap_level is not null")
    df.printSchema()
    df.show()
    insertDF2H(ss, sc, time, df)
  }

  /**
   * Writes a DataFrame of (point_code, stat_date, calvalue, point_hid) rows to
   * HBase, one batch of Puts per point.
   *
   * The whole result set is collected to the driver once, then grouped locally —
   * the previous implementation ran a separate filter+collect Spark job per
   * point, i.e. O(points × rows) work on the cluster.
   *
   * @param ss   active SparkSession (unused, kept for interface compatibility)
   * @param sc   SparkContext (unused, kept for interface compatibility)
   * @param time [startTime, endTime]; startTime's year selects the HBase table
   * @param df   rows with columns point_code:String, stat_date:Int,
   *             calvalue:String, point_hid:Int
   */
  def insertDF2H(ss: SparkSession, sc: SparkContext, time: Array[String], df: DataFrame): Unit = {
    val startTime = time(0)
    DataInsert.initialTable()
    val mutator = DataInsert.getMutator(startTime)

    // Single collect instead of one Spark job per point.
    // assumes the result set is small enough for the driver — TODO confirm
    val rows = df
      .select("point_code", "point_hid", "stat_date", "calvalue")
      .rdd
      .map(r => (r.getAs[String]("point_code"), r.getAs[Int]("point_hid"),
        r.getAs[Int]("stat_date"), r.getAs[String]("calvalue")))
      .collect()

    // One batch per point_code; point_hid is taken from the first row, assuming
    // point_code -> point_hid is 1:1 (the original collapsed duplicates the
    // same way via toMap).
    for ((pointCode, recs) <- rows.groupBy(_._1)) {
      println(pointCode)
      val pointHid = recs.head._2
      val timeValueMap: mutable.Map[Int, String] =
        mutable.Map(recs.map(r => (r._3, r._4)): _*) // : _* expands the seq into varargs
      val puts: util.ArrayList[Put] = DataInsert.addList(timeValueMap, pointCode, pointHid)
      if (puts != null) mutator.mutate(puts)
    }
    // NOTE(review): the mutator is never flushed/closed here — if
    // DataInsert.getMutator returns an HBase BufferedMutator, buffered
    // mutations may be lost; confirm flushing happens elsewhere.
  }

}
