package districtInfo

import java.text.SimpleDateFormat
import java.util.Date

import common.MyTableInputFormat
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import scala.collection.mutable

object getDistrictInfo {

  // ---- job bookkeeping state, shared between getHbase and main ----
  var start: Int = 0  // job start time, epoch seconds
  var end = 0         // job end time, epoch seconds
  var run_res = ""    // run result flag ("SUCCESS"/"ERROR") recorded in the log table
  var l: Long = 0     // rows inserted by the latest getHbase call (0 on failure)
  val fm = new SimpleDateFormat("yyyyMMddHHmm")

  // Row layout of the base_table_log audit table in MySQL.
  case class table_log(spark_job: String, run_res: String, table_name: String, insert_num: Long, data_time: String, job_time_cost: String, execution_time: String)

  // MySQL connection properties.
  // SECURITY NOTE(review): credentials are hard-coded in source; move them to a
  // config file or environment variable before this leaves internal use.
  val prop = new java.util.Properties
  prop.setProperty("user", "root")
  prop.setProperty("password", "huake@2021")
  prop.setProperty("driver", "com.mysql.jdbc.Driver")

  /**
   * Reads the configured HBase table for the given rowkey (timestamp) range,
   * reshapes it with Spark SQL and appends the result to the monthly MySQL
   * table `district_base_yyyyMM`.
   *
   * Side effects: sets `run_res` and `l`, writes to MySQL, logs to stderr.
   *
   * @param hbaseConf      HBase scan configuration; INPUT_TABLE must already be set
   * @param spark          active SparkSession
   * @param timestampSTART scan start rowkey, epoch seconds (inclusive)
   * @param timestampSTOP  scan stop rowkey, epoch seconds (exclusive)
   * @return number of rows written to MySQL (0 if the write failed)
   */
  def getHbase(hbaseConf: Configuration, spark: SparkSession, timestampSTART: String, timestampSTOP: String): Long = {
    import scala.util.control.NonFatal

    // Rowkey timestamp filter: [start, stop) — left-closed, right-open.
    hbaseConf.set(TableInputFormat.SCAN, "")
    hbaseConf.set(TableInputFormat.SCAN_ROW_START, timestampSTART)
    hbaseConf.set(TableInputFormat.SCAN_ROW_STOP, timestampSTOP)

    // Custom split parameters consumed by MyTableInputFormat
    // (splits the key space 3740100..3741727 in base 10, joined by '|').
    hbaseConf.set("hbase.table.split.startKey", "3740100")
    hbaseConf.set("hbase.table.split.endKey", "3741727")
    hbaseConf.setInt("hbase.table.split.radix", 10)
    hbaseConf.set("hbase.table.split.concat", "|")

    // Load the HBase table as an RDD of (rowkey, Result).
    val hbaseRDD = spark.sparkContext.newAPIHadoopRDD(
      hbaseConf,
      classOf[MyTableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result]
    )

    // Extract the columns we need from each Result.
    // Note: Bytes.toString/Bytes.toBytes always use UTF-8, unlike
    // String#getBytes which uses the platform default charset.
    val tupRDD = hbaseRDD.map { case (_, result) =>
      // Read one qualifier from the "info" column family as a UTF-8 string.
      def col(qualifier: String): String =
        Bytes.toString(result.getValue(Bytes.toBytes("info"), Bytes.toBytes(qualifier)))

      (Bytes.toString(result.getRow),
        col("ssqx"), col("kgdz"), col("drfdzl"), col("gfbhxhcs"),
        col("ycgj"), col("gdyhs"), col("gfzrl"))
    }

    // RDD -> DataFrame with named columns.
    import spark.implicits._
    val df = tupRDD.toDF("id", "ssqx", "kgdz", "drfdzl", "gfbhxhcs", "ycgj", "gdyhs", "gfzrl")

    // Register and cache the temp view used by the SQL below.
    df.createOrReplaceTempView("pvdistrict")
    spark.table("pvdistrict").cache()

    // BUGFIX: println("msg", date) auto-tupled and printed "(msg,date)".
    System.err.println(s"完成临时表生成 ${new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)}")

    // Per-district user count, PV capacity and daily generation (hourly job).
    // `time` is yyyyMMddHHmm derived from the start timestamp.
    val time = fm.format(new Date(timestampSTART.toLong * 1000L))
    val res1 = spark.sql("select " +
      s"concat_ws('_',ssqx,${time}) as district_time," +
      s"LEFT(ssqx,5) as city_no," +
      s"ssqx as district_no," +
      s"${time} as specific_time," +
      s"kgdz as switch_action," +
      s"drfdzl as pv_generation," +
      s"gfbhxhcs as protection_signal," +
      s"ycgj as abnormal_warning," +
      s"gdyhs as overvoltage_users," +
      s"gfzrl as pv_capacity " +
      s"from pvdistrict ")

    System.err.println(s"完成SQL1 ${new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)}")

    // Save to MySQL. SaveMode.Append creates the table if missing, otherwise
    // appends; other modes (Overwrite/Ignore/ErrorIfExists) are not wanted here.
    // Monthly table suffix yyyyMM comes from the DATA timestamp, not wall-clock,
    // so late reruns still land in the right month.
    val table_split = time.substring(0, 6)
    try {
      res1.write.mode(SaveMode.Append).jdbc("jdbc:mysql://dw-Huake05:3306/gf?characterEncoding=UTF-8", "district_base_" + table_split, prop)
      run_res = "SUCCESS"
      l = res1.count()
    } catch {
      // BUGFIX: previously only ArithmeticException was caught, so any real
      // JDBC failure (SQLException etc.) escaped and aborted the job before
      // the audit-log record in main() was written. Catch all non-fatal
      // errors, mark the run failed and keep going; fatal errors still propagate.
      case NonFatal(e) =>
        run_res = "ERROR"
        e.printStackTrace()
    }
    System.err.println(s"完成插入: ${new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)}")
    l
  }

  /**
   * Entry point. Expects exactly two arguments: start and stop timestamps
   * (epoch seconds) delimiting the HBase scan range. Runs the extraction,
   * then appends a run record to the base_table_log MySQL table.
   */
  def main(args: Array[String]): Unit = {
    // Validate arguments.
    if (args.length != 2) {
      println("未输入时间戳参数 Param:first two")
      sys.exit(-1) // abnormal exit
    }

    // The scan is [start, stop), so add one second to make the stop inclusive.
    // BUGFIX: use toLong — toInt overflows for epoch seconds past 2038.
    val Array(timestampSTART, timestampSTOP) = args
    val endTime = (timestampSTOP.toLong + 1).toString
    println("时间戳范围:[" + timestampSTART + "," + endTime + "]")

    // Record job start (epoch seconds) for the duration field in the log.
    start = (System.currentTimeMillis() / 1000L).toInt
    System.err.println(s"程序启动: ${new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)}")
    run_res = "ERROR" // pessimistic default; getHbase flips it on success

    // Spark session (master/resources come from spark-submit).
    val spark = SparkSession
      .builder()
      .appName("getDistrictInfo")
      .getOrCreate()

    // Production HBase cluster.
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")
    hbaseConf.set("hbase.zookeeper.quorum", "dw-Huake05")
    hbaseConf.set("hbase.master", "dw-Huake05:60010")
    hbaseConf.set(TableInputFormat.INPUT_TABLE, "pvdistrict")

    // Extract from HBase, transform, load into MySQL; returns inserted row count.
    val insert_num = getHbase(hbaseConf, spark, timestampSTART, endTime)

    // ---- append a run record to base_table_log ----
    import spark.implicits._
    end = (System.currentTimeMillis() / 1000L).toInt
    val time = fm.format(new Date(timestampSTART.toLong * 1000L))
    val table_split = time.substring(0, 6)
    val log_time = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date)
    val log = table_log("getDistrictInfo", run_res, "district_base_" + table_split, insert_num, time, (end - start).toString + "s", log_time)
    // Single-element Seq replaces the deprecated mutable.MutableList.
    Seq(log).toDF().write.mode(SaveMode.Append).jdbc("jdbc:mysql://dw-Huake05:3306/gf?characterEncoding=UTF-8", "base_table_log", prop)

    // Shut down the Spark session.
    spark.stop()
  }
}
