package com.sugon.zs

import java.util

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}
import org.elasticsearch.spark.sql._

object SparkToEs {

  // Kafka producer configuration.
  // NOTE(review): the original comment called these "Zookeeper connection
  // properties" — they are in fact Kafka bootstrap/serializer settings.
  private val props = new util.HashMap[String, Object]()
  props.put("bootstrap.servers", "node113.sugon.local:6667,node114.sugon.local:6667,node115.sugon.local:6667")
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  // Shared producer. Because this is an `object`, it is re-initialised in each
  // executor JVM, so Spark closures calling `kafkaProducerSend` use an
  // executor-local producer instance rather than serializing this one.
  private val producer = new KafkaProducer[String, String](this.props)

  // Plate-type code (or plate-type name) -> plate colour.
  val mapping01 = Map(
    "01" -> "黄",
    "02" -> "蓝",
    "03" -> "黑",
    "06" -> "黑",
    "07" -> "黄",
    "08" -> "黄",
    "15" -> "黄",
    "16" -> "黄",
    "23" -> "白",
    "51" -> "绿",
    "52" -> "绿",
    "小型汽车" -> "蓝",
    "大型汽车" -> "黄",
    "教练汽车" -> "黄",
    "领馆汽车" -> "黑",
    "使馆汽车" -> "黑"
  )

  // Normalises assorted colour spellings (single char, with "色" suffix,
  // English upper-case) to a canonical one-character colour.
  val mapping02 = Map(
    "蓝" -> "蓝",
    "黄" -> "黄",
    "黑" -> "黑",
    "白" -> "白",
    "绿" -> "绿",
    "蓝色" -> "蓝",
    "黄色" -> "黄",
    "黑色" -> "黑",
    "白色" -> "白",
    "绿色" -> "绿",
    "黄绿色" -> "绿",
    "渐变绿" -> "绿",
    "BLUE" -> "蓝",
    "OLIVINE" -> "绿",
    "YELLOW" -> "黄",
    "GREEN" -> "绿",
    "WHITE" -> "白",
    "BLACK" -> "黑"
  )

  /**
    * Entry point: unions four violation source tables, normalises plate
    * colour / date / id-card columns, then writes the result to
    * Elasticsearch and pushes each row as JSON to Kafka topic "test".
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("WriteToES")
      // ES node address
      .config("spark.es.nodes", "es server")
      .config("spark.es.port", "9200")
      // auto-create the index if it does not exist
      .config("es.index.auto.create", "true")
      .config("spark.es.nodes.wan.only", "true")
      // use `id` as the document id so writes are upserts
      .config("es.mapping.id", "id")
      // exclude the `name` field when pushing documents to ES
      .config("es.mapping.exclude", "name")

      //      .master("local[*]")
      .enableHiveSupport()
      .getOrCreate()

    println("移动警务-违章...")

    spark.udf.register("hpzl_to_hpys", hpzl_to_hpys(_: String))
    spark.udf.register("hpys_normal", hpys_normal(_: String))

    // Union of four sources, deduplicated on the synthetic prefixed id.
    val data: Dataset[Row] = spark.sql(
      """
            SELECT concat('o-', pzhm) as id, wzsj, wzdd_cn, wzsy_cn, hphm, gps_lat, gps_lon, '强制措施' as type,
            hpzl_bm as hpzl, dsrxm, zjlx_cn, zjhm, dwdz, zjcx, null as fkje, null as wfjfs, null as jkbj, null as jkrq
            from ysk.ysk_ga_ydjw_tn_jj_tb_obliges
            union
            select concat('f-', finenote), finedate, wzdd_cn, wzsy_cn, licensenumber, gps_lat, gps_lon, '简易处罚' as type,
            licensetype, fineename, zjlx_cn, idnumber, fineeaddress, null, finepenalt, finepoint, sfjk, jkrq
            from ysk.ysk_ga_ydjw_tn_jj_tb_fines
            union
            select concat('p-', peccid), wfsj, wfdd_ms, wfxw_cn, hphm, gps_lat, gps_lon, '违章停车' as type,
            hpzl, null, null, null, null, null, null, null, null, null
            from ysk.ysk_ga_ydjw_tn_jj_tb_pecc
            union
            (
            select concat('s-', xh), a.wfsj, a.wfdz, a.wfxw_mc, a.hphm, null, null, '非现场执法' as type,
            a.hpzl, b.dsr, null, b.jszh, null, null, a.fkje, null, a.jkbj, a.jkrq
            from ysk.ysk_v_vio_surveil_gxsj a
            left join ysk.ysk_v_vio_violation_gxsj b
            on a.wfbh = b.wfbh
            where b.xxly = '2'
            )
            """
    ).dropDuplicates("id")

    data.createOrReplaceTempView("union_info")

    // Derived "<col>_format" columns: normalised colour, validated id-card
    // number, upper-cased plate, plate-type name, ISO-ish timestamps, paid flag.
    val data2: DataFrame = spark.sql(
      """
            select t.*,
            coalesce(hpzl_to_hpys(hpzl), if(length(hphm)=8, '绿', '蓝')) as cpys,
            if(int(substr(zjhm, 7, 4)) between 1900 and year(current_date)
            and zjhm rlike '^[0-9]{10}(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])([0-9]{3}[0-9X]|\\\*{4})$', zjhm, null) as zjhm_format,
            upper(hphm) as hphm_format,
            a.dmsm1 as hpzl_format,
            regexp_replace(from_unixtime(unix_timestamp(wzsj, 'yyyy-MM-dd HH:mm:ss'), 'yyyy-MM-dd HH:mm:ss'), ' ', 'T')
            as wzsj_format,
            regexp_replace(from_unixtime(unix_timestamp(jkrq, 'yyyy-MM-dd HH:mm:ss'), 'yyyy-MM-dd HH:mm:ss'), ' ', 'T')
            as jkrq_format,
            if(jkbj = '0', '未缴', '已缴') as jkbj_format
            from union_info t
            left join
            (
                select * from ysk.ysk_ga_jtsg_sharedata_v_frm_code
                where xtlb = '00' and dmlb = '1007'
            ) a
            on t.hpzl = a.dmz
            where t.hphm rlike '^[\u4e00-\u9fa5][A-Z][A-Z0-9]{4,5}[A-Z0-9\u4e00-\u9fa5]$'
            and unix_timestamp(wzsj, 'yyyy-MM-dd HH:mm:ss') > unix_timestamp(current_date, 'yyyy-MM-dd') - 24 * 3600
            and unix_timestamp(wzsj, 'yyyy-MM-dd HH:mm:ss') < unix_timestamp()
            """
    )

    // For every "<col>_format" column, drop the raw "<col>" and rename
    // "<col>_format" back to "<col>" (e.g. drop `id`, rename `id_format` -> `id`).
    // foldLeft replaces the original mutable `var` + foreach loop.
    val data3: DataFrame = data2.columns
      .filter(_.contains("_format"))
      .foldLeft(data2) { (df, col) =>
        val base = col.substring(0, col.length - "_format".length)
        df.drop(base).withColumnRenamed(col, base)
      }

    // Write to ES /index/type
    data3.saveToEs("/course/rdd")

    // Push each row as JSON to Kafka topic "test".
    // foreachPartition (instead of per-row foreach) lets us flush the
    // executor-local producer once per partition, so buffered records are not
    // lost when the executor JVM shuts down.
    data3.toJSON.rdd.foreachPartition { iter =>
      iter.foreach(kafkaProducerSend(_, "test"))
      producer.flush()
    }

    data3.write.format("org.elasticsearch.spark.sql")
      .option("spark.es.nodes","10.0.13.236")
      .option("spark.es.port","9200")
      .option("es.index.auto.create","true")
      .option("spark.es.nodes.wan.only", "true")
      .option("es.mapping.id", "id")
      .save("jgj_parking_new/nested_type")

    /**
      * Available write formats:
      * hive (Hive default format, plain-text data files, uncompressed)
      * parquet (Spark's default format)
      * orc
      * json
      * csv
      * text (saveAsTable only works for a single-column DataFrame)
      * jdbc
      * libsvm
      */
    //data3.write.mode(SaveMode.Append).format("hive").saveAsTable("ysk.cs")

    // Release the driver-side producer and the Spark session cleanly.
    producer.close()
    spark.stop()
  }

  /**
    * Sends `args` as the value of a key-less record to `topicName`.
    * Null payloads are skipped. Send is asynchronous; callers that need
    * delivery guarantees must flush the producer afterwards.
    */
  def kafkaProducerSend(args: String, topicName: String): Unit = {
    if (args != null) {
      producer.send(new ProducerRecord[String, String](topicName, null, args))
    }
  }

  /** Normalises a plate-colour spelling via [[mapping02]]; null when unknown. */
  def hpys_normal(hpys: String): String =
    mapping02.get(hpys).orNull

  /** Maps a plate-type code/name to its colour via [[mapping01]]; null when unknown. */
  def hpzl_to_hpys(hpzl: String): String =
    mapping01.get(hpzl).orNull

  // Build:  mvn clean package
  // Submit: spark-submit --master yarn --driver-memory 8G --executor-memory 6g --executor-core 4 --num-executors 11 SparkDemo-jar-with-dependencies.jar

}
