package com.ctbri.manage.compute.scala.calculate

import com.ctbri.manage.compute.constant.DMConstant
import com.ctbri.manage.compute.scala.operator.HiveLoader
import com.ctbri.manage.compute.util.SqlUtil
import org.apache.poi.util.IOUtils
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
import org.apache.spark.sql.functions._
import org.apache.hadoop.fs.{FileSystem, Path}


/**
 * Loads province base-station identity exports (CSV/XLSX) from HDFS into Hive
 * and produces cross-table diff reports.
 *
 * @author wangxuem
 * @since 2023/3/6 18:07
 */
object IdentityChecker {
  protected final val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Loads a single file into a Hive table partitioned by province id.
   *
   * @param sparkSession active Spark session
   * @param file         HDFS path of the source file (.csv or .xlsx)
   * @param table        target Hive table
   * @param provId       numeric province id used as the `prov_id` partition value
   * @param fields       comma-separated list of columns to load
   */
  def uploadToHive(sparkSession: SparkSession, file: String, table: String, provId: String, fields: String): Unit = {
    val df = createDf(sparkSession, file)
    // Cache BEFORE the first action so count() materializes the cache and the
    // upload below reuses it instead of re-reading the file.
    df.cache()
    val count = df.count()
    logger.info("获取数据条数：" + count)
    // NOTE(review): provId.toInt throws NumberFormatException on non-numeric
    // input — assumed validated upstream; confirm with callers.
    val partitions = Map("prov_id" -> provId.toInt)
    HiveLoader.upload(sparkSession, table, partitions, isOverwrite = true, df, fields, 2)
  }

  /**
   * Bulk-loads the 4-province CSV exports found in a directory.
   *
   * File name layout is `<2-char province><style>.csv`; the 2-char prefix
   * "内蒙" is normalized to "内蒙古".
   *
   * @param sparkSession active Spark session
   * @param filePath     HDFS directory containing the CSV files
   * @param table        target Hive table (appended, not overwritten)
   */
  def uploadToHivePlus1(sparkSession: SparkSession, filePath: String, table: String): Unit = {
    val hdfs = FileSystem.get(sparkSession.sparkContext.hadoopConfiguration)
    val path = new Path(filePath)
    if (hdfs.isDirectory(path)) {
      val files = hdfs.listFiles(path, false) // non-recursive listing
      while (files.hasNext) {
        val fileName = files.next().getPath.getName
        if (fileName.endsWith(".csv")) {
          val rawProvince = fileName.substring(0, 2)
          val province = if ("内蒙".equals(rawProvince)) "内蒙古" else rawProvince
          val style = fileName.substring(2, fileName.indexOf(".csv"))
          val df = createDf(sparkSession, filePath + "/" + fileName)
            .withColumn("province", lit(province))
            .withColumn("style", lit(style))
          // Cache before the first action so the upload reuses the parsed data.
          df.cache()
          val count = df.count()
          // "站址" (site) files carry the site code directly; all other styles
          // reference a related site code.
          val codeName = if ("站址".equals(style)) "SIT_CODE" else "RELATED_SIT_CODE"
          val fields = codeName + ",style,province"
          logger.warn("解析文件：" + fileName + "，获取数据条数：" + count)
          HiveLoader.upload(sparkSession, table, null, isOverwrite = false, df, fields, 2)
        }
      }
    }
  }

  /**
   * Loads station/base-station XLSX exports into Hive.
   *
   * A single file is loaded with a fixed field list; a directory is scanned
   * for `.xlsx` files whose 2-char name prefix identifies the province, and
   * the code column is chosen per province convention.
   *
   * @param sparkSession active Spark session
   * @param filePath     HDFS path — either one file or a directory of .xlsx files
   * @param table        target Hive table (appended, not overwritten)
   */
  def uploadToHivePlus(sparkSession: SparkSession, filePath: String, table: String): Unit = {
    val hdfs = FileSystem.get(sparkSession.sparkContext.hadoopConfiguration)
    val path = new Path(filePath)
    if (hdfs.isFile(path)) {
      val df = createDf(sparkSession, filePath)
      df.cache() // cache before count so the upload reuses the data
      val count = df.count()
      logger.warn("获取数据条数：" + count)
      val fields = "`主数据站址编码`,`所属站址名称`,`省/自治区/直辖市`"
      HiveLoader.upload(sparkSession, table, null, isOverwrite = false, df, fields, 2)
    } else if (hdfs.isDirectory(path)) {
      // Provinces whose exports only carry the long code column.
      val longCodeOnly = List("上海")
      // Provinces whose exports carry both code columns; prefer the non-null /
      // longer of the two.
      val bothCodes = List("江苏", "海南", "山东", "湖南", "天津", "云南")
      val files = hdfs.listFiles(path, false)
      while (files.hasNext) {
        val fileName = files.next().getPath.getName
        if (fileName.endsWith(".xlsx")) {
          val rawProvince = fileName.substring(0, 2)
          val province = if ("内蒙".equals(rawProvince)) "内蒙古" else rawProvince
          var df = createDf(sparkSession, filePath + "/" + fileName)
            .withColumn("province", lit(province))
          val fields =
            if (longCodeOnly.contains(province)) {
              "`长编码`,`基站名称`,`province`"
            } else if (bothCodes.contains(province)) {
              // Coalesce 基站编码/长编码 into one `code` column, preferring the
              // non-null and, when both present, the longer value.
              df = df.withColumn("code",
                when(df("基站编码").isNull, df("长编码"))
                  .when(df("长编码").isNull, df("基站编码"))
                  .when(length(df("基站编码")) >= length(df("长编码")), df("基站编码"))
                  .otherwise(df("长编码")))
              "`code`,`基站名称`,`province`"
            } else {
              "`基站编码`,`基站名称`,`province`"
            }
          df.cache() // cache before count so the upload reuses the parsed data
          val count = df.count()
          logger.warn("解析文件：" + fileName + "，获取数据条数：" + count)
          HiveLoader.upload(sparkSession, table, null, isOverwrite = false, df, fields, 2)
        }
      }
    }
  }

  /**
   * Runs the static-province report SQL and writes the result as a single
   * header-bearing CSV file under `path`.
   *
   * @param spark active Spark session
   * @param path  HDFS output directory for the CSV
   */
  def staticProvince(spark: SparkSession, path: String): Unit = {
    val sql = SqlUtil.getStaticSql
    logger.warn("sql is: " + sql)
    val df = spark.sql(sql)
    // Cache before the first action so the write below reuses the result
    // instead of re-running the query.
    df.cache()
    logger.warn("生成数据条数:" + df.count())
    df.coalesce(1).write.option("header", "true").csv(path)
  }

  /**
   * Exports the rows that differ between the base and data tables (provinces
   * 832 and 834) as a single header-bearing CSV file under `path`.
   *
   * @param spark active Spark session
   * @param path  HDFS output directory for the CSV
   */
  def exportDataDiff(spark: SparkSession, path: String): Unit = {
    val df1 = spark.sql(
      SqlUtil.getNotEqualSql(DMConstant.TABLE_MDM_MOBILE_BASE, DMConstant.TABLE_MDM_MOBILE_DATA, 832))
    // NOTE(review): the 832 station diff was disabled in the original and is
    // intentionally left out here.
    val df3 = spark.sql(
      SqlUtil.getNotEqualSql(DMConstant.TABLE_MDM_MOBILE_BASE, DMConstant.TABLE_MDM_MOBILE_DATA, 834))
    val df4 = spark.sql(
      SqlUtil.getNotEqualSql(DMConstant.TABLE_MDM_STATION_BASE, DMConstant.TABLE_MDM_STATION_DATA, 834))
    val df = df1.union(df3).union(df4)
    // Cache before the first action so the write below reuses the unioned result.
    df.cache()
    logger.info("获取数据条数：" + df.count())
    df.coalesce(1).write.option("header", "true").csv(path)
  }

  /**
   * Reads a CSV or XLSX file into a DataFrame with a header row.
   *
   * @param spark active Spark session
   * @param file  HDFS path ending in .csv or .xlsx
   * @return the parsed DataFrame
   * @throws IllegalArgumentException for any other extension (the original
   *                                  returned null, deferring the failure to an
   *                                  NPE at the call site)
   */
  def createDf(spark: SparkSession, file: String): DataFrame = {
    if (file.endsWith(".csv")) {
      spark
        .read
        .option("header", "true")
        .option("multiLine", "true")
        .option("encoding", "utf-8")
        .csv(file)
    } else if (file.endsWith(".xlsx")) {
      // Raise POI's byte-array cap so large workbooks can be parsed.
      IOUtils.setByteArrayMaxOverride(200000000)
      spark.read
        .format("com.crealytics.spark.excel")
        .option("header", "true")
        .load(file)
    } else {
      throw new IllegalArgumentException("Unsupported file type: " + file)
    }
  }

  /**
   * Reads a CSV or XLSX file, registers it as a temporary view and returns its
   * row count; returns 0 for any other extension (original behavior).
   *
   * @param spark     active Spark session
   * @param file      HDFS path ending in .csv or .xlsx
   * @param tableName name of the temporary view to create
   * @return number of rows in the file, or 0 if the extension is unsupported
   */
  def createTempTable(spark: SparkSession, file: String, tableName: String): Long = {
    if (file.endsWith(".csv") || file.endsWith(".xlsx")) {
      // Delegate to createDf instead of duplicating the reader configuration.
      val df = createDf(spark, file)
      df.createOrReplaceTempView(tableName)
      logger.info("TableName is " + tableName)
      df.show()
      // Cache before count so later users of the temp view reuse the data.
      df.cache()
      df.count()
    } else {
      0L
    }
  }

}
