package com.dmall.scf.action

import com.dmall.scf.SparkAction
import com.dmall.scf.dto.{ScoreFieldValue, ScoreModelDimension, SupplierSale}
import com.dmall.scf.enums.DayRangeEnum
import com.dmall.scf.utils.express.ExpressUtils
import com.dmall.scf.utils.{CronExpressUtil, DateUtils, MySQLUtils, SparkUtils}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import org.joda.time.DateTime

import scala.collection.JavaConverters._

/**
 * @ClassName SaleShareByDimensionAction
 * @Description 销售占比评分计算 - 导入scfc_score_field_value表
 *              (Sale-share score calculation; results are imported into the
 *              scfc_score_field_value table.)
 * @Author wangxuexing
 * @Date 2020/1/4 22:31
 * @Version 1.0
 */
object SaleShareByDimensionAction extends SparkAction[ScoreFieldValue]{
  val CLASS_NAME = this.getClass.getSimpleName().filter(!_.equals('$'))

  /**
   * Computes sale-share score values for every matching score-model dimension
   * and persists them (via [[forEachDimension]] / [[getSaleShare]]).
   *
   * @param spark active SparkSession
   * @param args  args(1) == "true" means a cron-triggered run: only dimensions whose
   *              cron expression matches today at 09:00:00 are processed; any other
   *              value means a manual run and every dimension is processed
   * @return an empty DataFrame — the results are written to MySQL as a side effect
   * @throws IllegalArgumentException when fewer than two program arguments are given
   */
  override def action(spark: SparkSession, args: Array[String]): DataFrame = {
    if (args.length < 2) {
      // Message (kept verbatim): "please specify automatic or manual trigger: true|false"
      throw new IllegalArgumentException("请输入是否自动触发还是手动：true|false")
    }
    // Needed for the Dataset .map over supplier-id rows below.
    import spark.implicits._

    // Enabled score models of company 1, joined with their dimensions that define a score rule.
    val sql = s"""SELECT
                sm.company_id,
                sm.`name` AS model_name,
                sm.id AS score_model_id,
                sm.frequency,
                sm.frequency_cron,
                smd.id dimension_id,
                smd.score_rule,
                smd.statis_time_type,
                smd.statis_days,
                smd.statis_begin_day,
                smd.statis_end_day
              FROM
                scfc_score_model sm
              	JOIN scfc_score_model_dimension smd ON smd.score_model_id = sm.id
              WHERE
                sm.`status` = 1
                AND length(trim(smd.score_rule)) != 0
                AND sm.company_id = 1
                AND sm.yn = 1
                AND smd.yn = 1"""
    val allDimension = MySQLUtils.getDFFromMysql(spark, sql)
    val beanList = SparkUtils.dataFrame2Bean[ScoreModelDimension](allDimension, classOf[ScoreModelDimension])

    // All active supplier ids of company 1; used later to backfill zero scores
    // for suppliers without any sales in the statistic window.
    val supSql = s"""
                     SELECT
                        id
                      FROM
                        scfc_supplier_info
                      WHERE
                        company_id = 1
                      AND yn = 1
                       """
    val allSupplierIds = MySQLUtils.getDFFromMysql(spark, supSql)
      .map(_.getLong(0)).collectAsList().asScala.toList

    if (args(1) == "true") {
      // Cron-triggered run: only process dimensions whose schedule fires today at 09:00:00.
      beanList.foreach { rule =>
        val hhmmss = "09:00:00"
        val triggerTime = s"${DateUtils.dateTimeToStr(DateTime.now, DateUtils.YYYY_MM_DD)} ${hhmmss}"
        val finalCron = CronExpressUtil.changeCronExpress(rule.frequencyCron, hhmmss)
        if (CronExpressUtil.isMatchWithCron(finalCron, triggerTime, DateUtils.DATE_TIME_FORMAT)) {
          forEachDimension(rule, allSupplierIds, spark)
        } else {
          // "No matching model fetch cycle" — log and skip this dimension.
          println(s"未匹配到模型获取周期，模型ID:${rule.scoreModelId}，模型名称：${rule.modelName}，取值cron表达式：${rule.frequencyCron}")
        }
      }
    } else {
      // Manual run: process every dimension unconditionally.
      beanList.foreach(rule => forEachDimension(rule, allSupplierIds, spark))
    }

    spark.emptyDataFrame
  }

  /**
   * Processes one score-model dimension: when its score rule references field 42
   * (the sale-share variable), queries supplier sales from Hive for the
   * dimension's statistic window and computes/persists the sale-share values.
   *
   * @param rule           score-model dimension to evaluate
   * @param allSupplierIds all active supplier ids (zero-filled when absent from sales)
   * @param spark          active SparkSession
   */
  def forEachDimension(rule: ScoreModelDimension,
                       allSupplierIds: List[Long],
                       spark: SparkSession) = {
    import spark.implicits._

    val staValMap = DayRangeEnum.getStaValMap
    // Field id 42 identifies the "sale share" variable inside the score-rule expression.
    if (ExpressUtils.getOutVarIds(rule.scoreRule).contains(42L)) {
      val whereCondition = rule.statisTimeType match {
        // Type 1: fixed date window [statis_begin_day, statis_end_day].
        case Some(meta) if meta == 1 =>
          s"""AND a.dt >= '${DateUtils.format(DateUtils.parseDay(rule.statisBeginDay), DateUtils.YYYYMMDD)}'
                        AND a.dt <= '${DateUtils.format(DateUtils.parseDay(rule.statisEndDay), DateUtils.YYYYMMDD)}'"""
        // Type 2: rolling window of the last N days.
        // NOTE(review): staValMap.get(...) is interpolated straight into the SQL — if
        // getStaValMap returned a Scala Map this would render "Some(x)"; presumably it
        // is a java.util.Map — confirm. Also rule.statisDays.get throws when
        // statis_days is NULL for a type-2 dimension — verify data guarantees this.
        case Some(meta) if meta == 2 =>
          s"""AND a.dt >= date_format(date_sub(current_date, ${staValMap.get(rule.statisDays.get)}), 'yyyyMMdd')"""
        // No (or unrecognized) statistic time type: apply no date filter.
        // The original match only handled Some(1)/Some(2)/None and threw a
        // MatchError for any other Some(x); this default makes it total.
        case _ => ""
      }

      val sql =
        s"""SELECT cast(c.company_id as int) supplier_id,
                                  b.ekgrs goods_type,
                                  '' AS syn_date,
                                  sum(cast(a.saleamt as decimal(38,18))) field_value
                           FROM wumart2dmall.wm_dw_site_merch_sale_day a
                           JOIN wumart2dmall.wm_m_article b ON a.matnr = b.matnr
                           JOIN wumart2dmall.wm_ods_cx_supplier_card_info c ON substr(a.lifnr,5) = c.card_code
                           AND c.audit_status = '2'
                           WHERE length(a.lifnr)=10
                             ${whereCondition}
                           GROUP BY c.company_id,
                                    b.ekgrs"""
      println(sql)
      val saleData = spark.sql(sql)
      val supSaleList = SparkUtils.dataFrame2Bean[SupplierSale](saleData, classOf[SupplierSale])
      // Compute the sale-share percentages and write them to MySQL.
      getSaleShare(supSaleList, rule.dimensionId, allSupplierIds, spark)
    }
  }

  /**
   * Computes each supplier's maximum sale share — the largest percentage, over all
   * goods types, that the supplier contributes to the company-wide sales of that
   * type — and writes one scfc_score_field_value row (field 42) per supplier.
   * Suppliers absent from the sales data are written with a 0.00 share.
   *
   * @param supSaleList    per supplier / goods-type sale amounts
   * @param dimensionId    score-model dimension the values belong to
   * @param allSupplierIds every active supplier id (zero-filled when no sales)
   * @param spark          active SparkSession
   */
  def getSaleShare(supSaleList: List[SupplierSale],
                   dimensionId: Long,
                   allSupplierIds: List[Long],
                   spark: SparkSession) = {
    val zero = BigDecimal.valueOf(0.00)

    // Company-wide sales total per goods type.
    val totalByType: Map[String, BigDecimal] =
      supSaleList.groupBy(_.goodsType).map { case (goodsType, sales) =>
        goodsType -> sales.map(_.fieldValue).sum
      }

    // Sales total per supplier, broken down by goods type.
    val totalBySupplierAndType: Map[Int, Map[String, BigDecimal]] =
      supSaleList.groupBy(_.supplierId).map { case (supplierId, sales) =>
        supplierId -> sales.groupBy(_.goodsType).map { case (goodsType, typeSales) =>
          goodsType -> typeSales.map(_.fieldValue).sum
        }
      }

    // For each supplier, the largest share (in percent, 2 decimals, HALF_UP) it
    // holds of any single goods type; 0.00 when the type total is zero or missing.
    val maxShareBySupplier: Map[Int, BigDecimal] =
      totalBySupplierAndType.map { case (supplierId, byType) =>
        val shares = byType.map { case (goodsType, supplierTotal) =>
          totalByType.get(goodsType) match {
            case Some(companyTotal) if companyTotal.compare(zero) != 0 =>
              (supplierTotal / companyTotal * 100).setScale(2, BigDecimal.RoundingMode.HALF_UP)
            case _ => zero
          }
        }
        supplierId -> shares.max
      }

    val currentDay = DateUtils.getCurrentDate4Format

    // One row per distinct supplier that had sales, carrying its maximum share.
    val scoredRows = distinct(supSaleList).map { sale =>
      ScoreFieldValue(1L, sale.supplierId, dimensionId, currentDay, 42L,
        maxShareBySupplier.getOrElse(sale.supplierId, zero))
    }

    // Suppliers without any sales in the window get an explicit 0.00 share.
    val scoredIds = scoredRows.map(_.supplierId).distinct
    val zeroRows = allSupplierIds.collect {
      case id if !scoredIds.contains(id.toInt) =>
        ScoreFieldValue(1L, id.toInt, dimensionId, currentDay, 42L, zero)
    }

    val allRows = scoredRows ++ zeroRows

    // Schema of the target table scfc_score_field_value.
    val schema = StructType(List(
      StructField("company_id", LongType, nullable = false),
      StructField("supplier_id", IntegerType, nullable = false),
      StructField("dimension_id", LongType, nullable = false),
      StructField("grab_date", StringType, nullable = false),
      StructField("field_id", LongType, nullable = false),
      StructField("field_value", DecimalType(20, 2), nullable = false)))

    // Convert the records to Rows and persist them through the MySQL writer.
    val rows = allRows
      .map(v => Row(v.companyId, v.supplierId, v.dimensionId, v.grabDate, v.fieldId, v.fieldValue))
      .asJava
    val df = spark.createDataFrame(rows, schema)
    MySQLUtils.writeIntoMySql(df, saveTable._1, saveTable._2)
  }

  /**
   * Removes duplicate supplier ids from the list, keeping the first occurrence
   * of each id and preserving the input order.
   *
   * Replaces the previous quadratic implementation (which rescanned the whole
   * accumulator with `seen.map(...).contains` and appended with `:+` on every
   * element) with a single O(n) pass tracking seen ids in a Set.
   *
   * @param list supplier sales, possibly containing repeated supplier ids
   * @return the input with duplicate supplier ids removed (first occurrence wins)
   */
  def distinct(list: List[SupplierSale]): List[SupplierSale] = {
    val (_, keptReversed) = list.foldLeft((Set.empty[Int], List.empty[SupplierSale])) {
      case ((seen, acc), cur) =>
        if (seen(cur.supplierId)) (seen, acc)
        else (seen + cur.supplierId, cur :: acc) // prepend, reverse once at the end
    }
    keptReversed.reverse
  }

  /**
   * Save mode and target table for the computed score values.
   *
   * @return Append mode (existing rows are kept; this job only inserts) together
   *         with the MySQL table name "scfc_score_field_value"
   */
  override def saveTable: (SaveMode, String) = (SaveMode.Append, "scfc_score_field_value")
}
