package com.dmall.scf.action

import java.util.Date

import com.dmall.scf.dto.{SupplierRunFieldValue, SupplierSale}
import com.dmall.scf.utils.{DateUtils, SparkUtils}
import com.dmall.scf.{Profile, SparkAction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}

import scala.collection.JavaConverters._

/**
 * Sale-share computation for the supplier "run" table.
 *
 * For every supplier the job aggregates daily sales per goods type, computes the
 * supplier's share of the core enterprise's total sales of that goods type, and
 * keeps the maximum share across all of the supplier's goods types. One
 * SupplierRunFieldValue row per supplier is produced (share rounded to 2
 * decimals, HALF_UP) and appended to scfc_supplier_run_field_value.
 *
 * @author wangxuexing
 * @date 2019/12/22
 */
object SaleShareAction extends SparkAction[SupplierRunFieldValue]{
  // getSimpleName of a Scala object ends with '$'; drop it for a clean name.
  val CLASS_NAME = this.getClass.getSimpleName().filter(!_.equals('$'))

  // Company the run rows belong to (fixed for this job).
  private val COMPANY_ID = 1L
  // Field id of the "max sale share" metric in the run table.
  private val SALE_SHARE_FIELD_ID = 42L
  // Strict yyyyMMdd shape; CLI dates must match it before being spliced into SQL.
  private val DAY_PATTERN = "\\d{8}"
  private val ZERO = BigDecimal.valueOf(0.00)

  /**
   * Loads the sales facts, computes each supplier's maximum sale share over its
   * goods types, and returns one row per supplier ready to be persisted.
   *
   * Expected arguments (non-DEV profile):
   *   args(0) class name: SaleShareAction
   *   args(1) isLastDay: true|false (true => statistics for yesterday only)
   *   args(2) startDay: yyyyMMdd
   *   args(3) endDay:   yyyyMMdd
   *
   * @param spark active session
   * @param args  CLI arguments, see above
   * @return rows (company_id, supplier_id, syn_date, field_id, field_value)
   */
  override def action(spark: SparkSession, args: Array[String]): DataFrame = {

    val dataSet: List[SupplierSale] = SparkUtils.env match {
      case Profile.DEV => loadDevSales(spark)
      case _           => loadProdSales(spark, args)
    }

    // Total sales of each goods type across the whole core enterprise.
    val totalByType: Map[String, BigDecimal] =
      dataSet.groupBy(_.goodsType).map { case (t, rows) => t -> rows.map(_.fieldValue).sum }

    // Total sales per supplier, broken down by goods type.
    val totalBySupplierType: Map[Int, Map[String, BigDecimal]] =
      dataSet.groupBy(_.supplierId).map { case (sup, rows) =>
        sup -> rows.groupBy(_.goodsType).map { case (t, rs) => t -> rs.map(_.fieldValue).sum }
      }

    // Maximum share per supplier: max over goods types t of
    // supplierTotal(t) / enterpriseTotal(t), rounded to 2 decimals HALF_UP.
    // A missing or zero enterprise total contributes a share of 0.
    val maxSaleShare: Map[Int, BigDecimal] = totalBySupplierType.map { case (sup, byType) =>
      val shares = byType.map { case (t, supTotal) =>
        totalByType.get(t) match {
          case Some(total) if total.compare(ZERO) != 0 =>
            (supTotal / total).setScale(2, BigDecimal.RoundingMode.HALF_UP)
          case _ => ZERO
        }
      }
      sup -> shares.max
    }

    // One row per supplier (first syn_date seen wins), carrying its max share.
    val resultList = distinct(dataSet).map(x =>
      SupplierRunFieldValue(COMPANY_ID, x.supplierId, x.synDate, SALE_SHARE_FIELD_ID,
        maxSaleShare.getOrElse(x.supplierId, ZERO)))

    // Target schema of scfc_supplier_run_field_value.
    val schema = StructType(List(
      StructField("company_id", LongType, nullable = false),
      StructField("supplier_id", IntegerType, nullable = false),
      StructField("syn_date", StringType, nullable = false),
      StructField("field_id", LongType, nullable = false),
      StructField("field_value", DecimalType(20, 2), nullable = false)))
    // Convert the records to Rows and build the resulting DataFrame.
    val rowList = resultList.map(x =>
      Row(x.companyId, x.supplierId, x.synDate, x.fieldId, x.fieldValue)).asJava
    spark.createDataFrame(rowList, schema)
  }

  /**
   * DEV profile: reads a local CSV export with columns
   * (supplier_id, goods_type, syn_date, sale_amount).
   */
  private def loadDevSales(spark: SparkSession): List[SupplierSale] = {
    val saleData = spark.read.csv("C:\\data\\bak\\query-hive-946297.csv")
    saleData.collect().map { row =>
      SupplierSale(row.getString(0).toInt, row.getString(1), row.getString(2),
        BigDecimal(row.getString(3)))
    }.toList
  }

  /**
   * Production: aggregates daily sales per supplier/goods type/day from Hive
   * for the inclusive range [startDay, endDay].
   *
   * @throws Exception when fewer than 4 arguments are given or the dates are
   *                   not strict yyyyMMdd literals
   */
  private def loadProdSales(spark: SparkSession, args: Array[String]): List[SupplierSale] = {
    // 1 className: SaleShareAction; 2 isLastDay: true|false; 3 startDay: yyyyMMdd; 4 endDay: yyyyMMdd
    if (args.length < 4) {
      throw new Exception("请输入统计开始时间及结束时间！")
    }
    val lastDay = DateUtils.format(DateUtils.afterManyDay(new Date(), -1), DateUtils.DATE_SHORT_PATTERN)
    val (startDay, endDay) =
      if (args(1).toBoolean) (lastDay, lastDay) else (args(2), args(3))
    // The dates are interpolated into SQL text below, so reject anything that
    // is not a strict yyyyMMdd literal (SQL-injection guard on CLI input).
    if (!startDay.matches(DAY_PATTERN) || !endDay.matches(DAY_PATTERN)) {
      throw new Exception("请输入统计开始时间及结束时间！")
    }
    val saleData = spark.sql(s"""SELECT c.company_id supplier_id,
                                           b.ekgrs goods_type,
                                           a.dt syn_date,
                                           sum(a.saleamt) field_value
                                    FROM wumart2dmall.wm_dw_site_merch_sale_day a
                                    JOIN wumart2dmall.wm_m_article b ON a.matnr = b.matnr
                                    JOIN wumart2dmall.wm_ods_cx_supplier_card_info c ON substr(a.lifnr,5) = c.card_code
                                    AND c.audit_status = '2'
                                    WHERE a.dt >='${startDay}' AND a.dt <='${endDay}'
                                      AND length(a.lifnr)=10
                                    GROUP BY c.company_id,
                                             b.ekgrs,
                                             a.dt""")
    saleData.collect().map { row =>
      SupplierSale(row.getLong(0).toInt, row.getString(1), row.getString(2), row.getDecimal(3))
    }.toList
  }

  /**
   * Keeps only the first occurrence per supplierId, preserving input order.
   * Uses a seen-set so the fold runs in O(n) (the previous accumulator rescan
   * was O(n^2)).
   *
   * @param list sales records, possibly with repeated suppliers
   * @return records deduplicated by supplierId
   */
  def distinct(list: List[SupplierSale]): List[SupplierSale] = {
    val (deduped, _) = list.foldLeft((List.empty[SupplierSale], Set.empty[Int])) {
      case ((acc, seen), cur) =>
        if (seen.contains(cur.supplierId)) (acc, seen)
        else (cur :: acc, seen + cur.supplierId)
    }
    deduped.reverse
  }

  /**
   * Save mode and target table name.
   * @return append into scfc_supplier_run_field_value
   */
  override def saveTable: (SaveMode, String) = (SaveMode.Append, "scfc_supplier_run_field_value")
}
