package com.dmall.scf.action.kanban

import com.dmall.scf.Profile
import com.dmall.scf.dto.{KanbanFieldValue, SupplierSale}
import com.dmall.scf.utils.{DateUtils, SparkUtils}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * @description
 * scf-spark 根据销售占比最大分类计算供应商数量
 * (counts suppliers grouped by the goods type holding the largest share of their sales)
 * @author wangxuexing
 * @date 2020/1/10
 */
object SupplierCounterByTypeAction extends KanbanAction {
  // Simple name of this object with the Scala object marker '$' stripped.
  val CLASS_NAME = this.getClass.getSimpleName().filterNot(_ == '$')

  /**
   * For each supplier, determines the goods type holding the largest share of
   * that type's company-wide sales, then counts suppliers per dominant type.
   *
   * @param spark active SparkSession
   * @param args  args(1) selects the period: "1" = current year, "2" = last year
   * @return DataFrame of KanbanFieldValue rows, one per goods type with its supplier count
   * @throws Exception when args(1) is missing or not "1"/"2"
   */
  override def action(spark: SparkSession, args: Array[String]): DataFrame = {
    import spark.implicits._

    if (args.length < 2) {
      throw new Exception("请指定是当前年(值为1)还是去年(值为2)：1|2")
    }

    val lastDay = DateUtils.addSomeDays(-1)
    // Resolve the reporting window and the kanban field id from args(1).
    val (startDate, endDate, fieldId) = args(1) match {
      case "1" => (DateUtils.formatNormal2ShortDateStr(DateUtils.getFirstDateOfCurrentYear),
                   DateUtils.formatNormal2ShortDateStr(lastDay), 40)
      case "2" => (DateUtils.getLastYearFirstStr(DateUtils.YYYYMMDD),
                   DateUtils.getLastYearLastStr(DateUtils.YYYYMMDD), 41)
      case _   => throw new Exception("请传入正确的参数：是当前年(值为1)还是去年(值为2)：1|2")
    }

    val supSaleList = SparkUtils.env match {
      case Profile.DEV =>
        // Local CSV fixture for development runs; columns mirror the hive query below.
        val saleData = spark.read.csv("C:\\data\\bak\\supplier_sale.csv")
        saleData.collect().map { row =>
          SupplierSale(row.getString(0).toInt, row.getString(1), row.getString(2), BigDecimal(row.getString(3)))
        }.toList
      case _ =>
        // Per-supplier, per-goods-type sales totals over the reporting window.
        val saleData = spark.sql(
          s"""SELECT cast(c.company_id as int) supplier_id,
                                               b.ekgnam goods_type,
                                               '1' as syn_date,
                                               sum(cast(a.saleamt as decimal(38,18))) field_value
                                        FROM wumart2dmall.wm_dw_site_merch_sale_day a
                                        JOIN wumart2dmall.wm_m_article b ON a.matnr = b.matnr
                                        JOIN wumart2dmall.wm_ods_cx_supplier_card_info c ON substr(a.lifnr,5) = c.card_code
                                        AND c.audit_status = '2'
                                        WHERE a.dt >='${startDate}'
                                          AND a.dt <='${endDate}'
                                          AND length(a.lifnr)=10
                                        GROUP BY c.company_id,
                                                 b.ekgnam""")
        // Suppliers with an audited card and an active loan account.
        // Collected into a Set so the membership test below is O(1) instead of
        // a linear Array scan per sale row.
        val supplierIds = spark.sql(
          s"""
                   SELECT DISTINCT(a.company_id) supplier_ids
                    FROM wumart2dmall.wm_ods_cx_supplier_card_info a
                    JOIN wumart2dmall.wm_ods_jrbl_loan_dkzhxx b ON a.card_code = b.gshkahao
                    WHERE a.audit_status = '2'
                      AND b.jiluztai = '0'""").collect().map(_.getLong(0)).toSet
        SparkUtils.dataFrame2Bean[SupplierSale](saleData, classOf[SupplierSale])
          .filter(x => supplierIds.contains(x.supplierId.toLong))
    }

    // Total sales per goods type across all suppliers.
    val typeTotals: Map[String, BigDecimal] =
      supSaleList.groupBy(_.goodsType).map { case (goodsType, sales) =>
        goodsType -> sales.map(_.fieldValue).sum
      }

    // Sales per goods type within each supplier.
    val supplierTypeTotals: Map[Int, Map[String, BigDecimal]] =
      supSaleList.groupBy(_.supplierId).map { case (supplierId, sales) =>
        supplierId -> sales.groupBy(_.goodsType).map { case (goodsType, typeSales) =>
          goodsType -> typeSales.map(_.fieldValue).sum
        }
      }

    // For each supplier, pick the goods type whose sales form the largest
    // percentage of that type's company-wide total.
    // BUG FIX: the original imperative loop compared each share against a
    // `maxValue` that was initialized to 0 but never reassigned, so `maxType`
    // ended up as the last non-negative entry in map iteration order rather
    // than the true maximum. `maxBy` computes the actual maximum share.
    val supplierMaxType: Map[Int, String] = supplierTypeTotals.map { case (supplierId, typeSales) =>
      val shares: Map[String, BigDecimal] = typeSales.map { case (goodsType, saleSum) =>
        val share = typeTotals.get(goodsType) match {
          // Guard against division by zero when a type's total is 0.
          case Some(total) if total.compare(BigDecimal(0)) != 0 =>
            (saleSum / total * 100).setScale(2, BigDecimal.RoundingMode.HALF_UP)
          case _ => BigDecimal(0)
        }
        goodsType -> share
      }
      val maxType = if (shares.isEmpty) "" else shares.maxBy(_._2)._1
      supplierId -> maxType
    }

    // Count suppliers per dominant goods type and build the kanban rows.
    val resultList = supplierMaxType.values
      .groupBy(identity)
      .map { case (goodsType, occurrences) =>
        KanbanFieldValue(1, lastDay, fieldId, goodsType, occurrences.size.toString, "")
      }
      .toList

    getDataFrame(resultList, spark)
  }
}
