package com.hucais.agg.service

import com.hucais.agg.bean.DwtSalesTrend
import com.hucais.agg.dao.SourceIndexDataDao
import com.hucais.core.utils.DefaultPropertiesUtil
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sql.EsSparkSQL

/**
 * Computes sales-trend aggregates and writes them to Elasticsearch.
 *
 * Sources two datasets (book base info filtered to the "正向选品" channel, and
 * OpenBook market data), unions them in SQL, and rolls up monthly sales with
 * GROUPING SETS across (channel, product_type, year, month) combinations.
 * Absent dimensions are reported as "全部" (i.e. "all"). The result rows are
 * stamped with yesterday's date as create_time and saved to the ES index
 * configured under the "index.sales.trend" property.
 */
object SalesTrendService {

  /**
   * Runs the sales-trend aggregation end to end.
   *
   * @param ssc          Spark context (kept for signature compatibility; unused here).
   * @param sparkSession active session used for SQL and Dataset operations.
   */
  def action(ssc: SparkContext, sparkSession: SparkSession): Unit = {
    // Needed for the typed .as[DwtSalesTrend] conversion below.
    import sparkSession.implicits._

    // Load book_base_info, keeping only the "正向选品" (forward selection) channel.
    // A plain typed filter replaces the previous mapPartitions wrapper — same
    // result, simpler plan.
    val bookBaseInfoDS = SourceIndexDataDao.getSearchInfoData(sparkSession)
      .filter(item => "正向选品".equals(item.second_channel))
    bookBaseInfoDS.cache()
    bookBaseInfoDS.createOrReplaceTempView("dwd_book_base_info")

    // Load OpenBook market data.
    val openDataDS = SourceIndexDataDao.getOpenData(sparkSession)
    openDataDS.cache()
    openDataDS.createOrReplaceTempView("ods_openbooks")

    // Union both sources, then aggregate sales per (channel, product_type, year,
    // month) via GROUPING SETS; coalesce fills rolled-up dimensions with "全部".
    // create_time is set to yesterday's date (the data's effective day).
    val resultDS = sparkSession.sql(
      s"""
         |select
         |		channel,coalesce (product_type,'全部') as product_type,
         |		coalesce (year,'全部') as year,month,sum(sales) as value,date_format(date_add(current_date,-1),'yyyy-MM-dd') as create_time
         |from
         |(
         |	select '开卷数据' as channel,category as product_type,SUBSTR(sale_time,1,4) as year,SUBSTR(sale_time,6,2) as month,month_sales as sales
         |	from ods_openbooks
         |	union all
         |	select '正向选品' as channel,category as product_type,SUBSTR(copyright_month ,1,4) as year,SUBSTR(copyright_month,6,2) as month,sales
         |	from dwd_book_base_info where second_channel ='正向选品' and category is not null and sales is not null
         |)a
         |GROUP BY channel,product_type,year,month
         |GROUPING SETS ((channel,month),(channel,product_type,month),(channel,year,month),(channel,product_type,year,month))
         |""".stripMargin).as[DwtSalesTrend]

    // Terminal action: persist the aggregates to the configured ES index.
    EsSparkSQL.saveToEs(resultDS, DefaultPropertiesUtil.get("index.sales.trend"))

    // Release cached blocks now that the job's only action has completed;
    // previously these stayed pinned in executor memory.
    bookBaseInfoDS.unpersist()
    openDataDS.unpersist()
  }

}
