package com.hucais.etl.common.dao

import com.hucais.etl.common.bean.DwdBookBaseInfoWithAuthorPopularity
import com.hucais.etl.job.OdsDoubanScore
import org.apache.spark.sql.{Dataset, SparkSession}

/**
 * Reads DWD-layer (detail) tables from the Hive warehouse.
 */
object DwdHiveDao {

  /**
   * Guards a date string before it is interpolated into a single-quoted SQL
   * literal. Both queries below build SQL via plain string interpolation, so
   * a quote or backslash in the value would corrupt the statement (or allow
   * SQL injection if the value ever comes from an untrusted source).
   *
   * @param name  parameter name, used only in the failure message
   * @param value raw date string supplied by the caller
   * @throws IllegalArgumentException if the value is null/empty or contains
   *                                  a single quote or backslash
   */
  private def requireSafeDate(name: String, value: String): Unit = {
    require(value != null && value.nonEmpty, s"$name must not be empty")
    require(!value.contains("'") && !value.contains("\\"),
      s"$name must not contain quote or backslash characters: $value")
  }

  /**
   * Fetches book base info from the DWD layer, left-joined with author
   * popularity on `author`, keeping only rows whose `discount` is greater
   * than 2.00 (per the original comment this filters out books discounted
   * below "2折", i.e. below 20% of list price — NOTE(review): confirm the
   * discount scale with the data owner).
   *
   * Rows with a null/empty `create_time` or null `discount` are dropped;
   * the date window is half-open: create_time in [startDate, endDate).
   * Price/discount columns are cast to numeric types in the query so the
   * Dataset encoder for [[DwdBookBaseInfoWithAuthorPopularity]] can bind them.
   *
   * @param sparkSession active session used to run the Hive query
   * @param startDate    inclusive lower bound on `create_time`
   *                     (string-compared — assumed same format as the column,
   *                     e.g. yyyy-MM-dd; TODO confirm)
   * @param endDate      exclusive upper bound on `create_time`
   * @return typed Dataset of book rows with (possibly null) author-popularity columns
   */
  def getDwdBookBaseInfoWithAuthorPopularity(sparkSession: SparkSession, startDate: String, endDate: String): Dataset[DwdBookBaseInfoWithAuthorPopularity] = {
    // Reject values that would break out of the quoted SQL literals below.
    requireSafeDate("startDate", startDate)
    requireSafeDate("endDate", endDate)
    import sparkSession.implicits._
    sparkSession.sql(
      s"""
         |select
         |	a.first_channel,a.second_channel,a.isbn,a.book_name,a.first_category,
         |	a.second_category,a.third_category,a.fourth_category,a.author,a.publishing_house,
         |	a.publishing_time,a.selling_price,a.min_selling_price,a.avg_selling_price,a.store_cnt,
         |	a.store_pricing,a.year_sales,a.discount,a.brand,a.douban_score,a.grader_cnt,
         |  b.total_followers,b.published_num,b.university
         |from (
         |	select
         |		first_channel,second_channel,isbn,book_name,first_category,
         |		second_category,third_category,fourth_category,author,publishing_house,
         |		publishing_time,cast(selling_price as float) as selling_price,cast(min_selling_price as float) as min_selling_price,
         |	  cast(avg_selling_price as float) as avg_selling_price,store_cnt,cast(store_pricing as float) as store_pricing,year_sales,
         |    cast(discount as float) as discount,brand,cast(douban_score as Double) as douban_score,grader_cnt
         |	from published.dwd_book_base_info
         |	where create_time is not null and create_time !=''
         |	and discount is not null and discount>2.00
         |	and create_time>='${startDate}' and create_time<'${endDate}'
         |)a left join (
         |	select author,total_followers,published_num,university from published.dwd_author_popularity
         |)b on a.author = b.author
         |""".stripMargin).as[DwdBookBaseInfoWithAuthorPopularity]
  }

  /**
   * Fetches Douban score rows from the DWD layer for the given half-open
   * date window: create_time in [startDate, endDate). `score` is cast to
   * float in the query for the [[OdsDoubanScore]] encoder.
   *
   * @param sparkSession active session used to run the Hive query
   * @param startDate    inclusive lower bound on `create_time`
   * @param endDate      exclusive upper bound on `create_time`
   * @return typed Dataset of Douban score rows
   */
  def getDoubanScore(sparkSession: SparkSession, startDate: String, endDate: String): Dataset[OdsDoubanScore] = {
    // Reject values that would break out of the quoted SQL literals below.
    requireSafeDate("startDate", startDate)
    requireSafeDate("endDate", endDate)
    import sparkSession.implicits._
    sparkSession.sql(
      s"""
         |select isbn,query_book,author,cast(score as float) as score,grader_cnt
         |from published.dwd_douban_score
         |where create_time>='${startDate}' and create_time<'${endDate}'
         |""".stripMargin).as[OdsDoubanScore]
  }

}
