package com.o2o.yumidami

import org.apache.spark.sql.SparkSession


case object yumiQuanwang_seven {

  // --- JDBC connection settings for the source PostgreSQL instance. ---
  // NOTE(review): credentials are hard-coded; move them to configuration or
  // environment variables before any shared/production use.
  private val JdbcUrl      = "jdbc:postgresql://192.168.2.234:8000/postgres"
  private val JdbcUser     = "sysadmin"
  private val JdbcPassword = "Bigdata@123"

  // Epoch-second window boundaries, kept as strings to match the comparison
  // style used in the SQL below (the `timestamp` column is compared as text).
  // "this year" window: 2020-01-30 .. 2020-12-29 (UTC) — TODO confirm timezone.
  private val ThisYearStart = "1580313600"
  private val ThisYearEnd   = "1609257600"
  // "last year" window: 2019-01-30 .. 2019-12-29 (UTC) — TODO confirm timezone.
  private val LastYearStart = "1548777600"
  private val LastYearEnd   = "1577635200"

  /**
    * Reads one PostgreSQL table over JDBC and registers it as a Spark temp view.
    *
    * @param spark    the active SparkSession
    * @param dbTable  fully-qualified table name as PostgreSQL expects it
    * @param viewName name of the temp view to register
    */
  private def registerJdbcTable(spark: SparkSession, dbTable: String, viewName: String): Unit = {
    spark.read.format("jdbc")
      .option("driver", "org.postgresql.Driver")
      .option("url", JdbcUrl)
      .option("dbtable", dbTable)
      .option("user", JdbcUser)
      .option("password", JdbcPassword)
      .load()
      .createOrReplaceTempView(viewName)
  }

  /**
    * Computes, per each of China's seven geographic regions (by product origin),
    * the retail amount/quantity, their share of the national total, and the
    * year-over-year change, then writes the result as a single CSV file.
    */
  def main(args: Array[String]): Unit = {

    // Local-mode session: this is a one-off extraction job run on a workstation.
    val spark = SparkSession.builder()
      .master("local[*]")
      .config("spark.debug.maxToStringFields", "10000")
      .appName("JdbcSparkConnectorIntro")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    try {
      // Source tables:
      //  - corn sales records for 2019-2020
      //    (alternative source used previously: "test_duan"."g_dami_2019_2020")
      //  - province -> administrative-region mapping
      registerJdbcTable(spark, "\"test_duan\".\"yumi_2019_2020\"", "yumi")
      registerJdbcTable(spark, "dbadmin.\"address_mapping_p_c_d_show_2018\"", "addr")

      // Distinct (region, province) pairs seen inside each year's time window.
      spark.sql(
        s"""
           |select administrative_region, province from addr where timestamp >= '$ThisYearStart'
           |and timestamp <= '$ThisYearEnd'
           |group by administrative_region , province
           |""".stripMargin).createOrReplaceTempView("thisyearaddr")
      spark.sql(
        s"""
           |select administrative_region, province from addr where timestamp >= '$LastYearStart'
           |and timestamp <= '$LastYearEnd'
           |group by administrative_region , province
           |""".stripMargin).createOrReplaceTempView("lastyearaddr")

      /**
        * Retail amount share & YoY, retail quantity share & YoY, grouped by the
        * seven geographic regions derived from the product's place of origin.
        */

      // 1. Derive the join key `ycd`: map the free-text `base_info` field to a
      //    canonical province/region name by substring match; '其他' ("other")
      //    is the fallback for unrecognized origins.
      spark.sql(
        """
          |select *,
          |(case
          |when base_info like '%湖南%' then '湖南省'
          |when base_info like '%河南%' then '河南省'
          |when base_info like '%云南%' then '云南省'
          |when base_info like '%甘肃%' then '甘肃省'
          |when base_info like '%江苏%' then '江苏省'
          |when base_info like '%黑龙江%' then '黑龙江省'
          |when base_info like '%天津%' then '天津市'
          |when base_info like '%广西%' or base_info like '%壮族%' then '广西壮族自治区'
          |when base_info like '%陕西%' then '陕西省'
          |when base_info like '%宁夏%' or base_info like '%回族%' then '宁夏回族自治区'
          |when base_info like '%湖北%' then '湖北省'
          |when base_info like '%江西%' then '江西省'
          |when base_info like '%重庆%' then '重庆市'
          |when base_info like '%青海%' then '青海省'
          |when base_info like '%山西%' then '山西省'
          |when base_info like '%四川%' then '四川省'
          |when base_info like '%福建%' then '福建省'
          |when base_info like '%河北%' then '河北省'
          |when base_info like '%海南%' then '海南省'
          |when base_info like '%新疆%' or base_info like '%维吾尔%' then '新疆维吾尔自治区'
          |when base_info like '%广东%' then '广东省'
          |when base_info like '%北京%' then '北京市'
          |when base_info like '%辽宁%' then '辽宁省'
          |when base_info like '%安徽%' then '安徽省'
          |when base_info like '%吉林%' then '吉林省'
          |when base_info like '%浙江%' then '浙江省'
          |when base_info like '%内蒙古%' then '内蒙古自治区'
          |when base_info like '%西藏%' then '西藏自治区'
          |when base_info like '%上海%' then '上海市'
          |when base_info like '%山东%' then '山东省'
          |when base_info like '%贵州%' then '贵州省'
          |else '其他'
          |end) ycd
          |from yumi
          |""".stripMargin).createOrReplaceTempView("mid")

      // 2. Current-year totals (quantity / amount) per region.
      spark.sql(
        s"""
           |select
           |t2.administrative_region,sum(t1.sellcount) sellsum,sum(t1.salesamount) salesum
           |from
           |mid t1
           |left join thisyearaddr t2
           |on t1.ycd = t2.province
           |where t2.province is not null
           |and t2.administrative_region in ('华北地区','华中地区','华南地区','东北地区','西北地区','华东地区','西南地区')
           |and t1.timestamp >= '$ThisYearStart'
           |and t1.timestamp <= '$ThisYearEnd'
           |group by t2.administrative_region
           |""".stripMargin)
        .createOrReplaceTempView("thisyear")

      // 3. Grand totals for the current year — denominator for the share columns.
      spark.sql(
        """
          |select sum(sellsum) sellsum,sum(salesum) salesum from thisyear
          |""".stripMargin)
        .createOrReplaceTempView("sumyear")

      // 4. Previous-year totals per region — denominator for the YoY columns.
      spark.sql(
        s"""
           |select
           |t2.administrative_region,sum(t1.sellcount) sellsum,sum(t1.salesamount) salesum
           |from
           |mid t1
           |left join lastyearaddr t2
           |on t1.ycd = t2.province
           |where t2.province is not null
           |and t2.administrative_region in ('华北地区','华中地区','华南地区','东北地区','西北地区','华东地区','西南地区')
           |and t1.timestamp >= '$LastYearStart'
           |and t1.timestamp <= '$LastYearEnd'
           |group by t2.administrative_region
           |""".stripMargin)
        .createOrReplaceTempView("lastyear")

      // 5. Final report. Column order:
      //    region, quantity/1e4, amount/1e4, quantity share, amount share,
      //    quantity YoY (this/last - 1), amount YoY.
      //    NOTE(review): regions absent from `lastyear` yield null YoY values
      //    via the left join — presumably intended; confirm with the consumer.
      spark.sql(
        """
          |select r1.administrative_region ,
          |r1.sellsum/10000 ,
          |r1.salesum/10000 ,
          |r1.sellsum/r2.sellsum ,
          |r1.salesum/r2.salesum ,
          |r1.sellsum/r3.sellsum -1 ,
          |r1.salesum/r3.salesum -1
          |from
          |thisyear r1 CROSS join
          |sumyear r2 on 1 = 1
          |left join
          |lastyear r3 on r1.administrative_region = r3.administrative_region
          |order by r1.salesum desc
          |""".stripMargin)
        .repartition(1).write.csv("D:/test")
    } finally {
      // Release the local Spark context even if any stage above fails.
      spark.stop()
    }
  }
}
