package com.o2o.etl_data.jd.brandv1.UpdataTable_new

import com.o2o.etl_data.jd.brandv1.UpdataTable_new.utils.{brand_modify_util, brand_state_util, laozihao_util, newAdd_brand_util}
import org.apache.spark.sql.{SQLContext, SparkSession}

/**
 * Placeholder companion class for the [[brand_join_res]] driver object.
 *
 * The original body contained a single dead expression referencing the
 * companion object (it only forced object initialization, which has no
 * side effects of its own); that statement has been removed. The class
 * itself is kept so any external reference to the type keeps compiling.
 */
class brand_join_res

/**
 * Driver object: joins raw (already-categorized) product data with the brand
 * dimension table.
 *
 * Pipeline (all paths come from `a_aa_amainpackage.a_o2odata_deal.config.config`):
 *  1. read the categorized source data and the old brand table (JSON);
 *  2. extract newly-seen brands, normalize brand names / country-of-origin /
 *     "laozihao" (time-honored brand) flags via the project utils;
 *  3. write the new-brand table, union it with the old brand table into the
 *     full brand result table;
 *  4. join the source data against the merged brand table and write the
 *     final result as ORC.
 */
object brand_join_res {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
//      .master("local[*]")
      .appName("brand_join_res")
      // SECURITY NOTE(review): hardcoded S3 credentials committed in source.
      // These should be rotated and supplied via a Hadoop credential provider,
      // environment variables, or instance-profile credentials instead.
      .config("spark.hadoop.fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
      .config("spark.hadoop.fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
//      .config("spark.hadoop.fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("ERROR")
    // Downstream utils take a SQLContext parameter; use the session's accessor
    // instead of the deprecated `new SQLContext(sc)` constructor (same session).
    val sqlContext = spark.sqlContext

    // ************************************************************************************************************
    //val platformName = "tmall"
    val platformName = "Mryx"
    val timeStamp = "2020年3月"

    // Path of the categorized input data (your own raw-data path).
    val data_path = a_aa_amainpackage.a_o2odata_deal.config.config.cate_path
    //val data_path = s"s3a://o2o-dataproces-group/han_congcong/2020/3/Mryx_cateresult"
    // Output path for the source data with brands joined (may be changed freely).
    val source_result = a_aa_amainpackage.a_o2odata_deal.config.config.source_result
    //val source_result = s"s3a://o2o-dataproces-group/han_congcong/2020/3/Mryx_brandresult"
    // Path of the old brand table.
    val old_brand_path = a_aa_amainpackage.a_o2odata_deal.config.config.old_brand_path
    //val old_brand_path = s"s3a://o2o-dataproces-group/li_yinchao/Table/2020/3/Mryx"
    // Path of the newly-added brands (do not modify).
    val brand_newAdd = a_aa_amainpackage.a_o2odata_deal.config.config.brand_newAdd
    //val brand_newAdd = s"s3a://o2o-dataproces-group/li_yinchao/Table/newAddBrand/2020/3/Mryx"
    // Path of the merged brand result table (do not modify).
    val result_path = a_aa_amainpackage.a_o2odata_deal.config.config.result_path
    //val result_path = s"s3a://o2o-dataproces-group/li_yinchao/Table/2020/4/Mryx"

    // **************************************************************************************************************************
    // Read the raw input (data that already went through categorization).
    val source_data = spark.read.json(data_path)

    // Read the old brand table.
    val old_brand_data = spark.read.json(old_brand_path)
    println("--------------提取新增品牌--------------------------")
    val addBrand = new newAdd_brand_util
    val new_brand = addBrand.new_brand_excat(source_data, old_brand_data, spark, timeStamp)
    println("--------------修改品牌名称-----------------------------")
    val brand_modify = new brand_modify_util
    val brand_stap_2 = brand_modify.brand_modify_1(new_brand, spark, platformName)
    brand_stap_2.cache()
    println("--------------修改品牌来源国-----------------------------")
    val state = new brand_state_util
    val stateResult = state.brand_state(brand_stap_2, spark, platformName)
    println("--------------修改老字号---------------------------")
    val laozihao = new laozihao_util
    val frame_3 = laozihao.brand_add_laozihaoaddress(stateResult, spark, sqlContext)

    // Persist the new-brand table, then re-read it from storage before the
    // union — this acts as a checkpoint, cutting the lineage of the long
    // transformation chain above.
    frame_3.cache()
    frame_3.repartition(4).write.json(brand_newAdd)

    // `createOrReplaceTempView` replaces the deprecated `registerTempTable`
    // (drop-in, session-scoped; deprecated since Spark 2.0).
    sqlContext.read.json(brand_newAdd).createOrReplaceTempView("frame_3")
    // Register the old brand table as a temp view as well.
    old_brand_data.createOrReplaceTempView("old_brand_data")

    // Merge the new brands with the old brand table (UNION deduplicates).
    val result = sqlContext.sql(
      """
        |select brandCcId,brandName,brandName_cn,brandName_en,brandValueId,brand_isLaoZiHao,brand_type,firstCategoryId,platform,timeStamp,brand_state from frame_3
        |union
        |select brandCcId,brandName,brandName_cn,brandName_en,brandValueId,brand_isLaoZiHao,brand_type,firstCategoryId,platform,timeStamp,brand_state from old_brand_data
      """.stripMargin)
    result.repartition(1).write.json(result_path)

    // Join the source data against the merged brand table and write ORC.
    println("关联品牌表入库")
    val join_data = utils.brand_join_util.brand_join(source_data, spark, result_path, sqlContext)
    join_data.repartition(8).write.orc(source_result)

    sc.stop()
  }
}
