package a_aa_amainpackage.Up_new

import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}


object brand_join_res {

  /** Entry point: reads the classified (category-tagged) source data from S3,
    * extracts brands that are new relative to last month's brand table, merges
    * them into a new monthly brand table, then joins brands back onto the
    * source data and writes the result to S3.
    *
    * @param args unused; all paths and parameters are configured below.
    */
  def main(args: Array[String]): Unit = {
    // SECURITY: these S3 keys were hard-coded (and are therefore committed to
    // version control) — they must be rotated. They are retained only as a
    // fallback so existing deployments keep working; prefer the environment.
    val s3AccessKey = sys.env.getOrElse("S3A_ACCESS_KEY", "GAO7EO9FWKPJ8WFCQDME")
    val s3SecretKey = sys.env.getOrElse("S3A_SECRET_KEY", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")

    val spark = SparkSession.builder()
//            .master("local[*]")
      .appName("brand_join_res")
      .config("spark.hadoop.fs.s3a.access.key", s3AccessKey)
      .config("spark.hadoop.fs.s3a.secret.key", s3SecretKey)
//      .config("spark.hadoop.fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
      .getOrCreate()
    val sc = spark.sparkContext
    // `new SQLContext(sc)` is deprecated since Spark 2.0; the session-owned
    // SQLContext is the supported, behaviorally identical accessor.
    val sqlContext = spark.sqlContext

// ************************************************************************************************************
    // Platform name [part-00003-0abefecd-3a1c-4613-94b2-94b8b7f00f01-c000.json]
    val platformName = "tmall"
    // NOTE: runtime string — intentionally left in its original (Chinese) form.
    val timeStamp = "2019年11月"
    val month = 11
    // Path of the source data after category classification has been run.
//      val data_path = "D:\\员工需求\\1.json"
    //val data_path = s"s3a://o2o-dataproces-group/panzonghao/cate_data_20191104"
    val data_path = s"s3a://o2o-dataproces-group/panzonghao/cate_data_20191102/"

    // Output path for the source data with brands joined on (may be changed).
    val source_result = s"s3a://o2o-dataproces-group/panzonghao/brand_test/result"

    // Path of last month's (old) brand table.
    val old_brand_path = s"s3a://o2o-dataproces-group/li_yinchao/Table/2019/11/tmall/"
    // Output path for this month's merged brand table (do not modify).
    //val result_path =  s"s3a://o2o-dataproces-group/li_yinchao/Table/${year}/${month + 1}/${platform}"
    // "Double 11" (Nov 11 shopping festival) special run:
    val result_path = s"s3a://o2o-dataproces-group/panzonghao/brand_test/brand_result"
    // Output path for newly added brands (do not modify).
    //val brand_newAdd =  s"s3a://o2o-dataproces-group/li_yinchao/Table/newAddBrand/${year}/${month}/${platform}"
    // "Double 11" special run:
    val brand_newAdd = s"s3a://o2o-dataproces-group/panzonghao/brand_test/new_brand"

// ************************************************************************************************************
    try {
      // Read the classified source data. brand_handle writes its outputs to S3
      // and returns Unit, so there is nothing to bind here.
      val source_data = spark.read.json(data_path)
      brand_handle(source_data, spark, old_brand_path, result_path, brand_newAdd,
        platformName, source_result, sqlContext, timeStamp)
    } finally {
      // Always release the session, even if a stage fails; stopping the
      // session also stops the underlying SparkContext.
      spark.stop()
    }
  }

  /** Builds the current month's brand table and joins it onto the source data.
    *
    * Pipeline: extract brands new relative to the old table, normalise them,
    * apply time-honoured-brand ("laozihao") address handling, union the result
    * with the old table into `result_path`, then join brands onto the source
    * data and write JSON to `source_result`.
    *
    * @param source_data    classified source records
    * @param old_brand_path S3 path of last month's brand table (JSON)
    * @param result_path    S3 output path of the merged monthly brand table
    * @param brand_newAdd   S3 output path for newly added brands
    * @param platformName   e-commerce platform identifier (e.g. "tmall")
    * @param source_result  S3 output path of the brand-joined source data
    * @param timeStamp      human-readable month label stamped on new brands
    */
  private def brand_handle(source_data: DataFrame, spark: SparkSession, old_brand_path: String,
                           result_path: String, brand_newAdd: String, platformName: String,
                           source_result: String, sqlContext: SQLContext, timeStamp: String): Unit = {

    // Read the old brand table.
    val old_brand_data = spark.read.json(old_brand_path)
    println("--------------新增品牌提取--------------------------")
    val new_brand = utils.newAdd_brand_util.new_brand_excat(source_data, old_brand_data, spark, timeStamp)
    println("--------------修改品牌-----------------------------")
    val brand_stap_2 = utils.brand_modify_util.brand_modify_1(new_brand, spark, platformName)
    println("--------------老字号处理---------------------------")
    val frame_3 = utils.laozihao_util.brand_add_laozihaoaddress(brand_stap_2, old_brand_data, spark, brand_newAdd, sqlContext)

    // Re-read the old table as raw text so it can be unioned line-for-line
    // with the JSON-serialised new brands without schema reconciliation.
    val old_brand_1 = spark.sparkContext.textFile(old_brand_path)
    // Merge the processed new brands with the old table.
    val all_brand = frame_3.toJSON.rdd.union(old_brand_1)
    // Persist the merged (new + old) brand table.
    all_brand.repartition(4).saveAsTextFile(result_path)
    // Removed dead statement `spark.read.json(all_brand)`: its result was
    // discarded, yet it triggered a full schema-inference job over the RDD.

    // Join brands onto the source data (reads the table just written to result_path).
    val join_data = utils.brand_join_util.brand_join(source_data, spark, result_path, sqlContext)

    //join_data.write.orc(source_result)
    join_data.write.json(source_result)
  }
}
