package com.atguigu.userprofile.app;


import com.atguigu.userprofile.common.bean.TagInfo;
import com.atguigu.userprofile.common.dao.TagInfoDAO;
import com.atguigu.userprofile.common.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskMergeApp {

    // Pipeline:
    //   1. Read the enabled tag definitions (tag_info joined with task_info) from MySQL.
    //   2. Drop and recreate the per-day wide tag table: user_tag_merge_<yyyyMMdd>
    //      (one table per business date; schema derived from the current tag list).
    //   3. Union all per-tag single tables into one tall table, then pivot
    //      (rows keyed by uid, one column per tag_code, max(tag_value) as the cell).
    //   4. Insert-overwrite the pivoted result into the wide table.
    public static void main(String[] args) {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Usage: TaskMergeApp <taskId> <busiDate(yyyy-MM-dd)>");
        }
        String taskId = args[0];   // NOTE(review): currently unused; kept for CLI compatibility
        String busiDate = args[1];

        // Spark environment; Hive support so the SQL below runs against the metastore.
        SparkConf sparkConf = new SparkConf().setAppName("task_merge_app"); //.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            // 1. Tag definitions for all enabled tasks (tag_info ⋈ task_info, status = on).
            List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoListWithOn();
            if (tagInfoList == null || tagInfoList.isEmpty()) {
                // An empty list would yield malformed DDL "(uid string ,)" and a
                // broken pivot clause — stop with a clear message instead.
                throw new IllegalStateException("No enabled tags found; nothing to merge.");
            }
            System.out.println(tagInfoList);

            // 2. One wide table per business date, e.g. user_tag_merge_20200614.
            String tableName = "user_tag_merge_" + busiDate.replace("-", "");

            // One "<tag_code> string" column per tag. Locale.ROOT keeps the
            // lower-casing of identifiers locale-independent (Turkish-I hazard).
            List<String> fieldList = tagInfoList.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase(Locale.ROOT) + " string ")
                    .collect(Collectors.toList());
            String fieldNames = String.join(",", fieldList);

            Properties properties = MyPropertiesUtil.load("config.properties");
            String hdfsPath = properties.getProperty("hdfs-store.path");
            String dwName = properties.getProperty("data-warehouse.dbname"); // NOTE(review): read but unused here
            String upName = properties.getProperty("user-profile.dbname");

            // Drop-and-recreate keeps the wide table's schema in sync with the
            // current tag list; data location is pinned under the profile db dir.
            String createTableSQL = "     create table     " + upName + "." + tableName + "\n" +
                    "        (uid string ," + fieldNames + ")\n" +
                    "        ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'\n" +
                    "        location   '" + hdfsPath + "/" + upName + "/" + tableName + "'";

            String dropTableSql = "drop table if   exists   " + upName + "." + tableName;
            System.out.println(dropTableSql);
            System.out.println(createTableSQL);

            sparkSession.sql(dropTableSql);
            sparkSession.sql(createTableSQL);

            // 3a. Tall table: union-all of every per-tag table for this date, e.g.
            //   select uid, 'tg_person_base_gender' as tag_code, tag_value
            //   from <up>.tg_person_base_gender where dt='<busiDate>'
            List<String> tagSQLList = tagInfoList.stream()
                    .map(tagInfo -> " select  uid , '" + tagInfo.getTagCode().toLowerCase(Locale.ROOT)
                            + "' as  tag_code,tag_value  from  " + upName + "."
                            + tagInfo.getTagCode().toLowerCase(Locale.ROOT)
                            + " where dt='" + busiDate + "'")
                    .collect(Collectors.toList());
            String unionAllSQL = String.join(" union all", tagSQLList);
            System.out.println(unionAllSQL);

            // 3b. Pivot tall -> wide.
            //   key column: uid; aggregate: max(tag_value); pivot column: tag_code
            //   The "in (...)" value list must be built dynamically from the tag codes.
            List<String> tagCodeList = tagInfoList.stream()
                    .map(tagInfo -> "'" + tagInfo.getTagCode().toLowerCase(Locale.ROOT) + "'")
                    .collect(Collectors.toList());
            String tagCodes = String.join(",", tagCodeList);

            String pivotSQL = " select * from  (" + unionAllSQL + ") pivot (  max(tag_value)   for  tag_code  in  ( " + tagCodes + " ))";

            // 4. Overwrite today's wide table with the pivoted result.
            String insertSQL = "insert overwrite table " + upName + "." + tableName + pivotSQL;
            System.out.println(insertSQL);
            sparkSession.sql(insertSQL);
        } finally {
            // Release the Spark context whether or not a SQL statement failed;
            // the original leaked the session on every exit path.
            sparkSession.stop();
        }
    }
}
