package com.atguigu.userprofile.app;

import com.atguigu.userprofile.common.bean.TagInfo;
import com.atguigu.userprofile.common.dao.TagInfoDAO;
import com.atguigu.userprofile.common.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskMergeApp {

    /**
     * Merges every enabled per-tag table for one business date into a single
     * daily wide table (one row per uid, one string column per enabled tag).
     *
     * Pipeline:
     *   1. Read the enabled tag list from MySQL (tag_info / task_info with status = 1).
     *   2. Drop and recreate the wide table {@code up_tag_merge_yyyyMMdd}, whose
     *      columns follow whatever tags are currently enabled.
     *   3. UNION ALL all per-tag tables into one tall (uid, tag_code, tag_value) set.
     *   4. PIVOT the tall set so each tag_code becomes its own column.
     *   5. INSERT OVERWRITE the pivot result into the wide table.
     *
     * @param args args[0] = task id (reserved, not used yet),
     *             args[1] = business date in yyyy-MM-dd form
     */
    public static void main(String[] args) {
        if (args.length < 2) {
            // Fail with a clear usage message instead of an ArrayIndexOutOfBoundsException.
            throw new IllegalArgumentException("usage: TaskMergeApp <taskId> <busiDate yyyy-MM-dd>");
        }
        String taskId = args[0];   // reserved for future use; kept to document the CLI contract
        String busiDate = args[1];
        // System.setProperty("hadoop.home.dir", "d:\\hadoop"); // local dev only — keep commented when packaging

        SparkConf sparkConf = new SparkConf().setAppName("task_merge_app"); //.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();
        try {
            // 1. Enabled tags drive both the wide-table schema and the union query.
            List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoListWithOn();
            if (tagInfoList.isEmpty()) {
                // With no tags, the generated DDL/DML would be malformed ("(uid string , )").
                System.err.println("No enabled tags found; nothing to merge for " + busiDate);
                return;
            }

            Properties properties = MyPropertiesUtil.load("config.properties");
            String hdfsPath = properties.getProperty("hdfs-store.path");
            String upDbName = properties.getProperty("user-profile.dbname");

            // 2. Rebuild the daily wide table: up_tag_merge_yyyyMMdd.
            String tableName = "up_tag_merge_" + busiDate.replace("-", "");

            String dropTableSQL = "drop table if exists " + upDbName + "." + tableName;
            System.out.println(dropTableSQL);
            sparkSession.sql(dropTableSQL);

            String createTableSQL = buildCreateTableSQL(upDbName, tableName, tagInfoList, hdfsPath);
            System.out.println(createTableSQL);
            sparkSession.sql(createTableSQL);

            // 3. Tall table: one (uid, tag_code, tag_value) row per tag hit.
            String tagUnionSQL = buildUnionSQL(tagInfoList, busiDate);
            System.out.println(tagUnionSQL);

            // 4. PIVOT the tall result into the wide shape.
            String pivotSQL = buildPivotSQL(tagUnionSQL, tagInfoList);
            System.out.println(pivotSQL);

            // 5. Write the pivot result into the wide table. The per-tag tables in
            //    the union are unqualified, so select the profile db first.
            String insertSQL = "insert overwrite table " + upDbName + "." + tableName + " " + pivotSQL;
            System.out.println(insertSQL);
            sparkSession.sql("use " + upDbName);
            sparkSession.sql(insertSQL);
        } finally {
            // Release the driver and executors even if one of the SQL statements fails.
            sparkSession.stop();
        }
    }

    /** Builds the DDL for the daily wide table: uid plus one string column per enabled tag. */
    private static String buildCreateTableSQL(String upDbName, String tableName,
                                              List<TagInfo> tagInfoList, String hdfsPath) {
        List<String> fieldList = tagInfoList.stream()
                .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " string")
                .collect(Collectors.toList());
        String fieldSQL = StringUtils.join(fieldList, ",");
        return "create table  " + upDbName + "." + tableName +
                "     (uid string , " + fieldSQL + ")\n" +
                "         comment '标签宽表'\n" +
                "      ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'\n" +
                "       location   '" + hdfsPath + "/" + upDbName + "/" + tableName + "'";
    }

    /**
     * Builds a UNION ALL over every per-tag table for the given date, labelling each
     * row with its tag_code so the result is a tall (uid, tag_code, tag_value) set.
     */
    private static String buildUnionSQL(List<TagInfo> tagInfoList, String busiDate) {
        List<String> tagQueryList = tagInfoList.stream()
                .map(tagInfo -> "select uid,'" + tagInfo.getTagCode().toLowerCase()
                        + "' as tag_code,tag_value from " + tagInfo.getTagCode().toLowerCase()
                        + " where dt='" + busiDate + "'")
                .collect(Collectors.toList());
        return StringUtils.join(tagQueryList, " union all ");
    }

    /** Wraps the tall union query in a PIVOT that turns each tag_code into its own column. */
    private static String buildPivotSQL(String tagUnionSQL, List<TagInfo> tagInfoList) {
        List<String> tagCodeList = tagInfoList.stream()
                .map(tagInfo -> "'" + tagInfo.getTagCode().toLowerCase() + "'")
                .collect(Collectors.toList());
        String tagCodeSQL = StringUtils.join(tagCodeList, ",");
        return " select * from   (" + tagUnionSQL + ") tt" +
                "         pivot (max(tag_value) tv for tag_code in ( " + tagCodeSQL + ")   )\n";
    }
}
