package com.atguigu.userprofile.task;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.constant.ConstCode;
import com.atguigu.userprofile.dao.TagInfoDao;
import com.atguigu.userprofile.util.MyPropsUtil;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.stream.Collectors;

/**
 * 标签宽表合并
 *
 * 1. 获取外部传入的参数:  taskId 、 busiDate
 *
 * 2. 明确有哪些标签表? 通过要计算哪些标签来确定
 *
 * 3. 创建标签宽表
 *
 * 4. 组织SQL:  insert (标签宽表) .... select(各个标签表)
 *
 * 5. 准备SparkSql环境， 执行SQL
 *
 */
/**
 * Merges all enabled per-tag tables for a business date into one wide table
 * ({@code tag_merge_<yyyyMMdd>}) using a Spark SQL UNION ALL + PIVOT.
 *
 * Steps: read CLI args -> load enabled tags -> drop/recreate the wide table ->
 * pivot-insert one column per tag. Fails fast on missing args or an empty tag
 * list (either would otherwise produce malformed SQL), and always stops the
 * SparkSession when done.
 */
public class TaskMerge {
    public static void main(String[] args) {
        // 1. External arguments: taskId, busiDate. Validate up front so a bad
        //    launch fails with a usage message instead of ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            throw new IllegalArgumentException("Usage: TaskMerge <taskId> <busiDate>");
        }
        String taskId = args[0];
        String busiDate = args[1];

        // 2. Determine which tag tables exist, from the tags enabled for computation.
        List<TagInfo> tagInfos = TagInfoDao.selectTagInfosWithTaskEnable();
        // An empty tag list would generate broken DDL (dangling comma) and an
        // empty PIVOT IN-list — abort early with a clear diagnostic instead.
        if (tagInfos == null || tagInfos.isEmpty()) {
            throw new IllegalStateException(
                    "No enabled tags found for task " + taskId + "; nothing to merge for " + busiDate);
        }

        // 3. Build the wide-table DDL:
        /*
           create table if not exists [upDbName].[tableName]
           (
             uid string ,
             [tagColumns]
           )
           row format delimited fields terminated by '\t'
           location '[hdfsPath]/[upDbName]/[tableName]'
         */
        String upDbName = MyPropsUtil.get(ConstCode.USER_PROFILE_DBNAME);
        // e.g. tag_merge_20200614
        String tableName = "tag_merge_" + busiDate.replace("-" , "") ;
        // One string column per tag code (all tag values are stored as string,
        // regardless of the tag's logical type: string/decimal/bigint/date).
        String tagColumns = tagInfos.stream().map(
                tagInfo -> tagInfo.getTagCode().toLowerCase() + " string"
        ).collect(Collectors.joining(" , "));

        String hdfsPath = MyPropsUtil.get(ConstCode.HDFS_STORE_PATH);

        String dropTable = " drop table if exists " + upDbName + "." + tableName ;

        // NOTE(review): identifiers are concatenated into SQL; tag codes come
        // from the internal tag-definition table, assumed trusted — confirm.
        String createTable =
                " create table if not exists " + upDbName + "." + tableName  +
                " (" +
                " uid string , " + tagColumns +
                " )" +
                " row format delimited fields terminated by '\\t'" +
                " location '" + hdfsPath + "/" + upDbName +"/" + tableName + "'";

        System.out.println("createTable => " + createTable );

        // 4. Build the pivot SQL: union all per-tag tables (uid, tag_code, tag_value)
        //    for the business date, then pivot tag_code into columns, e.g.:
        // select * from
        // (
        //   select uid , 'tg_...' as tag_code , tag_value from db.tg_... where dt = '...'
        //   union all ...
        // )
        // pivot ( max(tag_value) as tag_value for tag_code in (...) )
        String unionSql = tagInfos.stream().map(
                tagInfo -> "select uid , '" +tagInfo.getTagCode().toLowerCase()+ "' as tag_code  ,  tag_value from " + upDbName + "." + tagInfo.getTagCode().toLowerCase() + " where dt = '" + busiDate + "'"
        ).collect(Collectors.joining(" union all "));

        String pivotIn = tagInfos.stream().map(
                tagInfo -> "'" + tagInfo.getTagCode().toLowerCase() + "'"
        ).collect(Collectors.joining(" , "));

        String pivotSql = " select * from ( " + unionSql + " ) pivot ( max(tag_value) as tag_value  for tag_code in (" + pivotIn+"))" ;

        System.out.println("pivotSql ==> " + pivotSql);

        String insertSql = " insert into " + upDbName + "." + tableName + pivotSql ;

        // 5. Set up the SparkSQL environment and execute the statements.
        SparkConf sparkConf = new SparkConf().setAppName("task_merge_app");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();
        try {
            // drop table
            sparkSession.sql(dropTable) ;
            // create table
            sparkSession.sql(createTable);
            // insert select
            sparkSession.sql(insertSql);
        } finally {
            // Release cluster resources even if a statement fails.
            sparkSession.stop();
        }
     }
}
