package com.atguigu.userprofile.task;

import com.atguigu.userprofile.beans.TagInfo;
import com.atguigu.userprofile.constant.ConstCode;
import com.atguigu.userprofile.dao.TagInfoDao;
import com.atguigu.userprofile.utils.MyPropsUtil;
import com.atguigu.userprofile.utils.MySQLUtil;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.stream.Collectors;

/**
 * Merges all enabled per-tag tables into a single wide tag table for one business date.
 *
 * <p>Steps:
 * <ol>
 *   <li>Read external arguments (task id, business date).</li>
 *   <li>Look up the enabled tag tables.</li>
 *   <li>Drop/recreate the wide table {@code up_merge_yyyyMMdd}.</li>
 *   <li>Build the SQL: union all per-tag rows, pivot tag_code into columns, insert overwrite.</li>
 *   <li>Execute the statements through a Hive-enabled SparkSession.</li>
 * </ol>
 *
 * <p>Usage: {@code TaskMerge <taskId> <businessDate yyyy-MM-dd>}
 */
public class TaskMerge {

    public static void main(String[] args) {
        // 1. External arguments — fail fast with a clear message instead of an
        //    ArrayIndexOutOfBoundsException when the scheduler misconfigures the task.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Expected 2 arguments: <taskId> <businessDate yyyy-MM-dd>, got " + args.length);
        }
        String taskId = args[0]; // currently unused; kept so all tasks share the same CLI contract
        String businessDate = args[1];

        // 2. Enabled tag tables to merge.
        List<TagInfo> tagInfos = TagInfoDao.selectTagInfosWithEnable();
        if (tagInfos.isEmpty()) {
            // With zero tags the generated DDL ("create table ...(uid string,)") and the
            // pivot "in ()" clause would both be invalid SQL — stop early instead.
            throw new IllegalStateException("No enabled tags found; nothing to merge");
        }

        // 3. Wide-table DDL:
        //    create table <upDbName>.<tableName> (uid string, <tagColumns>)
        //    row format delimited fields terminated by '\t'
        //    location '<hdfsPath>/<upDbName>/<tableName>'
        String hdfsPath = MyPropsUtil.get(ConstCode.HDFS_STORE_PATH);
        String upDbName = MyPropsUtil.get(ConstCode.USER_PROFILE_DBNAME);
        String tableName = "up_merge_" + businessDate.replace("-", "");

        // Every tag column is declared as string (rather than its declared tag value type)
        // to simplify the downstream data-migration step.
        String tagColumns = tagInfos.stream()
                .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " string")
                .collect(Collectors.joining(", "));

        String createTable = "create table " + upDbName + "." + tableName +
                " (uid string, " + tagColumns + ")" +
                " row format delimited fields terminated by '\\t'" +
                " location '" + hdfsPath + "/" + upDbName + "/" + tableName + "'";

        String dropTable = "drop table if exists " + upDbName + "." + tableName;

        // 4. DML: union all per-tag rows (uid, tag_code, tag_value) ...
        String unionSql = tagInfos.stream()
                .map(tagInfo -> {
                    String code = tagInfo.getTagCode().toLowerCase();
                    return "select uid, '" + code + "' as tag_code, tag_value from "
                            + upDbName + "." + code + " where dt = '" + businessDate + "'";
                })
                .collect(Collectors.joining(" union all "));

        // ... then pivot tag_code values into one column per tag ...
        String pivotValues = tagInfos.stream()
                .map(tagInfo -> "'" + tagInfo.getTagCode().toLowerCase() + "'")
                .collect(Collectors.joining(","));

        String selectPivot = " select * from (" + unionSql +
                ") pivot (max(tag_value) as tag_value for tag_code in (" + pivotValues + "))";

        // "table" keyword added for Hive-dialect compatibility (optional in Spark SQL,
        // required by Hive's INSERT OVERWRITE syntax).
        String insertSql = "insert overwrite table " + upDbName + "." + tableName + selectPivot;

        // 5. Execute. Stop the session in finally so cluster resources are released
        //    even when one of the statements fails.
        SparkConf conf = new SparkConf().setAppName("task_merge_app");
        SparkSession spark = SparkSession.builder().enableHiveSupport().config(conf).getOrCreate();
        try {
            spark.sql(dropTable);
            spark.sql(createTable);
            spark.sql(insertSql);
        } finally {
            spark.stop();
        }
    }

}
