package com.atguigu.userprogfile.app;

import com.atguigu.bean.TagInfo;
import com.atguigu.dao.TagInfoDAO;
import com.atguigu.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Spark batch job that merges all <em>enabled</em> user-profile tag tables for a
 * given business date into a single wide table, one column per tag code.
 *
 * <p>Steps:
 * <ol>
 *   <li>Query each enabled tag table, attaching its tag code as a constant column.</li>
 *   <li>UNION all per-tag result sets together.</li>
 *   <li>PIVOT on {@code tag_code} (aggregate: {@code max(tag_value)}, row key: {@code uid}).</li>
 *   <li>Drop/recreate the target table {@code up_tag_merge_<yyyyMMdd>}.</li>
 *   <li>INSERT OVERWRITE the pivoted result into it.</li>
 * </ol>
 */
public class TaskMergeApp {
    /**
     * Entry point.
     *
     * @param args CLI arguments; the business date (e.g. {@code 2020-06-14}) is read
     *             from {@code args[1]}. NOTE(review): index 1, not 0 — presumably
     *             args[0] carries something else (task id?); confirm against the
     *             submit script before changing.
     * @throws IllegalArgumentException if the business date argument is missing
     * @throws IllegalStateException    if no enabled tags exist (the generated SQL
     *                                  would otherwise be syntactically invalid)
     */
    public static void main(String[] args) {
        // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Usage: TaskMergeApp <arg0> <busiDate>; missing business date argument");
        }
        String busiDate = args[1];

        // Create the Spark environment (master is supplied by spark-submit in production).
        SparkConf sparkConf = new SparkConf().setAppName("TaskMergeApp");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            Properties properties = MyPropertiesUtil.load("config.properties");
            String hdfsPath = properties.getProperty("hdfs-store.path");
            String upDbName = properties.getProperty("user-profile.dbname");

            // Query all enabled tags; an empty list would produce invalid SQL
            // ("select * from () pivot (...)"), so guard explicitly.
            List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoWithOn();
            if (tagInfoList == null || tagInfoList.isEmpty()) {
                throw new IllegalStateException("No enabled tags found; nothing to merge for date " + busiDate);
            }

            // 1. Per-tag SELECT with the tag code attached as a constant column.
            // NOTE(review): busiDate is concatenated straight into SQL from a CLI
            // argument — acceptable for an operator-driven batch job, but validate
            // the date format upstream to avoid malformed/abusive input.
            List<String> queryTagSQLList = tagInfoList.stream()
                    .map(tagInfo -> "select uid,cast(tag_value as String),'"
                            + tagInfo.getTagCode().toLowerCase()
                            + "' as tag_code from " + tagInfo.getTagCode().toLowerCase()
                            + " where dt='" + busiDate + "'")
                    .collect(Collectors.toList());

            // 2. UNION the per-tag queries (plain UNION also de-duplicates rows).
            String queryUnionSQL = String.join(" union ", queryTagSQLList);

            // 3. PIVOT on tag_code: one output column per tag, keyed by uid.
            //    select * from (<union>) pivot ( max(tag_value) for tag_code in ('a','b',...) )
            List<String> tagCodeList = tagInfoList.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase())
                    .collect(Collectors.toList());
            String tagCodeSQL = String.join("','", tagCodeList);

            String querySQL = "select * from (" + queryUnionSQL
                    + ") pivot ( max(tag_value) as tag_value  for tag_code in ('" + tagCodeSQL + "') )";

            // 4. Build DDL for the dated target table, e.g. up_tag_merge_20200614:
            //    uid plus one string column per tag code, stored externally on HDFS.
            String tableName = "up_tag_merge_" + busiDate.replace("-", "");

            List<String> fieldsSQLList = tagInfoList.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " string")
                    .collect(Collectors.toList());
            String fieldSQL = String.join(",", fieldsSQLList);

            String dropTableSQL = "drop table if exists " + tableName;

            String createTableSQL = " create table if not exists " + tableName + "\n" +
                    "           (uid String,\n" +
                    "              " + fieldSQL + ")\n" +
                    "            ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'\n" +
                    "            location '" + hdfsPath + "/" + upDbName + "/" + tableName + "'";

            System.out.println(createTableSQL);

            // 5. INSERT OVERWRITE so a re-run for the same date is idempotent.
            String insertSQL = "insert overwrite table " + tableName + " " + querySQL;
            System.out.println(insertSQL);

            // 6. Execute: select database, recreate the table, load the pivoted data.
            sparkSession.sql("use " + upDbName);
            sparkSession.sql(dropTableSQL);
            sparkSession.sql(createTableSQL);
            sparkSession.sql(insertSQL);
        } finally {
            // Release Spark resources even if any SQL step throws.
            sparkSession.stop();
        }
    }
}
