package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.dao.TagInfoDAO;
import com.atguigu.userprofile.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Spark batch job that merges all enabled per-tag Hive tables into a single
 * wide table (one column per tag) for a given business date.
 *
 * <p>Usage: {@code TaskMergeApp <taskId> <busiDate>} — note the business date
 * is read from {@code args[1]} (the second argument), matching the existing
 * scheduler contract.
 *
 * <p>Pipeline:
 * <ol>
 *   <li>Load config (profile DB name, HDFS path, warehouse DB name).</li>
 *   <li>Query MySQL for all enabled tag definitions.</li>
 *   <li>Drop/recreate the dated wide table {@code up_tag_merge_<yyyyMMdd>}
 *       (drop first to keep reruns idempotent).</li>
 *   <li>UNION ALL every tag table, PIVOT by tag code, and INSERT OVERWRITE
 *       into the wide table.</li>
 * </ol>
 */
public class TaskMergeApp {
    public static void main(String[] args) {
        // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException
        // when the scheduler passes too few arguments.
        if (args.length < 2) {
            System.err.println("Usage: TaskMergeApp <taskId> <busiDate(yyyy-MM-dd)>");
            System.exit(1);
        }
        // Business date passed in by the external scheduler, e.g. "2020-06-14".
        String busiDate = args[1];

        SparkConf sparkConf = new SparkConf().setAppName("TaskMergeApp");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            // Load settings from the external properties file.
            Properties properties = MyPropertiesUtil.load("config.properties");
            // User-profile Hive database name.
            String upDatabaseName = properties.getProperty("user-profile.dbname");
            // HDFS root path under which the wide table's data lives.
            String hdfsPath = properties.getProperty("hdfs-store.path");
            // E-commerce data-warehouse database name (loaded for parity with the
            // other task apps; not referenced below).
            String dwDBName = properties.getProperty("data-warehouse.dbname");

            // All tag definitions currently in the "enabled" state (from MySQL).
            List<TagInfo> tagInfoListWithOn = TagInfoDAO.getTagInfoListWithOn();

            // Guard: with zero enabled tags the generated DDL/pivot SQL would be
            // syntactically invalid ("(uid String, )" / "in('')"), so exit cleanly.
            if (tagInfoListWithOn.isEmpty()) {
                System.err.println("No enabled tags found; nothing to merge for " + busiDate);
                return;
            }

            // Declare every tag column as `string`: all tag value types (int, float,
            // char, date) convert cleanly to String, which also matches ClickHouse's
            // String type for the later export step.
            List<String> tagCodeListSQL = tagInfoListWithOn.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " string")
                    .collect(Collectors.toList());

            // Each element looks like: tg_person_base_gender string
            // Join them with commas for the CREATE TABLE column list.
            String tagCodeSQL = StringUtils.join(tagCodeListSQL, ",");

            // Target wide table, one per business date, e.g. up_tag_merge_20200614:
            //
            //   create table if not exists up_tag_merge_20200614
            //     (uid String,
            //       tg_person_base_gender string,
            //       tg_person_base_agegroup string)
            //     ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
            //     location 'hdfs://.../user_profile/user_profile/up_tag_merge_20200614'
            String tableName = "up_tag_merge_" + busiDate.replace("-", "");
            System.out.println(tableName);

            // Drop first so reruns for the same date are idempotent.
            String dropTableSQL = "drop table if exists " + upDatabaseName + "." + tableName;
            sparkSession.sql(dropTableSQL);

            // NOTE: identifiers are concatenated into SQL; they come from trusted
            // config/MySQL metadata, not end-user input.
            String createSQL = "create table if not exists " + upDatabaseName + "." + tableName + "\n" +
                    "           (uid String,\n" +
                    "             " + tagCodeSQL + ")\n" +
                    "            ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'\n" +
                    "            location '" + hdfsPath + "/" + upDatabaseName + "/" + tableName + "'";
            System.out.println(createSQL);
            sparkSession.sql(createSQL);

            // Build the UNION ALL of every tag table, each row carrying its tag code:
            //
            //   select uid,tag_value,'tg_person_base_gender' as tag_code
            //     from user_profile.tg_person_base_gender where dt='2020-06-14'
            //   union all
            //   select uid,tag_value,'tg_person_base_agegroup' as tag_code
            //     from user_profile.tg_person_base_agegroup where dt='2020-06-14'
            List<String> unionSQLList = tagInfoListWithOn.stream()
                    .map(tagInfo -> "select uid,tag_value,'" + tagInfo.getTagCode().toLowerCase()
                            + "' as tag_code from " + upDatabaseName + "." + tagInfo.getTagCode().toLowerCase()
                            + " where dt='" + busiDate + "'")
                    .collect(Collectors.toList());
            String unionSQL = StringUtils.join(unionSQLList, " union all ");
            System.out.println(unionSQL);

            // Pivot the tall (uid, tag_value, tag_code) rows into one wide row per uid:
            //
            //   select * from ( <unionSQL> ) tg
            //     pivot (max(tag_value) as tag_value
            //            for tag_code in('tg_person_base_gender','tg_person_base_agegroup'))

            // All enabled tag codes, lowercased, for the pivot IN-list.
            List<String> tagCodeWithOnList = tagInfoListWithOn.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase())
                    .collect(Collectors.toList());
            String tagCodeWithOnSQL = StringUtils.join(tagCodeWithOnList, "','");

            String querySQL = "select * from (" + unionSQL + ") tg \n" +
                    "   pivot (max(tag_value) as tag_value for tag_code in('" + tagCodeWithOnSQL + "'))";
            System.out.println(querySQL);

            // INSERT OVERWRITE the pivoted result into the dated wide table.
            String insertSQL = "insert overwrite table " + upDatabaseName + "." + tableName + " " + querySQL;
            sparkSession.sql(insertSQL);
        } finally {
            // Always release the Spark application context, even on failure.
            sparkSession.stop();
        }
    }
}
