package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.dao.TagInfoDAO;
import com.atguigu.userprofile.utils.MyPropertiesUtil;

import org.apache.commons.lang3.StringUtils;

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.stream.Collectors;

public class TaskMergeApp {

    /**
     * Entry point. For the given business date, creates (if absent) a daily
     * "wide" tag table in Hive — one string column per enabled tag — and
     * fills it by UNION-ing every enabled tag's source table and pivoting
     * tag_code values into columns.
     *
     * @param args program arguments; the business date (yyyy-MM-dd) is read
     *             from index 1, so at least two arguments are required
     * @throws IllegalArgumentException if the date argument is missing
     */
    public static void main(String[] args) {
        // NOTE(review): the date is taken from args[1], i.e. the *second*
        // program argument — confirm the submit script really passes it there.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Expected the business date (yyyy-MM-dd) as the second program argument");
        }
        String busiDate = args[1];

        SparkConf sparkConf = new SparkConf().setAppName("TaskMergeApp");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();

        try {
            // Configuration: HDFS warehouse root and target database name.
            Properties properties = MyPropertiesUtil.load("config.properties");
            String hdfsPath = properties.getProperty("hdfs-store.path");
            String upDbName = properties.getProperty("user-profile.dbname");

            // All currently-enabled tags; each maps to one column of the wide
            // table and to a source table named after its tag code.
            List<TagInfo> tagInfoWithOn = TagInfoDAO.getTagInfoWithOn();
            System.out.println(tagInfoWithOn);

            // Lower-cased tag codes drive every piece of generated SQL.
            // Locale.ROOT keeps the casing stable regardless of the JVM locale.
            List<String> tagCodes = tagInfoWithOn.stream()
                    .map(tagInfo -> tagInfo.getTagCode().toLowerCase(Locale.ROOT))
                    .collect(Collectors.toList());

            // Wide-table name embeds the date, e.g. up_tag_merge_20200614.
            String tableName = "up_tag_merge_" + busiDate.replace("-", "");

            // Example of the generated DDL:
            //   create table if not exists up_tag_merge_20200614
            //     (uid String,
            //      tg_person_base_gender string,
            //      tg_person_base_agegroup string)
            //     ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
            //     location 'hdfs://.../user_profile/up_tag_merge_20200614'
            String createTableSQL = buildCreateTableSQL(tableName, tagCodes, hdfsPath, upDbName);
            System.out.println(createTableSQL);

            // Example of the generated query:
            //   select * from (
            //     select uid, cast(tag_value as string), 'tg_...' as tag_code from tg_... where dt = '...'
            //     union ...
            //   ) pivot (max(tag_value) as tag_value for tag_code in ('tg_...', ...))
            String querySQL = buildPivotQuerySQL(tagCodes, busiDate);
            System.out.println(querySQL);

            String insertSQL = "insert overwrite table " + tableName + " " + querySQL;

            sparkSession.sql("use " + upDbName);
            sparkSession.sql(createTableSQL);
            sparkSession.sql(insertSQL);
        } finally {
            // Release the driver's resources even if one of the SQL calls fails.
            sparkSession.close();
        }
    }

    /**
     * Builds the CREATE TABLE statement for the wide table: a uid column plus
     * one string column per enabled tag, stored under
     * {hdfsPath}/{upDbName}/{tableName}.
     */
    private static String buildCreateTableSQL(String tableName, List<String> tagCodes,
                                              String hdfsPath, String upDbName) {
        String fieldSQL = tagCodes.stream()
                .map(code -> code + " string")
                .collect(Collectors.joining(","));
        return "create table if not exists " + tableName + " \n"
                + " (uid String,\n"
                + "  " + fieldSQL + ") \n"
                + " ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' \n"
                + " location '" + hdfsPath + "/" + upDbName + "/" + tableName + "'";
    }

    /**
     * Builds the SELECT that unions every tag table for the business date and
     * pivots the tag_code values into one column per tag.
     */
    private static String buildPivotQuerySQL(List<String> tagCodes, String busiDate) {
        // One branch per tag table, filtered to the business-date partition.
        String unionSQL = tagCodes.stream()
                .map(code -> "select uid,cast(tag_value as string),'" + code
                        + "' as tag_code from " + code + " where dt = '" + busiDate + "'")
                .collect(Collectors.joining(" union "));
        // Constant list for the PIVOT ... IN (...) clause.
        String inList = tagCodes.stream()
                .map(code -> "'" + code + "'")
                .collect(Collectors.joining(","));
        return " select * from (" + unionSQL
                + ") pivot (max(tag_value) as tag_value for tag_code in (" + inList + "))";
    }
}
