package com.atguigu.userprofile;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.dao.TagInfoDAO;
import com.atguigu.userprofile.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Merges all enabled per-tag Hive tables into a single wide table
 * {@code up_tag_merge_<yyyyMMdd>}: one row per uid, one column per tag code.
 *
 * <p>Pipeline: read config → drop/recreate the target table → UNION ALL every
 * tag table for the business date → PIVOT tag_code into columns → insert overwrite.
 *
 * <p>Expects the business date ({@code yyyy-MM-dd}) as a program argument.
 */
public class TaskMergeApp {
    public static void main(String[] args) {
        // 1. External configuration: HDFS root and user-profile database name.
        Properties properties = MyPropertiesUtil.load("config.properties");
        String hdfsPath = properties.getProperty("hdfs-store.path");
        String upDbName = properties.getProperty("user-profile.dbname");
        if (hdfsPath == null || upDbName == null) {
            // Fail fast: a missing key would otherwise splice the literal "null"
            // into DDL statements and HDFS paths.
            throw new IllegalStateException(
                    "config.properties must define 'hdfs-store.path' and 'user-profile.dbname'");
        }

        // 2. Business date passed in from the scheduler.
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Expected the business date (yyyy-MM-dd) as the second program argument, got "
                            + args.length + " argument(s)");
        }
        // NOTE(review): index 1 preserved from the original code — confirm callers
        // really pass the date as the second argument (index 0 is more common).
        String busiDate = args[1];

        // 3. Spark environment (master is supplied by spark-submit in production).
        SparkConf sparkConf = new SparkConf().setAppName("TaskMergeApp");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();
        sparkSession.sql("use " + upDbName);

        // 4. All tags currently switched on; their codes drive both the DDL
        //    columns and the UNION/PIVOT branches below.
        List<TagInfo> tagInfoList = TagInfoDAO.getTagInfoListWithOn();

        // Column definitions for the target table, e.g. "tg_person_base_gender string".
        List<String> tagCodeList = tagInfoList.stream()
                .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " string")
                .collect(Collectors.toList());
        String tagCodeSQL = String.join(",", tagCodeList);
        System.out.println(tagCodeSQL);

        // 5. Recreate the day's merge table so reruns are idempotent.
        //    Example:
        //      create table if not exists up_tag_merge_20200614
        //        (uid string,
        //         tg_person_base_gender string,
        //         tg_person_base_agegroup string)
        //        ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
        //        location 'hdfs://.../user_profile/up_tag_merge_20200614'
        String tableName = "up_tag_merge_" + busiDate.replace("-", "");

        String dropSQL = "drop table if exists " + tableName;
        System.out.println(dropSQL);
        sparkSession.sql(dropSQL);

        String createSQL = "create table if not exists " + tableName + " \n" +
                "  (uid string,\n" +
                "  " + tagCodeSQL + ") \n" +
                "  ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' \n" +
                "  location '" + hdfsPath + "/" + upDbName + "/" + tableName + "'";
        System.out.println(createSQL);
        sparkSession.sql(createSQL);

        // 6. One SELECT per tag table, stitched together with UNION ALL.
        //    Each branch yields (uid, tag_value, tag_code) for the business date.
        List<String> unionList = tagInfoList.stream()
                .map(tagInfo -> "select uid,tag_value,'" + tagInfo.getTagCode().toLowerCase()
                        + "' as tag_code from " + upDbName + "." + tagInfo.getTagCode().toLowerCase()
                        + " where dt='" + busiDate + "'")
                .collect(Collectors.toList());
        String unionSQL = String.join(" union all ", unionList);

        // 7. PIVOT the tall (uid, tag_value, tag_code) result into one wide row
        //    per uid, one column per tag code.
        List<String> selectTagCodeList = tagInfoList.stream()
                .map(tagInfo -> tagInfo.getTagCode().toLowerCase())
                .collect(Collectors.toList());
        String selectTagCodeSQL = String.join("','", selectTagCodeList);

        String selectSQL = "select * from (" + unionSQL + ") tg\n" +
                "  pivot (max(tag_value) as tag_value for tag_code in('" + selectTagCodeSQL + "'))";

        // 8. Load the pivoted result into the day's merge table.
        //    Explicit newline between table name and SELECT — previously the
        //    statement only parsed because selectSQL happened to start with a space.
        String insertSQL = "insert overwrite table " + upDbName + "." + tableName + "\n" + selectSQL;
        System.out.println(insertSQL);
        sparkSession.sql(insertSQL);

        // Release the Spark context cleanly.
        sparkSession.stop();
    }
}
