package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.dao.TagInfoDAO;
import com.atguigu.userprofile.utils.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * ClassName: TaskMergeAPP
 * Package: com.atguigu.userprofile
 * Description:
 *
 * @Author ChenJun
 * @Create 2023/3/31 12:01
 * @Version 1.0
 */
public class TaskMergeAPP {

        /**
         * Merges all enabled tag tables into a single daily wide table
         * {@code up_tag_merge_<yyyyMMdd>} in the user-profile Hive database.
         *
         * <p>Steps: read config → look up enabled tags (task_status = 1) →
         * drop/recreate the day's wide table → run a UNION ALL + PIVOT query
         * and overwrite-insert the result. The table is dropped first so a
         * failed previous run cannot leave stale data behind.
         *
         * @param args args[0] = task id (currently unused, reserved for
         *             per-task filtering); args[1] = business date, yyyy-MM-dd
         */
        public static void main(String[] args) {

                if (args.length < 2) {
                        throw new IllegalArgumentException(
                                "Usage: TaskMergeAPP <taskId> <busiDate yyyy-MM-dd>");
                }
                String taskId = args[0]; // not used yet; kept for CLI-contract compatibility
                String busiDate = args[1];

                // Load external configuration (HDFS root and target database name).
                Properties properties = MyPropertiesUtil.load("config.properties");
                String hdfsPath = properties.getProperty("hdfs-store.path");
                String upDBName = properties.getProperty("user-profile.dbname");

                // Spark environment. Master is supplied by spark-submit in production;
                // uncomment setMaster("local[*]") only for local debugging.
                SparkConf sparkConf = new SparkConf().setAppName("TaskMergeAPP");
                SparkSession sparkSession = SparkSession.builder()
                        .config(sparkConf)
                        .enableHiveSupport()
                        .getOrCreate();

                try {
                        // All tags whose owning task has task_status = 1 in MySQL
                        // (tag_info JOIN task_info ON tag_task_id = task_info.id).
                        List<TagInfo> tagInfoWithOnList = TagInfoDAO.getTagInfoWithOn();

                        // With no enabled tags the generated DDL/pivot SQL would be
                        // malformed ("(uid String,)", "in ('')") — fail fast instead.
                        if (tagInfoWithOnList.isEmpty()) {
                                throw new IllegalStateException(
                                        "No enabled tags found; nothing to merge for date " + busiDate);
                        }

                        // One wide table per day; the enabled-tag set (and therefore the
                        // column set) may differ from day to day, so a fixed table won't do.
                        String tableName = "up_tag_merge_" + busiDate.replaceAll("-", "");

                        String createTableSQL = buildCreateTableSQL(tableName, tagInfoWithOnList, hdfsPath, upDBName);
                        String insertSQL = buildInsertSQL(tableName, tagInfoWithOnList, busiDate);
                        System.out.println(insertSQL);

                        sparkSession.sql("use " + upDBName);
                        // Drop before recreate so a previously failed run leaves no dirty data.
                        sparkSession.sql("drop table if exists " + tableName);
                        sparkSession.sql(createTableSQL);
                        sparkSession.sql(insertSQL);
                } finally {
                        // Release the driver's Spark resources even if a SQL step fails.
                        sparkSession.stop();
                }
        }

        /**
         * Builds the CREATE TABLE statement for the daily wide table: one
         * {@code string} column per enabled tag code (lower-cased), plus uid,
         * tab-delimited, located under the configured HDFS path.
         */
        private static String buildCreateTableSQL(String tableName,
                                                  List<TagInfo> tagInfos,
                                                  String hdfsPath,
                                                  String upDBName) {
                List<String> fieldList = tagInfos.stream()
                        .map(tagInfo -> tagInfo.getTagCode().toLowerCase() + " string")
                        .collect(Collectors.toList());
                String fieldStr = String.join(",", fieldList);

                return "create table if not exists " + tableName + "\n" +
                        "   (uid String,\n" +
                        "     " + fieldStr +
                        ")\n" +
                        "   ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'\n" +
                        "   location '" + hdfsPath + "/" + upDBName + "/" + tableName + "'";
        }

        /**
         * Builds the INSERT OVERWRITE statement: UNION ALL of every enabled
         * tag table for the given date, pivoted so each tag code becomes a
         * column ({@code max(tag_value)} per uid).
         */
        private static String buildInsertSQL(String tableName,
                                             List<TagInfo> tagInfos,
                                             String busiDate) {
                // One sub-select per tag table: (uid, tag_value, constant tag_code).
                List<String> subSQLList = tagInfos.stream()
                        .map(tagInfo -> "select uid,cast(tag_value as String) as tag_value,'"
                                + tagInfo.getTagCode().toLowerCase() + "' as tag_code from "
                                + tagInfo.getTagCode().toLowerCase()
                                + " where dt = '" + busiDate + "'")
                        .collect(Collectors.toList());
                String unionAllSQL = String.join(" union all ", subSQLList);

                // Pivot column list: 'code1','code2',... of the enabled tags.
                List<String> tagCodeList = tagInfos.stream()
                        .map(tagInfo -> tagInfo.getTagCode().toLowerCase())
                        .collect(Collectors.toList());
                String tagCodeInList = String.join("','", tagCodeList);

                String querySQL = "select  * from (" + unionAllSQL
                        + ") pivot (max(tag_value) as tag_value for tag_code in ('"
                        + tagCodeInList + "'))";

                return "insert overwrite table " + tableName + " \n" + querySQL;
        }
}
