package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.bean.TaskInfo;
import com.atguigu.userprofile.bean.TaskTagRule;
import com.atguigu.userprofile.constant.ConstCodes;
import com.atguigu.userprofile.dao.TagInfoDAO;
import com.atguigu.userprofile.dao.TaskInfoDAO;
import com.atguigu.userprofile.dao.TaskTagRuleDAO;
import com.atguigu.userprofile.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Batch tag-computation driver, launched via spark-submit:
 * 1. Receive the spark-submit arguments: taskId, busiDate.
 * 2. Load tag_info, task_info and task_tag_rule from MySQL by taskId.
 * 3. Extract the tag name, tag value type, query SQL and mapping rules.
 * 4. Create the tag table in the profile database if it does not exist yet.
 * 5. Build the SELECT statement that maps raw query values to sub-tag values.
 * 6. Wrap it into an INSERT OVERWRITE statement for the target partition.
 * 7. Execute the SQL on Spark.
 */
public class TaskSqlApp {

    public static void main(String[] args) {
        // 1. Receive the spark-submit arguments: taskId, busiDate
        //    e.g. spark-submit --num-executors 2 ... 1 2022-08-14
        if (args.length < 2) {
            throw new IllegalArgumentException(
                    "Usage: TaskSqlApp <taskId> <busiDate>; got " + args.length + " argument(s)");
        }
        String taskId = args[0];
        String busiDate = args[1];

        Properties properties = MyPropertiesUtil.load("config.properties");
        String upDbName = properties.getProperty("user-profile.dbname");
        String hdfsPath = properties.getProperty("hdfs-store.path");
        String dwDbName = properties.getProperty("data-warehouse.dbname");

        // Spark environment. NOTE: when packaging and submitting to a cluster,
        // setMaster MUST stay commented out.
        SparkConf sparkConf = new SparkConf().setAppName("TaskSqlApp");//.setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate();
        try {
            // 2. Load tag_info, task_info and task_tag_rule from MySQL by taskId
            TagInfo tagInfo = TagInfoDAO.getTagInfoByTaskId(taskId);
            TaskInfo taskInfo = TaskInfoDAO.getTaskInfo(taskId);
            List<TaskTagRule> taskTagRuleList = TaskTagRuleDAO.getTaskTagRuleList(taskId);

            // 3. Extract tag name, tag value type, query SQL and mapping rules
            String tableName = tagInfo.getTagCode().toLowerCase();
            String fieldType = toHiveFieldType(tagInfo.getTagValueType());
            String comment = tagInfo.getTagName();
            String taskSql = taskInfo.getTaskSql();

            // 4. Create the tag table in the profile database if absent, e.g.:
            //   create table if not exists user_profile.tg_person_base_gender
            //     (uid String, tag_value <fieldType>)
            //   partitioned by (dt String)
            //   comment '...'
            //   ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
            //   location 'hdfs://.../user_profile/tg_person_base_gender'
            //
            // Escape single quotes so a quote in the tag name cannot break the DDL.
            String safeComment = comment == null ? "" : comment.replace("'", "\\'");
            String createTableSQL =
                    " create table if not exists " + upDbName + "." + tableName +
                            " (uid String,tag_value " + fieldType + ")" +
                            " partitioned by (dt String)" +
                            "  comment '" + safeComment + "'" +
                            "  ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t'" +
                            "  location '" + hdfsPath + "/" + upDbName + "/" + tableName + "'";
            System.out.println(createTableSQL);
            sparkSession.sql(createTableSQL);

            // 5. Build the SELECT statement, e.g.:
            //   select uid,
            //     case query_value when 'F' then '女' when 'M' then '男' ... end as query_value
            //   from ( <taskSql> ) ti

            // Build the "when '<queryValue>' then '<subTagValue>'" fragments
            List<String> whenThenList = taskTagRuleList.stream()
                    .map(rule -> "when '" + rule.getQueryValue() + "' then '" + rule.getSubTagValue() + "' ")
                    .collect(Collectors.toList());
            // Join the fragments with a single space
            String whenThenSQL = String.join(" ", whenThenList);

            String selectSQL = "select\n" +
                    " uid,\n" +
                    " case query_value " + whenThenSQL + "  end as query_value\n" +
                    " from\n" +
                    " (" + taskSql + ")ti";
            System.out.println(selectSQL);

            // 6. Wrap into an INSERT OVERWRITE statement for the busiDate partition
            String insertSQL = "insert overwrite table " + upDbName + "." + tableName
                    + " partition(dt='" + busiDate + "') " + selectSQL;
            System.out.println(insertSQL);

            // 7. Execute. The warehouse database must be current because taskSql
            //    references its tables without a database prefix.
            sparkSession.sql("use " + dwDbName);
            sparkSession.sql(insertSQL);
        } finally {
            // Release the Spark session even when the job fails.
            sparkSession.stop();
        }
    }

    /**
     * Maps a tag value type code from tag_info to the Hive column type used
     * for the tag_value column.
     *
     * @param tagValueType the tag value type code (see {@link ConstCodes})
     * @return the Hive column type name
     * @throws IllegalArgumentException if the code is not a known tag value type,
     *         instead of silently generating a CREATE TABLE with a null type
     */
    private static String toHiveFieldType(String tagValueType) {
        if (ConstCodes.TAG_VALUE_TYPE_LONG.equals(tagValueType)) {
            return "bigint";
        } else if (ConstCodes.TAG_VALUE_TYPE_DECIMAL.equals(tagValueType)) {
            return "decimal";
        } else if (ConstCodes.TAG_VALUE_TYPE_STRING.equals(tagValueType)
                || ConstCodes.TAG_VALUE_TYPE_DATE.equals(tagValueType)) {
            // Dates are stored as strings in the profile tables.
            return "string";
        }
        throw new IllegalArgumentException("Unsupported tag value type: " + tagValueType);
    }
}
