package com.atguigu.userprofile.app;

import com.atguigu.userprofile.bean.TagInfo;
import com.atguigu.userprofile.bean.TaskInfo;
import com.atguigu.userprofile.bean.TaskTagRule;
import com.atguigu.userprofile.dao.TagInfoDao;
import com.atguigu.userprofile.dao.TaskInfoDao;
import com.atguigu.userprofile.dao.TaskTagRuleDao;
import com.atguigu.userprofile.util.MyPropertiesUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Spark batch job that materializes one user-profile tag table.
 *
 * <p>Driven by metadata fetched over JDBC for a given task id
 * (task_info / tag_info / task_tag_rule):
 * <ol>
 *   <li>creates the Hive tag table if it does not exist, and</li>
 *   <li>overwrites the partition for the given business date with the
 *       result of the task's configured sub-query, optionally remapping
 *       raw query values to sub-tag values via CASE WHEN rules.</li>
 * </ol>
 *
 * <p>Usage: {@code TaskSqlApp <taskId> <busiDate>}
 */
public class TaskSqlApp {

    public static void main(String[] args) {
        // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            throw new IllegalArgumentException("Usage: TaskSqlApp <taskId> <busiDate>");
        }
        // 1. CLI parameters: the metadata task id and the partition business date.
        String taskId = args[0];
        String busiDate = args[1];

        // Externalized configuration: HDFS storage root plus the two DB names.
        Properties properties = MyPropertiesUtil.load("config.properties");
        String hdfsPath = properties.getProperty("hdfs-store.path");
        String dwDBName = properties.getProperty("data-warehouse.dbname");
        String upDBName = properties.getProperty("user-profile.dbname");

        // 0. Build the Spark session.
        // NOTE: remove setMaster("local[*]") when submitting the packaged jar to a cluster.
        SparkConf sparkConf = new SparkConf().setAppName("TaskSqlApp").setMaster("local[*]");
        SparkSession sparkSession = SparkSession.builder()
                .config(sparkConf)
                .enableHiveSupport()
                .getOrCreate();

        // Default to the configured data-warehouse database.
        // (Previously hard-coded to "gmall", which silently ignored the
        // data-warehouse.dbname property loaded above.)
        sparkSession.sql("use " + dwDBName);

        // 2. Fetch the three metadata rows for this task via JDBC.
        TagInfo tagInfo = TagInfoDao.getTagInfo(taskId);
        TaskInfo taskInfo = TaskInfoDao.getTaskInfo(taskId);
        List<TaskTagRule> tagRuleList = TaskTagRuleDao.getTaskTagRule(taskId);

        // 3. Extract the pieces needed to assemble the SQL.
        String subSql = taskInfo.getTaskSql();   // inner query producing (uid, query_value)
        String tagCode = tagInfo.getTagCode();   // e.g. TG_PERSON_BASE_GENDER
        String tagValueType = tagInfo.getTagValueType();
        String tagName = tagInfo.getTagName();

        // All tag tables share the same layout; the table name is the lower-cased tag code.
        String tableName = tagCode.toLowerCase();

        // 4. Create the tag table if it does not exist yet.
        String createTableSQL =
                buildCreateTableSql(upDBName, tableName, toHiveType(tagValueType), tagName, hdfsPath);
        sparkSession.sql(createTableSQL);

        // 5. Build the SELECT that produces (uid, tag_value).
        String querySQL = buildQuerySql(subSql, tagRuleList);

        // 6. Overwrite this business date's partition with the fresh result.
        String insertSQL = "insert overwrite table " + upDBName + "." + tableName
                + " partition(dt='" + busiDate + "') " + querySQL;

        // 7. Execute and release the session.
        sparkSession.sql(insertSQL);
        sparkSession.close();
    }

    /**
     * Maps the metadata tag_value_type code to a Hive column type.
     * 1 = long, 2 = decimal, 3/4 = string. Unknown codes fall back to string
     * (previously an unknown code produced a literal "null" in the DDL,
     * which would fail the CREATE TABLE).
     */
    private static String toHiveType(String tagValueType) {
        if ("1".equals(tagValueType)) {
            return "bigint";
        }
        if ("2".equals(tagValueType)) {
            return "decimal";
        }
        return "string";
    }

    /**
     * Builds the shared CREATE TABLE IF NOT EXISTS DDL for a tag table:
     * (uid string, tag_value &lt;type&gt;) partitioned by dt, tab-delimited,
     * located under the configured HDFS root.
     */
    private static String buildCreateTableSql(String upDBName, String tableName,
                                              String hiveType, String tagName, String hdfsPath) {
        return "create table if not exists " + upDBName + "." + tableName + " \n" +
                "  (uid string, tag_value " + hiveType + ")\n" +
                "   partitioned by (dt string)\n" +
                "   comment '" + tagName + "' \n" +
                "   ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' \n" +
                "   location '" + hdfsPath + "/" + upDBName + "/" + tableName + "'";
    }

    /**
     * Builds the SELECT over the task's sub-query. When four-level tag rules
     * exist, raw query_value is remapped through a CASE WHEN chain
     * (e.g. when 'F' then '女'); otherwise query_value passes through as-is.
     */
    private static String buildQuerySql(String subSql, List<TaskTagRule> tagRuleList) {
        if (tagRuleList.isEmpty()) {
            // No four-level tags: no value remapping required.
            return "select\n" +
                    "    uid,\n" +
                    "    query_value as tag_value\n" +
                    "     from\n" +
                    "     (" + subSql + ")ti";
        }
        // One "when '<queryValue>' then '<subTagValue>'" clause per rule.
        List<String> whenThenList = tagRuleList.stream()
                .map(rule -> "when '" + rule.getQueryValue() + "' then '" + rule.getSubTagValue() + "'")
                .collect(Collectors.toList());
        String whenThenSQL = StringUtils.join(whenThenList, " ");
        return "select\n" +
                "    uid,\n" +
                "     case query_value " + whenThenSQL + " end as tag_value\n" +
                "     from\n" +
                "     (" + subSql + ")ti";
    }
}
