package com.atguigu.upp.app;

import com.atguigu.upp.bean.TagInfo;
import com.atguigu.upp.service.CKDBService;
import com.atguigu.upp.service.MySQLDBService;
import com.atguigu.upp.utils.UPPUtil;
import jodd.util.PropertiesUtil;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * Created by Smexy on 2022/12/2
 *
 *  ① Find out which tags were computed today:
 *      query MySQL's task_info table for the tags whose task has task_status = 1.
 *  ② Use the PIVOT syntax to merge today's per-tag tables into one wide table
 *         with columns: uid, tag1, tag2, ...
 *  ③ Create the table in ClickHouse and write the query result into it:
 *          columns:   uid plus one column per tag
 *          engine:    MergeTree
 *          order by:  uid
 *          partition: a partitioned table is not possible, because the set of
 *                     tags computed each day differs — instead a separate
 *                     table is generated for every day.
 *
 *          table name: embeds the business date. ClickHouse table names do not
 *                      allow '-', so '_' is used instead.
 *
 *   -----------------------------
 *    The tags computed each day are different!
 *      Example:
 *              06-14:  a,b,c    wide table: uid,a,b,c
 *              06-15:  d,e,a,b  wide table: uid,a,b,d,e
 *
 *       If this were a partitioned table, its columns would be fixed and only
 *       the partition field's value would change per day — which does not fit.
 */
public class MergeWideTableApp
{
    public static void main(String[] args) throws IOException {

        //①接收参数
        String taskId = args[0];
        String doDate = args[1];

       /* String taskId = "3";
        String doDate = "2020-06-14";*/

        //②查询当天计算的标签
        SqlSessionFactory sqlSessionFactory = UPPUtil.createSqlSessionFactory("mysql_config.xml");
        MySQLDBService mySQLDBService = new MySQLDBService(sqlSessionFactory.openSession());
        SqlSessionFactory ckSSF = UPPUtil.createSqlSessionFactory("ck_config.xml");
        CKDBService ckdbService = new CKDBService(ckSSF.openSession());
        List<TagInfo> tags = mySQLDBService.getTagInfoExecute();

        //③使用pivot语法，生成查询语句
        String querySql = generateQuerySql(tags, doDate);

        //④数据写入到ck
        SparkSession sparkSession = UPPUtil.createSparkSession("MergeWideTableApp");
        writeToCK(ckdbService,doDate,tags,querySql,sparkSession);



    }

    private static void writeToCK(CKDBService ckdbService,String doDate, List<TagInfo> tags,String querySql,SparkSession sparkSession){

        //确认今天要写入的宽表名
        String table = UPPUtil.getProperty("upwideprefix") + doDate.replace("-","_");

        //先删除表
        ckdbService.dropWideTable(table);

        //根据今天计算的标签，生成列的信息
        // 列的类型需要根据标签的tagValueType确定，宽表只是中间的表。使用兼容的类型String，省事
        // 计算的标签的类型:  文本，日期，整数，浮点
        String column = tags.stream()
                             .map(tag -> tag.getTagCode().toLowerCase() + " String ")
                             .collect(Collectors.joining(","));

        //建表
        ckdbService.createWideTable(table,column);

        //先查询
        Dataset<Row> data = sparkSession.sql(querySql);

        //再写出
        Properties properties = new Properties();

        /*
            写出时，自动帮你建表。如果在建表时，发现表已经存在了，就报错！
                默认的写出模式: ErrorIfExists

             CK是一个OLAP的数据库，没有事务的，设置关闭事务！
         */
        data.write()
            .mode(SaveMode.Append)
            .option("driver", UPPUtil.getProperty("ck.jdbc.driver.name"))
            .option("batchsize",500)
            .option("isolationLevel","NONE")   //事务关闭
            .option("numPartitions", "4") // 设置并发
            .jdbc(UPPUtil.getProperty("ck.jdbc.url"),table,properties);

    }

    /*
    select
    *
from
(
    select uid,tagvalue,'tag_population_attributes_nature_gender' tagCode from upp220704.tag_population_attributes_nature_gender where dt='2020-06-14'
    union all
    select uid,tagvalue,'tag_population_attributes_nature_period' tagCode  from upp220704.tag_population_attributes_nature_period where dt='2020-06-14'
    union all
    select uid,tagvalue,'tag_consuming_behavior_order_amount7d' tagCode  from upp220704.tag_consuming_behavior_order_amount7d where dt='2020-06-14'
)tmp
pivot(
   min(tagvalue)
   for tagCode in ('tag_population_attributes_nature_gender','tag_population_attributes_nature_period','tag_consuming_behavior_order_amount7d')
);

     */
    private static String generateQuerySql(List<TagInfo> tags,String doDate){

        String template = "  select * from  (%s) tmp  pivot(   min(tagvalue)  for tagCode in (%s) )";

        //查询今天要计算的标签的结果，之后union all合并
        String sql1 = "    select uid,tagvalue,'%s' tagCode from %s.%s where dt='%s' ";
        String dbName = UPPUtil.getProperty("updbname");

        String tmpSql = tags.stream()
                             .map(tag -> String.format(sql1, tag.getTagCode().toLowerCase(), dbName, tag.getTagCode(), doDate))
                             .collect(Collectors.joining(" union all "));

        String pivotColumnValueSql = tags.stream()
                             .map(tag -> "'" + tag.getTagCode().toLowerCase() + "'")
                             .collect(Collectors.joining(","));

        String sql = String.format(template, tmpSql, pivotColumnValueSql);

        System.out.println(sql);

        return sql;

    }
}
