package com.swsc.ai.tasks;

import com.swsc.ai.config.SparkSqlConf;
import com.swsc.ai.enums.TFEnum;
import com.swsc.ai.taskfactory.Task;
import com.swsc.ai.util.HDFSUtil;
import com.swsc.ai.util.SparkUtil;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.sql.SQLException;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.apache.spark.sql.functions.*;
import static org.apache.spark.sql.functions.col;

/**
 * Template task that computes daily user preference scores.
 *
 * <p>Pipeline per run: load yesterday's history and apply exponential time
 * decay, build the action and asset views (the action view is supplied by the
 * concrete subclass), merge today's actions into the decayed scores, and
 * finally persist the result to HDFS as CSV. The history is saved even when
 * the merge step fails, so every day has a snapshot.
 */
public abstract class UserPreferenceTemplateTask implements Task {

    /** Compact day format used by the "dt" config parameter and HDFS paths. DateTimeFormatter is thread-safe, so it is cached. */
    private static final DateTimeFormatter DAY_FORMAT = DateTimeFormatter.ofPattern("yyyyMMdd");
    /** Dashed day format expected by the {@code buy_date} column of the action view. */
    private static final DateTimeFormatter DASHED_DAY_FORMAT = DateTimeFormatter.ofPattern("yyyy-MM-dd");
    /**
     * Separator for the composite (user_id, tag, tag_type) lookup key.
     * Plain concatenation is ambiguous ("a"+"bc" collides with "ab"+"c"),
     * so an unlikely control character is inserted between the parts.
     */
    private static final String KEY_SEPARATOR = "\u0001";

    /**
     * Schema of a preference-history row as stored on HDFS (CSV layout):
     * user_id, tag, tag_type, tar_value (score), cal_date.
     */
    private static StructType historySchema() {
        return new StructType(new StructField[]{
                new StructField("user_id", DataTypes.StringType, false, Metadata.empty()),
                new StructField("tag", DataTypes.StringType, false, Metadata.empty()),
                new StructField("tag_type", DataTypes.StringType, false, Metadata.empty()),
                new StructField("tar_value", DataTypes.DoubleType, false, Metadata.empty()),
                new StructField("cal_date", DataTypes.StringType, false, Metadata.empty())
        });
    }

    /**
     * Runs the full daily pipeline.
     *
     * @param configMap task configuration; recognized keys: isTest, className,
     *                  dt (yyyyMMdd, defaults to today), projectName, taskName,
     *                  subTaskName, wideTableSubTaskName, actionSubTaskName
     * @throws RuntimeException wrapping any failure of the merge steps; the
     *                          decayed history is still persisted in that case
     */
    @Override
    public void run(Map<String, String> configMap) throws RuntimeException {
        String isTest = configMap.getOrDefault("isTest", TFEnum.FALSE.getName());
        SparkSession session = SparkUtil.getSession(isTest, configMap.get("className"));
        String taskDate = configMap.getOrDefault("dt", LocalDate.now().format(DAY_FORMAT));
        String projectName = configMap.getOrDefault("projectName", "test");
        String taskName = configMap.getOrDefault("taskName", "test");
        String subTaskName = configMap.getOrDefault("subTaskName", "test");
        // HDFS path of the wide-table data
        String wideTablePath = configMap.getOrDefault("wideTableSubTaskName", "test");
        // HDFS path of the trade/action data
        String actionPath = configMap.getOrDefault("actionSubTaskName", "test");

        // HDFS path for today's result
        String outputPath = HDFSUtil.getOutHDFSPath(projectName, taskName, subTaskName, taskDate);
        // HDFS path of yesterday's history snapshot
        String yesterday = LocalDate.parse(taskDate, DAY_FORMAT).minusDays(1).format(DAY_FORMAT);
        String historyPath = HDFSUtil.getOutHDFSPath(projectName, taskName, subTaskName, yesterday);

        Dataset<Row> result = decayHistory(session, historyPath);
        try {
            createAction(session, actionPath, wideTablePath);
            createAsset(session);
            result = mergeActionPreference(session, taskDate);
        } catch (Exception e) {
            // Wrap with the original exception as cause so the stack trace is
            // preserved (previously only e.getMessage() survived).
            throw new RuntimeException("user preference task failed for dt=" + taskDate, e);
        } finally {
            // History must be persisted every day, even when the merge fails.
            saveHistory(session, result, outputPath);
        }
    }

    /**
     * Creates the user action data view ({@code user_action_data}) that
     * {@link #mergeActionPreference(SparkSession, String)} reads. Expected
     * columns (per the merge SQL): user_id, tag, tag_type, amount, buy_date.
     *
     * @param session       active Spark session
     * @param actionPath    HDFS path of the trade/action data
     * @param wideTablePath HDFS path of the wide-table data
     */
    public abstract void createAction(SparkSession session, String actionPath, String wideTablePath) throws RuntimeException;

    /**
     * Loads yesterday's history and applies exponential time decay, exposing
     * the result as the {@code user_score_history} temp view.
     *
     * <p>When yesterday's file is missing (e.g. first run) an empty dataset
     * with the history schema is used instead.
     *
     * @param session        active Spark session
     * @param historyYesPath HDFS path of yesterday's history CSV
     * @return the decayed history dataset
     */
    public Dataset<Row> decayHistory(SparkSession session, String historyYesPath) {
        Dataset<Row> history;
        StructType schema = historySchema();
        try {
            history = SparkSqlConf.getDataByCVS(session, "user_id STRING, tag STRING, tag_type STRING, tar_value DOUBLE, cal_date STRING", historyYesPath);
        } catch (Exception e) {
            // No history yet (first run or missing partition): start empty.
            history = session.createDataset(Collections.emptyList(), Encoders.row(schema));
        }
        history.createOrReplaceTempView("user_score_history");
        // Apply time decay to yesterday's scores: value * e^(-0.01 * 1 day)
        Dataset<Row> historyDecay = session.sql("select user_id,tag,tag_type,(tar_value * EXP(- 0.01 * 1))tar_value,cal_date from user_score_history");
        historyDecay.show(); // debug output; triggers an extra Spark job
        historyDecay.createOrReplaceTempView("user_score_history");
        return historyDecay;
    }

    /**
     * Creates the {@code user_asset} temp view with each user's total asset
     * peak, used to normalize purchase amounts in the merge step.
     */
    public void createAsset(SparkSession session) throws SQLException {
        Dataset<Row> dfAsset = SparkSqlConf.getDataByPartition(session, "dws_cust_portrait_ast_info", "", "client_id AS user_id,total_ast_peak AS asset", "where 1=1");
        dfAsset.createOrReplaceTempView("user_asset");
    }

    /**
     * Merges today's actions into the decayed history scores.
     *
     * <p>New score = decayed history score + (amount / total asset) weighted
     * by the inverse popularity of the tag. Tags absent from history get a
     * default weight of 1.
     *
     * @param session active Spark session
     * @param dt      task date in yyyyMMdd form; it is parsed and reformatted
     *                before being embedded in the SQL, so arbitrary strings
     *                cannot be injected
     * @return the merged preference dataset with a cal_date column appended
     */
    public Dataset<Row> mergeActionPreference(SparkSession session, String dt) {
        String taskDate = LocalDate.parse(dt, DAY_FORMAT).format(DASHED_DAY_FORMAT);
        // Execute the merge query with Spark SQL.
        Dataset<Row> sqlResult = session.sql("SELECT\n" +
                "   ust.user_id,\n" +
                "   ust.tag,\n" +
                "   ust.tag_type,\n" +
                "   any_value (IFNULL(usht.tar_value,0)) + sum(ust.amount) / any_value(uat.asset) * IFNULL(any_value(t1.count), 1) tar_value\n" +
                "FROM\n" +
                " user_action_data ust\n" +
                "LEFT JOIN (\n" +
                "   SELECT\n" +
                "      tag,\n" +
                "      any_value(tar_value) AS tar_value,\n" +
                "      tag_type,\n" +
                "      (\n" +
                "         SELECT\n" +
                "            COUNT(*)\n" +
                "         FROM\n" +
                "            user_score_history\n" +
                "      ) / COUNT(*) count\n" +
                "   FROM\n" +
                "      user_score_history\n" +
                "   GROUP BY\n" +
                "      tag_type,\n" +
                "      tag\n" +
                ") t1 ON t1.tag_type = ust.tag_type\n" +
                "AND t1.tag = ust.tag\n" +
                "LEFT JOIN user_score_history usht ON usht.tag = ust.tag\n" +
                "AND usht.tag_type = ust.tag_type\n" +
                "AND usht.user_id = ust.user_id\n" +
                "LEFT JOIN user_asset uat ON uat.user_id = ust.user_id\n" +
                "WHERE\n" +
                "   ust.buy_date = '" + taskDate + "'\n" +
                "GROUP BY\n" +
                "   ust.user_id,\n" +
                "   ust.tag,\n" +
                "   ust.tag_type\n");
        // Stamp the calculation date and zero out scores for null tags.
        Dataset<Row> rowDataset = sqlResult.withColumn("cal_date", lit(dt)).withColumn("tar_value", when(col("tag").isNull(), 0).otherwise(col("tar_value")));
        rowDataset.show(); // debug output; triggers an extra Spark job
        return rowDataset;
    }

    /**
     * Persists the merged scores to HDFS, keeping decayed history rows for
     * (user, tag, tag_type) combinations that had no activity today.
     *
     * @param session     active Spark session
     * @param result      today's merged preference rows; may be null when the
     *                    merge failed, in which case only decayed history is saved
     * @param historyPath HDFS output path for today's snapshot
     */
    public void saveHistory(SparkSession session, Dataset<Row> result, String historyPath) {
        Dataset<Row> data = session.sql("select user_id,tag,tag_type,tar_value,cal_date from user_score_history");
        StructType schema = historySchema();
        if (result == null) {
            result = session.createDataset(Collections.emptyList(), Encoders.row(schema));
        }
        // Index today's rows by (user_id, tag, tag_type). A separator prevents
        // key collisions that plain concatenation would allow.
        // NOTE(review): the result set is collected to the driver; assumes it
        // fits in driver memory — confirm expected row counts.
        List<Row> rows = result.collectAsList();
        Function<Row, String> function = row -> row.getString(0) + KEY_SEPARATOR + row.getString(1) + KEY_SEPARATOR + row.getString(2);
        Map<String, List<Row>> collect = rows.stream().collect(Collectors.groupingBy(function));
        Dataset<Row> map = data.map((MapFunction<Row, Row>) row -> {
            String userId = row.getString(0);
            String tag = row.getString(1);
            double tarValue = row.getDouble(3);
            String tagType = row.getString(2);
            String calDate = row.getString(4);
            List<Row> rows1 = collect.get(userId + KEY_SEPARATOR + tag + KEY_SEPARATOR + tagType);
            if (rows1 != null) {
                // The key exists in today's result: prefer the freshly computed row.
                Row first = rows1.get(0);
                return RowFactory.create(first.getString(0), first.getString(1), first.getString(2), first.getDouble(3), first.getString(4));
            } else {
                // No activity today: keep the decayed history row.
                return RowFactory.create(userId, tag, tagType, tarValue, calDate);
            }
        }, Encoders.row(schema));
        Dataset<Row> rowDataset = map.union(result).distinct();
        // mode must be set on the writer; passing it as a CSV option is
        // silently ignored and re-runs fail with "path already exists".
        rowDataset.coalesce(1).write()
                .format("csv")
                .option("header", "true")
                .mode("overwrite")
                .save(historyPath);
    }
}
