package com.demo;


import com.demo.constant.EnvConstant;
import com.demo.constant.SqlConstant;
import com.demo.mapper.NewUserRedisMapper;
import com.demo.model.NewUser;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class MainDs {

    /**
     * Entry point for the streaming statistics job: registers dimension/Kafka/sink
     * tables, runs the per-OS / per-channel DUA and fee aggregations, and tracks
     * new users via a Redis HyperLogLog filter + sink.
     *
     * @param args 0:zkQuorum 1:znodeParent 2:bootstrap.servers 3:group.id 4:topic 5: redisHost
     *             (arg 5 is expected in "host:port" form)
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a clear message instead of an opaque
        // ArrayIndexOutOfBoundsException deep in the setup code.
        if (args.length < 6) {
            throw new IllegalArgumentException(
                "Expected 6 args: zkQuorum znodeParent bootstrap.servers group.id topic redisHost:port, got "
                    + args.length);
        }

        EnvironmentSettings envSettings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        CheckpointConfig config = env.getCheckpointConfig();
        env.enableCheckpointing(60000);
        // Keep checkpoints on cancellation so the job can be restored manually.
        config.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setStateBackend(new RocksDBStateBackend(EnvConstant.HDFS_PATH));
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, envSettings);

        tableEnv.createTemporaryFunction("time_format", new TimeFormatFunction());

        // Register the dimension table (backed by zkQuorum/znodeParent).
        TableRegister.registerDim(tableEnv, args[0], args[1]);
        // Register the Kafka source table.
        TableRegister.registerKafkaTable(tableEnv, args[2], args[3], args[4]);

        // Register the output (sink) tables.
        tableEnv.executeSql(SqlConstant.duaByOsDDL);
        tableEnv.executeSql(SqlConstant.duaByChannelDDL);
        tableEnv.executeSql(SqlConstant.feeByOsDDL);
        tableEnv.executeSql(SqlConstant.feeByChannelDDL);

        // DUA by platform (OS)
        tableEnv.executeSql(SqlConstant.duaByOs);

        // DUA by channel
        tableEnv.executeSql(SqlConstant.duaByChannel);

        // Fee by platform (OS)
        tableEnv.executeSql(SqlConstant.feeByOs);

        // Fee by channel
        tableEnv.executeSql(SqlConstant.feeByChannel);

        // Create the login-user view, used to count newly-added users.
        Table userLoginTable = tableEnv.sqlQuery(SqlConstant.loginUserSelect);
        tableEnv.createTemporaryView("loginUser", userLoginTable);

        // Build the Redis connection config from "host:port" (split only once).
        String[] redisAddress = args[5].split(":");
        String redisHost = redisAddress[0];
        int redisPort = Integer.parseInt(redisAddress[1]);
        FlinkJedisPoolConfig conf = new FlinkJedisPoolConfig.Builder()
            .setHost(redisHost)
            .setPort(redisPort)
            .build();

        // New users per game, broken down by these dimensions per day
        // (create_time is a date). Redis key: HLL_time_pubid_appid_channelid
        Table newUserTable = tableEnv.sqlQuery("SELECT 'BYNOMAL_' || CONCAT_WS('_', create_time, pub_id ,app_id, channel_id) as dist_key, user_id FROM loginUser");

        DataStream<NewUser> newUserDataStream = tableEnv.toAppendStream(newUserTable, NewUser.class)
            // Filter out returning users: PFADD the current user into the pub-level HLL;
            // a result of 0 means the user already exists and is dropped, otherwise the
            // record flows on to the Redis sink's per-dimension HLLs.
            .filter(new FilterOldUserFunction(redisHost, redisPort));
        newUserDataStream.print();

        newUserDataStream.addSink(new RedisSink<NewUser>(conf, new NewUserRedisMapper()));
        env.execute();
    }
}