import com.feifan.api.model.UmsLog;
import com.feifan.common.GsonUtil;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * @program: FinalProject
 * @author: Xiaofan
 * @createTime: 2021-08-13 15:15
 * @description: Streams access-log records from Kafka, computes the distinct-user
 * (UV) count per day and log type using Flink keyed state, and writes the running
 * counts into Redis.
 **/
public class UVReceiver {
    /**
     * Entry point: consumes comma-separated access-log lines from Kafka, rebuilds
     * each line into a {@link UmsLog} by pairing CSV columns with the class's
     * declared fields (positional mapping via reflection), counts distinct users
     * (UV) per (updateTime, logType) key with Flink keyed state, and writes the
     * running count to Redis under the key {@code <updateTime>_<logType>}.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        String topic = "fuck";   // Kafka topic carrying the raw log lines
        String host = "master";  // host running both the Kafka broker and Redis
        int port = 9092;         // Kafka broker port
        int database_id = 14;    // Redis logical database for the UV counters

        final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.enableCheckpointing(TimeUnit.MINUTES.toMillis(1)); // checkpoint once per minute
        environment.setParallelism(7);

        CheckpointConfig checkpointConfig = environment.getCheckpointConfig();
        // Exactly-once checkpoints, retained on cancellation so the job can be
        // resumed from the last checkpoint instead of losing the keyed state.
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, host + ":" + port);
        // A group id is required so the consumer can resume from committed offsets.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "app-uv-stat");
        FlinkKafkaConsumerBase<String> kafkaConsumer = new FlinkKafkaConsumer<String>(topic, new SimpleStringSchema(), properties)
                .setStartFromGroupOffsets();

        // Redis connection settings for the sink.
        FlinkJedisPoolConfig config = new FlinkJedisPoolConfig.Builder().setDatabase(database_id).setHost(host).build();

        environment.addSource(kafkaConsumer).map(c -> {
            // Sample input line:
            // "20783","2021-01-06","2021-01-06","37","192.168.1.199","查询订单","{'pageNum':['1']&&'pageSize':['10']}","GET","/order/list","80","17"
            // Rebuild the CSV row as a quasi-JSON object by pairing each value with
            // the corresponding declared field of UmsLog. NOTE(review): this is a
            // positional mapping — the CSV column order must match the field
            // declaration order of UmsLog; Gson parses the result leniently.
            StringBuilder sb = new StringBuilder(1000);
            try {
                List<String> list = Arrays.asList(c.split(","));
                sb.append("{");
                Field[] fields = UmsLog.class.getDeclaredFields();

                // Bound by both sizes: rows with more columns than UmsLog has
                // fields previously raised ArrayIndexOutOfBoundsException.
                int n = Math.min(list.size(), fields.length);
                for (int i = 0; i < n; i++) {
                    // e.g. produces {id:"54",createBy:"system",...,logType:"5"}
                    sb.append(fields[i].getName() + ":" + list.get(i) + ",");
                }

                if (n > 0) {
                    sb.deleteCharAt(sb.lastIndexOf(",")); // drop the trailing comma
                }
                sb.append("}");
            } catch (Exception e) {
                // Best-effort parse: report and fall through with whatever was built.
                e.printStackTrace();
            }
            // Convert the assembled string into a UmsLog object.
            return GsonUtil.fromJson(sb.toString(), UmsLog.class);

        }).keyBy("updateTime", "logType") // one UV counter per (date, log type)
                .map(new RichMapFunction<UmsLog, Tuple2<String, Long>>() {

                    // Per-key set of user ids already counted; the Boolean value is
                    // unused — only key membership matters.
                    private MapState<String, Boolean> userIdState;

                    // Per-key running distinct-user count.
                    private ValueState<Long> uvState;

                    @Override
                    public Tuple2<String, Long> map(UmsLog umsLog) throws Exception {
                        if (uvState.value() == null) {
                            uvState.update(0L);
                        }
                        // First visit by this user for the current (date, type):
                        // remember the user and bump the counter.
                        if (!userIdState.contains(umsLog.getUserid().toString())) {
                            // Store TRUE rather than null: Flink's BooleanSerializer
                            // throws NullPointerException on null values with
                            // serializing state backends (e.g. RocksDB).
                            userIdState.put(umsLog.getUserid().toString(), Boolean.TRUE);
                            uvState.update(uvState.value() + 1);
                        }
                        // Redis key format: <updateTime>_<logType>
                        String redisKey = umsLog.getUpdateTime() + "_" + umsLog.getLogType();
                        System.out.println(redisKey + "   :::   " + uvState.value());
                        System.out.println("由" + umsLog.getUserid() + "号用户，在" + umsLog.getUpdateTime() +
                                "时间操作了地址【" + umsLog.getRequestUrl() + "】1次！" + "；数据库的序号是：" + umsLog.getId());
                        System.out.println("【" + umsLog.getRequestUrl() + "】，共操作了" + uvState.value() + "次！");


                        return Tuple2.of(redisKey, uvState.value());
                    }

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        super.open(parameters);
                        // Register the keyed states; Flink restores their contents
                        // from the latest checkpoint on recovery.
                        userIdState = getRuntimeContext().getMapState(
                                new MapStateDescriptor<>("userIdState",
                                TypeInformation.of(new TypeHint<String>() {
                                }),
                                TypeInformation.of(new TypeHint<Boolean>() {
                                })));
                        uvState = getRuntimeContext().getState(
                                new ValueStateDescriptor<Long>("uvState",
                                        TypeInformation.of(new TypeHint<Long>() {})));

                    }
                }).addSink(new RedisSink<>(config, new RedisSetSink()));
        try {
            environment.execute("Redis Set UV Stat");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Maps a (redisKey, uvCount) tuple onto a Redis SET command: the tuple's
     * first field becomes the Redis key and the stringified count the value.
     */
    public static class RedisSetSink implements RedisMapper<Tuple2<String, Long>> {

        /** Plain SET — each key simply holds the latest UV count. */
        @Override
        public RedisCommandDescription getCommandDescription() {
            return new RedisCommandDescription(RedisCommand.SET);
        }

        /** Redis key, formatted upstream as {@code <updateTime>_<logType>}. */
        @Override
        public String getKeyFromData(Tuple2<String, Long> record) {
            return record.f0;
        }

        /** Redis value: the running distinct-user count rendered as a string. */
        @Override
        public String getValueFromData(Tuple2<String, Long> record) {
            return record.f1.toString();
        }
    }
}
