package cn.doitedu.api;

import beans.UserAction;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.mysql.cj.jdbc.MysqlXADataSource;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExactlyOnceOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.util.Collector;
import org.apache.flink.util.function.SerializableSupplier;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

import javax.sql.XADataSource;
import java.time.Duration;
import java.util.Map;

/**
 * @Author: 深似海
 * @Site: <a href="www.51doit.com">多易教育</a>
 * @QQ: 657270652
 * @Date: 2024/2/25
 * @Desc: Learn big data at Doit Education.
 * Counts user behavior: the number of occurrences of each event type
 * per user, and writes the running results to MySQL via the Flink
 * JDBC sink.
 **/
public class _18_JdbcSink_Demo {

    /**
     * Pipeline: Kafka JSON events -> per-user, per-event running counts
     * (kept in keyed {@code MapState}) -> MySQL upsert via the Flink JDBC sink.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Checkpoint every 5s; checkpoints drive the JDBC sink's delivery guarantees.
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");

        env.setStateBackend(new HashMapStateBackend());

        // Kafka source: value-only strings from topic "doit46",
        // resuming from committed offsets and falling back to LATEST.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("doitedu:9092")
                .setTopics("doit46")
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .setGroupId("g03")
                .setClientIdPrefix("doit-")
                .build();

        // Attach the source with a zero-lateness watermark strategy; event time is
        // taken from the "timestamp" field of each JSON record.
        DataStreamSource<String> stream = env.fromSource(
                source,
                WatermarkStrategy.
                        <String>forBoundedOutOfOrderness(Duration.ZERO).
                        withTimestampAssigner(new ActionTimeExtractor()),
                "kfk-source");

        // Parse each JSON record into a UserAction bean.
        DataStream<UserAction> mapped = stream.map(json -> JSON.parseObject(json, UserAction.class));

        // Key by user id and maintain a per-user map of eventId -> occurrence count.
        // On every input event the whole map is re-emitted, so downstream upserts
        // always see the latest count for every event of that user.
        SingleOutputStreamOperator<String> resultStream =
                mapped
                        .keyBy(UserAction::getUid)
                        .process(new KeyedProcessFunction<Integer, UserAction, String>() {

                            // eventId -> running count for the current key (user).
                            MapState<String, Integer> mapState;

                            @Override
                            public void open(Configuration parameters) throws Exception {
                                mapState = getRuntimeContext().getMapState(new MapStateDescriptor<String, Integer>("event_cnt", String.class, Integer.class));
                            }

                            @Override
                            public void processElement(UserAction userAction, KeyedProcessFunction<Integer, UserAction, String>.Context ctx, Collector<String> out) throws Exception {

                                String eventId = userAction.getEvent_id();

                                // Increment the count for this event (first occurrence -> 1).
                                Integer oldValue = mapState.get(eventId);
                                int newValue = oldValue == null ? 1 : oldValue + 1;
                                mapState.put(eventId, newValue);

                                // Emit the full, updated count map for this user as JSON rows.
                                for (Map.Entry<String, Integer> entry : mapState.entries()) {
                                    String eid = entry.getKey();
                                    Integer cnt = entry.getValue();

                                    JSONObject resObject = new JSONObject();
                                    resObject.put("user_id", userAction.getUid());
                                    resObject.put("event_id", eid);
                                    resObject.put("event_cnt", cnt);

                                    out.collect(resObject.toJSONString());
                                }

                            }
                        });

        // At-least-once JDBC sink: upsert one row per (user_id, event_id).
        // The 4th parameter repeats event_cnt for the ON DUPLICATE KEY UPDATE clause.
        SinkFunction<String> sink = JdbcSink.<String>sink(
                "insert into user_action_count (user_id, event_id, event_cnt) values (?, ?, ? ) on duplicate key update event_cnt = ? ",
                (statement, json) -> {
                    JSONObject jsonObject = JSON.parseObject(json);

                    statement.setLong(1, jsonObject.getIntValue("user_id"));
                    statement.setString(2, jsonObject.getString("event_id"));
                    statement.setLong(3, jsonObject.getLongValue("event_cnt"));
                    statement.setLong(4, jsonObject.getLongValue("event_cnt"));
                },
                JdbcExecutionOptions.builder()
                        .withBatchSize(1000)
                        .withBatchIntervalMs(200)
                        .withMaxRetries(5)
                        .build(),
                new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                        .withUrl("jdbc:mysql://doitedu:3306/doit46")
                        .withUsername("root")
                        .withPassword("root")
                        .build()
        );

        // Alternative: exactly-once JDBC sink based on XA transactions.
        // NOTE(review): built for demonstration only — it is NOT attached to any
        // stream below; swap it for `sink` in addSink(...) to actually use it.
        SinkFunction<String> exactlyOnceSink = JdbcSink.<String>exactlyOnceSink(
                "insert into user_action_count (user_id, event_id, event_cnt) values (?, ?, ? ) on duplicate key update event_cnt = ? ",
                (statement, json) -> {
                    JSONObject jsonObject = JSON.parseObject(json);

                    statement.setLong(1, jsonObject.getIntValue("user_id"));
                    statement.setString(2, jsonObject.getString("event_id"));
                    statement.setLong(3, jsonObject.getLongValue("event_cnt"));
                    statement.setLong(4, jsonObject.getLongValue("event_cnt"));
                },
                JdbcExecutionOptions.builder()
                        .withBatchSize(1000)
                        .withBatchIntervalMs(200)
                        .withMaxRetries(5)
                        .build(),
                JdbcExactlyOnceOptions.builder()
                        // MySQL supports only ONE concurrent XA transaction per
                        // connection, so Flink requires true here (was false,
                        // which fails at runtime against MySQL).
                        .withTransactionPerConnection(true)
                        .build(),
                new SerializableSupplier<XADataSource>() {
                    @Override
                    public XADataSource get() {
                        MysqlXADataSource xaDataSource = new MysqlXADataSource();
                        xaDataSource.setUrl("jdbc:mysql://doit01:3306/abc");
                        xaDataSource.setUser("root");
                        xaDataSource.setPassword("ABC123.abc123");
                        return xaDataSource;
                    }
                }
        );

        resultStream.addSink(sink);

        env.execute();

    }
}


/**
 * Assigns event time from the {@code "timestamp"} field of a
 * JSON-encoded user-action record.
 */
class ActionTimeExtractor implements SerializableTimestampAssigner<String> {

    @Override
    public long extractTimestamp(String json, long recordTimestamp) {
        // Ignore the record's native timestamp; trust the payload's own field.
        return JSON.parseObject(json).getLongValue("timestamp");
    }
}

