package com.doit.day02;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.sql.*;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;

/**
 * Reads CSV-formatted user records ("id,name,age,gender", e.g. "1,zss,18,male")
 * from the Kafka topic "mysql" and inserts each one as a row into the MySQL
 * table user_info (id, name, age, gender).
 *
 * Offsets are NOT committed to Kafka. Instead the last processed offset is
 * stored in a MySQL table tp_offset keyed by (group_id, topic, partition),
 * written in the SAME local transaction as the data row. Committing the row
 * and its offset atomically yields exactly-once delivery into MySQL; on
 * rebalance the consumer seeks back to the offset recorded in tp_offset.
 */
public class _01_kafka2mysql {
    private static final String HOST = "linux01:9092,linux02:9092,linux03:9092";
    private static final String GROUP_ID = "G02";

    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "root", "123456");
        PreparedStatement pps = conn.prepareStatement("insert into user_info values(?,?,?,?)");
        PreparedStatement offset_pps = conn.prepareStatement("insert into tp_offset values (?,?,?,?) on DUPLICATE key update `offset` = ?;");
        PreparedStatement get_offset = conn.prepareStatement("select `offset` from tp_offset where group_id = ? and `topic` = ? and `partition` = ?");
        // Manual transaction control: the data row and its offset must commit
        // (or roll back) together.
        conn.setAutoCommit(false);

        Properties props = new Properties();
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID);
        props.setProperty(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "true");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Offsets live in MySQL, so Kafka's own auto-commit must stay off.
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("mysql"), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Nothing to do: the offset is persisted per record, inside the
                // same transaction as the data, so there is no pending state.
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // Resume each newly assigned partition from the offset stored in MySQL.
                for (TopicPartition tp : partitions) {
                    try {
                        get_offset.setString(1, GROUP_ID);
                        get_offset.setString(2, tp.topic());
                        get_offset.setInt(3, tp.partition());
                        // try-with-resources: the ResultSet was leaked in the original.
                        try (ResultSet rs = get_offset.executeQuery()) {
                            if (rs.next()) {
                                // Stored value is the last PROCESSED offset, so
                                // resume one past it.
                                consumer.seek(tp, rs.getLong("offset") + 1);
                            } else {
                                // No history for this partition yet: start at the beginning.
                                consumer.seek(tp, 0);
                            }
                        }
                    } catch (SQLException e) {
                        // Failing to position correctly would silently reprocess or
                        // skip data, so abort the rebalance instead of swallowing
                        // the error (the original only printStackTrace'd it).
                        throw new RuntimeException("failed to restore offset for " + tp, e);
                    }
                }
            }
        });

        while (true) {
            ConsumerRecords<String, String> batch = consumer.poll(Duration.ofMillis(Integer.MAX_VALUE));

            // Process partition by partition so a failure in one partition can
            // rewind just that partition without dropping records of the others.
            for (TopicPartition tp : batch.partitions()) {
                for (ConsumerRecord<String, String> record : batch.records(tp)) {
                    try {
                        // Record format: id,name,age,gender
                        String[] arr = record.value().split(",");
                        pps.setInt(1, Integer.parseInt(arr[0]));
                        pps.setString(2, arr[1]);
                        pps.setInt(3, Integer.parseInt(arr[2]));
                        pps.setString(4, arr[3]);
                        pps.execute();

                        if ("4".equals(arr[0])) {
                            // Deliberate demo failure to exercise the rollback path.
                            throw new Exception("老子自己造的异常，来打我呀！！！");
                        }

                        // Upsert the consumed offset in the same transaction as the row.
                        offset_pps.setString(1, GROUP_ID);
                        offset_pps.setString(2, record.topic());
                        offset_pps.setInt(3, record.partition());
                        offset_pps.setLong(4, record.offset());
                        offset_pps.setLong(5, record.offset());
                        offset_pps.execute();

                        // Atomically commit row + offset.
                        conn.commit();
                    } catch (Exception e) {
                        // Roll back the partial transaction.
                        conn.rollback();
                        // BUG FIX: the consumer's in-memory position has already
                        // advanced past this record; without seeking back, the
                        // failed record and the rest of this partition's batch
                        // would be silently skipped after the rollback.
                        consumer.seek(tp, record.offset());
                        System.out.println(e);
                        break; // re-poll this partition starting at the failed offset
                    }
                }
            }
        }
    }
}
