package com.gsy.springboot.start.ipana.serviceImpl;

import com.gsy.springboot.start.ipana.servive.IPAnalysisService;
import com.gsy.springboot.start.mapper.auto.TbKafkaTopicMapper;
import com.gsy.springboot.start.pojo.TbKafkaTopic;
import kafka.admin.AdminUtils;
import kafka.admin.RackAwareMode;
import kafka.utils.ZkUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.security.JaasUtils;
import org.apache.tomcat.util.security.MD5Encoder;
import org.springframework.beans.factory.annotation.Autowired;

import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;

/**
 * Service that registers IP-extraction subscriptions in the database and
 * pushes raw text into Kafka topics for downstream analysis.
 *
 * <p>NOTE(review): this class is not annotated with {@code @Service}/{@code @Component},
 * and {@code @Bean} only takes effect inside a {@code @Configuration} class — confirm
 * how this bean is actually wired before relying on the {@code @Autowired} producer.
 */
public class IPAnalysisServiceImpl implements IPAnalysisService {

    /** Registration lifetime: one 365-day year in milliseconds (was the literal 31536000000l). */
    private static final long ONE_YEAR_MILLIS = 365L * 24L * 60L * 60L * 1000L;

    @Autowired
    TbKafkaTopicMapper tbKafkaTopicMapper;

    /**
     * Registers a subscriber for a Kafka topic, persisting the subscription row.
     * (Method name typo "reigister" is fixed by the {@link IPAnalysisService} interface
     * contract and cannot be changed here without breaking callers.)
     *
     * @param hashMap expects keys {@code subscriberName}, {@code topic}, and optionally
     *                {@code regex}; when {@code regex} is absent a dotted-quad IPv4
     *                pattern is used as the default.
     * @return a map with {@code status} = {@code success} when exactly one row was
     *         inserted, otherwise {@code failed}
     */
    @Override
    public HashMap<String, String> reigister(HashMap<String, String> hashMap) {
        HashMap<String, String> re = new HashMap<>();
        TbKafkaTopic tbKafkaTopic = new TbKafkaTopic();
        tbKafkaTopic.setSubscriberName(hashMap.get("subscriberName"));
        tbKafkaTopic.setTopic(hashMap.get("topic"));
        // Default pattern matches IPv4 addresses with each octet in 0-255.
        tbKafkaTopic.setRegexPattern(hashMap.getOrDefault("regex",
                "((25[0-5]|2[0-4]\\d|((1\\d{2})|([1-9]?\\d)))\\.){3}(25[0-5]|2[0-4]\\d|((1\\d{2})|([1-9]?\\d)))"));
        // Subscription expires one year from now.
        tbKafkaTopic.setExpireDate(new Date(System.currentTimeMillis() + ONE_YEAR_MILLIS));
        int inserted = tbKafkaTopicMapper.insert(tbKafkaTopic);
        if (inserted == 1) {
            re.put("status", "success");
        } else {
            re.put("status", "failed");
        }
        return re;
    }

    /**
     * Broker address string injected from the {@code kafka.port} property.
     * NOTE(review): this value is used both as {@code bootstrap.servers} (Kafka brokers)
     * and as the ZooKeeper connect string in {@link #createKafaTopic} — those are
     * normally different endpoints; verify the configuration.
     */
    @Value("${kafka.port}")
    public String kafkaStr;

    @Autowired
    Producer<String, String> producer;

    /**
     * Publishes the given text to the given topic, using the text as both key and value.
     *
     * @param hashMap expects keys {@code text} and {@code topic}
     * @return the input map with {@code status} = {@code success} added; note that with
     *         {@code acks=0} the send is fire-and-forget, so "success" only means the
     *         record was handed to the producer, not that the broker accepted it
     */
    @Override
    public HashMap<String, String> pushIPText(HashMap<String, String> hashMap) {
        String text = hashMap.get("text");
        String topic = hashMap.get("topic");
        producer.send(new ProducerRecord<String, String>(topic, text, text));
        hashMap.put("status", "success");
        return hashMap;
    }

    /**
     * Builds the shared {@code String -> String} Kafka producer.
     *
     * <p>Acknowledgement modes for reference:
     * <ul>
     *   <li>{@code acks=1}: the leader must append the record to its local log before
     *       the request is considered successful; data may be lost if the leader dies.</li>
     *   <li>{@code acks=0}: the request returns immediately without waiting for any
     *       broker acknowledgement — highest throughput, no delivery guarantee.</li>
     *   <li>{@code acks=-1}/{@code all}: the leader waits until the record is written to
     *       all in-sync replicas — strongest durability, lowest throughput.</li>
     * </ul>
     */
    @Bean
    public Producer<String, String> buildProducer() {
        Properties props = new Properties();
        // Kafka cluster bootstrap address.
        props.put("bootstrap.servers", kafkaStr);
        props.put("acks", "0");
        // NOTE(review): with acks=0 the producer never receives an error, so this
        // retries setting has no effect; either raise acks or drop the retries.
        props.put("retries", 3);
        // Producer buffer size (default 32 MB).
        props.put("buffer.memory", 33554432);
        // Batch size per partition before a send is triggered (default 16 KB).
        props.put("batch.size", 16384);
        // Max linger before sending a partial batch (0 = send immediately).
        props.put("linger.ms", 1);
        // Key/value serializers for String payloads.
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<String, String>(props);
    }

    /**
     * Creates a Kafka topic via ZooKeeper-based admin utilities.
     *
     * @param topic expects key {@code topic}; optional keys {@code partiation}
     *              (partition count, default 3 — key spelling preserved for callers)
     *              and {@code replication} (replication factor, default 1)
     */
    public void createKafaTopic(HashMap<String, String> topic) {
        ZkUtils zkUtils = ZkUtils.apply(kafkaStr, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        try {
            AdminUtils.createTopic(zkUtils,
                    topic.get("topic"),
                    Integer.parseInt(topic.getOrDefault("partiation", "3")),
                    Integer.parseInt(topic.getOrDefault("replication", "1")),
                    new Properties(),
                    new RackAwareMode.Enforced$());
        } finally {
            // FIX: close in finally so the ZooKeeper connection is not leaked
            // when createTopic throws (e.g. topic already exists).
            zkUtils.close();
        }
    }

    /**
     * Deletes (marks for deletion) a Kafka topic via ZooKeeper-based admin utilities.
     *
     * @param ZkStr ZooKeeper connect string
     * @param topic expects key {@code topic}
     */
    public static void deleteKafaTopic(String ZkStr, HashMap<String, String> topic) {
        ZkUtils zkUtils = ZkUtils.apply(ZkStr, 30000, 30000, JaasUtils.isZkSecurityEnabled());
        try {
            AdminUtils.deleteTopic(zkUtils, topic.get("topic"));
        } finally {
            // FIX: close in finally so the ZooKeeper connection is not leaked on error.
            zkUtils.close();
        }
    }
}
