package kafka;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import pros.propties;
import testjson.phoenix;


import java.io.*;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;

public class TopicPartitionThread extends Thread {

    private static final Logger logger = LoggerFactory.getLogger(TopicPartitionThread.class);

    /** Path of the external configuration file; falls back to the classpath copy when absent. */
    private static final String confPath =
            System.getProperty("user.dir") + File.separator + "conf/0327.properties";

    /** Pool that processes each Kafka record off the consumer thread. */
    private ExecutorService workerExecutorService;

    /** Limits how many records may be in flight in the worker pool at once. */
    private Semaphore semaphore;

    /** Offsets of records handed to workers; committed in onPartitionsRevoked. */
    private Map<TopicPartition, OffsetAndMetadata> offsetsMap = new HashMap<>();

    /** Futures of the worker tasks submitted for the current poll batch. */
    private List<Future<String>> taskList = new ArrayList<>();

    public TopicPartitionThread(ExecutorService workerExecutorService, Semaphore semaphore) {
        this.workerExecutorService = workerExecutorService;
        this.semaphore = semaphore;
    }

    /**
     * Loads the configuration, preferring the external file under {@code conf/}
     * and falling back to the bundled classpath resource "0327.properties".
     * <p>Fix: the original leaked both input streams (never closed) and could
     * NPE when the classpath resource was missing.
     *
     * @return the loaded properties; empty if neither source could be read
     */
    private static Properties properties() {
        Properties properties = new Properties();
        File file = new File(confPath);
        if (!file.exists()) {
            // External file absent — fall back to the copy packaged on the classpath.
            try (InputStream in =
                         TopicPartitionThread.class.getClassLoader().getResourceAsStream("0327.properties")) {
                if (in != null) {
                    properties.load(in);
                } else {
                    logger.error("0327.properties not found on classpath");
                }
            } catch (IOException e) {
                logger.error("Failed to load classpath 0327.properties", e);
            }
        } else {
            // try-with-resources closes the stream (the original leaked it).
            try (InputStream in = new FileInputStream(confPath)) {
                properties.load(in);
            } catch (IOException e) {
                logger.error("Failed to load " + confPath, e);
            }
        }
        return properties;
    }

    /**
     * Persists the given offset map to {@code filepaths}, one serialized
     * {@code offsett} entry after another (file is truncated on every call).
     * <p>Fix: the original closed the writer INSIDE the loop, so only the first
     * entry was ever written and later iterations failed on a closed stream; it
     * also leaked the writer on exception.
     *
     * @param maps      partition -&gt; offset snapshot to persist
     * @param filepaths destination file path
     */
    public static void savefile(Map<TopicPartition, OffsetAndMetadata> maps, String filepaths) {
        offsett off = new offsett();
        try (OutputStreamWriter writer =
                     new OutputStreamWriter(new FileOutputStream(new File(filepaths)), "UTF-8")) {
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : maps.entrySet()) {
                off.setTopicPartition(entry.getKey());
                off.setOffsetAndMetadata(entry.getValue());
                writer.write(off.toString());
            }
            writer.flush();
        } catch (IOException e) {
            logger.error("savefile failed for " + filepaths, e);
        }
    }

    @Override
    public void run() {

        // Kafka / ZooKeeper connection settings from the properties file.
        Properties properties = properties();
        String brokers = properties.getProperty("kafka.brokers");
        String topics = properties.getProperty("kafka.topics");
        String zookeeperserver = properties.getProperty("zookeeperlist.server");
        String zookeeperport = properties.getProperty("zookeeperlist.port");
        logger.info("kafka.brokers:" + brokers);
        logger.info("kafka.topics:" + topics);
        logger.info("zookeeperlist.server:" + zookeeperserver);
        logger.info("zookeeperlist.port:" + zookeeperport);
        if (StringUtils.isEmpty(brokers) || StringUtils.isEmpty(topics)) {
            logger.error("Kafka brokers/topics not configured, exiting.");
            System.exit(0);
        }

        // Consumer configuration comes from the project-level propties helper.
        propties pro = new propties();
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pro.properties1());
        consumer.subscribe(Arrays.asList(topics),
                new ConsumerRebalanceListener() {

                    @Override
                    public void onPartitionsRevoked(
                            Collection<TopicPartition> partitions) {
                        logger.info("threadId = {}, onPartitionsRevoked.", Thread.currentThread().getId());
                        // Commit whatever we have fully processed before the
                        // partitions are handed to another consumer.
                        consumer.commitSync(offsetsMap);
                    }

                    @Override
                    public void onPartitionsAssigned(
                            Collection<TopicPartition> partitions) {
                        logger.info("threadId = {}, onPartitionsAssigned.", Thread.currentThread().getId());

                        // NOTE(review): this deletes "a.txt" in the working dir,
                        // while savefile() below writes "/root/a.txt" — confirm
                        // which path is actually intended.
                        try {
                            File file = new File("a.txt");
                            if (file.delete()) {
                                logger.info("{} deleted.", file.getName());
                            } else {
                                logger.warn("failed to delete {}", file.getName());
                            }
                        } catch (Exception e) {
                            logger.error("error deleting offset file", e);
                        }

                        // Fresh assignment: forget stale offsets and pending tasks.
                        offsetsMap.clear();
                        taskList.clear();
                    }
                });

        // Writes each received record into the Phoenix table.
        phoenix phoenixs = new phoenix();

        // Consume until the shared shutdown flag is cleared.
        while (Cache.getInstance().isKafkaThreadStatus()) {
            try {
                // 100 ms poll timeout.
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (final ConsumerRecord<String, String> record : records) {
                    // Throttle: a permit per in-flight record; released by WorkThread.
                    semaphore.acquire();
                    logger.debug("received record: {}", record.value());
                    phoenixs.JSONphoenix(record.value());

                    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());

                    // Commit record.offset() + 1: Kafka's committed offset is the
                    // NEXT offset to consume. The original stored record.offset(),
                    // which re-reads the last record after every rebalance.
                    OffsetAndMetadata offset = new OffsetAndMetadata(record.offset() + 1);

                    offsetsMap.put(topicPartition, offset);
                    savefile(offsetsMap, "/root/a.txt");

                    // Hand the record to the worker pool for processing.
                    taskList.add(workerExecutorService.submit(new WorkThread(record.topic(), record.value(), semaphore)));
                }

                // Block until every worker of this batch has finished.
                for (Future<String> task : taskList) {
                    task.get();
                }

                // Synchronously commit this batch's offsets to the Kafka cluster.
                consumer.commitSync();
            } catch (InterruptedException e) {
                // Preserve the interrupt status instead of swallowing it.
                Thread.currentThread().interrupt();
                logger.error("TopicPartitionThread interrupted.", e);
            } catch (Exception e) {
                logger.error("TopicPartitionThread run error.", e);
            } finally {
                // Reset the batch's task list for the next poll.
                taskList.clear();
            }
        }

        // Shutdown requested: release the consumer's connections.
        consumer.close();
    }
}
