package com.staryea.handler;

import com.staryea.hive.HiveConnectFactory;
import com.staryea.hive.HiveDAO;
import com.staryea.nadp.util.kafka.KafkaSender;

import java.io.File;
import java.io.FileInputStream;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

/**
 * create by gonghl
 * 2018/11/14
 */
/**
 * Pages rows out of Hive and pushes them to Kafka.
 *
 * <p>Usage: call {@link #init()} once to load {@code <user.dir>/config.properties}
 * and wire up the Hive connection, Kafka sender and worker pool, then
 * {@link #startTransferData()} to walk the source table range by range
 * (driven by a numeric index column) and forward each page to Kafka in
 * batches of {@value #KAFKA_BATCH_SIZE} records.
 *
 * <p>create by gonghl 2018/11/14
 */
public class HiveToKafkaHandler {

    /** Number of records handed to {@code KafkaSender.work} per call. */
    private static final int KAFKA_BATCH_SIZE = 1000;

    private Properties properties = null;
    private KafkaSender kafkaSender = null;
    private ExecutorService executorService = null;
    private HiveDAO hiveDAO = null;

    /**
     * Loads {@code config.properties} from the working directory and
     * initializes the Hive connection factory, the Kafka sender and the
     * thread pool.
     *
     * @throws Exception if the config file cannot be read or a numeric
     *                   property (e.g. {@code maxThreadNumber}) is malformed
     */
    public void init() throws Exception {
        String path = System.getProperty("user.dir") + "/config.properties";
        properties = new Properties();
        // try-with-resources: the original leaked this FileInputStream.
        try (FileInputStream in = new FileInputStream(new File(path))) {
            properties.load(in);
        }

        HiveConnectFactory.setDriverClassName(properties.getProperty("driver"));
        HiveConnectFactory.setUrl(properties.getProperty("url"));
        HiveConnectFactory.setUser(properties.getProperty("user"));
        HiveConnectFactory.setPassword(properties.getProperty("password"));
        kafkaSender = new KafkaSender(properties.getProperty("brokers"), null, properties.getProperty("topic"));
        hiveDAO = new HiveDAO();
        int maxThreadNumber = Integer.parseInt(properties.getProperty("maxThreadNumber"));
        // NOTE(review): the pool is created but never used by the current
        // single-threaded transfer; kept so the field/interface stays stable.
        executorService = Executors.newFixedThreadPool(maxThreadNumber);
    }

    /** Entry point for the transfer; currently delegates to the single-threaded variant. */
    public void startTransferData() {
        startSingeTransferData();
    }

    /**
     * Single-threaded transfer loop. Reads paging parameters from the
     * config ({@code sql}, {@code index}, {@code total},
     * {@code queryLimitNumber}, {@code logLevel}), then repeatedly queries
     * the half-open index range {@code [start, end)} and forwards the
     * result to Kafka, printing progress after each page.
     *
     * <p>Runs at least one query even when {@code total} is 0 (do/while),
     * matching the original behavior.
     */
    public void startSingeTransferData() {
        long queryLimitNumber = Long.parseLong(properties.getProperty("queryLimitNumber"));
        String index = properties.getProperty("index");
        String sql = properties.getProperty("sql");
        long total = Long.parseLong(properties.getProperty("total"));
        String logLevel = properties.getProperty("logLevel");

        System.out.println("开始查询总数:" + total);

        // Plain cursor: this loop is single-threaded, so the former AtomicLong
        // added nothing. Arithmetic is identical to getAndAdd/get.
        long cursor = 0;
        do {
            long start = cursor;
            long end = cursor + queryLimitNumber;
            cursor = end;
            // Range predicate is appended textually; 'sql' and 'index' come from
            // the operator-controlled config and the bounds are numeric, but
            // this is still string-built SQL — keep the config trusted.
            String limitSql = sql + " and "
                    + index + " >= " + start + " and " + index + " < " + end;

            // Constant-first equals: avoids NPE when 'logLevel' is unset.
            if ("debug".equals(logLevel)) {
                System.out.println("querySql:" + limitSql);
            }
            List<Byte[]> list = hiveDAO.queryList(limitSql);

            int count = 0;
            if (list != null) {
                count = list.size();
            }
            System.out.println("listSize ---------> " + count);
            sendToKafka(list);
            float complete = end * 100f / total;
            complete = complete > 100 ? 100.00f : complete;
            System.out.println("total:" + total
                    + ",current:" + start + " ----> " + end + " complete: " + String.format("%.2f", complete) + "%");
        } while (cursor < total);
    }

    /**
     * Sends {@code list} to Kafka in slices of {@value #KAFKA_BATCH_SIZE}
     * elements. A {@code null} or empty list sends nothing.
     *
     * <p>Stride loop replaces the original {@code i * 1000} indexing, which
     * was harder to read and could overflow {@code int} for very large lists;
     * the slices produced are identical.
     *
     * @param list records to forward; may be {@code null}
     */
    public synchronized void sendToKafka(List list) {
        if (list == null) {
            return;
        }
        int size = list.size();
        for (int from = 0; from < size; from += KAFKA_BATCH_SIZE) {
            int to = Math.min(from + KAFKA_BATCH_SIZE, size);
            kafkaSender.work(list.subList(from, to));
        }
    }
}
