package com.zshield.consumer;

import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.zshield.config.KafkaConfig;
import com.zshield.run.KafkaPreCompute;
import com.zshield.util.ESclient;
import com.zshield.util.Etime;
import com.zshield.util.KafkaClusterStatus;
import com.zshield.util.MarkUpgrade;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.log4j.Logger;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

import java.time.LocalDateTime;
import java.util.Collections;
import java.util.Map;

/**
 * Kafka consumer thread: drains records from the pre-compute topic, converts each
 * record into an ES upsert, and flushes them to Elasticsearch in bulk batches
 * (flushed when either {@code KafkaConfig.BULK_SIZE} or {@code KafkaConfig.BULK_INTERVAL}
 * is reached). Offsets are committed only after a successful bulk, so a crash
 * before commit re-consumes rather than drops records.
 */
public class Consumer implements Runnable {
    private JsonParser jp;
    private RestHighLevelClient highClient;
    private KafkaConsumer<String, String> consumer;
    // Guards the one-time index-template installation; static so only the first
    // Consumer constructed performs it. NOTE(review): not synchronized — two
    // Consumers built concurrently could both enter the template loop. The PUT
    // is idempotent so this is harmless, but confirm construction is single-threaded.
    private static boolean is_es_init = false;
    private static Logger logger = Logger.getLogger(Consumer.class);

    public Consumer() {
        jp = new JsonParser();
        consumer = new KafkaConsumer<>(KafkaConfig.getConsumerConfig());
        init_es();
    }

    /**
     * Obtains the shared ES client and, exactly once per JVM, (re)installs the
     * index template for the {@code datamap_precompute*} indices. Retries the
     * PUT every 5 s until it succeeds; a failed DELETE is only logged because
     * the template may simply not exist yet.
     */
    public void init_es() {
        highClient = ESclient.getHighClient();

        if (is_es_init) {
            return;
        }
        is_es_init = true;

        String template = "{\"template\":\"datamap_precompute*\",\"order\":10,\"settings\":{\"index\":{\"refresh_interval\":\"5s\",\"number_of_replicas\":\"" + KafkaPreCompute.es_number_of_replica + "\"}},\"mappings\":{\"_default_\":{\"dynamic_templates\":[{\"string_fields\":{\"match\":\"*\",\"match_mapping_type\":\"string\",\"mapping\":{\"type\":\"text\",\"fields\":{\"raw\":{\"type\":\"keyword\",\"ignore_above\":256}}}}}]}}}";

        while (true) {
            String pathAll = "/_template/" + KafkaConfig.es_tmpl;
            try {
                // Best-effort: drop any stale template so the PUT below fully replaces it.
                highClient.getLowLevelClient().performRequest("DELETE", pathAll);
            } catch (Throwable e) {
                logger.error("[delete template exception] [The reason for error {" + e + "}]");
                KafkaConfig.printErrorLog(logger,e);
            }

            try {
                Map<String, String> params = Collections.emptyMap();
                HttpEntity entity = new StringEntity(template, ContentType.APPLICATION_JSON);
                // FIX: the Response return value was assigned to an unused local; drop it.
                highClient.getLowLevelClient().performRequest("PUT", pathAll, params, entity);
            } catch (Throwable e) {
                logger.error("[put template exception] [The reason for error {" + e + "}]");
                KafkaConfig.printErrorLog(logger,e);
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e1) {
                    // Preserve the interrupt flag so callers can observe the request to stop.
                    Thread.currentThread().interrupt();
                }
                continue;
            }
            break;
        }
    }

    /**
     * Builds an upsert request for one Kafka record. The target index name is
     * derived from the record's {@code TIME} field (first 19 chars parsed as an
     * ISO local date-time) and the key prefix: keys starting with
     * {@code "FilesysTree"} go to the tree index, everything else to the
     * default pre-compute index.
     *
     * @param key   record key; doubles as the ES document id
     * @param value record value; must be a JSON object containing {@code TIME}
     * @return an upsert ({@code docAsUpsert}) request targeting the dated index
     */
    public UpdateRequest getUpdateRequest(String key, String value) {
        JsonObject obj = jp.parse(value).getAsJsonObject();
        String time = obj.get("TIME").getAsString();
        LocalDateTime dateTime = LocalDateTime.parse(time.substring(0,19));
        String index;
        // FIX: key.substring(0, 11) threw StringIndexOutOfBoundsException for keys
        // shorter than 11 chars; startsWith is equivalent for all valid keys and safe.
        if (key.startsWith("FilesysTree")) {
            index = "datamap_precompute_tree" + "-" + dateTime.format(Etime.DATE_FORMATTER_INDEX);
        } else {
            index = "datamap_precompute" + "-" + dateTime.format(Etime.DATE_FORMATTER_INDEX);
        }
        UpdateRequest req = new UpdateRequest(index, KafkaConfig.es_type, key);
        req.doc(value, XContentType.JSON);
        req.docAsUpsert(true);
        return req;
    }

    /**
     * Main consume loop: polls the medium topic, accumulates upserts into a
     * {@link BulkRequest}, and flushes + commits when the batch is large or old
     * enough. During an upgrade, exits the JVM once no batch has been flushed
     * for 30 minutes.
     */
    @Override
    public void run() {
        long conStartTime = System.currentTimeMillis();
        long conEndTime;
        long bulkStartTime = System.currentTimeMillis();
        long bulkEndTime;
        long startUpadeTime = System.currentTimeMillis();

        consumer.subscribe(Collections.singletonList(KafkaPreCompute.medium_topic));
        BulkRequest request = new BulkRequest();

        while (true) {
            if (KafkaPreCompute.is_upgrade) {
                // 30 minutes with no successful flush => upgrade drain is done.
                long elapseTime = (System.currentTimeMillis() - startUpadeTime) / 60000;
                if (elapseTime > 30) {
                    logger.info("[all data processing of this upgrade has been completed]");
                    KafkaClusterStatus.deleteTopic();
                    MarkUpgrade mark = new MarkUpgrade();
                    mark.createFile();
                    System.exit(0);
                }
            }

            try {
                ConsumerRecords<String, String> records = consumer.poll(60 * 1000);
                for (ConsumerRecord<String, String> record : records) {
                    UpdateRequest req = getUpdateRequest(record.key(), record.value());
                    request.add(req);
                }
                int retryTimes = 1;
                conEndTime = System.currentTimeMillis();
                if ((request.numberOfActions() >= KafkaConfig.BULK_SIZE) || ((conEndTime - conStartTime) >= KafkaConfig.BULK_INTERVAL)) {
                    if (request.numberOfActions() > 0) {
                        startUpadeTime = System.currentTimeMillis();
                        while (true) {
                            try {
                                highClient.bulk(request);
                                bulkEndTime = System.currentTimeMillis();
                                // Commit only after the bulk succeeded, so failures re-consume.
                                consumer.commitSync();
                                // FIX: floor the elapsed time at 1 ms — a sub-millisecond
                                // flush previously logged an Infinity speed.
                                long elapsedMs = Math.max(1, bulkEndTime - bulkStartTime);
                                logger.info("[Consumer calculate speed] [Count:" + request.numberOfActions() + "|Time:" + String.format("%.2f", elapsedMs / 1000.0) + "s|Speed:" + String.format("%.2f", (request.numberOfActions() * 1000.0 / elapsedMs)) + "]");
                                break;
                            } catch (Exception e) {
                                logger.error("[Bulk to es exception or consumer commit exception] [The reason for error {" + e + "}]");
                                KafkaConfig.printErrorLog(logger,e);
                                logger.error("[retry " + retryTimes + " times because of exception]");
                                try {
                                    // Linear back-off: 10 s, 20 s, 30 s.
                                    Thread.sleep(10000L * retryTimes);
                                } catch (InterruptedException e1) {
                                    // FIX: this handler logged the outer exception `e` instead of
                                    // `e1`, and swallowed the interrupt without restoring it.
                                    Thread.currentThread().interrupt();
                                    logger.error("[thread sleep exception] [The reason for error {" + e1 + "}]");
                                }
                                // FIX: increment outside the sleep try/catch — previously an
                                // interrupted sleep skipped the increment, so the retry loop
                                // could spin forever at the same retryTimes.
                                retryTimes++;
                                if (retryTimes == 4) {
                                    retryTimes = 1;
                                    // NOTE(review): giving up discards this batch without
                                    // committing; a later successful commit advances past these
                                    // offsets. Confirm that dropping the batch is acceptable.
                                    break;
                                }
                            }

                        }
                        request = new BulkRequest();
                        bulkStartTime = System.currentTimeMillis();
                        conStartTime = System.currentTimeMillis();
                    }
                }
            } catch (Throwable e) {
                KafkaConfig.printErrorLog(logger,e);
            }
        }
    }
}
