package com.techsun.source;

import com.alibaba.fastjson.JSONObject;
import org.apache.flume.Context;
import org.apache.flume.PollableSource;
import org.apache.flume.conf.Configurable;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.flume.source.AbstractSource;
import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Type;
import org.apache.kudu.client.*;
import org.apache.log4j.Logger;

import java.nio.charset.StandardCharsets;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.apache.kudu.client.SessionConfiguration.FlushMode.MANUAL_FLUSH;

/**
 * Flume pollable source that scans a Kudu table for unconsumed rows
 * (use_flag IS NULL, action_time within a configurable look-back window),
 * emits each row as a JSON event whose "topic" header is built from the
 * row's "project" column, and marks the row consumed by setting use_flag = 1.
 *
 * <p>Not thread-safe; Flume drives {@link #process()} from a single
 * PollableSourceRunner thread.
 */
public class KuduSource extends AbstractSource implements Configurable, PollableSource {

    private static final Logger logger = Logger.getLogger(KuduSource.class);

    // Comma-separated Kudu master addresses.
    private String kuduMaster;
    // Kudu table to poll.
    private String tableName;
    // Prefix (plus "_") prepended to the row's "project" value to form the Kafka topic.
    private String topicPrefix;
    // Look-back window of the scan, stored in MILLISECONDS
    // (the config value "selectSecondBefore" is in seconds, default 86400 = one day).
    private Long selectSecondBefore;
    // Minimum poll interval in seconds, default 30.
    private Long pollIntervalSecond;
    // Flume metrics counter for received/accepted events.
    private SourceCounter sourceCounter;
    // Number of buffered row updates that triggers a manual session flush.
    private int kuduBatchSize;
    // Updates applied since the last flush.
    private int kuduBatchCounter = 0;

    /**
     * Reads the source configuration. Required keys: "kuduMaster", "tableName".
     * Optional: "topicPrefix" (default ""), "selectSecondBefore" (seconds, default 86400),
     * "pollIntervalSecond" (default 30), "kuduBatchSize" (default 50).
     */
    @Override
    public void configure(Context context) {
        this.kuduMaster = context.getString("kuduMaster");
        this.topicPrefix = context.getString("topicPrefix", "") + "_";
        // Convert seconds -> milliseconds once here; process() converts to micros.
        this.selectSecondBefore = context.getLong("selectSecondBefore", 86400L) * 1000;
        this.pollIntervalSecond = context.getLong("pollIntervalSecond", 30L);
        this.kuduBatchSize = context.getInteger("kuduBatchSize", 50);
        this.tableName = context.getString("tableName");

        if (this.sourceCounter == null) {
            this.sourceCounter = new SourceCounter(this.getName());
        }
    }

    @Override
    public synchronized void start() {
        this.sourceCounter.start();
        super.start();
    }

    /**
     * One poll cycle: scan unconsumed rows, forward each to the channel, and
     * batch-update use_flag so rows are not re-delivered.
     *
     * @return {@link Status#BACKOFF} on any error, {@link Status#READY} otherwise.
     */
    @Override
    public Status process() {
        KuduClient kuduClient = null;
        KuduSession kuduSession = null;
        KuduScanner kuduScanner = null;
        try {
            kuduClient = new KuduClient.KuduClientBuilder(kuduMaster).build();
            KuduTable kuduTable = kuduClient.openTable(this.tableName);
            kuduSession = kuduClient.newSession();
            // Manual flush so use_flag updates are committed in batches.
            kuduSession.setFlushMode(MANUAL_FLUSH);
            long currentTimeMillis = System.currentTimeMillis();
            // Kudu UNIXTIME_MICROS columns store MICROSECOND timestamps, so both
            // bounds must be converted from milliseconds to microseconds.
            // selectSecondBefore is already in ms — subtract first, then scale
            // (scaling only the subtrahend would mix ms and us and break the window).
            // WHERE action_time >= now - window AND action_time < now AND use_flag IS NULL
            KuduPredicate predicate1 = KuduPredicate.newComparisonPredicate(kuduTable.getSchema().getColumn("action_time"),
                    KuduPredicate.ComparisonOp.GREATER_EQUAL, (currentTimeMillis - this.selectSecondBefore) * 1000);
            KuduPredicate predicate2 = KuduPredicate.newComparisonPredicate(kuduTable.getSchema().getColumn("action_time"),
                    KuduPredicate.ComparisonOp.LESS, currentTimeMillis * 1000);
            KuduPredicate predicate3 = KuduPredicate.newIsNullPredicate(kuduTable.getSchema().getColumn("use_flag"));
            kuduScanner = kuduClient.newScannerBuilder(kuduTable)
                    .addPredicate(predicate1)
                    .addPredicate(predicate2)
                    .addPredicate(predicate3)
                    .build();
            while (kuduScanner.hasMoreRows()) {
                RowResultIterator results = kuduScanner.nextRows();
                while (results.hasNext()) {
                    this.sourceCounter.incrementEventReceivedCount();
                    RowResult result = results.next();
                    Update update = kuduTable.newUpdate();
                    JSONObject jsonObject = new JSONObject();
                    // Copy the row into the JSON payload and the update's key columns.
                    constructData(kuduTable, result, update, jsonObject);
                    // Mark the row consumed (once per row, not per column).
                    update.getRow().addInt("use_flag", 1);
                    SimpleEvent event = new SimpleEvent();
                    event.setBody(jsonObject.toJSONString().getBytes(StandardCharsets.UTF_8));
                    // Each event needs its OWN header map: events already queued in the
                    // channel keep a reference, so reusing one map would retroactively
                    // rewrite the topic of earlier events.
                    Map<String, String> header = new HashMap<>();
                    header.put("topic", this.topicPrefix + jsonObject.getString("project"));
                    event.setHeaders(header);
                    kuduSession.apply(update);
                    getChannelProcessor().processEvent(event);
                    kuduBatchCounter += 1;
                    // Flush when the batch threshold is reached; anything left over is
                    // flushed by session.close() in the finally block.
                    if (kuduBatchCounter >= kuduBatchSize) {
                        flushSession(kuduSession);
                        kuduBatchCounter = 0;
                    }
                    this.sourceCounter.incrementEventAcceptedCount();
                }
            }
        } catch (KuduException e) {
            logger.error("process event has kudu exception: ", e);
            return Status.BACKOFF;
        } catch (Exception e) {
            logger.error("process event has exception: ", e);
            return Status.BACKOFF;
        } finally {
            // Release Kudu resources even on the error path; close() on the
            // session also flushes any not-yet-flushed updates.
            try {
                if (kuduScanner != null) kuduScanner.close();
                if (kuduSession != null) kuduSession.close();
                if (kuduClient != null) kuduClient.close();
            } catch (Exception e) {
                logger.error("close kudu connection has error: ", e);
            }
            // Throttle the poll loop.
            try {
                Thread.sleep(this.pollIntervalSecond * 1000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so the Flume runner can shut down cleanly.
                Thread.currentThread().interrupt();
                logger.error("thread sleep has error: ", e);
            }
        }
        return Status.READY;
    }

    @Override
    public long getBackOffSleepIncrement() {
        return 0;
    }

    @Override
    public long getMaxBackOffSleepInterval() {
        return 0;
    }

    @Override
    public synchronized void stop() {
        super.stop();
        this.sourceCounter.stop();
    }

    /**
     * Flushes the session's buffered updates, logging (not throwing) per-row errors.
     *
     * @throws KuduException if the flush itself fails.
     */
    private void flushSession(KuduSession session) throws KuduException {
        List<OperationResponse> responses = session.flush();
        for (OperationResponse response : responses) {
            if (response.hasRowError()) {
                logger.error(String.format("kudu flush data has error: [%s]", response.getRowError().getMessage()));
            }
        }
    }

    /**
     * Copies every non-null column of {@code result} into {@code jsonObject} and,
     * for key columns, into {@code update}'s row so the update addresses the same row.
     *
     * <p>Kudu's RowResult/PartialRow accessors are strictly type-checked
     * (e.g. getInt/addInt only accept INT32), so each integer width uses its
     * own getter/setter pair.
     */
    private void constructData(KuduTable kuduTable, RowResult result, Update update, JSONObject jsonObject) {
        for (ColumnSchema column : kuduTable.getSchema().getColumns()) {
            String name = column.getName();
            if (result.isNull(name))
                continue;
            Type type = column.getType();
            switch (type) {
                case INT8:
                    jsonObject.put(name, result.getByte(name));
                    if (column.isKey()) update.getRow().addByte(name, result.getByte(name));
                    break;
                case INT16:
                    jsonObject.put(name, result.getShort(name));
                    if (column.isKey()) update.getRow().addShort(name, result.getShort(name));
                    break;
                case INT32:
                    jsonObject.put(name, result.getInt(name));
                    if (column.isKey()) update.getRow().addInt(name, result.getInt(name));
                    break;
                case INT64:
                    jsonObject.put(name, result.getLong(name));
                    if (column.isKey()) update.getRow().addLong(name, result.getLong(name));
                    break;
                case UNIXTIME_MICROS:
                    // The JSON payload carries milliseconds for downstream consumers,
                    // but the key update MUST keep the raw microsecond value —
                    // dividing it would address a different (nonexistent) row and
                    // leave this row unmarked, causing endless re-delivery.
                    jsonObject.put(name, result.getLong(name) / 1000);
                    if (column.isKey()) update.getRow().addLong(name, result.getLong(name));
                    break;
                case BINARY:
                    jsonObject.put(name, result.getBinary(name));
                    if (column.isKey()) update.getRow().addBinary(name, result.getBinary(name));
                    break;
                case STRING:
                    jsonObject.put(name, result.getString(name));
                    if (column.isKey()) update.getRow().addString(name, result.getString(name));
                    break;
                case BOOL:
                    jsonObject.put(name, result.getBoolean(name));
                    if (column.isKey()) update.getRow().addBoolean(name, result.getBoolean(name));
                    break;
                case FLOAT:
                    jsonObject.put(name, result.getFloat(name));
                    if (column.isKey()) update.getRow().addFloat(name, result.getFloat(name));
                    break;
                case DOUBLE:
                    jsonObject.put(name, result.getDouble(name));
                    if (column.isKey()) update.getRow().addDouble(name, result.getDouble(name));
                    break;
                case DECIMAL:
                    jsonObject.put(name, result.getDecimal(name));
                    if (column.isKey()) update.getRow().addDecimal(name, result.getDecimal(name));
                    break;
                default:
                    logger.error(String.format("table column type mismatch: [%s]%n", type));
            }
        }
    }
}
