package com.gukun.springboot.tdengine.service.backuprecover.backup;

import cn.hutool.core.date.DateUtil;
import com.gukun.springboot.tdengine.domain.backuprecover.BackupRecoverTaskEntity;
import com.gukun.springboot.tdengine.service.backuprecover.entity.Table;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.RateLimiter;

import java.io.File;
import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Worker that drains {@link Table} entries from a shared queue and backs up each
 * table's rows (within the task's data time window) into a Snappy-compressed
 * Avro file under {@code finalFolder}. Several instances typically run in
 * parallel against the same queue; the shared {@code dataNum} counter
 * accumulates the total rows written.
 *
 * @author zhang
 */
public class BackupDbDataTask implements Runnable {
    /** Time-window query; {@code _c0} is TDengine's implicit first (timestamp) column. */
    private static final String SELECT_DATA_BY_TIME = "SELECT * FROM %s.%s where _c0 >= %s and _c0 <= %s";
    /** Rows buffered in memory before being appended to the Avro writer. */
    private static final int FLUSH_THRESHOLD = 10_000;
    /** Tables per output sub-folder, to avoid putting too many files in one directory. */
    private static final int TABLES_PER_FOLDER = 20_000;
    /** Rows fetched per JDBC round-trip. */
    private static final int FETCH_SIZE = 3_000;
    /** Max rows read per second, so a backup does not starve live ingestion. */
    private static final double ROWS_PER_SECOND = 600_000;

    private final BackupRecoverTaskEntity task;
    private final HikariDataSource tdEngineConnPool;
    private final BlockingQueue<Table> tableNameQueue;
    private final String finalFolder;
    private final AtomicLong dataNum;

    /**
     * @param tableNameQueue   shared queue of tables to back up; this worker drains it
     * @param task             backup task carrying the db name and the data time window
     * @param finalFolder      root folder the Avro files are written under
     * @param dataNum          shared counter of rows written across all workers
     * @param tdEngineConnPool connection pool for the TDengine instance
     */
    public BackupDbDataTask(BlockingQueue<Table> tableNameQueue, BackupRecoverTaskEntity task, String finalFolder, AtomicLong dataNum, HikariDataSource tdEngineConnPool) {
        this.task = task;
        this.tdEngineConnPool = tdEngineConnPool;
        this.tableNameQueue = tableNameQueue;
        this.finalFolder = finalFolder;
        this.dataNum = dataNum;
    }

    /**
     * Polls tables off the queue until it is drained, dumping each one to its
     * Avro file. The rows-written count is published to {@code dataNum} even
     * when the task fails part-way through.
     *
     * @throws RuntimeException wrapping any SQL/IO/interrupt failure
     */
    @Override
    public void run() {
        String dbName = task.getDbName();
        long startTime = DateUtil.parse(task.getDataStartTime(), "yyyy-MM-dd HH:mm:ss").getTime();
        long endTime = DateUtil.parse(task.getDataEndTime(), "yyyy-MM-dd HH:mm:ss").getTime();
        long count = 0;
        RateLimiter rateLimiter = RateLimiter.create(ROWS_PER_SECOND);
        try (Connection connection = tdEngineConnPool.getConnection();
             Statement statement = connection.createStatement()) {
            statement.setFetchSize(FETCH_SIZE);
            while (true) {
                Table table = tableNameQueue.poll(100, TimeUnit.MILLISECONDS);
                if (table == null) {
                    // Poll timed out: stop once the queue is fully drained,
                    // otherwise keep waiting for a producer to catch up.
                    if (tableNameQueue.isEmpty()) {
                        break;
                    }
                    continue;
                }
                count += backupTable(statement, table, dbName, startTime, endTime, rateLimiter);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so the owning executor can observe it.
            Thread.currentThread().interrupt();
            throw new RuntimeException("backup of db " + dbName + " was interrupted", e);
        } catch (Exception e) {
            throw new RuntimeException("backup of db " + dbName + " failed", e);
        } finally {
            // Publish whatever was written, even on failure, so progress reporting stays accurate.
            dataNum.getAndAdd(count);
        }
    }

    /**
     * Dumps one table's rows in [startTime, endTime] into its Avro file.
     *
     * @return the number of rows written for this table
     */
    private long backupTable(Statement statement, Table table, String dbName,
                             long startTime, long endTime, RateLimiter rateLimiter) throws Exception {
        // NOTE(review): db/table names are interpolated into the SQL text. They come
        // from internal task metadata (JDBC cannot parameterize identifiers), but
        // sanitize them if they ever originate from user input.
        String selectSql = String.format(SELECT_DATA_BY_TIME, dbName, table.getTableName(), startTime, endTime);
        Schema schema = getSchema(table);
        String stableName = table.getStableName();
        List<GenericRecord> buffer = new ArrayList<>(FLUSH_THRESHOLD);
        long count = 0;
        try (DataFileWriter<GenericRecord> dataFileWriter = createFile(schema, table.getIndex(), table, dbName);
             ResultSet resultSet = statement.executeQuery(selectSql)) {
            while (resultSet.next()) {
                rateLimiter.acquire();
                GenericRecord record = new GenericData.Record(schema);
                record.put("ts", resultSet.getLong("ts"));
                record.put("v", resultSet.getString("v"));
                record.put("stable", stableName);
                buffer.add(record);
                if (buffer.size() >= FLUSH_THRESHOLD) {
                    count += drain(buffer, dataFileWriter);
                }
            }
            if (!buffer.isEmpty()) {
                count += drain(buffer, dataFileWriter);
                dataFileWriter.flush();
            }
        }
        return count;
    }

    /**
     * Appends every buffered record to the writer and clears the buffer.
     *
     * @return the number of records appended
     */
    private long drain(List<GenericRecord> buffer, DataFileWriter<GenericRecord> writer) throws IOException {
        for (GenericRecord record : buffer) {
            writer.append(record);
        }
        long appended = buffer.size();
        buffer.clear();
        return appended;
    }

    /**
     * Builds the Avro schema for one table: a long {@code ts}, a string
     * {@code v}, and a string {@code stable} defaulting to the super-table name.
     */
    public Schema getSchema(Table table) {
        SchemaBuilder.RecordBuilder<Schema> recordBuilder = SchemaBuilder.record(table.getTableName()).namespace("com.sciyon");
        SchemaBuilder.FieldAssembler<Schema> fieldAssembler = recordBuilder.fields();
        fieldAssembler.name("ts").type().longType().noDefault();
        fieldAssembler.name("v").type().stringType().noDefault();
        fieldAssembler.name("stable").type().stringType().stringDefault(table.getStableName());
        return fieldAssembler.endRecord();
    }

    /**
     * Opens an Avro writer for the table's backup file, creating the sharded
     * output folder ({@code <db>_<index/20000>}) on demand.
     *
     * @throws IOException if the folder cannot be created or the file cannot be opened
     */
    public DataFileWriter<GenericRecord> createFile(Schema schema, int tableIndex, Table table, String dbName) throws IOException {
        int folderIndex = tableIndex / TABLES_PER_FOLDER;
        String fileName = finalFolder + "/" + dbName + "_" + folderIndex + "/" + table.getTableName() + ".avro";
        File file = new File(fileName);
        File parent = file.getParentFile();
        // mkdirs() may race with sibling workers creating the same folder, so only
        // fail if the directory still does not exist afterwards.
        if (!parent.exists() && !parent.mkdirs() && !parent.exists()) {
            throw new IOException("cannot create backup folder: " + parent);
        }
        return createDataFileWriter(file, schema);
    }

    /**
     * Creates a Snappy-compressed Avro {@link DataFileWriter} over the given file.
     */
    public DataFileWriter<GenericRecord> createDataFileWriter(File file, Schema schema) throws IOException {
        DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(new GenericDatumWriter<>(schema));
        dataFileWriter.setCodec(CodecFactory.snappyCodec());
        dataFileWriter.create(schema, file);
        return dataFileWriter;
    }
}
