package com.patsnap.data.npd.dw.etl.job;

import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.PropertyNamingStrategy;
import com.alibaba.fastjson.serializer.SerializeConfig;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.s3.AmazonS3Client;
import com.google.common.collect.Sets;
import com.patsnap.data.common.s3.template.AWSS3Utils;
import com.patsnap.data.common.util.property.PropertiesUtils;
import com.patsnap.data.npd.dw.etl.utils.CommandUtils;
import com.patsnap.data.npd.dw.etl.utils.CredentialsUtils;
import com.patsnap.data.npd.dw.etl.utils.SparkUtils;
import com.patsnap.data.npd.dw.repository.DBOperator;
import com.patsnap.data.npd.dw.schema.base.BaseNonePatentDto;
import com.patsnap.one.etl.cdc.CdcRecord;
import com.patsnap.one.etl.table.meta.annotation.AsTargetTableMeta;
import com.patsnap.one.etl.tool.datasource.DataSourceCreator;
import com.patsnap.one.etl.tool.s3.S3Helper;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.spark.SparkFiles;
import org.apache.spark.sql.SparkSession;

import javax.sql.DataSource;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.*;
import java.util.stream.Collectors;


/**
 * Base class for Spark batch ETL jobs: parses command-line options, boots a
 * {@link SparkSession}, wires up JDBC datasources / MyBatis session factories and a
 * handful of AWS clients, then delegates the actual work to {@link #process}.
 *
 * <p>Subclasses implement {@link #process} and may add job-specific CLI options via
 * {@link #addExtraOptions}. Thread-safety: the two maps are concurrent, but the
 * init/close lifecycle is expected to run on a single driver thread.
 */
public abstract class AbstractSparkBatchJob implements Serializable {

    // Keyed by logical datasource name; populated by the initDBOperator overloads.
    protected Map<String, SqlSessionFactory> sqlSessionFactoryMap = new ConcurrentHashMap<>();
    // Keyed by logical datasource name; closed (Hikari pools only) in close().
    protected Map<String, DataSource> dataSourceMap = new ConcurrentHashMap<>();

    /**
     * Job entry point: parses {@code args}, initializes Spark, runs {@link #process}
     * and always tears down the datasources and the Spark session afterwards.
     *
     * <p>NOTE: the misspelled name ("laucher") is retained because subclasses call
     * this protected method; renaming it would break them.
     *
     * @param args raw command-line arguments
     */
    protected void laucher(String[] args) {
        Options options = CommandUtils.defaultOptions();
        addExtraOptions(options);
        options.addOption(OptionBuilder.withDescription("partition size").withLongOpt("partition").hasArg(true).isRequired(false).create("p"));
        final CommandLine cmd = CommandUtils.parseArgs(options, args);
        SparkSession spark = SparkUtils.initSpark(cmd);
        try {
            process(spark, cmd);
        } finally {
            // Release pooled JDBC connections and the Spark session even when
            // process() throws; previously a failing job leaked both until JVM exit.
            close();
            spark.close();
        }
    }

    /**
     * Hook for subclasses to register additional CLI options. Default: none.
     *
     * @param options the option set that will be parsed in {@link #laucher}
     */
    protected void addExtraOptions(Options options) {
    }

    /**
     * Builds an S3 client from a "<name>_s3_*" triple of config entries, where
     * {@code <name>} comes from the "s3_datasource_names" entry.
     *
     * @param configMap raw job configuration
     * @return a client, or {@code null} when any of the required keys is absent
     */
    protected AmazonS3Client buildS3Client(Map<String, Object> configMap) {
        if (configMap.containsKey("s3_datasource_names")) {
            String s3SourceName = String.valueOf(configMap.get("s3_datasource_names"));
            if (configMap.containsKey(s3SourceName + "_s3_endpoint")
                    && configMap.containsKey(s3SourceName + "_s3_access_key")
                    && configMap.containsKey(s3SourceName + "_s3_secret_key")) {
                String s3Endpoint = String.valueOf(configMap.get(s3SourceName + "_s3_endpoint"));
                String s3AccessKey = String.valueOf(configMap.get(s3SourceName + "_s3_access_key"));
                String s3SecretKey = String.valueOf(configMap.get(s3SourceName + "_s3_secret_key"));

                return S3Helper.createS3Client(s3AccessKey, s3SecretKey, null, s3Endpoint);
            }
        }
        return null;
    }

    /**
     * The job body. Implementations read their inputs via {@code cmd} and do all
     * Spark work here; cleanup is handled by {@link #laucher}.
     */
    protected abstract void process(SparkSession spark, final CommandLine cmd);

    /**
     * Flattens parsed CLI options into a long-option-name -> first-value map.
     * Assumes every option was declared with {@code hasArg(true)} and appears at
     * most once; duplicate long names would make {@code toMap} throw.
     */
    private static Map<String, String> toConfigMap(CommandLine cmd) {
        return Arrays.stream(cmd.getOptions())
                .collect(Collectors.toMap(Option::getLongOpt, e -> e.getValues()[0]));
    }

    /**
     * Creates JDBC datasources and MyBatis session factories from the given config
     * and returns a DB operator over them. Populates the two instance maps.
     */
    protected DBOperator initDBOperator(Map<String, String> configMap) {
        DataSourceCreator.assemblerJdbcDatasourcesFromConfigMap(configMap, this.dataSourceMap);
        DataSourceCreator.assemblerSqlSessionFactoriesFromConfigMap(configMap, this.sqlSessionFactoryMap, this.dataSourceMap);
        return SparkUtils.initDBOperator(this.sqlSessionFactoryMap);
    }

    /**
     * Convenience overload: derives the config map from parsed CLI options.
     */
    protected DBOperator initDBOperator(CommandLine cmd) {
        return initDBOperator(toConfigMap(cmd));
    }

    /**
     * Builds the S3 client for the image source from "image_source_s3_*" options.
     */
    protected AmazonS3Client initImageS3Client(CommandLine cmd) {
        Map<String, String> configMap = toConfigMap(cmd);
        return S3Helper.createS3Client(
                configMap.get("image_source_s3_access_key"),
                configMap.get("image_source_s3_secret_key"),
                null,
                configMap.get("image_source_s3_endpoint"));
    }

    /**
     * Builds a DynamoDB client from AWS key options; region is fixed to us-east-2.
     *
     * @return the client, or {@code null} when credentials are not configured
     */
    protected AmazonDynamoDBClient initCasDbClient(CommandLine cmd) {
        Map<String, String> configMap = toConfigMap(cmd);
        String awsAccessKey = configMap.get(PropertiesUtils.AWS_DB_KEY_ID);
        String awsSecretKey = configMap.get(PropertiesUtils.AWS_DB_KEY);
        String dbEndpoint = configMap.get(PropertiesUtils.DB_ENDPOINT);
        AmazonDynamoDBClient dbClient = null;
        if (StringUtils.isNotBlank(awsAccessKey) && StringUtils.isNotBlank(awsSecretKey)) {
            AWSCredentials dbCredentials = CredentialsUtils.getCredentials(awsAccessKey, awsSecretKey);
            dbClient = new AmazonDynamoDBClient(dbCredentials);
            dbClient.setRegion(Region.getRegion(Regions.US_EAST_2));
        }
        // Null-guard fix: the original dereferenced dbClient unconditionally here and
        // threw NPE whenever an endpoint was configured without credentials.
        if (dbClient != null && StringUtils.isNotBlank(dbEndpoint)) {
            dbClient.setEndpoint(dbEndpoint);
        }
        return dbClient;
    }

    /**
     * Builds a fixed-size pool with a bounded queue of the same size. When the
     * queue is full, CallerRunsPolicy makes the submitting thread execute the task
     * itself, which throttles producers instead of rejecting work.
     *
     * @param thread pool size and queue capacity
     */
    protected ExecutorService getThreadPool(int thread) {
        return new ThreadPoolExecutor(thread, thread,
                0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(thread), new ThreadPoolExecutor.CallerRunsPolicy());
    }

    /**
     * Wraps a JSON document into a CDC record targeting the table declared on the
     * DTO's {@link AsTargetTableMeta} annotation.
     *
     * @param json  row payload; blank/null yields a record with no after-image
     * @param clazz DTO class carrying the target-table annotation (must be present)
     */
    protected CdcRecord toCdcRecord(String json, Class<? extends BaseNonePatentDto> clazz) {
        // Dead code removed: a SerializeConfig with SnakeCase naming was built here
        // but never passed to any serializer.
        AsTargetTableMeta targetTableMeta = clazz.getAnnotation(AsTargetTableMeta.class);
        JSONObject jsonObject = StringUtils.isEmpty(json) ? null : JSONObject.parseObject(json);
        return CdcRecord.builder()
                .source(CdcRecord.Source.builder().table(targetTableMeta.tableName()).build())
                .after(jsonObject)
                .changeFields(jsonObject == null ? Sets.newHashSet() : jsonObject.keySet())
                .build();
    }

    /**
     * @return the session factory registered under {@code dataSourceName}, or null
     */
    protected SqlSessionFactory getSqlSessionFactoryByName(String dataSourceName) {
        return this.sqlSessionFactoryMap.get(dataSourceName);
    }

    /**
     * Closes every Hikari-backed datasource; other DataSource implementations have
     * no close() on the interface and are left alone.
     */
    protected void close() {
        for (DataSource dataSource : this.dataSourceMap.values()) {
            if (dataSource instanceof HikariDataSource) {
                ((HikariDataSource) dataSource).close();
            }
        }
    }

    /**
     * Uploads {@code file} to S3 under the {@code target} key.
     *
     * @throws Exception when the client is null or the upload helper reports failure
     */
    public void upload(AmazonS3Client s3Client, File file, String target) throws Exception {
        if (s3Client == null) {
            throw new Exception("s3client is null");
        }
        // Keep the helper's result separate from the requested key: the original
        // overwrote 'target' first, so the failure message printed the blank result
        // instead of the key that failed. Also switched to the lang3 StringUtils
        // already imported by this file (was commons-lang 2.x here only).
        String uploaded = AWSS3Utils.upload(s3Client, file, target, true);
        if (StringUtils.isBlank(uploaded)) {
            throw new Exception("upload " + target + " failed!");
        }
    }

    /**
     * Stringifies every value of the source map ({@code null} becomes "null",
     * matching String.valueOf semantics).
     */
    public Map<String, String> convertMap(Map<String, Object> source) {
        Map<String, String> target = new HashMap<>();
        source.forEach((key, value) -> target.put(key, String.valueOf(value)));
        return target;
    }

    /**
     * Reads the content of the (single) file shipped via "spark.files" from the
     * executor-local SparkFiles directory.
     *
     * @return file content, decoded as UTF-8
     * @throws RuntimeException wrapping any IOException from the read
     */
    protected String parseFileInfoFromSparkSession(SparkSession spark) {
        String filePath = spark.sparkContext().getConf().get("spark.files");
        String fileName = filePath.substring(filePath.lastIndexOf("/") + 1);
        try {
            // StandardCharsets.UTF_8 instead of Charset.forName("utf-8"): same
            // charset, no lookup, no UnsupportedCharsetException path.
            return FileUtils.readFileToString(new File(SparkFiles.get(fileName)), StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
