package com.learn.util;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableNotExistException;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.types.Row;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Iterator;
import java.util.List;

/**
 * Utility methods for converting Hive-metastore-described JSON records into Flink
 * {@link Row}s, and for wiring a table environment up to a Hive catalog.
 *
 * <p>All methods are static; this class is not meant to be instantiated.
 */
public class RowUtil {

    private static final Logger logger = LoggerFactory.getLogger(RowUtil.class);

    /**
     * Streaming-sink table properties applied by
     * {@link #setTblproperties(StreamTableEnvironment, String)}.
     * A LinkedHashMap keeps declaration order so the ALTER statements run (and are
     * logged) deterministically. Values are identical to the original JSON constant.
     */
    private static final Map<String, String> TBL_PROPERTIES = new LinkedHashMap<>();

    static {
        TBL_PROPERTIES.put("sink.rolling-policy.file-size", "128MB");
        TBL_PROPERTIES.put("sink.rolling-policy.rollover-interval", "30 min");
        TBL_PROPERTIES.put("sink.rolling-policy.check-interval", "10 min");
        TBL_PROPERTIES.put("auto-compaction", "true");
        TBL_PROPERTIES.put("sink.partition-commit.policy.kind", "metastore,success-file");
        TBL_PROPERTIES.put("sink.partition-commit.success-file.name", "_SUCCESS");
    }

    private RowUtil() {
        // utility class — no instances
    }

    /**
     * Builds a Flink {@link Row} from a JSON object, looking each value up by the
     * column name taken from the Hive metadata. The metadata order defines the
     * position of each field in the resulting row.
     *
     * @param metaData ordered Hive column metadata; one entry per row position
     * @param json     source record whose keys are expected to match the column names
     * @return a row of arity {@code metaData.size()}; columns missing from the JSON
     *         end up as {@code null} fields
     */
    public static Row getRowFromJson(List<FieldSchema> metaData, JSONObject json) {
        Row row = new Row(metaData.size());
        for (int i = 0; i < metaData.size(); i++) {
            // Field i of the row is filled from the JSON value under column i's name.
            row.setField(i, json.get(metaData.get(i).getName()));
        }
        return row;
    }

    /**
     * Maps Hive column types to the Flink {@link TypeInformation} used to describe
     * row fields (e.g. for splitting a record into typed columns).
     *
     * <p>Unrecognised or complex types (decimal, array, map, struct, ...) fall back
     * to {@link Types#STRING}.
     *
     * @param metaData Hive column metadata
     * @return one {@code TypeInformation} per column, in metadata order
     */
    public static TypeInformation[] getTypeInfoArray(List<FieldSchema> metaData) {
        TypeInformation[] types = new TypeInformation[metaData.size()];
        for (int i = 0; i < metaData.size(); i++) {
            String type = metaData.get(i).getType();
            if ("string".equalsIgnoreCase(type)) {
                types[i] = Types.STRING;
            } else if ("int".equalsIgnoreCase(type)) {
                // Fix: Hive "int" previously fell through to the STRING default,
                // mismatching the Integer values fastjson produces for such columns.
                types[i] = Types.INT;
            } else if ("bigint".equalsIgnoreCase(type)) {
                types[i] = Types.LONG;
            } else if ("tinyint".equalsIgnoreCase(type)) {
                types[i] = Types.BYTE;
            } else if ("smallint".equalsIgnoreCase(type)) {
                types[i] = Types.SHORT;
            } else if ("float".equalsIgnoreCase(type)) {
                types[i] = Types.FLOAT;
            } else if ("double".equalsIgnoreCase(type)) {
                types[i] = Types.DOUBLE;
            } else if ("boolean".equalsIgnoreCase(type)) {
                types[i] = Types.BOOLEAN;
            } else if ("timestamp".equalsIgnoreCase(type)) {
                types[i] = Types.LOCAL_DATE_TIME;
            } else {
                types[i] = Types.STRING;
            }
        }
        return types;
    }

    /**
     * Extracts the column names from the Hive metadata, preserving order.
     *
     * @param metaData Hive column metadata
     * @return the column names, one per metadata entry
     */
    public static String[] getColumnArray(List<FieldSchema> metaData) {
        String[] fieldNames = new String[metaData.size()];
        for (int i = 0; i < metaData.size(); i++) {
            fieldNames[i] = metaData.get(i).getName();
        }
        return fieldNames;
    }

    /**
     * Converts the given row stream to a table and inserts it into {@code tableName}.
     *
     * @param tableEnv  streaming table environment the rows belong to
     * @param rows      stream of rows to sink
     * @param tableName target table (resolved in the current catalog/database)
     * @return the {@link TableResult} of the INSERT job
     * @throws TableNotExistException if the target table does not exist
     */
    public static TableResult sink2Hive(StreamTableEnvironment tableEnv,
                                        SingleOutputStreamOperator<Row> rows,
                                        String tableName) throws TableNotExistException {
        return tableEnv.fromDataStream(rows).executeInsert(tableName);
    }

    /**
     * Applies the streaming-sink table properties in {@link #TBL_PROPERTIES} to the
     * given Hive table, one {@code ALTER TABLE ... SET TBLPROPERTIES} per property.
     *
     * <p>NOTE(review): {@code tableName} is concatenated directly into DDL (Flink SQL
     * has no placeholder binding for DDL), so it must come from a trusted source.
     *
     * @param tableEnv  table environment used to execute the DDL
     * @param tableName target table (optionally database-qualified)
     */
    public static void setTblproperties(StreamTableEnvironment tableEnv, String tableName) {
        for (Map.Entry<String, String> prop : TBL_PROPERTIES.entrySet()) {
            String sql = String.format("alter table %s set TBLPROPERTIES('%s'='%s')",
                    tableName, prop.getKey(), prop.getValue());
            logger.info("执行： {}", sql);
            tableEnv.executeSql(sql);
        }
    }

    /**
     * Registers a Hive catalog named "myhive" on the streaming table environment,
     * makes it the current catalog, and switches the SQL dialect to Hive.
     *
     * @param tableEnv        streaming table environment to configure
     * @param defaultDatabase default Hive database for the catalog
     * @param mode            "service" selects the cluster conf dir
     *                        {@code /usr/local/service/hive/conf}; any other value
     *                        uses {@code src/main/resources} (local development)
     */
    public static void setHiveConnect(StreamTableEnvironment tableEnv, String defaultDatabase, String mode) {
        // Use Hive's SQL dialect for subsequent DDL/DML.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        String name = "myhive";
        String serviceConf = "/usr/local/service/hive/conf";
        String localConf = "src/main/resources";

        // "service" means we are running on the cluster; otherwise local dev.
        String confDir = "service".equals(mode) ? serviceConf : localConf;
        HiveCatalog hive = new HiveCatalog(name, defaultDatabase, confDir);

        tableEnv.registerCatalog(name, hive);
        tableEnv.useCatalog(name);
    }

    /**
     * Registers a Hive catalog named "myhive" on a (batch or streaming) table
     * environment and makes it the current catalog. Unlike the
     * {@link StreamTableEnvironment} overload, this does NOT change the SQL dialect.
     *
     * @param tEnv            table environment to configure
     * @param defaultDatabase default Hive database for the catalog
     * @param mode            "service" selects the CDH conf dir
     *                        {@code /etc/hive/conf.cloudera.hive/}; any other value
     *                        uses {@code src/main/resources} (hive-site.xml location)
     */
    public static void setHiveConnect(TableEnvironment tEnv, String defaultDatabase, String mode) {
        String name = "myhive";                       // unique catalog identifier
        String hiveConfDir = "src/main/resources";    // local hive-site.xml location
        String serviceConf = "/etc/hive/conf.cloudera.hive/";

        // "service" means we are running on the cluster; otherwise local dev.
        String confDir = "service".equals(mode) ? serviceConf : hiveConfDir;
        HiveCatalog hive = new HiveCatalog(name, defaultDatabase, confDir);

        tEnv.registerCatalog(name, hive);
        tEnv.useCatalog(name);
    }

    /**
     * Registers a Hive catalog named "myhive" for the USDP ("CloudLink") deployment
     * and makes it the current catalog.
     *
     * @param tEnv            table environment to configure
     * @param defaultDatabase default Hive database for the catalog
     * @param mode            "service" selects the USDP conf dir
     *                        {@code /opt/usdp-srv/srv/udp/2.0.0.0/hive/conf}; any
     *                        other value uses {@code src/main/resources}
     */
    public static void setHiveConnectCloudLink(TableEnvironment tEnv, String defaultDatabase, String mode) {
        String name = "myhive";                       // unique catalog identifier
        String hiveConfDir = "src/main/resources";    // local hive-site.xml location
        String serviceConf = "/opt/usdp-srv/srv/udp/2.0.0.0/hive/conf";

        // "service" means we are running on the cluster; otherwise local dev.
        String confDir = "service".equals(mode) ? serviceConf : hiveConfDir;
        HiveCatalog hive = new HiveCatalog(name, defaultDatabase, confDir);

        tEnv.registerCatalog(name, hive);
        tEnv.useCatalog(name);
    }

}
