package cn.getech.data.development.utils.sched;

import cn.getech.data.development.config.HiveMetaDataConfig;
import cn.getech.data.development.config.properties.BdpJobConfig;
import cn.getech.data.development.config.properties.DataDevelopmentConfig;
import cn.getech.data.development.constant.Constant;
import cn.getech.data.development.constant.DBTypeEnum;
import cn.getech.data.development.entity.TableInfo;
import cn.getech.data.development.entity.TablePartitionStatistics;
import cn.getech.data.development.entity.jobEntity.ScheduleJobLogEntity;
import cn.getech.data.development.service.ScheduleJobLogService;
import cn.getech.data.development.service.TableInfoService;
import cn.getech.data.development.service.TablePartitionStatisticsService;
import cn.getech.data.development.utils.HdfsUtil;
import cn.getech.data.development.utils.MysqlTableUtil;
import cn.hutool.core.collection.CollectionUtil;
import com.alibaba.fastjson.JSONObject;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/***
 * 统计表分区信息
 */
@Slf4j
@Configuration
public class TablePartitionStatisticsTask {

    @Autowired
    private BdpJobConfig bdpJobConfig;
    @Autowired
    private ScheduleJobLogService scheduleJobLogService;
    @Autowired
    private TableInfoService tableInfoService;
    @Autowired
    private TablePartitionStatisticsService tablePartitionStatisticsService;
    @Autowired
    private HiveMetaDataConfig hiveMetaDataConfig;
    @Autowired
    private DataDevelopmentConfig developmentConfig;

    private static String dirHome = "/user/hive/warehouse/";

    public void tablePartitionStatiticsTask(String day) {
        if (!developmentConfig.getTableStatisticsOpen().equals("true")) {
            return;
        }
        log.info("开始统计分区");
        ScheduleJobLogEntity scheduleJobLogEntity = new ScheduleJobLogEntity();
        scheduleJobLogEntity.setBeanName("countPartitionRowsNumTask");
        scheduleJobLogEntity.setJobId(Long.valueOf(0));
        scheduleJobLogEntity.setParams("0");
        scheduleJobLogEntity.setCreateTime(new Date());
        scheduleJobLogEntity.setStatus(Constant.ScheduleStatus.NORMAL.getValue());
        long startTime = System.currentTimeMillis();
        try {
            List<JSONObject> tableList = getTablePartitionSizeList(day);
            // 获取存在分区字段的表
            List<JSONObject> partitionTableList = new ArrayList<>();
            List<TableInfo> partitionTables = tableInfoService.selectExitPartitionFieldTables();
            if (CollectionUtil.isNotEmpty(partitionTables)) {
                String tableName;
                String partitionTableName;
                for (TableInfo partitionTable : partitionTables) {
                    partitionTableName = partitionTable.getDbName() + "." + partitionTable.getTableName();
                    for (JSONObject tableObj : tableList) {
                        tableName = tableObj.getString("dbName") + "." + tableObj.getString("tableName");
                        if (partitionTableName.equals(tableName)) {
                            tableObj.put("tableId", partitionTable.getId());
                            tableObj.put("createPer", partitionTable.getCreatePer());
                            partitionTableList.add(tableObj);
                        }
                    }
                }
            }
            countTableSize(0, partitionTableList);
        } catch (Exception e) {
            log.error("统计分区字段数据失败！" + e);
            if (scheduleJobLogEntity.getStatus().equals(Constant.ScheduleStatus.NORMAL.getValue())) {
                long times = System.currentTimeMillis() - startTime;
                scheduleJobLogEntity.setError(e.getMessage());
                scheduleJobLogEntity.setTimes((int) times);
                scheduleJobLogService.save(scheduleJobLogEntity);
            }
        }
        if (scheduleJobLogEntity.getStatus().equals(Constant.ScheduleStatus.NORMAL.getValue())) {
            long times = System.currentTimeMillis() - startTime;
            scheduleJobLogEntity.setTimes((int) times);
            scheduleJobLogService.save(scheduleJobLogEntity);
        }
        log.info("定时统计分区数据完成！");
    }

    /**
     * 获取分区大小
     *
     * @return
     * @throws Exception
     */
    public List<JSONObject> getTablePartitionSizeList(String day) throws Exception {
        List<JSONObject> tableResult = new ArrayList<>();
        String dbName;
        DBTypeEnum[] dbTypeEnums = DBTypeEnum.values();
        HdfsUtil hdfsUtil = new HdfsUtil(bdpJobConfig);
        List<String> tableNameList;
        List<JSONObject> countResult;
        JSONObject tableObj;
        JSONObject partitionObj;
        for (DBTypeEnum db : dbTypeEnums) {
            try {
                dbName = db.getName();
                String dbDir = dirHome + dbName + ".db";
                tableNameList = hdfsUtil.dbTableNameList(dbDir); // 获取数据仓库列表
                if (tableNameList != null) {
                    for (String t : tableNameList) {
                        String tableDir = dbDir + "/" + t;
                        try {
                            MysqlTableUtil mysqlTableUtil = new MysqlTableUtil(hiveMetaDataConfig.getHiveMetastoreUrl(), hiveMetaDataConfig.getHiveMetastoreDb(), hiveMetaDataConfig.getHiveMetastoreUsername(), hiveMetaDataConfig.getHiveMetastorePassword());
                            List<String> partitionList = mysqlTableUtil.getAllPartition(dbName, t,day); // 获取分区列表
                            if (partitionList.size() > 0) {
                                tableObj = new JSONObject();
                                tableObj.put("dbName", dbName);
                                tableObj.put("tableName", t);

                                countResult = new ArrayList<>();
                                for (String partitionName : partitionList) {
                                    Long partitionSize = hdfsUtil.getTableOrDbSize(tableDir + "/" + partitionName); // 获得分区大小
                                    partitionSize = partitionSize == null ? 0l : partitionSize;
                                    Long fileNum = hdfsUtil.getFileNum(tableDir + "/" + partitionName); // 获得分区大小
                                    fileNum = fileNum == null ? 0l : fileNum;

                                    partitionObj = new JSONObject();
                                    partitionObj.put("partitionName", partitionName);
                                    partitionObj.put("size", partitionSize.doubleValue());
                                    partitionObj.put("fileNum", fileNum);
                                    countResult.add(partitionObj);
                                }
                                tableObj.put("partitionList", countResult);
                                tableResult.add(tableObj);
                            }
                        } catch (Exception e) {
                            log.error("统计数据仓库分区:获取分区大小出错：" + e.getMessage());
                        }
                    }
                }
            } catch (Exception e) {
                log.error("统计数据仓库分区:获取表出错：" + e.getMessage());
            }
        }
        if(null != hdfsUtil){
            hdfsUtil.close();
        }
        return tableResult;
    }

    public void countTableSize(Integer m, List<JSONObject> tableList) {
        int tableIndex = 0;
        int rowsNum = 0;
        List<TablePartitionStatistics> saveList = new ArrayList<>();
        TablePartitionStatistics tablePartitionStatistics;
        JSONObject tableObj;
        String[] keyValue;
        String partitionValue;

        try {
            for (tableIndex = m; tableIndex < tableList.size(); tableIndex++) {
                tableObj = tableList.get(tableIndex);
                List<JSONObject> partitionList = (List<JSONObject>) tableObj.get("partitionList");
                if (partitionList != null) {
                    for (JSONObject partitionObj : partitionList) {
                        try {
                            MysqlTableUtil mysqlTableUtil = new MysqlTableUtil(hiveMetaDataConfig.getHiveMetastoreUrl(), hiveMetaDataConfig.getHiveMetastoreDb(), hiveMetaDataConfig.getHiveMetastoreUsername(), hiveMetaDataConfig.getHiveMetastorePassword());
                            rowsNum = mysqlTableUtil.dataPreviewRowNumByPartitionName(tableObj.getString("dbName"), tableObj.getString("tableName"), partitionObj.getString("partitionName"));

                            keyValue = partitionObj.getString("partitionName").split("=");
                            if (keyValue.length < 2) {
                                continue;
                            }
                            partitionValue = partitionObj.getString("partitionName");
                            Double size = partitionObj.getDouble("size");
                            Long fileNum = partitionObj.getLong("fileNum");
                            tablePartitionStatistics = new TablePartitionStatistics();
                            tablePartitionStatistics.setTabIdleId(tableObj.getInteger("tableId"));
                            tablePartitionStatistics.setPartitionValue(partitionValue);
                            tablePartitionStatistics.setRowsNum(rowsNum);
                            tablePartitionStatistics.setSize(size);
                            tablePartitionStatistics.setFileNum(fileNum);
                            tablePartitionStatistics.setCountTime(new Date());
                            saveList.add(tablePartitionStatistics);
                        } catch (Exception e) {
                            String msg = e.getMessage();
                            if (msg == null) {
                                msg = e.toString();
                            }
                            log.error("出错信息：" + tableList.get(tableIndex).getString("dbName") + "." + tableList.get(tableIndex).getString("tableName") + ":" + e.getMessage());
                            log.error("获取分区的大小和行数出错：{}", msg);
                        }
                    }
                }
            }
            if (saveList.size() > 0) {
                tablePartitionStatisticsService.remove(new QueryWrapper<>());
                tablePartitionStatisticsService.saveBatch(saveList);
            }
        } catch (Exception e) {
            log.error("保存分区值统计出错：{}", e);
            log.error("保存分区值统计出错：" + e.getMessage());

            countTableSize(tableIndex + 1, tableList);
        }
    }
}
