package com.hub.realtime.resource.service.impl;


import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.hub.realtime.common.core.domain.model.ClusterInfo;
import com.hub.realtime.common.core.domain.model.LoginUser;
import com.hub.realtime.common.exception.UtilException;
import com.hub.realtime.common.utils.DateUtils;
import com.hub.realtime.common.utils.StringUtils;
import com.hub.realtime.common.utils.hadoop.HadoopUtil;
import com.hub.realtime.common.utils.hadoop.HdfsUtil;

import com.hub.realtime.framework.service.ServiceBase;
import com.hub.realtime.resource.domain.ResFlinkVersions;
import com.hub.realtime.resource.dto.input.ClusterInfoQueryParam;
import com.hub.realtime.resource.service.IResFlinkVersionsService;
import com.hub.realtime.system.service.ISysConfigService;
import com.streamxhub.streamx.common.util.HdfsUtils;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.hub.realtime.resource.mapper.ResClusterMapper;
import com.hub.realtime.resource.domain.ResCluster;
import com.hub.realtime.resource.service.IResClusterService;
import org.springframework.transaction.annotation.Transactional;

import static com.hub.realtime.common.constant.HdfsConstants.*;

/**
 * Cluster management service implementation.
 *
 * <p>Handles CRUD for {@code ResCluster} rows and, on creation/update, asynchronously
 * provisions the target Hadoop cluster with the selected Flink distribution plus the
 * matching shims / connector / SQL-client jars and plugins on HDFS.
 *
 * @author wupeng
 * @date 2021-12-30
 */
@Service
@Slf4j
@Transactional
public class ResClusterServiceImpl extends ServiceBase<ResClusterMapper, ResCluster> implements IResClusterService {

    /** Placeholder file kept in otherwise-empty resource directories; never uploaded to HDFS. */
    private static final String KEEP_FILE = ".gitkeep";

    // NOTE(review): unescaped '.' in the version alternation matches any character
    // (e.g. "1x12"); kept as-is to preserve existing matching behavior — confirm intent.
    private static final String SHIM_REGEX = "^flink-shims-(1.12|1.13|1.14)-(.*).jar$";
    private static final String CONNECTOR_REGEX = "^flink-connector-(1.12|1.13|1.14)-(.*).jar$";
    private static final String SQL_CLIENT_REGEX = "^sqlclient-(1.12|1.13|1.14)-(.*).jar$";

    @Autowired
    IResFlinkVersionsService flinkVersionsService;

    @Autowired
    ISysConfigService configService;

    /**
     * Creates or updates a cluster record, then asynchronously uploads the Flink
     * distribution and auxiliary jars to the cluster's HDFS.
     *
     * <p>The DB row is written with {@code createSuccess=false} first; the background
     * task flips it to {@code true} once all uploads finish, or deletes the row and
     * the partially-created HDFS layout on failure.
     *
     * @param resCluster cluster record to save (update when {@code id} is set)
     * @return rows inserted for the insert path; -1 on the update path
     *         (NOTE(review): update result is discarded — confirm callers expect -1 here)
     * @throws UtilException when the cluster connectivity test fails
     */
    @Override
    public int saveInfo(ResCluster resCluster) {
        if (!clusterTest(resCluster)) {
            throw new UtilException("集群不可用不能修改或添加，请检查hadoop配置文件和yarn配置文件是否正确！");
        }
        int res = -1;
        LoginUser loginUser = getLoginUser();
        // Mark as not-yet-provisioned; the async task flips this flag on success.
        resCluster.setCreateSuccess(false);
        if (resCluster.getId() != null) {
            resCluster.setUpdateBy(loginUser.getUsername());
            resCluster.setUpdateTime(DateUtils.getNowDate());
            baseMapper.updateById(resCluster);
        } else {
            resCluster.setUserId(loginUser.getUserId());
            resCluster.setCreateBy(loginUser.getUsername());
            resCluster.setCreateTime(DateUtils.getNowDate());
            res = baseMapper.insert(resCluster);
        }
        ClusterInfo clusterInfo = new ClusterInfo();
        clusterInfo.setHadoopConfDir(resCluster.getHadoopDir());
        clusterInfo.setKerberosConfDir(resCluster.getKererosDir());
        // Uploads can take minutes; run them off the request thread.
        executorService.execute(() -> provisionCluster(resCluster, clusterInfo));
        return res;
    }

    /**
     * Background provisioning: uploads the Flink distribution, versioned jars and
     * plugins to HDFS, then marks the cluster row as successfully created.
     * On any failure the DB row and the HDFS base directory are rolled back.
     */
    private void provisionCluster(ResCluster resCluster, ClusterInfo clusterInfo) {
        try {
            String flinkMainVersion = ensureFlinkDistribution(resCluster, clusterInfo);
            uploadVersionedJars(clusterInfo, "/shims", SHIM_REGEX, APP_SHIMS, flinkMainVersion, "shims");
            uploadVersionedJars(clusterInfo, "/connectors", CONNECTOR_REGEX, APP_CONNECTORS, flinkMainVersion, "connector");
            uploadVersionedJars(clusterInfo, "/sqlclients", SQL_CLIENT_REGEX, APP_SQLCLIENTS, flinkMainVersion, "sql客户端");
            uploadPlugins(clusterInfo);
            resCluster.setCreateSuccess(true);
            baseMapper.updateById(resCluster);
        } catch (Exception ex) {
            // Roll back the DB row and any partially-created HDFS layout.
            // NOTE(review): deleting HDFS_BASE_DIR wipes the whole base dir, not just
            // this cluster's files — confirm this is intended for shared clusters.
            baseMapper.deleteById(resCluster.getId());
            HdfsUtil.delete(clusterInfo, HDFS_BASE_DIR);
            // Keep the full stack trace in the log instead of printStackTrace().
            log.error("创建集群失败：{}", ex.getMessage(), ex);
        }
        updateFlinkVersion();
    }

    /**
     * Uploads the selected Flink distribution to HDFS (if absent) and marks the
     * version row as in use.
     *
     * @return the Flink "main" version prefix, e.g. {@code "1.12"}
     * @throws UtilException when no {@code ResFlinkVersions} row matches the cluster's version
     */
    private String ensureFlinkDistribution(ResCluster resCluster, ClusterInfo clusterInfo) throws Exception {
        if (!HdfsUtil.exists(clusterInfo, HDFS_FLINK_HOME)) {
            HdfsUtil.mkdirs(clusterInfo, HDFS_FLINK_HOME);
        }
        LambdaQueryWrapper<ResFlinkVersions> wrapper = new LambdaQueryWrapper<>();
        wrapper.eq(ResFlinkVersions::getFlinkVersion, resCluster.getFlinkVersion());
        ResFlinkVersions flinkVersions = flinkVersionsService.getBaseMapper().selectOne(wrapper);
        // Previously a missing version row caused a bare NPE; fail with a clear message instead.
        if (flinkVersions == null) {
            throw new UtilException("未找到flink版本配置：" + resCluster.getFlinkVersion());
        }
        String flinkHdfsHome = HDFS_FLINK_HOME.concat("/").concat(resCluster.getFlinkVersion());
        if (!HdfsUtil.exists(clusterInfo, flinkHdfsHome)) {
            log.info("======开始上传flink到hadoop集群上边=========");
            HdfsUtil.upload(clusterInfo, flinkVersions.getLocalPath(), flinkHdfsHome, false, false);
            log.info("======flink到hadoop集群上边完成=========");
        }
        flinkVersions.setInUse(true);
        flinkVersionsService.getBaseMapper().updateById(flinkVersions);
        // e.g. "1.12.7" -> "1.12"; used to pick matching shims/connectors.
        return resCluster.getFlinkVersion().substring(0, 4);
    }

    /**
     * Uploads the jars under {@code appHome + localDirName} whose name matches
     * {@code regex} and whose captured version equals {@code flinkMainVersion}
     * into {@code hdfsBase + "/flink" + flinkMainVersion}.
     *
     * <p>Skipped entirely when the local directory is missing or the HDFS target
     * directory already exists (treated as already provisioned).
     *
     * @param label human-readable artifact name used in log messages
     */
    private void uploadVersionedJars(ClusterInfo clusterInfo, String localDirName, String regex,
                                     String hdfsBase, String flinkMainVersion, String label) throws Exception {
        File localDir = new File(getAppHome().concat(localDirName));
        if (!localDir.exists()) {
            return;
        }
        String hdfsDir = hdfsBase.concat("/flink").concat(flinkMainVersion);
        if (HdfsUtil.exists(clusterInfo, hdfsDir)) {
            // Target directory already present — assume previously provisioned.
            return;
        }
        HdfsUtil.mkdirs(clusterInfo, hdfsDir);
        Pattern pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE | Pattern.DOTALL);
        // Filter with String.matches to preserve the original (case-sensitive) selection.
        File[] candidates = localDir.listFiles(pathname -> pathname.getName().matches(regex));
        for (File file : Objects.requireNonNull(candidates)) {
            Matcher matcher = pattern.matcher(file.getName());
            // Files passed the filter, so find() locates the version group at position 0.
            if (!matcher.find() || !matcher.group(1).equals(flinkMainVersion)) {
                continue;
            }
            String hdfsFile = hdfsDir.concat("/").concat(file.getName());
            if (!HdfsUtil.exists(clusterInfo, hdfsFile) && !KEEP_FILE.equals(file.getName())) {
                log.info("======开始上传{}到hadoop集群上边=========", label);
                HdfsUtil.upload(clusterInfo, file.getAbsolutePath(), hdfsDir, false, true);
                log.info("======{}到hadoop集群上边完成=========", label);
            }
        }
    }

    /**
     * Uploads every file under {@code appHome/plugins} to {@code APP_PLUGINS} on HDFS,
     * skipping files already present and the {@code .gitkeep} placeholder.
     */
    private void uploadPlugins(ClusterInfo clusterInfo) throws Exception {
        File plugins = new File(getAppHome().concat("/plugins"));
        if (!plugins.exists()) {
            return;
        }
        if (!HdfsUtil.exists(clusterInfo, APP_PLUGINS)) {
            HdfsUtil.mkdirs(clusterInfo, APP_PLUGINS);
        }
        for (File file : Objects.requireNonNull(plugins.listFiles())) {
            String plugin = APP_PLUGINS.concat("/").concat(file.getName());
            if (!HdfsUtil.exists(clusterInfo, plugin) && !KEEP_FILE.equals(file.getName())) {
                log.info("load plugin:{} to {}", file.getName(), APP_PLUGINS);
                HdfsUtil.upload(clusterInfo, file.getAbsolutePath(), APP_PLUGINS, false, true);
            }
        }
    }

    /**
     * Deletes cluster records in bulk.
     *
     * @param ids primary keys of the clusters to delete
     * @return number of rows deleted
     * @throws UtilException when any selected cluster is currently in use
     */
    @Override
    public int deleteInfos(Long[] ids) {
        List<ResCluster> resClusters = baseMapper.selectBatchIds(Arrays.asList(ids));
        List<ClusterInfo> clusterInfos = new ArrayList<>();
        if (resClusters != null) {
            for (ResCluster cluster : resClusters) {
                // Refuse the whole batch as soon as one in-use cluster is found.
                if (Boolean.parseBoolean(cluster.getIsInUse())) {
                    throw new UtilException("存在正在使用的集群，不允许删除，请选择未使用的集群进行删除！");
                }
                ClusterInfo clusterInfo = new ClusterInfo();
                clusterInfo.setHadoopConfDir(cluster.getHadoopDir());
                clusterInfo.setKerberosConfDir(cluster.getKererosDir());
                clusterInfos.add(clusterInfo);
            }
        }
        // Evict cached Hadoop configuration for the removed clusters.
        clusterInfos.forEach(HadoopUtil::clearClusterInfo);
        int res = delByIds(ids);
        updateFlinkVersion();
        return res;
    }

    /**
     * Clears the in-use flag on every Flink version that is no longer referenced
     * by any remaining cluster record.
     */
    private void updateFlinkVersion() {
        List<ResCluster> currentList = baseMapper.selectList(new LambdaQueryWrapper<>());
        LambdaQueryWrapper<ResFlinkVersions> wrapper = new LambdaQueryWrapper<>();
        if (currentList != null && !currentList.isEmpty()) {
            List<String> currentVersions = currentList.stream()
                    .map(ResCluster::getFlinkVersion)
                    .collect(Collectors.toList());
            // With no exclusion (empty DB) every version gets reset below.
            wrapper.notIn(ResFlinkVersions::getFlinkVersion, currentVersions);
        }
        for (ResFlinkVersions version : flinkVersionsService.getBaseMapper().selectList(wrapper)) {
            version.setInUse(false);
            flinkVersionsService.getBaseMapper().updateById(version);
        }
    }

    /**
     * Tests whether the Hadoop cluster described by {@code resCluster} is reachable:
     * both the HDFS default FS and the YARN ResourceManager web URL must resolve.
     *
     * @param resCluster cluster whose configuration directories are probed
     * @return true when both endpoints resolve to non-empty values
     * @throws UtilException when connectivity cannot be established
     */
    @Override
    public boolean clusterTest(ResCluster resCluster) {
        ClusterInfo clusterInfo = new ClusterInfo();
        clusterInfo.setHadoopConfDir(resCluster.getHadoopDir());
        clusterInfo.setKerberosConfDir(resCluster.getKererosDir());
        // Drop any cached configuration so the test reflects the submitted dirs.
        HadoopUtil.clearClusterInfo(clusterInfo);
        try {
            String defaultFS = HdfsUtil.getDefaultFS(clusterInfo);
            String rmWebAppURL = HadoopUtil.getRMWebAppURL(clusterInfo, true);
            return StringUtils.isNotEmpty(defaultFS) && StringUtils.isNotEmpty(rmWebAppURL);
        } catch (Exception ex) {
            // Log the full cause before rethrowing with a user-facing message.
            log.error("集群连通性测试失败", ex);
            throw new UtilException("集群不能连通，请检查配置文件是否正确：" + ex.getMessage());
        }
    }

    /**
     * @return all enabled clusters whose HDFS provisioning completed successfully
     */
    @Override
    public List<ResCluster> getCurrentClusterInfos() {
        LambdaQueryWrapper<ResCluster> wrapper = new LambdaQueryWrapper<>();
        // isEnable is stored as a string column; createSuccess as a boolean.
        wrapper.eq(ResCluster::getIsEnable, Boolean.toString(true))
                .eq(ResCluster::getCreateSuccess, true);
        return baseMapper.selectList(wrapper);
    }

    /**
     * @return the application home directory configured under {@code sys:app:home}
     */
    @Override
    public String getAppHome() {
        return configService.selectConfigByKey("sys:app:home");
    }

    /**
     * Pages cluster records, optionally filtered by a fuzzy cluster-name match,
     * newest first.
     *
     * @param param query parameters (paging + optional cluster name)
     * @return one page of matching clusters
     */
    @Override
    public Page<ResCluster> list(ClusterInfoQueryParam param) {
        LambdaQueryWrapper<ResCluster> wrapper = new LambdaQueryWrapper<>();
        if (StringUtils.isNotEmpty(param.getClusterName())) {
            wrapper.like(ResCluster::getClusterName, param.getClusterName());
        }
        wrapper.orderByDesc(ResCluster::getCreateTime);
        return getPageList(param, wrapper);
    }
}
