package com.qingcloud.df.executor.cluster;

import com.google.common.collect.Maps;
import com.qingcloud.base.constant.SystemConstant;
import com.qingcloud.base.exception.WamingException;
import com.qingcloud.df.sdk.component.config.ExecutorConfig;
import com.qingcloud.df.sdk.component.context.CompInstContext;
import com.qingcloud.df.sdk.component.context.ConfigManager;
import com.qingcloud.df.sdk.component.enums.EnvEnum;
import com.qingcloud.df.sdk.component.message.ExecutorMetrics;
import com.qingcloud.df.sdk.component.message.NodeInfo;
import com.qingcloud.df.sdk.component.message.Ping;
import com.qingcloud.df.sdk.component.message.Pong;
import com.qingcloud.df.sdk.component.message.request.InstanceStartRequest;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

import javax.annotation.Resource;
import java.util.*;

@Slf4j
@Component
public class ServerService {
    // NOTE(review): the original declared a second, manual Logger field here,
    // shadowing the Lombok-generated `log`; the duplicate has been removed and
    // all call sites now use the @Slf4j `log`.

    @Resource
    private ServerChecker serverChecker;

    /**
     * Registers a node with the cluster.
     *
     * <p>Stores the node in {@link ClusterManager} keyed by its Akka address,
     * touches the instance lookup for every job the node reports, and updates
     * the persistent record for the node.
     *
     * @param nodeInfo the registering node's metadata (Akka address, metrics)
     */
    public void nodeRegister(NodeInfo nodeInfo) {
        ClusterManager.registerNode(nodeInfo.getAkkaAddress(), nodeInfo);

        ExecutorMetrics metrics = nodeInfo.getExecutorMetrics();
        if (metrics != null && metrics.getJobList() != null) {
            for (Integer jobId : metrics.getJobList()) {
                // NOTE(review): return value intentionally ignored in the
                // original — presumably this warms ClusterManager's
                // job->instance lookup; confirm the side effect is required.
                ClusterManager.getInstanceByJobId(jobId);
            }
        }
        // Update database record for this node (currently a stub, see below).
        updateRecord();
        // TODO (from original): clear all job info for the node, then re-assign.
        log.debug("Register node success.  nodeAddress={}", nodeInfo.getAkkaAddress());
    }

    /**
     * Handles a single node heartbeat: forwards it to the liveness checker,
     * registers the node if it is unknown, otherwise refreshes its entry.
     *
     * @param nodeInfo heartbeat payload identifying the node by Akka address
     */
    public void nodeHeartbeatCheck(NodeInfo nodeInfo) {
        String nodeId = nodeInfo.getAkkaAddress();

        serverChecker.submit(nodeInfo);
        // Unknown node: register it directly; known node: refresh its state.
        if (!ClusterManager.getNodeMap().containsKey(nodeId)) {
            nodeRegister(nodeInfo);
        } else {
            ClusterManager.getNodeById(nodeId).refresh(nodeInfo);
        }
    }

    /**
     * Batch-processes heartbeats from all nodes. For each node, compares the
     * jobs it reports against the jobs {@link ClusterManager} believes are
     * assigned to it; on any mismatch the node's job configuration is rebuilt.
     *
     * @param nodeInfos heartbeat payloads for every reporting node
     */
    @Transactional(rollbackFor = Exception.class)
    public void batchHeartbeat(List<NodeInfo> nodeInfos) {
        for (NodeInfo nodeInfo : nodeInfos) {
            log.debug("node info ---->{}", nodeInfo);
            // Guard against missing metrics/job list — the original
            // dereferenced them unconditionally and could NPE (nodeRegister
            // shows both can legitimately be null).
            ExecutorMetrics metrics = nodeInfo.getExecutorMetrics();
            if (metrics == null || metrics.getJobList() == null) {
                continue;
            }
            List<Integer> jobList = metrics.getJobList();
            Map<Integer, List<CompInstContext>> map =
                    ClusterManager.getJobByNodeId(nodeInfo.getAkkaAddress());
            // Cluster thinks the node owns jobs the node does not report:
            // re-assign by rebuilding the node's job configuration.
            if (map != null && !jobList.containsAll(map.keySet())) {
                buildJobConfig(nodeInfo.getAkkaAddress(), jobList);
            }
        }
    }

    /**
     * Collects every job in the cluster (for the given environment) that has
     * not yet been assigned to a node.
     *
     * @param env environment name; {@code EnvEnum.DEV.name()} selects the dev
     *            config, anything else (including null) the production config
     * @return jobId -> instance contexts for all unassigned jobs; never null
     */
    public static Map<Integer, List<CompInstContext>> getUnAssignedJobs(String env) {
        Map<Integer, List<CompInstContext>> unAssignMap = Maps.newConcurrentMap();
        // Constant-first equals: the original `env.equals(...)` NPEs on null.
        Map<Integer, CompInstContext> allJobs = EnvEnum.DEV.name().equals(env)
                ? ConfigManager.getDevConfig()
                : ConfigManager.getProConfig();
        if (allJobs != null) {
            for (CompInstContext context : allJobs.values()) {
                Integer jobId = context.getJobId();
                if (!ClusterManager.hasAssigned(jobId)) {
                    unAssignMap.computeIfAbsent(jobId, k -> new ArrayList<>()).add(context);
                }
            }
        }
        return unAssignMap;
    }

    /**
     * Called when an executor actively pulls work from the server. Keeps jobs
     * the executor already runs (unless they were re-assigned to another
     * node), then tops the node up with unassigned jobs until it is at
     * capacity.
     *
     * @param nodeId Akka address of the requesting executor node
     * @param jobIds jobs the executor currently reports running; may be null
     * @return start requests for every instance the node should be running
     * @throws WamingException if the node has not registered with the cluster
     */
    public List<InstanceStartRequest> buildJobConfig(String nodeId, List<Integer> jobIds) {
        NodeInfo nodeInfo = ClusterManager.getNodeById(nodeId);
        if (nodeInfo == null) {
            throw new WamingException("node " + nodeId + " has not register,please register");
        }
        List<InstanceStartRequest> waitingAssignList = new ArrayList<>();
        // Jobs already on the executor: keep them only if they have not been
        // handed to another node; jobs owned elsewhere are simply not re-added
        // and will therefore be removed on the executor side.
        if (jobIds != null) {
            for (Integer jobId : jobIds) {
                List<CompInstContext> contexts = ConfigManager.loadByJobId(jobId, nodeInfo.getEnv());
                if (!ClusterManager.hasAssigned(jobId, nodeId) && contexts != null) {
                    for (CompInstContext context : contexts) {
                        waitingAssignList.add(toStartRequest(context));
                    }
                }
            }
        }

        // Hand out unassigned jobs until the node reports it is at capacity.
        Map<Integer, List<CompInstContext>> unAssignMap = getUnAssignedJobs(nodeInfo.getEnv());
        for (Map.Entry<Integer, List<CompInstContext>> jobs : unAssignMap.entrySet()) {
            if (nodeInfo.isOverload()) { // no remaining capacity
                break;
            }
            for (CompInstContext context : jobs.getValue()) {
                waitingAssignList.add(toStartRequest(context));
            }
        }
        return waitingAssignList;
    }

    /**
     * Builds a start request for one component instance. Extracted because
     * the original duplicated this 7-line construction in both branches of
     * {@link #buildJobConfig(String, List)}.
     *
     * @param context the component-instance context to start
     * @return a populated {@link InstanceStartRequest}
     */
    private static InstanceStartRequest toStartRequest(CompInstContext context) {
        InstanceStartRequest request = new InstanceStartRequest();
        String objectName = String.format(SystemConstant.JOB_CONFIG_OBJECTNAME,
                context.getEnv(), context.getId().toString(), context.getSoftwareName());
        request.setConfigPath(objectName);
        request.setJobId(context.getJobId());
        request.setInstanceId(context.getId());
        request.setEnv(context.getEnv());
        return request;
    }

    /**
     * Answers an executor ping. The pong reports whether the pinging node is
     * known to the cluster and carries the server's own address as its nodeId.
     *
     * @param ping ping message carrying the executor's node id
     * @return pong with {@code knownNode} flag and the server address
     */
    public Pong executorPing(Ping ping) {
        Pong pong = new Pong();
        pong.setKnownNode(ClusterManager.getNodeById(ping.getNodeId()) != null);
        pong.setNodeId(ExecutorConfig.getServerAddress()); // server address
        return pong;
    }

    /**
     * Node consistency check. Stub — per the original TODO comments it should
     * verify node health, compare the node's jobs against ClusterManager's
     * view, and reconcile them.
     *
     * @param nodeInfo the node to check
     */
    public void nodeCheck(NodeInfo nodeInfo) {
        String nodeId = nodeInfo.getAkkaAddress();
        // TODO (from original): check node health.

        // TODO (from original): compare node job info with ClusterManager's.

        // TODO (from original): enforce job consistency.
    }

    /**
     * Removes a node from the cluster. Stub — not yet implemented.
     *
     * @param nodeInfo the node to remove
     */
    public void nodeRemove(NodeInfo nodeInfo) {

    }

    /**
     * Server-to-executor health check (S-E). Stub — not yet implemented.
     *
     * @param nodeId Akka address of the executor to check
     */
    public void executorCheck(String nodeId) {

    }

    /**
     * Executor-to-server health check (E-S): lets an executor verify the
     * server has started normally. Stub — not yet implemented.
     */
    public void serverCheck() {

    }

    /**
     * Looks up the node hosting the given job instance and should verify it
     * matches the cluster server's view. The not-found branch is still a stub.
     *
     * @param jobId      the job identifier
     * @param instanceId the instance identifier within the job
     */
    public void searchJob(Integer jobId, Integer instanceId) {
        String nodeId = ClusterManager.getNodeByInstanceId(jobId, instanceId);
        if (nodeId == null) {
            // TODO (from original): handle instance with no known node.
        }
    }

    /**
     * Persists the node record to the database. Stub — not yet implemented.
     * Renamed from the original's typo {@code updataRecord} (private, single
     * internal caller).
     */
    private void updateRecord() {

    }
}
