package com.talkingdata.hadoop.guard.schedule;

import com.talkingdata.hadoop.guard.bean.*;
import com.talkingdata.hadoop.guard.service.HadoopHandlerSerivce;
import com.talkingdata.hadoop.guard.mapper.*;
import com.talkingdata.hadoop.guard.util.LoadConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.apache.log4j.Logger;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

@Component
public class ScheduledTasks {
    // Kept public static for backward compatibility with any external references;
    // made final so it cannot be reassigned.
    public static final Logger log = Logger.getLogger(ScheduledTasks.class);

    @Autowired
    LoadConfig loadConfig;

    @Autowired
    NamenodeMapper namenodeMapper;

    @Autowired
    DfsClusterMapper dfsClusterMapper;

    @Autowired
    DataNodeMapper dataNodeMapper;

    @Autowired
    FolderMapper folderMapper;

    @Autowired
    RmMapper rmMapper;

    @Autowired
    YarnClusterMapper yarnClusterMapper;

    @Autowired
    ApplicationMapper applicationMapper;

    @Autowired
    AppFinishOffsetMapper appFinishOffsetMapper;

    @Autowired
    SchedulerMapper schedulerMapper;

    @Autowired
    StudentMapper studentMapper;

    @Autowired
    SchedulerReportMapper schedulerReportMapper;

    /**
     * Daily job: collects HDFS root-folder usage for every configured 3.1.0 cluster.
     * Cron expression comes from the {@code job.schedule.dailyRise} property.
     */
    @Scheduled(cron = "${job.schedule.dailyRise}")
    public void dailyRise() {
        ArrayList<XmlConfig> configure = loadConfig.readxml();
        for (XmlConfig xc : configure) {
            // Only Hadoop 3.1.0 clusters are supported by this collector.
            if (!"3.1.0".equals(xc.getVersion())) {
                continue;
            }
            HadoopHandlerSerivce hadoopHandlerSerivce = new HadoopHandlerSerivce();
            if (!initDfs(hadoopHandlerSerivce, xc)) {
                // Init failed: later RPCs would fail too, so skip this cluster,
                // but still clear any partially-established Kerberos state.
                UserGroupInformation.reset();
                continue;
            }
            // Collect folder usage under "/" for this cluster.
            collectFolderStates(hadoopHandlerSerivce, xc);
            // Clear the Kerberos login so the next cluster authenticates freshly.
            UserGroupInformation.reset();
        }
        // BUG FIX: previously logged "reportMetric completed" (copy-paste from reportMetric()).
        log.info("dailyRise completed !");
    }

    /**
     * Collects cluster metrics every 10 minutes (folder sizes excluded — those
     * are gathered by {@link #dailyRise()}): NameNode/DataNode state, HDFS
     * cluster summary, ResourceManager state, YARN cluster summary and
     * scheduler/queue state.
     */
    @Scheduled(fixedRate = 10 * 60000)
    public void reportMetric() {
        ArrayList<XmlConfig> configure = loadConfig.readxml();
        for (XmlConfig xc : configure) {
            // Only Hadoop 3.1.0 clusters are supported by this collector.
            if (!"3.1.0".equals(xc.getVersion())) {
                continue;
            }
            HadoopHandlerSerivce hadoopHandlerSerivce = new HadoopHandlerSerivce();
            if (!initDfs(hadoopHandlerSerivce, xc)) {
                // Init failed: later RPCs would fail too, so skip this cluster,
                // but still clear any partially-established Kerberos state.
                UserGroupInformation.reset();
                continue;
            }

            // NameNode HA state (also identifies the active NameNode).
            collectNamenodeStates(hadoopHandlerSerivce, xc);
            // HDFS cluster-wide summary.
            collectDfsClusterState(hadoopHandlerSerivce, xc);
            // Per-DataNode state.
            collectDataNodeStates(hadoopHandlerSerivce, xc);
            // YARN init + per-ResourceManager state.
            collectRmStates(hadoopHandlerSerivce, xc);
            // YARN cluster-wide summary.
            collectYarnClusterState(hadoopHandlerSerivce, xc);

            // Consumed only by the disabled application-collection block below;
            // kept so that block can be re-enabled without other changes.
            String yarnClusterId = hadoopHandlerSerivce.getYarnNameServices();
            /*// Collect YARN application (job) info — currently disabled.
            try {
                ArrayList<ApplicationState> appList = hadoopHandlerSerivce.getApplications();
                if (!appList.isEmpty()) {
                    for (ApplicationState app : appList) {
                        applicationMapper.insertApplication(app);
                    }
                }
                // Record the newest finished app of this round as an offset for inspection.
                long maxAppFinishedTime = applicationMapper.selectMaxFinishedTimeByClusterId(yarnClusterId);
                AppFinishOffset ao = applicationMapper.selectMaxAppByFinishedTime(yarnClusterId, maxAppFinishedTime);
                appFinishOffsetMapper.insertAppFinishOffset(ao);
            } catch (IOException e) {
                log.error(xc.getServiceId() + "hadoopHandlerSerivce.getApplications()  get failed !", e);
            }*/

            // Scheduler / queue state.
            collectSchedulerStates(hadoopHandlerSerivce, xc);

            // Clear the Kerberos login so the next cluster authenticates freshly.
            UserGroupInformation.reset();
        }
        log.info("reportMetric completed ！");
    }

    /** Initializes the HDFS client for one cluster; returns false on failure. */
    private boolean initDfs(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            svc.dfsInit(xc);
            return true;
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.dfsInit()  get failed !", e);
            return false;
        }
    }

    /** Fetches root-folder usage and persists one row per folder. */
    private void collectFolderStates(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            ArrayList<FolderState> fsList = svc.getFolderStatus("/");
            for (FolderState fs : fsList) {
                folderMapper.insertFolder(fs);
            }
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getFolderStatus()  get failed !", e);
        }
    }

    /** Fetches NameNode HA states and persists one row per NameNode. */
    private void collectNamenodeStates(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            List<NameNodeState> nnlist = svc.getNamenodeState();
            for (NameNodeState nn : nnlist) {
                namenodeMapper.insertNamenode(nn);
            }
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getNamenodeState() get failed !", e);
        }
    }

    /** Fetches the HDFS cluster summary and persists it. */
    private void collectDfsClusterState(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            DfsClusterState hdfsState = svc.getDfsClusterStatus();
            dfsClusterMapper.insertDfsCluster(hdfsState);
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getDfsClusterStatus() get failed !", e);
        }
    }

    /** Fetches DataNode states and persists one row per DataNode. */
    private void collectDataNodeStates(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            ArrayList<DataNodeState> dnList = svc.getDataNodeState();
            for (DataNodeState dn : dnList) {
                dataNodeMapper.insertDataNode(dn);
            }
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getDataNodeState()  get failed !", e);
        }
    }

    /** Initializes the YARN client, then persists one row per ResourceManager. */
    private void collectRmStates(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            svc.yarnInit(xc);
            ArrayList<RmState> rmList = svc.getRMState();
            for (RmState rs : rmList) {
                rmMapper.insertRm(rs);
            }
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getRMState()  get failed !", e);
        }
    }

    /** Fetches the YARN cluster summary and persists it. */
    private void collectYarnClusterState(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            YarnClusterState yarnClusterStatus = svc.getYarnClusterStatus();
            yarnClusterMapper.insertYarnCluster(yarnClusterStatus);
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getYarnClusterStatus()  get failed !", e);
        }
    }

    /** Fetches scheduler/queue states and persists one row per queue. */
    private void collectSchedulerStates(HadoopHandlerSerivce svc, XmlConfig xc) {
        try {
            ArrayList<SchedulerState> schedulerList = svc.getScheduler();
            for (SchedulerState ss : schedulerList) {
                schedulerMapper.insertScheduler(ss);
            }
        } catch (IOException e) {
            log.error(xc.getServiceId() + "hadoopHandlerSerivce.getScheduler()  get failed !", e);
        }
    }
}