package com.kpmg.datalake.common.service.impl.job;

import com.alibaba.fastjson.JSON;
import com.kpmg.datalake.common.enums.ProjectSourceEnum;
import com.kpmg.datalake.common.enums.ProjectSyncStatusEnum;
import com.kpmg.datalake.common.service.IProjectService;
import com.kpmg.datalake.common.service.RbacFunctionService;
import com.kpmg.datalake.db.model.Project;
import com.kpmg.datalake.operatelog.enums.LogTypeEnum;
import com.kpmg.datalake.operatelog.model.OperateLog;
import com.kpmg.datalake.operatelog.service.IOperateLogService;
import com.kpmg.datalake.schedule.model.ScheduleJob;
import com.kpmg.datalake.schedule.service.ScheduleExecuteService;
import java.util.Date;
import java.util.List;
import javax.annotation.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

/**
 * Scheduled job that synchronizes project information.
 *
 * <p>On each trigger it (1) pulls the latest project list from the data lake,
 * (2) initializes the database (and, for data-lake-sourced projects, the SFTP
 * info) of every project still in {@code UN_INIT} state, writing an operate-log
 * entry per project, and (3) synchronizes user data permissions via RBAC.
 * Each stage is best-effort: a failure is logged and does not abort the job.
 *
 * @author Alex.zhao [alex.zhao@kpmg.com]
 * @since Created on azhao6 on 2018-08-27 22:36:40
 */
@Service
public class ProjectInfoSyncImpl implements ScheduleExecuteService {

  private static final Logger logger = LoggerFactory.getLogger(ProjectInfoSyncImpl.class);

  @Resource
  private IProjectService projectService;
  @Resource
  private IOperateLogService operateLogService;
  @Resource
  private RbacFunctionService rbacSynchronization;

  /**
   * Entry point invoked by the dynamic scheduling framework.
   *
   * @param scheduleJob job information supplied by the scheduler (not read here)
   */
  @Override
  public void execute(ScheduleJob scheduleJob) {
    syncProjectList();
    initPendingProjects();
    syncRbacPermissions();
  }

  /** Pulls the latest project data from the data lake; failures are logged, not rethrown. */
  private void syncProjectList() {
    logger.info("sync dataLake project  begin init ");
    try {
      projectService.syncProject();
      // FIX: previously logged "success" unconditionally, even after a failure.
      logger.info("sync dataLake project  success ");
    } catch (Exception e) {
      // FIX: a "{}" placeholder consumed the Throwable and suppressed its stack
      // trace; pass the exception as the trailing argument so SLF4J logs it fully.
      logger.error("sync dataLake project  failed", e);
    }
  }

  /** Initializes every project still in {@code UN_INIT} state, one at a time. */
  private void initPendingProjects() {
    List<Project> syncNotCompleteProjectList = projectService
        .getSyncNotCompleteProjectList(ProjectSyncStatusEnum.UN_INIT);
    for (Project project : syncNotCompleteProjectList) {
      initSingleProject(project);
    }
  }

  /**
   * Initializes one project's database (and SFTP info for data-lake projects),
   * updates its sync status, and records an operate-log entry win or lose.
   * Never throws, so one bad project cannot abort the remaining ones.
   *
   * @param project the project to initialize; its sync status is mutated and persisted
   */
  private void initSingleProject(Project project) {
    OperateLog operateLog = new OperateLog();
    // FIX: set the begin time before anything that can throw (JSON serialization
    // below), otherwise the finally block NPEs on getBeginTime().
    operateLog.setBeginTime(new Date());
    operateLog.setLogType(LogTypeEnum.SYSTEM.getCode());
    operateLog.setLogModule("interface");
    operateLog.setLogDesc("项目信息同步");
    operateLog.setClassName(getClass().getName());
    operateLog.setMethodName("initProjectDatabase");
    try {
      operateLog.setRequestParam(JSON.toJSONString(project));
      operateLog.setRequestStatus(0);
      logger.info("project {} begin init ", project.getPrjId());
      projectService.initProjectDatabase(project.getPrjId());
      logger.info("project {} end init Success ", project.getPrjId());
      // Projects sourced from the data lake also need their SFTP info synchronized.
      if (ProjectSourceEnum.DATALAKE.getCode().equals(project.getPrjSource())) {
        projectService.syncProjectSftpInfo(project.getPrjId());
      }
      logger.info("project {} end sync sftp info Success ", project.getPrjId());
      // Update the project sync status. (Original comment had typo "更像" for "更新".)
      project.setSyncStatus(ProjectSyncStatusEnum.UN_SYNC.getCode());
      projectService.updateById(project);
    } catch (Exception e) {
      // FIX: failure was logged at INFO; log at ERROR with the stack trace instead.
      logger.error("project {} end init failed ", project.getPrjId(), e);
      operateLog.setRequestStatus(1);
      operateLog.setRequestResult(e.getMessage());
      project.setSyncStatus(ProjectSyncStatusEnum.SYNC_FAILED.getCode());
      try {
        projectService.updateById(project);
      } catch (Exception updateEx) {
        // FIX: an exception here used to escape the catch block, aborting the
        // whole loop and skipping all remaining projects and the RBAC sync.
        logger.error("project {} failed to persist SYNC_FAILED status",
            project.getPrjId(), updateEx);
      }
    } finally {
      long endTime = System.currentTimeMillis();
      operateLog.setEndTime(new Date(endTime));
      operateLog.setCostTime(endTime - operateLog.getBeginTime().getTime());
      try {
        operateLogService.insert(operateLog);
      } catch (Exception insertEx) {
        // FIX: an insert failure here also used to abort the loop; best-effort only.
        logger.error("project {} failed to insert operate log",
            project.getPrjId(), insertEx);
      }
    }
  }

  /** Synchronizes user data permissions via RBAC; failures are logged, not rethrown. */
  private void syncRbacPermissions() {
    try {
      rbacSynchronization.rbacSynchronization();
    } catch (Exception e) {
      logger.error(e.getMessage(), e);
    }
  }
}
