package com.ideal.hadoopadmin.service.cluster;

import com.github.pagehelper.PageHelper;
import com.github.pagehelper.PageInfo;
import com.ideal.hadoopadmin.api.better.hdfs.HadoopHDFSAPI;
import com.ideal.hadoopadmin.api.better.kerberos.KerberosAPI;
import com.ideal.hadoopadmin.api.better.linux.ClusterUserAPI;
import com.ideal.hadoopadmin.api.better.yarn.YarnAPI;
import com.ideal.hadoopadmin.api.hdfs.HDFSAPI;
import com.ideal.hadoopadmin.api.kerberos.KDCAPI;
import com.ideal.hadoopadmin.api.linux.UserAPI;
import com.ideal.hadoopadmin.common.entity.Result;
import com.ideal.hadoopadmin.common.entity.ResultAPI;
import com.ideal.hadoopadmin.common.framework.orm.SearchFilter;
import com.ideal.hadoopadmin.entity.cluster.ClusterUser;
import com.ideal.hadoopadmin.entity.cluster.user.Clientquota;
import com.ideal.hadoopadmin.entity.cluster.user.Hdfsquota;
import com.ideal.hadoopadmin.entity.cluster.user.Kbrconfig;
import com.ideal.hadoopadmin.entity.cluster.user.Queue;
import com.ideal.hadoopadmin.mapper.webdb.cluster.ClusterUserMapper;
import com.ideal.hadoopadmin.mapper.webdb.cluster.user.HdfsquotaMapper;
import com.ideal.hadoopadmin.mapper.webdb.cluster.user.KbrAuthMapper;
import com.ideal.hadoopadmin.mapper.webdb.cluster.user.KbrconfigMapper;
import com.ideal.hadoopadmin.mapper.webdb.cluster.user.QueueMapper;
import com.ideal.hadoopadmin.mapper.webdb.meta.*;
import com.ideal.hadoopadmin.service.cluster.user.ClientquotaService;
import com.ideal.hadoopadmin.service.cluster.user.HdfsquotaService;
import com.ideal.hadoopadmin.service.cluster.user.KbrconfigService;
import com.ideal.hadoopadmin.service.cluster.user.QueueService;
import com.ideal.hadoopadmin.service.meta.hive.MetaHiveInfoService;
import com.ideal.tools.scheduler.DbFairSchedulerTools;
import com.ideal.tools.ssh.entity.ContextResult;
import com.ideal.tools.ssh.entity.FairScheduler;
import com.ideal.tools.ssh.result.LinuxResult;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * Service layer for cluster (tenant) user management: CRUD operations,
 * Kerberos enable/pause, password reset, and the multi-step tenant
 * provisioning flow (user -> kerberos -> HDFS quota -> YARN queues -> client quota).
 *
 * <p>Remote operations go through the {@code ClusterUserAPI}/{@code KerberosAPI}/
 * {@code HadoopHDFSAPI}/{@code YarnAPI} facades; their outcome is read back via the
 * static {@code ResultAPI.initAPIResult(...)} + {@code ResultAPI.flag}/{@code messageList}
 * pattern established elsewhere in this codebase (NOTE: that static state is not
 * thread-safe — kept as-is for compatibility).
 *
 * Created by CC on 2016/2/19.
 */
@Service
public class ClusterUserServiceNew {
    @Resource
    private ClusterUserMapper clusterUserMapper;
    @Resource
    private MetaHdfsInfoMapper metaHdfsInfoMapper;
    @Resource
    private MetaHdfsAccessMapper metaHdfsAccessMapper;
    @Resource
    private MetaHiveInfoMapper metaHiveInfoMapper;
    @Resource
    private MetaHiveAccessMapper metaHiveAccessMapper;
    @Resource
    private QueueMapper queueMapper;
    @Resource
    private HdfsquotaMapper hdfsquotaMapper;
    @Resource
    private MetaHdfsInfoBakMapper metaHdfsInfoBakMapper;
    @Resource
    private KbrconfigMapper kbrconfigMapper;
    @Resource
    private HdfsquotaService hdfsquotaService;
    @Resource
    private ClientquotaService clientquotaService;
    @Resource
    private KbrconfigService kbrconfigService;
    @Resource
    private QueueService queueService;
    @Resource
    ParameterService parameterService;
    @Resource
    ClusterMachineService clusterMachineService;
    @Resource
    private KbrAuthMapper kbrAuthMapper;
    @Resource
    private MetaHiveSqlMapper metaHiveSqlMapper;
    @Resource
    MetaHiveInfoService metaHiveInfoService;

    private static final Logger logger = LoggerFactory.getLogger(ClusterUserServiceNew.class);

    /**
     * Pages through cluster users matching the submitted search filters.
     *
     * @param searchParams filter map parsed into a WHERE clause by {@link SearchFilter}
     * @param request      used only to read the optional "page" parameter (defaults to 1)
     * @return one page (size 10, newest first) of matching {@link ClusterUser} rows
     */
    public PageInfo searchUser(Map<String, Object> searchParams, HttpServletRequest request) {
        // Build the WHERE clause from the submitted filters.
        String where = SearchFilter.parseToString(searchParams);
        // Current page defaults to 1 when no "page" parameter is present.
        String pageParam = request.getParameter("page");
        int currentPage = pageParam == null ? 1 : Integer.parseInt(pageParam);
        // Page size 10, newest users first.
        PageHelper.startPage(currentPage, 10, "createTime desc");
        // Attach the assembled WHERE condition to the paged query.
        PageHelper.setAppendWhere(where);
        List<ClusterUser> clusterUsers = clusterUserMapper.findClusterUser();
        return new PageInfo(clusterUsers);
    }

    /**
     * Queries all cluster users.
     */
    public List<ClusterUser> queryClusterUser() {
        return clusterUserMapper.queryClusterUser();
    }

    /**
     * Queries a single cluster user by id.
     */
    public ClusterUser queryClusterUserById(Long id) {
        return clusterUserMapper.queryClusterUserById(id);
    }

    /**
     * Updates a cluster user, stamping the change time with the current time.
     */
    public void updateClusterUser(ClusterUser clusterUser) {
        clusterUser.setChangeTime(System.currentTimeMillis());
        clusterUserMapper.update(clusterUser);
    }

    /**
     * Queries all users belonging to the given company.
     */
    public List<ClusterUser> queryClusterUserBySystemId(Long sysCompanyId) {
        return clusterUserMapper.queryClusterUserBySystemId(sysCompanyId);
    }

    /**
     * Queries all users belonging to the given cluster type.
     */
    public List<ClusterUser> queryClusterUserByClusterTypeId(Long clusterTypeId) {
        return clusterUserMapper.queryClusterUserByClusterTypeId(clusterTypeId);
    }

    /**
     * Queries users matching both company id and cluster type id.
     * update20160713qinfengxia
     *
     * @param sysCompanyId  company id
     * @param clusterTypeId cluster type id
     * @return matching users
     */
    public List<ClusterUser> queryClusterUserByParams(Long sysCompanyId, Long clusterTypeId) {
        return clusterUserMapper.queryClusterUserByParams(sysCompanyId, clusterTypeId);
    }

    /**
     * Persists a tenant (user + hdfs quota + kerberos config + queues) in one transaction.
     *
     * @return {@code false} when a different user with the same name already exists in
     *         the same cluster (conflict), {@code true} when everything was saved
     */
    @Transactional
    public boolean saveClusterUerDB(ClusterUser clusterUser, Hdfsquota hdfsquota, Kbrconfig kbrconfig, List<Queue> queues) {
        ClusterUser oldClusterUser = clusterUserMapper.findByNameAndCluster(clusterUser.getUserName(), clusterUser.getClusterTypeId());
        // Compare boxed Longs with equals(): the original used "!=", which compares
        // references and misreports a conflict for any id outside the Integer cache.
        if (oldClusterUser != null && !Objects.equals(oldClusterUser.getId(), clusterUser.getId())) {
            return false; // user name conflict within the cluster
        }
        clusterUserMapper.save(clusterUser);
        hdfsquotaService.saveHdfsDB(hdfsquota, clusterUser.getId());
        kbrconfigService.saveKbrDB(kbrconfig, clusterUser.getId());
        queueService.saveQueuesDB(queues, clusterUser.getId());
        return true;
    }

    /**
     * Deletes a user: first drops the user on the cluster via the remote API,
     * then (only if that succeeded) removes all database records.
     * update20160802qinfengxia
     *
     * @param id cluster user id
     * @return HTML message fragments describing each step's outcome
     */
    public List<String> deleteClusterUser(Long id) {
        List<String> messageList = new ArrayList<String>();
        List<LinuxResult> linuxResults = deleteClusterUserAPI(id);
        ResultAPI.initAPIResult(linuxResults);
        messageList.addAll(ResultAPI.messageList);
        if (ResultAPI.flag) {
            deleteClusterUserById(id);
            messageList.add("<div>数据库信息:删除用户成功!</div>");
        }
        return messageList;
    }

    /**
     * Calls the remote API to drop everything related to the given user on the cluster.
     * update20160802qinfengxia
     *
     * @param id cluster user id
     * @return raw results of the last remote step
     */
    public List<LinuxResult> deleteClusterUserAPI(Long id) {
        ClusterUser clusterUser = clusterUserMapper.findById(id);
        Map<String, Object> params = new HashMap<String, Object>();
        params.put(UserAPI.Cluster_User_Name, clusterUser.getUserName());
        params.put(UserAPI.Cluster_User_PW, clusterUser.getClientPW());
        params.put(UserAPI.Cluster_User_SysPW, clusterUser.getSystemPW());
        ContextResult contextResult = ClusterUserAPI.DropClusterUser(params);
        return contextResult.getLastResult();
    }

    /**
     * Removes every database record tied to the given user id, in one transaction.
     *
     * @return the literal string "success" (kept for caller compatibility)
     */
    @Transactional
    public String deleteClusterUserById(Long id) {
        // hiveAccess/hdfsAccess rows are removed by userId and by the hdfsInfo/hiveInfo
        // ids that are about to be deleted below.
        metaHdfsAccessMapper.deleteByUserIdOrHdfsInfoId(id);
        metaHiveAccessMapper.deleteByUserIdOrHiveInfoId(id);
        clusterUserMapper.deleteById(id);
        metaHdfsInfoMapper.deleteByUserId(id);
        metaHdfsInfoBakMapper.deleteByUserId(id);
        metaHiveInfoMapper.deleteByUserId(id);
        queueMapper.deleteByUserId(id);
        hdfsquotaMapper.deleteByUserId(id);
        kbrconfigMapper.deleteByUserId(id);
        // meta_hive_sql: resolve hiveInfoIds from the userId, then delete those sql rows.
        metaHiveSqlMapper.deleteByUserId(id);
        kbrAuthMapper.deleteByUserId(id);
        return "success";
    }

    /**
     * Re-enables (authenticates) a paused user via the KDC and flips the stored status.
     *
     * @return HTML message fragments describing each step's outcome
     */
    public List<String> openUser(Long userId) {
        List<String> messageList = new ArrayList<String>();
        Kbrconfig kbrconfig = kbrconfigService.findByUserId(userId);
        if (kbrconfig == null) {
            // No kerberos config on file: nothing to authenticate against.
            messageList.add("<div style='color:red'>kbr未认证,请在修改界面!</div>");
            return messageList;
        }
        // Old call (callKDCAPI) replaced by the new implementation. update20160802qinfengxia
        List<LinuxResult> linuxResults = kbrconfigService.callKDCAPINew(kbrconfig);
        ResultAPI.initAPIResult(linuxResults);
        messageList.addAll(ResultAPI.messageList);
        if (ResultAPI.flag) {
            clusterUserMapper.updateStatus(userId, ClusterUser.onStatus);
            messageList.add("<div>数据库信息:开启认证成功!</div>");
        }
        return messageList;
    }

    /**
     * Pauses a user by destroying its client Kerberos credentials and flipping the
     * stored status to disabled.
     *
     * @return HTML message fragments describing each step's outcome
     */
    public List<String> pauseUser(Long userId) {
        List<String> messageList = new ArrayList<String>();
        ClusterUser clusterUser = clusterUserMapper.findById(userId);
        Kbrconfig kbrconfig = kbrconfigService.findByUserId(userId);
        if (kbrconfig == null) {
            // Mirror openUser(): without a kerberos config there is nothing to destroy
            // (the original dereferenced kbrconfig unconditionally and threw NPE here).
            messageList.add("<div style='color:red'>kbr未认证,请在修改界面!</div>");
            return messageList;
        }
        String machineIps = clusterMachineService.getMachineIpsByIds(kbrconfig.getMachineIds());
        // Old call (destroyClientKerberosAPI) replaced by the new implementation. update20160802qinfengxia
        List<LinuxResult> linuxResults = kbrconfigService.destroyClientKerberosAPINew(clusterUser.getUserName(), machineIps);
        ResultAPI.initAPIResult(linuxResults);
        messageList.addAll(ResultAPI.messageList);
        if (ResultAPI.flag) {
            clusterUserMapper.updateStatus(userId, ClusterUser.unStatus);
            messageList.add("<div>数据库信息:用户暂停成功!</div>");
        }
        return messageList;
    }

    /**
     * Resets a user's client password: pushes the new password to the cluster first,
     * then persists it only when the remote call succeeded.
     *
     * @return HTML message fragments describing each step's outcome
     */
    public List<String> resetPassword(Long id, String clientPW) {
        List<String> messageList = new ArrayList<String>();
        ClusterUser clusterUser = clusterUserMapper.findById(id);
        clusterUser.setClientPW(clientPW);
        List<LinuxResult> linuxResults = resetUserPwdAPI(clusterUser);
        ResultAPI.initAPIResult(linuxResults);
        messageList.addAll(ResultAPI.messageList);
        if (ResultAPI.flag) {
            clusterUserMapper.update(clusterUser);
            messageList.add("<div>数据库信息:重置用户密码成功!</div>");
        }
        return messageList;
    }

    /**
     * Invokes the remote password-reset API for the given user.
     */
    public List<LinuxResult> resetUserPwdAPI(ClusterUser clusterUser) {
        Map<String, Object> params = initUserAPI(clusterUser, null);
        ContextResult contextResult = ClusterUserAPI.ResetUserPassword(params);
        return contextResult.getLastResult();
    }

    /**
     * Extracts the results of one named step from a remote call's context.
     */
    public List<LinuxResult> getResultList(ContextResult contextResult, String curStep) {
        return contextResult.getResultByName(curStep);
    }

    /**
     * Full tenant provisioning: user, kerberos, HDFS quota, YARN queues and client
     * quota, in that order. Steps 2-5 only run when step 1 (user) succeeded.
     * update20160715qinfengxia
     *
     * @param clusterUser     the user to provision
     * @param hdfsquota       HDFS quota config (may be null — step 3 is then skipped)
     * @param kbrconfig       kerberos config
     * @param queues          YARN queue definitions
     * @param clientquotaList per-machine client catalog quotas
     * @return accumulated result with flag and HTML step messages
     */
    public Result saveCluserUser(ClusterUser clusterUser, Hdfsquota hdfsquota, Kbrconfig kbrconfig, List<Queue> queues, List<Clientquota> clientquotaList) {
        Result result = new Result();
        result.setFlag(true);
        // Build "machineId,catalog,sizeInBytes" entries joined by ';'. The original
        // stripped a trailing ';' with an unconditional substring(), which threw
        // StringIndexOutOfBoundsException for an empty client list.
        StringBuilder clientBuilder = new StringBuilder();
        for (Clientquota client : clientquotaList) {
            if (clientBuilder.length() > 0) {
                clientBuilder.append(";");
            }
            clientBuilder.append(client.getMachineId())
                    .append(",")
                    .append(client.getClientCatalog())
                    .append(",")
                    .append(changeUntil(client.getClientCatalogSize().toString(), client.getClientCatalogUnit()));
        }
        Map<String, Object> params = initCallUserAPI(clusterUser, hdfsquota, kbrconfig, queues, clientBuilder.toString());
        // 1. user configuration
        result = saveClusterUser(clusterUser, result, params);
        // Only continue with the remaining steps when user configuration succeeded.
        if (result.getFlag()) {
            // 2. kerberos configuration
            logger.info("start do kerberos");
            result = saveKbr(clusterUser, kbrconfig, result, params);
            // 3. HDFS configuration
            logger.info("start do hdfs");
            result = saveHdfs(clusterUser, hdfsquota, result, params);
            // 4. queue configuration
            logger.info("start do queues");
            result = saveQueues(clusterUser, queues, result, params);
            // 5. client configuration
            logger.info("start do client");
            result = saveClient(clusterUser, clientquotaList, result, params);
        }
        return result;
    }

    /**
     * Step 1: user configuration. Calls the remote add-user API, then saves the user
     * unless a different user with the same name already exists in the cluster.
     * update20160715qinfengxia
     *
     * @return the passed-in result, flag set to false on conflict
     */
    public Result saveClusterUser(ClusterUser clusterUser, Result result, Map<String, Object> params) {
        // Remote call first; its messages are always reported back to the caller.
        ContextResult contextResult = ClusterUserAPI.AddClusterUser(params);
        ResultAPI.initAPIResult(contextResult.getLastResult());
        result.getMessageList().addAll(ResultAPI.messageList);
        // Conflict check — equals() on boxed Longs; the original's "!=" compared
        // references and misreported conflicts for ids outside the Integer cache.
        ClusterUser oldClusterUser = clusterUserMapper.findByNameAndCluster(clusterUser.getUserName(), clusterUser.getClusterTypeId());
        if (oldClusterUser != null && !Objects.equals(oldClusterUser.getId(), clusterUser.getId())) {
            result.getMessageList().add("<div style='color:red'>数据库信息:用户冲突!</div>");
            result.setFlag(false);
            return result;
        }
        clusterUserMapper.save(clusterUser);
        result.getMessageList().add("<div>数据库信息:用户保存成功!</div>");
        return result;
    }

    /**
     * Step 2: kerberos configuration. Saves the config, calls the remote auth API and
     * rolls the config back out of the database when the remote call failed.
     * update20160715qinfengxia
     */
    private Result saveKbr(ClusterUser clusterUser, Kbrconfig kbrconfig, Result result, Map<String, Object> params) {
        kbrconfigService.saveKbrDB(kbrconfig, clusterUser.getId());
        ContextResult contextResult = KerberosAPI.AuthClientKerberos(params);
        logger.debug("end interface : {}", contextResult.getLastResult());
        ResultAPI.initAPIResult(contextResult.getLastResult());
        result.getMessageList().addAll(ResultAPI.messageList);
        if (!ResultAPI.flag) {
            // Remote auth failed: undo the config row saved above.
            result.setFlag(false);
            kbrconfigService.deleteByUserId(clusterUser.getId());
        }
        return result;
    }

    /**
     * Step 3: HDFS quota configuration. Skipped entirely when {@code hdfsquota} is null;
     * the quota row is only persisted when the remote quota call succeeded.
     * update20160715qinfengxia
     */
    private Result saveHdfs(ClusterUser clusterUser, Hdfsquota hdfsquota, Result result, Map<String, Object> params) {
        if (hdfsquota != null) {
            ContextResult contextResult = HadoopHDFSAPI.QuotaHadoopHDFS(params);
            ResultAPI.initAPIResult(contextResult.getLastResult());
            result.getMessageList().addAll(ResultAPI.messageList);
            if (!ResultAPI.flag) {
                result.setFlag(false);
            } else {
                hdfsquotaService.saveHdfsDB(hdfsquota, clusterUser.getId());
                result.getMessageList().add("<div>数据库信息:hdfs保存成功!</div>");
            }
        }
        return result;
    }

    /**
     * Step 4: queue configuration. Saves the queues, then triggers the fair-scheduler
     * update on YARN.
     * update20160715qinfengxia
     */
    private Result saveQueues(ClusterUser clusterUser, List<Queue> queues, Result result, Map<String, Object> params) {
        queueService.saveQueuesDB(queues, clusterUser.getId());
        result.getMessageList().add("<div>数据库信息:队列保存成功!</div>");
        // update20160802qinfengxia
        ContextResult contextResult = YarnAPI.FairScheduler(params);
        logger.debug("queues ------------- start");
        // NOTE(review): the two lines below were commented out in the original, so
        // ResultAPI.flag below still holds the outcome of the PREVIOUS step when it
        // is read — confirm whether the yarn result should be evaluated instead.
//        ResultAPI.initAPIResult(contextResult.getLastResult());
//        result.getMessageList().addAll(ResultAPI.messageList);
        logger.debug("queues ------------- end");
        if (!ResultAPI.flag) {
            result.setFlag(false);
        }
        return result;
    }

    /**
     * Step 5: client configuration. Currently only persists to the database; the
     * remote client-quota API call is still to be wired in.
     * add20160715qinfengxia
     */
    private Result saveClient(ClusterUser clusterUser, List<Clientquota> clientquotaList, Result result, Map<String, Object> params) {
        // TODO: add the remote client-quota API call here.
        clientquotaService.saveClientDB(clusterUser, clientquotaList);
        // Flag must be derived from the real API result once it is implemented.
        result.setFlag(true);
        return result;
    }

    /**
     * Builds the parameter map shared by the provisioning API calls.
     *
     * @param clientStr semicolon-separated "machineId,catalog,sizeInBytes" entries
     * @return parameter map consumed by the remote API facades
     */
    public Map<String, Object> initCallUserAPI(ClusterUser clusterUser, Hdfsquota hdfsquota, Kbrconfig kbrconfig, List<Queue> queues, String clientStr) {
        Map<String, Object> params = new HashMap<String, Object>();
        params.put(UserAPI.Cluster_User_Name, clusterUser.getUserName());
        params.put(UserAPI.Cluster_User_PW, clusterUser.getClientPW());
        params.put(UserAPI.Cluster_User_SysPW, clusterUser.getSystemPW());
        params.put(UserAPI.Cluster_User_HomeDir, clientStr);
        // Resolve the kerberos machine ids to their ip addresses.
        String machineIps = clusterMachineService.getMachineIpsByIds(kbrconfig.getMachineIds());
        params.put(KDCAPI.Cluster_KBR_Client, machineIps);
        if (hdfsquota != null) {
            String sSpace = hdfsquota.getHdfsSpace() == null ? null : hdfsquotaService.changeUntil(hdfsquota);
            params.put(HDFSAPI.HDFS_QUOTA_SPACE_SIZE, sSpace);
            String fileCount = hdfsquota.getHdfsFileCount() == null ? null : hdfsquota.getHdfsFileCount().toString();
            params.put(HDFSAPI.HDFS_QUOTA_DIR_NUMBER, fileCount);
        }
        // Fair-scheduler queue definitions for the YARN step.
        List<FairScheduler> fairSchedulerQueues = queueService.getFairSchedulerQueues(queues, clusterUser);
        params.put(DbFairSchedulerTools.FAIR_SCHEDULER_QUEUE_LIST, fairSchedulerQueues);
        return params;
    }

    /**
     * Finds all users of the given cluster type.
     */
    public List<ClusterUser> findClusterUserByClusterType(Long clusterTypeId) {
        return clusterUserMapper.findByClusterType(clusterTypeId);
    }

    /**
     * Adds a client user on the given machines via the remote API.
     * update20160802qinfengxia
     *
     * @param clusterUser user to add
     * @param machineIds  target machines
     * @return raw results of the last remote step
     */
    public List<LinuxResult> addClientUserAPI(ClusterUser clusterUser, String machineIds) {
        Map<String, Object> params = initUserAPI(clusterUser, machineIds);
        // TODO: dispatch to different API methods depending on the machine type.
        ContextResult contextResult = ClusterUserAPI.AddClientUser(params);
        return contextResult.getLastResult();
    }

    /**
     * Builds the basic user-API parameter map (name, client password, and the
     * optional target machine ips).
     * update20160802qinfengxia
     */
    public Map<String, Object> initUserAPI(ClusterUser clusterUser, String machineIps) {
        Map<String, Object> params = new HashMap<String, Object>();
        params.put(UserAPI.Cluster_User_Name, clusterUser.getUserName());
        params.put(UserAPI.Cluster_User_PW, clusterUser.getClientPW());
        if (StringUtils.isNotBlank(machineIps)) {
            params.put(UserAPI.Cluster_User_Client, machineIps);
        }
        return params;
    }

    /**
     * Fetches tenant information for the given company (tenant) id.
     * add20160714qinfengxia
     */
    public Map<String, Object> findCompanyParams(Long companyId) {
        return clusterUserMapper.findCompanyParams(companyId);
    }

    /**
     * Fetches clusterUser + hdfs information for the selected users.
     * add20160829qinfengxia
     *
     * @param userIds comma-separated user ids; blank means "all users"
     * @return a PageInfo wrapping the result list
     */
    public PageInfo findUserAndHdfsList(String userIds, HttpServletRequest request) {
        int currentPage = request.getParameter("page") == null ? 1 : Integer.parseInt(request.getParameter("page"));
        // NOTE(review): currentPage is never passed to PageHelper.startPage(), so this
        // query is NOT actually paginated — confirm whether a startPage call is missing.
        Map<String, Object> map = new HashMap<String, Object>();
        List<Long> ids = new ArrayList<Long>();
        if (!StringUtils.isBlank(userIds)) {
            for (String id : userIds.split(",")) {
                ids.add(Long.parseLong(id));
            }
            map.put("ids", ids);
        }
        List<Map<String, Object>> userAndHdfsList = clusterUserMapper.findUserAndHdfsList(map);
        return new PageInfo(userAndHdfsList);
    }

    /**
     * Converts a size given as value + unit ("T"/"G"/"M") into bytes, returned as a
     * decimal string. Any other unit leaves the value unchanged.
     */
    public String changeUntil(String inputValue, String inputUnit) {
        long space = Long.valueOf(inputValue);
        if ("T".equals(inputUnit)) {
            space = space * 1024 * 1024 * 1024 * 1024;
        } else if ("G".equals(inputUnit)) {
            space = space * 1024 * 1024 * 1024;
        } else if ("M".equals(inputUnit)) {
            space = space * 1024 * 1024;
        }
        return space + "";
    }
}
