package com.ideal.hadoopadmin.service.cluster.user;

import com.ideal.hadoopadmin.api.better.hdfs.HadoopHDFSAPI;
import com.ideal.hadoopadmin.api.hdfs.HDFSAPI;
import com.ideal.hadoopadmin.api.linux.UserAPI;
import com.ideal.hadoopadmin.common.entity.Result;
import com.ideal.hadoopadmin.common.entity.ResultAPI;
import com.ideal.hadoopadmin.entity.cluster.ClusterUser;
import com.ideal.hadoopadmin.entity.cluster.user.Clientquota;
import com.ideal.hadoopadmin.entity.cluster.user.Hdfsquota;
import com.ideal.hadoopadmin.mapper.webdb.cluster.ClusterUserMapper;
import com.ideal.hadoopadmin.mapper.webdb.cluster.user.HdfsquotaMapper;
import com.ideal.hadoopadmin.service.cluster.ClusterMachineService;
import com.ideal.hadoopadmin.service.cluster.ParameterService;
import com.ideal.tools.ssh.common.CommonProperties;
import com.ideal.tools.ssh.context.ClusterContext;
import com.ideal.tools.ssh.entity.ContextResult;
import com.ideal.tools.ssh.entity.LinuxMachine;
import com.ideal.tools.ssh.result.LinuxResult;
import org.apache.commons.lang3.StringUtils;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Service managing per-user HDFS quota settings (space limit and file-count
 * limit): pushes quota changes to the cluster through the Hadoop HDFS API and
 * keeps the local {@code hdfsquota} table in sync.
 *
 * Created by 袁颖 on 2016/2/26.
 */
@Service
public class HdfsquotaService {
    @Resource
    private HdfsquotaMapper hdfsquotaMapper;
    @Resource
    private ParameterService parameterService;
    @Resource
    private ClusterUserMapper clusterUserMapper;
    @Resource
    private ClusterMachineService clusterMachineService;

    /**
     * Looks up the quota row for one user.
     *
     * @param userId cluster user id
     * @return the user's quota row, or {@code null} when none exists
     */
    public Hdfsquota findByUserId(Long userId) {
        return hdfsquotaMapper.findByUserId(userId);
    }

    /**
     * Persists a new quota row for the given user. The front end may submit no
     * quota at all, so a {@code null} hdfsquota is silently ignored.
     *
     * @param hdfsquota quota to save; may be {@code null}
     * @param userId    owning cluster user id
     */
    public void saveHdfsDB(Hdfsquota hdfsquota, Long userId) {
        if (hdfsquota != null) {
            hdfsquota.setUserId(userId);
            hdfsquotaMapper.save(hdfsquota);
        }
    }

    /** Bulk update delegating straight to the mapper; {@code map} carries the mapper's parameters. */
    public void updateByUserIds(Map<String, Object> map) {
        hdfsquotaMapper.updateByUserIds(map);
    }

    /**
     * Batch update: applies the same quota to every listed user, first pushing
     * the change to the cluster and then syncing the local database.
     *
     * <p>NOTE(review): the cluster API result is not inspected (the old
     * {@code ResultAPI} check was commented out before this revision), so the
     * success message is emitted even if the remote call failed — confirm
     * whether that is intended.
     *
     * @param userIds   ids of the users to update
     * @param hdfsquota quota values to apply (mutated in place per user)
     * @return one status message per processed user
     */
    public List<String> updateHDFS(Long[] userIds, Hdfsquota hdfsquota) {
        List<String> messageList = new ArrayList<String>();
        for (Long userId : userIds) {
            hdfsquota.setUserId(userId);
            // Push the quota change to the cluster (new-style API, 2016-08-02).
            updateHDFSAPINew(hdfsquota);
            ClusterUser clusterUser = clusterUserMapper.findById(userId);
            hdfsquota.setHdfsPath("/user/" + clusterUser.getUserName());
            if (hdfsquota.getHdfsSpace() == null && hdfsquota.getHdfsFileCount() == null) {
                // Both limits cleared: remove the user's quota row entirely.
                deleteHdfs(userId);
            } else {
                updateHDFSDB(hdfsquota);
            }
            messageList.add("<div>数据库信息:hdfs修改成功!</div>");
        }
        return messageList;
    }

    /**
     * Builds the legacy SSH cluster context for a quota update (pre-2016-08-02
     * code path, superseded by {@link #initHDFSAPINew(Hdfsquota)}).
     *
     * @param hdfsquota quota carrying the user id and limits
     * @return context targeting every cluster machine with the quota arguments set
     */
    public ClusterContext initHDFSAPI(Hdfsquota hdfsquota) {
        // Resolve the cluster user owning this quota.
        ClusterUser clusterUser = clusterUserMapper.findById(hdfsquota.getUserId());
        Map<String, String> parameterMap = parameterService.getAllParameter();
        List<LinuxMachine> machineList = clusterMachineService.getMachineList(null);
        CommonProperties commonProperties = new CommonProperties(parameterMap);
        String space = changeUntil(hdfsquota);
        commonProperties.setArgument(UserAPI.Cluster_User_Name, clusterUser.getUserName());
        commonProperties.setArgument(HDFSAPI.HDFS_QUOTA_SPACE_SIZE, space);
        String fileCount = hdfsquota.getHdfsFileCount() == null ? null : hdfsquota.getHdfsFileCount().toString();
        commonProperties.setArgument(HDFSAPI.HDFS_QUOTA_DIR_NUMBER, fileCount);
        ClusterContext context = new ClusterContext(commonProperties);
        context.setOriginalList(machineList);
        return context;
    }

    /**
     * Builds the parameter map for the new-style HDFS quota API.
     * update20160802qinfengxia
     *
     * @param hdfsquota quota carrying the user id and limits
     * @return map with user name, space size (bytes, or null) and file count (or null)
     */
    public Map<String, Object> initHDFSAPINew(Hdfsquota hdfsquota) {
        // Resolve the cluster user owning this quota.
        ClusterUser clusterUser = clusterUserMapper.findById(hdfsquota.getUserId());
        String space = changeUntil(hdfsquota);
        Map<String, Object> params = new HashMap<String, Object>();
        params.put(UserAPI.Cluster_User_Name, clusterUser.getUserName());
        params.put(HDFSAPI.HDFS_QUOTA_SPACE_SIZE, space);
        String fileCount = hdfsquota.getHdfsFileCount() == null ? null : hdfsquota.getHdfsFileCount().toString();
        params.put(HDFSAPI.HDFS_QUOTA_DIR_NUMBER, fileCount);
        return params;
    }

    /**
     * Invokes the legacy HDFS quota API (superseded by
     * {@link #updateHDFSAPINew(Hdfsquota)}).
     *
     * @param hdfsquota quota to apply
     * @return per-machine results of the last executed step
     */
    public List<LinuxResult> updateHDFSAPI(Hdfsquota hdfsquota) {
        ClusterContext context = initHDFSAPI(hdfsquota);
        HDFSAPI.QuotaHDFS(context);
        return context.getContextResult().getLastResult();
    }

    /**
     * Invokes the new-style HDFS quota API.
     * update20160802qinfengxia
     *
     * @param hdfsquota quota to apply
     * @return results of the last executed step
     */
    public List<LinuxResult> updateHDFSAPINew(Hdfsquota hdfsquota) {
        Map<String, Object> params = initHDFSAPINew(hdfsquota);
        ContextResult contextResult = HadoopHDFSAPI.QuotaHadoopHDFS(params);
        return contextResult.getLastResult();
    }

    /**
     * Upserts the quota row: a user created without an HDFS quota has no row
     * yet, so insert in that case and update otherwise.
     *
     * @param hdfsquota quota carrying the user id and the new values
     */
    public void updateHDFSDB(Hdfsquota hdfsquota) {
        Hdfsquota existHdfsquota = hdfsquotaMapper.findByUserId(hdfsquota.getUserId());
        if (existHdfsquota == null) {
            saveHdfsDB(hdfsquota, hdfsquota.getUserId());
        } else {
            hdfsquotaMapper.update(hdfsquota);
        }
    }

    /**
     * Converts the quota's space value and unit ("T", "G", anything else is
     * treated as "M") into a byte-count string.
     *
     * <p>Fix: previously this dereferenced {@code getHdfsSpace()} without a
     * null check, so a quota with only a file-count limit crashed the cluster
     * call with an NPE; it also truncated the value through {@code intValue()}.
     * A null space now yields {@code null}, mirroring the null handling of the
     * file-count argument in {@link #initHDFSAPINew(Hdfsquota)}.
     *
     * @param hdfsquota quota whose space limit to convert
     * @return the limit in bytes as a string, or {@code null} when no space limit is set
     */
    public String changeUntil(Hdfsquota hdfsquota) {
        if (hdfsquota.getHdfsSpace() == null) {
            return null; // no space limit configured
        }
        long space = hdfsquota.getHdfsSpace().longValue();
        if ("T".equals(hdfsquota.getHdfsSpaceUnit())) {
            space = space * 1024 * 1024 * 1024 * 1024;
        } else if ("G".equals(hdfsquota.getHdfsSpaceUnit())) {
            space = space * 1024 * 1024 * 1024;
        } else {
            space = space * 1024 * 1024;
        }
        return space + "";
    }

    /**
     * Deletes the quota row belonging to the given user.
     *
     * @param userId owning cluster user id
     */
    public void deleteHdfs(Long userId) {
        hdfsquotaMapper.deleteByUserId(userId);
    }

    /**
     * Saves or updates a batch of quota rows for one user: rows that already
     * carry an id are updated, the rest are inserted.
     * add20160830qinfengxia
     *
     * <p>NOTE(review): if {@code getId()} returns a {@code Long}, the
     * {@code !"".equals(...)} clause is always true and the condition reduces
     * to a null check — confirm the id type and simplify if so.
     *
     * @param clusterUser   owning cluster user
     * @param hdfsquotaList quota rows to persist
     */
    public void saveHdfsDB(ClusterUser clusterUser, List<Hdfsquota> hdfsquotaList) {
        for (Hdfsquota hdfs : hdfsquotaList) {
            hdfs.setUserId(clusterUser.getId());
            if (null != hdfs.getId() && !"".equals(hdfs.getId())) {
                hdfsquotaMapper.update(hdfs);
            } else {
                hdfsquotaMapper.save(hdfs);
            }
        }
    }
}
