package com.ideal.hadoopadmin.web.controller.meta.hdfs;

import com.github.pagehelper.PageInfo;
import com.ideal.hadoopadmin.entity.cluster.ClusterType;
import com.ideal.hadoopadmin.entity.cluster.ClusterUser;
import com.ideal.hadoopadmin.entity.cluster.Parameter;
import com.ideal.hadoopadmin.entity.meta.hdfs.MetaHdfsInfo;
import com.ideal.hadoopadmin.entity.system.company.SystemCompany;
import com.ideal.hadoopadmin.framework.message.WebMessageLevel;
import com.ideal.hadoopadmin.framework.web.HttpRequests;
import com.ideal.hadoopadmin.framework.web.json.JsonObject;
import com.ideal.hadoopadmin.security.ShiroDbRealm;
import com.ideal.hadoopadmin.service.cluster.ClusterTypeService;
import com.ideal.hadoopadmin.service.cluster.ClusterUserService;
import com.ideal.hadoopadmin.service.cluster.ParameterService;
import com.ideal.hadoopadmin.service.meta.hdfs.MetaHdfsInfoBakService;
import com.ideal.hadoopadmin.service.meta.hdfs.MetaHdfsInfoService;
import com.ideal.hadoopadmin.service.system.company.SystemCompanyService;
import com.ideal.hadoopadmin.web.controller.CommonDictionary;
import com.ideal.hadoopadmin.web.controller.UIController;
import com.ideal.hadoopadmin.framework.page.Page;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.RequestMapping;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import java.util.*;

import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.util.WebUtils;

/**
 * Created by fwj on 16-2-22.
 * Controller for HDFS metadata management (listing, directory creation,
 * deletion, and in-group/out-of-group user permission maintenance).
 */
@Controller
@RequestMapping(HdfsController.PORTAL_PREFIX)
public class HdfsController extends UIController {
    public final static String PORTAL_PREFIX = "/meta/hdfs";
    // SLF4J convention: one static final logger per class.
    private static final Logger logger = LoggerFactory.getLogger(HdfsController.class);
    @Resource
    private MetaHdfsInfoService metaHdfsInfoService;
    @Resource
    private SystemCompanyService systemCompanyService;
    @Resource
    private ClusterUserService clusterUserService;
    @Resource
    private ShiroDbRealm shiroDbRealm;
    @Resource
    private ParameterService parameterService;
    @Resource
    private MetaHdfsInfoBakService metaHdfsInfoBakService;
    @Resource
    private ClusterTypeService clusterTypeService;

    /**
     * Queries the HDFS metadata page plus the drop-down data sources
     * (companies, cluster users filtered by the selected tenant/cluster type,
     * and all cluster types) and forwards to the management view.
     *
     * @param request current request; search filters are the "Q_"-prefixed parameters
     * @param model   view model populated with "cusPage", "hpPage", "page", "clusterTypePage"
     */
    @RequestMapping("hdfs_metadata_management")
    public void hdfsMetadataManagement(HttpServletRequest request, Model model) {
        Map<String, Object> searchParams = WebUtils.getParametersStartingWith(request, "Q_");
        List<SystemCompany> queryClusterCompany = systemCompanyService.queryClusterCompany();
        model.addAttribute("cusPage", queryClusterCompany);
        // Restrict the cluster-user drop-down by the filters chosen on the page
        // (update 20160717 qinfengxia).
        List<ClusterUser> userList;
        if (searchParams != null && !searchParams.isEmpty()) {
            // BUGFIX: the original compared strings with "==" (reference identity,
            // effectively always false) and called toString() on a possibly-absent
            // parameter, causing NPE / NumberFormatException. Parse defensively.
            Long id = parseOptionalLong(searchParams.get("EQ_company.id"));               // tenant (company) id
            Long clusterTypeId = parseOptionalLong(searchParams.get("EQ_clusterType.clusterTypeId")); // cluster type id
            userList = selectCluterUserByParams(id, clusterTypeId);
        } else {
            userList = selectCluterUserByParams(null, null);
        }
        model.addAttribute("hpPage", userList);
        PageInfo pageInfo = metaHdfsInfoService.queryMetaHdfsInfo(searchParams, request);
        model.addAttribute("page", pageInfo);
        // All cluster types for the filter drop-down (add 20160713 qinfengxia).
        List<ClusterType> clusterTypes = clusterTypeService.findAllClusterType();
        model.addAttribute("clusterTypePage", clusterTypes);
    }

    /**
     * Converts a request-parameter value to a Long.
     *
     * @param value raw parameter value; may be null
     * @return the parsed Long, or null when the value is missing or blank
     * @throws NumberFormatException if the value is non-blank but not numeric
     */
    private static Long parseOptionalLong(Object value) {
        if (value == null) {
            return null;
        }
        String text = value.toString().trim();
        return text.isEmpty() ? null : Long.valueOf(text);
    }

    /**
     * Opens the "add directory" popup, supplying all cluster users for the
     * owner drop-down.
     */
    @RequestMapping("add_dir_pop")
    public void addDir(Model model) {
        List<ClusterUser> hadoopUsers = clusterUserService.queryClusterUser();
        model.addAttribute("users", hadoopUsers);
    }

    /**
     * Opens the "edit HDFS metadata" popup for the given record id.
     */
    @RequestMapping("edit_hdfs_pop")
    public void editHdfs(Long id, Model model) {
        MetaHdfsInfo metaHdfsInfo = metaHdfsInfoService.queryMetaHdfsInfoById(id);
        model.addAttribute("metaHdfsInfo", metaHdfsInfo);
    }

    /**
     * Creates a new HDFS directory record (id == null) or updates the note of
     * an existing one (id != null). Rejects duplicate paths under the owner's
     * public directory.
     *
     * @param hdfsPath      directory name relative to /user/&lt;owner&gt;/public/
     * @param clusterUserId owning cluster user's id
     * @param note          free-text remark
     * @param id            existing record id, or null to create
     * @return JSON alert describing the outcome
     */
    @RequestMapping("save_hdfs_Info")
    @ResponseBody
    public JsonObject saveHdfsInfo(HttpServletRequest request, String hdfsPath, Long clusterUserId, String note, Long id) {
        ClusterUser clusterUser = clusterUserService.queryClusterUserById(clusterUserId);
        // BUGFIX: guard against an unknown user id instead of NPE-ing below.
        if (clusterUser == null) {
            return JsonObject.alert("用户不存在!", WebMessageLevel.ERROR);
        }
        // hdfsPath must be unique for this owner.
        String owner = clusterUser.getUserName();
        List<MetaHdfsInfo> resUserPath =
                metaHdfsInfoService.findPathByUserAndTbl("/user/" + owner + "/public/" + hdfsPath, clusterUserId);
        if (!resUserPath.isEmpty()) {
            return JsonObject.alert("目录名存在!", WebMessageLevel.ERROR);
        }
        MetaHdfsInfo metaHdfsInfo = new MetaHdfsInfo();
        if (id == null) {
            // Create: persist the full record and run the HDFS-side add.
            metaHdfsInfo.setHdfsPath(hdfsPath);
            metaHdfsInfo.setNote(note);
            metaHdfsInfo.setCreateTime(System.currentTimeMillis());
            metaHdfsInfo.setUserId(clusterUserId);
            List<String> messageList = metaHdfsInfoService.addHdfs(metaHdfsInfo);
            return JsonObject.alert(StringUtils.strip(messageList.toString(), "[]"), WebMessageLevel.SUCCESS);
        } else {
            // Update: only the note is editable.
            metaHdfsInfo.setNote(note);
            metaHdfsInfo.setId(id);
            metaHdfsInfoService.updateMetaHdfsInfoById(metaHdfsInfo);
            return JsonObject.alert("修改用户备注成功", WebMessageLevel.SUCCESS);
        }
    }

    /**
     * Opens the delete-confirmation popup, echoing the target's id, name and
     * the flag that distinguishes management-delete from compare-delete.
     */
    @RequestMapping("delete_hdfs_pop")
    public void delHdfs(Long hdfsId, String name, Model model, String flag) {
        model.addAttribute("hdfsId", hdfsId);
        model.addAttribute("name", name);
        model.addAttribute("flag", flag);
    }

    /**
     * Deletes an HDFS metadata record after verifying the admin password
     * stored under {@link CommonDictionary#delHDFSKey}.
     *
     * @param hdfsId   record to delete
     * @param password password entered by the operator
     * @param flag     distinguishes HDFS-management delete from HDFS-compare delete
     * @return JSON alert describing the outcome
     */
    @RequestMapping("delete_hdfs")
    @ResponseBody
    public JsonObject delete_hdfs(Long hdfsId, String password, String flag) {
        Parameter parameterNew = parameterService.findParameterByKey(CommonDictionary.delHDFSKey);
        // BUGFIX: the original NPE'd when the parameter row or its value was
        // missing; treat a missing configured password as a mismatch.
        String val = parameterNew == null ? null : parameterNew.getParameterVal();
        if (val == null || !val.equals(password)) {
            return JsonObject.alert("密码错误", WebMessageLevel.ERROR);
        }
        List<String> messageList = metaHdfsInfoService.deleteHdfs(hdfsId);
        return JsonObject.alert(StringUtils.strip(messageList.toString(), "[]"), WebMessageLevel.SUCCESS);
    }

    /**
     * Opens the in-group/out-of-group user configuration view.
     * divId is accepted for interface compatibility but unused here.
     */
    @RequestMapping("config_hadoopUser_wss")
    public void configHadoopUser(HttpServletRequest request, Long resourceId, String divId) {
        // The original wrapped this in a pointless try/catch; setAttribute does not throw.
        request.setAttribute("resourceId", resourceId);
    }

    /**
     * Pages the users INSIDE the resource's group and stores the page, the
     * target div id and the resource id as request attributes.
     */
    @RequestMapping("query_groupuser_pop")
    public void queryGroupUser(HttpServletRequest request, Long resourceId, String divId, String divId_groupuser) {
        try {
            Map<String, Object> searchParams = HttpRequests.getParametersStartingWith(request, "Q_");
            int currentPage = Page.getCurrentPage(request);
            int defaultSize = Page.DEFAULT_PAGESIZE;
            PageInfo page = metaHdfsInfoService.findGroupInner(resourceId, searchParams, currentPage, defaultSize);
            request.setAttribute("page_group", page);
            // Fall back to the group-user div when no explicit div id was passed.
            request.setAttribute("divId", divId != null ? divId : divId_groupuser);
            request.setAttribute("resourceId", resourceId);
        } catch (Exception e) {
            logger.error("queryGroupUser failed, resourceId={}", resourceId, e);
        }
    }

    /**
     * Pages the users OUTSIDE the resource's group and stores the page, the
     * target div id and the resource id as request attributes.
     */
    @RequestMapping("query_otheruser_pop")
    public void queryOtherUser(HttpServletRequest request, Long resourceId, String divId, String divId_otheruser) {
        try {
            Map<String, Object> searchParams = HttpRequests.getParametersStartingWith(request, "Q_");
            int currentPage = Page.getCurrentPage(request);
            int defaultSize = Page.DEFAULT_PAGESIZE;
            PageInfo page = metaHdfsInfoService.findGroupOuter(resourceId, searchParams, currentPage, defaultSize);
            request.setAttribute("page_other", page);
            // Fall back to the other-user div when no explicit div id was passed.
            request.setAttribute("divId", divId != null ? divId : divId_otheruser);
            request.setAttribute("resourceId", resourceId);
        } catch (Exception e) {
            logger.error("queryOtherUser failed, resourceId={}", resourceId, e);
        }
    }

    /**
     * Removes the given users from the resource's group. Refuses to remove
     * the resource's own owner.
     *
     * @param hadoopUserId ids of the users to remove
     * @param resourceId   the HDFS metadata record whose group is edited
     * @return JSON alert describing the outcome
     */
    @RequestMapping("del_pri_pop")
    @ResponseBody
    public JsonObject delGroupUser(Long[] hadoopUserId, Long resourceId, Model model) {
        MetaHdfsInfo hdfsInfo = metaHdfsInfoService.queryMetaHdfsInfoById(resourceId);
        // The owner may not be stripped of its own permission.
        for (Long userId : hadoopUserId) {
            if (hdfsInfo.getClusterUserId().getId().equals(userId)) {
                return JsonObject.alert("不能移除自身", WebMessageLevel.ERROR);
            }
        }
        List<String> messageList = metaHdfsInfoService.deleteInnerUser(hadoopUserId, resourceId);
        return JsonObject.alert(StringUtils.strip(messageList.toString(), "[]"), WebMessageLevel.SUCCESS);
    }

    /**
     * Adds the given out-of-group users to the resource's group.
     */
    @RequestMapping("add_pri_pop")
    @ResponseBody
    public JsonObject addGroupUser(Long[] hadoopUserId, Long resourceId) {
        List<String> messageList = metaHdfsInfoService.addOuterUser(hadoopUserId, resourceId);
        return JsonObject.alert(StringUtils.strip(messageList.toString(), "[]"), WebMessageLevel.SUCCESS);
    }

    /**
     * Refresh endpoint. NOTE(review): the actual refresh
     * (metaHdfsInfoBakService.flushHdfsAPI()) is disabled and this currently
     * only reports success — confirm whether it should be re-enabled.
     */
    @RequestMapping("flush_hdfs")
    @ResponseBody
    public JsonObject flushHdfs(HttpServletRequest request) {
        return JsonObject.success();
    }

    /**
     * AJAX endpoint: returns the cluster users matching the selected tenant
     * and/or cluster type.
     */
    @RequestMapping("select_hpUv")
    @ResponseBody
    public JsonObject selectHpuv(Model model, Long id, Long clusterTypeId) {
        List<ClusterUser> hpList = selectCluterUserByParams(id, clusterTypeId);
        return JsonObject.success(hpList);
    }

    /**
     * Fetches cluster users filtered by tenant and/or cluster type
     * (add 20160717 qinfengxia).
     *
     * @param id            tenant (company) id, or null for all tenants
     * @param clusterTypeId cluster type id, or null for all cluster types
     * @return matching users; empty list on error or no match
     */
    public List<ClusterUser> selectCluterUserByParams(Long id, Long clusterTypeId) {
        List<ClusterUser> hpList = new ArrayList<ClusterUser>();
        try {
            if (id != null && clusterTypeId != null) {
                hpList = clusterUserService.queryClusterUserByParams(id, clusterTypeId);
            } else if (id != null) {
                hpList = clusterUserService.queryClusterUserBySystemId(id);
            } else if (clusterTypeId != null) {
                hpList = clusterUserService.queryClusterUserByClusterTypeId(clusterTypeId);
            } else {
                hpList = clusterUserService.queryClusterUser();
            }
        } catch (Exception e) {
            // Best-effort: log with full stack trace and fall through to an empty list.
            logger.error("selectCluterUserByParams failed, id={}, clusterTypeId={}", id, clusterTypeId, e);
        }
        return hpList;
    }

    @Override
    public String getPortalPrefix() {
        return PORTAL_PREFIX;
    }
}
