package com.huiquan.vocab.service;

import com.huiquan.framework.base.BaseService;
import com.huiquan.framework.base.ReturnCode;
import com.huiquan.framework.base.ReturnData;
import com.huiquan.framework.constant.BaseContants;
import com.huiquan.framework.utils.GetListUtil;
import com.huiquan.framework.utils.RespHeaderUtil;
import com.huiquan.framework.utils.ReturnUtil;
import com.huiquan.management.constant.MigrateCode;
import com.huiquan.management.service.MigrateService;
import com.huiquan.vocab.constant.VocabConstant;
import com.huiquan.vocab.dao.VocabElementCategoryDao;
import com.huiquan.vocab.dao.VocabElementClassifyDao;
import com.huiquan.vocab.domain.VocabElementCategory;
import com.huiquan.vocab.domain.VocabElementClassify;
import com.huiquan.vocab.domain.VocabElementClassifyDto;
import org.apache.commons.lang.StringUtils;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.servlet.ModelAndView;

import javax.servlet.http.HttpServletResponse;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.*;

/**
 * Service managing vocabulary element classifications (小词分级分类).
 *
 * <p>Elements of the "器官" (organ) category form a parent/child tree. This service
 * maintains that tree, flattens it into sorted root-to-leaf "a-b-c" lines persisted via
 * {@code insertOrganLines}, exports it to Excel, imports it back from Excel, and migrates
 * it as a tab-separated text file.
 */
@Service
public class VocabElementClassifyService extends BaseService {

    /** Category whose elements participate in the parent/child hierarchy. */
    private static final String ORGAN_CATEGORY = "器官";

    @Autowired
    private VocabElementClassifyDao vocabElementClassifyDao;

    @Autowired
    private VocabElementCategoryDao vocabElementCategoryDao;

    @Autowired
    private MigrateService migrateService;

    /**
     * Builds the paged list view of element classifications.
     *
     * @param startStr        page start offset (parsed by {@link GetListUtil})
     * @param nameKey         name filter; exact or LIKE depending on {@code exactSearchFlag}
     * @param exactSearchFlag "1" for exact match, anything else for substring match
     * @param checkFlag       optional check-status filter
     * @param categoryKey     optional category filter
     * @param countPerPageStr optional page size override
     * @param parentKey       parent-name filter; exact or LIKE depending on {@code exactSearchFlag}
     * @return ModelAndView for "vocab/element_index" with paging data, search hints and categories
     */
    public ModelAndView list(String startStr, String nameKey, String exactSearchFlag, String checkFlag,
                             String categoryKey, String countPerPageStr, String parentKey) {

        Map<String, Object> param = new HashMap<>();
        boolean exact = "1".equals(exactSearchFlag);
        if (StringUtils.isNotEmpty(nameKey)) {
            param.put("name", exact ? nameKey : "%" + nameKey + "%");
        }
        if (StringUtils.isNotEmpty(parentKey)) {
            param.put("parent", exact ? parentKey : "%" + parentKey + "%");
        }
        if (StringUtils.isNotEmpty(checkFlag)) {
            param.put("checkFlag", checkFlag);
        }
        if (StringUtils.isNotEmpty(categoryKey)) {
            param.put("category", categoryKey);
        }
        int totalSize = vocabElementClassifyDao.retrieveSize(param);

        Map<String, Integer> pageNo;
        if (StringUtils.isNotEmpty(countPerPageStr)) {
            int countPerPage = Integer.parseInt(countPerPageStr);
            pageNo = GetListUtil.getPageNoParam(totalSize, startStr, countPerPage);
            param.put("countPerPage", countPerPage);
        } else {
            pageNo = GetListUtil.getPageNoParam(totalSize, startStr);
        }
        int endPage = pageNo.get("endPage");
        int start = pageNo.get("start");
        int startIndex = pageNo.get("startIndex");

        List<VocabElementClassifyDto> dtoList = new ArrayList<>();
        if (startIndex < totalSize) {
            param.put("startIndex", startIndex);
            param.put("orderStr", "gmt_modified desc");
            for (VocabElementClassify classify : vocabElementClassifyDao.retrieveList(param)) {
                dtoList.add(getDtoByClassify(classify));
            }
        }

        // LinkedHashMap keeps the search-field order stable in the view.
        Map<String, String> showSearch = new LinkedHashMap<>();
        showSearch.put("nameKey", "词语");
        showSearch.put("parentKey", "上级");

        Map<String, Object> map = GetListUtil.getReturnMap2(totalSize, start, startIndex, endPage, dtoList, null);
        map.put("showSearch", showSearch);

        // 返回类别列表
        map.put("categoryList", getCategoryList());

        return new ModelAndView("vocab/element_index", map);
    }

    /**
     * Converts an entity to its view DTO. For organ elements the persisted
     * hierarchy lines containing the element are joined (newline-separated).
     */
    private VocabElementClassifyDto getDtoByClassify(VocabElementClassify classify) {
        VocabElementClassifyDto dto = new VocabElementClassifyDto();
        dto.setId(classify.getId());
        dto.setName(classify.getName());
        dto.setCategory(classify.getCategory());
        dto.setParent(classify.getParent());

        StringBuilder lines = new StringBuilder();
        // Null-safe comparison (original called classify.getCategory().equals(...),
        // which NPEs on a null category; the rest of the file uses this form).
        if (ORGAN_CATEGORY.equals(classify.getCategory())) {
            for (String line : vocabElementClassifyDao.retrieveOrganLinesByElement(classify.getName())) {
                lines.append(line).append("\n");
            }
        }
        dto.setLines(lines.toString());

        return dto;
    }

    /** Returns all category names, in DAO order. */
    private List<String> getCategoryList() {
        List<String> categoryList = new ArrayList<>();
        for (VocabElementCategory o : vocabElementCategoryDao.retrieveList()) {
            categoryList.add(o.getName());
        }
        return categoryList;
    }

    /**
     * Re-initializes the classification data and rebuilds the organ-line table.
     * Fails with a descriptive error if synonym conflicts exist; they must be
     * resolved before refreshing.
     *
     * @return success, or failure describing the conflicting words
     */
    public ReturnData init() {
        // 如果有冲突先修改完冲突之后才初始化
        List<VocabElementClassify> conflictList = vocabElementClassifyDao.retrieveConflict();
        if (conflictList != null) {
            StringBuilder err = new StringBuilder();
            String tempName = "";
            for (VocabElementClassify conflict : conflictList) {
                if (StringUtils.isBlank(tempName)) {
                    err.append("分级分类同义词存在冲突,先处理完冲突再进行刷新：");
                } else if (StringUtils.equals(conflict.getStd(), tempName)) {
                    // Same standard word as the previous conflict: group with ",".
                    err.append(",");
                } else {
                    err.append(";");
                }
                err.append(conflict.getName());
                tempName = conflict.getStd();
            }
            if (err.length() > 0) {
                ReturnCode rc = ReturnCode.DYNAMIC_DESC_EXCEPTION;
                rc.setDesc(err.toString());
                return ReturnUtil.fail(rc);
            }
        }
        vocabElementClassifyDao.init();

        // 维护bas_vocab_element_organ_line表: rebuild from checked ("1") elements.
        List<VocabElementClassify> elements = vocabElementClassifyDao.retrieveAllForExport("1");
        Map<String, String> sonMap = buildSonMap(elements);
        Map<String, String> fatherMap = buildFatherMap(elements);
        // Persisted lines carry a trailing "-" after the leaf, matching the
        // format produced by importParent.
        List<String> lineList = buildSortedLeafPaths(elements, sonMap, fatherMap, "-");
        vocabElementClassifyDao.insertOrganLines(lineList);
        return ReturnUtil.success();
    }

    /** Maps each element name to its parent name (value may be blank for roots). */
    private Map<String, String> buildFatherMap(List<VocabElementClassify> elements) {
        Map<String, String> fatherMap = new HashMap<>();
        for (VocabElementClassify element : elements) {
            if (StringUtils.isNotBlank(element.getName())) {
                fatherMap.put(element.getName(), element.getParent());
            }
        }
        return fatherMap;
    }

    /** Maps each parent name to its comma-separated child names. */
    private Map<String, String> buildSonMap(List<VocabElementClassify> elements) {
        Map<String, String> sonMap = new HashMap<>();
        for (VocabElementClassify element : elements) {
            String fatherName = element.getParent();
            if (StringUtils.isNotBlank(fatherName)) {
                String existing = sonMap.get(fatherName);
                sonMap.put(fatherName, existing == null ? element.getName()
                        : existing + "," + element.getName());
            }
        }
        return sonMap;
    }

    /**
     * Collects one "root-…-leaf" path per leaf node (has a parent, no children),
     * sorted lexicographically.
     *
     * @param leafSuffix appended after the leaf name: "-" for the persisted
     *                   organ-line format, "" for export/migration output
     */
    private List<String> buildSortedLeafPaths(List<VocabElementClassify> elements,
                                              Map<String, String> sonMap,
                                              Map<String, String> fatherMap,
                                              String leafSuffix) {
        List<String> paths = new ArrayList<>();
        for (VocabElementClassify element : elements) {
            String name = element.getName();
            // Leaf = no children and a non-blank parent.
            if (sonMap.get(name) == null && StringUtils.isNotBlank(element.getParent())) {
                StringBuilder path = new StringBuilder(name).append(leafSuffix);
                String current = name;
                // Walk up the ancestor chain, prepending each ancestor.
                while (StringUtils.isNotBlank(fatherMap.get(current))) {
                    current = fatherMap.get(current);
                    path.insert(0, current + "-");
                }
                paths.add(path.toString());
            }
        }
        Collections.sort(paths);
        return paths;
    }

    /** Collects isolated elements: no parent and no children. */
    private List<String> buildSingleList(List<VocabElementClassify> elements,
                                         Map<String, String> sonMap) {
        List<String> singles = new ArrayList<>();
        for (VocabElementClassify element : elements) {
            if (sonMap.get(element.getName()) == null && StringUtils.isBlank(element.getParent())) {
                singles.add(element.getName());
            }
        }
        return singles;
    }

    /**
     * Returns every classification name joined with "," (no trailing separator),
     * used as the selectable-parent data source.
     */
    public String getParentData() {
        List<VocabElementClassify> list = vocabElementClassifyDao.retrieveList();
        StringBuilder sb = new StringBuilder();
        for (VocabElementClassify temp : list) {
            sb.append(temp.getName()).append(",");
        }
        // BUGFIX: the original called sb.substring(...) and discarded the result,
        // so the trailing "," was never removed. Strip it for real.
        if (sb.length() > 0) {
            sb.setLength(sb.length() - 1);
        }
        return sb.toString();
    }

    /** Returns all classifications belonging to the given category. */
    public List<VocabElementClassify> getDataByCategory(String category) {
        Map<String, Object> param = new HashMap<>();
        param.put("category", category);
        return vocabElementClassifyDao.retrieveList(param);
    }

    /**
     * Adds a new element category (organ property), rejecting duplicate names.
     */
    public ReturnData addCategory(String name) {
        // 判断是否重复
        VocabElementCategory category = vocabElementCategoryDao.retrieveObjectByName(name);
        if (category != null) {
            return ReturnUtil.fail(ReturnCode.ELEMENT_CATEGORY_NAME_REPEAT);
        }

        // 维护添加类别
        category = new VocabElementCategory();
        category.setName(name);
        category.setProperty(VocabConstant.ELEMENT_ORGAN);

        vocabElementCategoryDao.insert(category);

        return ReturnUtil.success();
    }

    /**
     * Deletes a category and moves its classifications back to the unchecked state.
     */
    public ReturnData deleteCategory(String name) {
        // 删除类别
        VocabElementCategory category = new VocabElementCategory();
        category.setName(name);
        category.setProperty(VocabConstant.ELEMENT_ORGAN);

        vocabElementCategoryDao.delete(category);

        // 将该类别的数据的放到待校验中
        vocabElementClassifyDao.uncheckByCategory(name);

        return ReturnUtil.success();
    }

    /**
     * Moves a classification to a different category. Non-organ categories have
     * no hierarchy, so the parent is cleared when leaving the organ category.
     */
    public ReturnData changeCategory(String id, String category) {
        // NOTE(review): retrieveObjectById may return null for a stale id, which
        // would NPE below — confirm whether callers guarantee a valid id.
        VocabElementClassify classify = vocabElementClassifyDao.retrieveObjectById(Long.parseLong(id));
        // 如果不是器官类别的小词则没有上下级
        if (!ORGAN_CATEGORY.equals(category)) {
            classify.setParent("");
        }
        classify.setCategory(category);

        vocabElementClassifyDao.update(classify);

        return ReturnUtil.success();
    }

    /**
     * Changes a classification's parent. A non-empty parent must itself exist
     * as an organ element, otherwise the change is rejected.
     */
    public ReturnData changeParent(String id, String parent) {
        // 校验上级是否在可选范围中
        if (!parent.isEmpty()) {
            VocabElementClassify parentClassify = vocabElementClassifyDao.retrieveObjectByNameAndProperty(parent,
                    VocabConstant.ELEMENT_ORGAN);
            if (parentClassify == null) {
                ReturnCode rc = ReturnCode.DYNAMIC_DESC_EXCEPTION;
                rc.setDesc("“" + parent + "”不是可选上级！");
                return ReturnUtil.fail(rc);
            }
        }

        VocabElementClassify classify = vocabElementClassifyDao.retrieveObjectById(Long.parseLong(id));
        classify.setParent(parent);

        vocabElementClassifyDao.update(classify);

        return ReturnUtil.success();
    }

    /** Marks the given (comma-separated) ids as checked. */
    public ReturnData submitPage(String ids) {
        vocabElementClassifyDao.checkByIds(ids);
        return ReturnUtil.success();
    }

    /**
     * Exports the organ hierarchy as an .xlsx download: one row per root-to-leaf
     * path (one word per cell), followed by one row per isolated word.
     */
    public void exportParent(HttpServletResponse resp) throws Exception {
        RespHeaderUtil.setXlsxResp(resp, "器官上下级");

        // 查询出所有需要导出的数据
        List<VocabElementClassify> elements = vocabElementClassifyDao.retrieveAllForExport("");
        Map<String, String> sonMap = buildSonMap(elements);
        Map<String, String> fatherMap = buildFatherMap(elements);
        List<String> sortedPaths = buildSortedLeafPaths(elements, sonMap, fatherMap, "");
        List<String> singleList = buildSingleList(elements, sonMap);

        // try-with-resources closes the stream before the workbook (same order as
        // the original explicit close calls) and also closes on exception.
        try (Workbook workbook = new XSSFWorkbook();
             OutputStream os = resp.getOutputStream()) {
            Sheet sheet = workbook.createSheet();
            int rowIndex = 0;
            // 写入文件: one path per row, one word per cell.
            for (String line : sortedPaths) {
                Row row = sheet.createRow(rowIndex++);
                String[] words = line.split("-");
                for (int j = 0; j < words.length; j++) {
                    row.createCell(j).setCellValue(words[j]);
                }
            }
            // 写入单个词语的数据 (isolated words go last).
            for (String single : singleList) {
                sheet.createRow(rowIndex++).createCell(0).setCellValue(single);
            }
            workbook.write(os);
        }
    }

    /**
     * Generates the classification migration file: sorted root-to-leaf paths as
     * tab-separated lines, isolated words appended last, one entry per line.
     */
    public void generateClassify() throws Exception {
        List<VocabElementClassify> elements = vocabElementClassifyDao.retrieveAllForExport("1");
        Map<String, String> sonMap = buildSonMap(elements);
        Map<String, String> fatherMap = buildFatherMap(elements);
        List<String> sortedPaths = buildSortedLeafPaths(elements, sonMap, fatherMap, "");
        List<String> singleList = buildSingleList(elements, sonMap);

        StringBuilder rtn = new StringBuilder();
        // Each path becomes a tab-separated line.
        for (String line : sortedPaths) {
            String[] words = line.split("-");
            for (int j = 0; j < words.length; j++) {
                rtn.append(words[j]);
                if (j != (words.length - 1)) {
                    rtn.append("\t");
                }
            }
            rtn.append("\n");
        }
        // 写入单个词语的数据
        for (String single : singleList) {
            rtn.append(single).append("\n");
        }

        // 将数据输出到文件
        migrateService.migrateFileByCodeAndString(MigrateCode.ELEMENT_CLASSIFY, rtn.toString());
    }

    /**
     * Imports the organ hierarchy from an uploaded .xlsx. Each row is a path from
     * root (leftmost cell) to leaf. Validates that every word is a checked organ
     * element and that no word is assigned two different parents; on success the
     * parents and the organ-line table are rewritten.
     *
     * @return success, or failure listing the invalid/conflicting words
     */
    public ReturnData importParent(MultipartFile file) throws Exception {

        // 根据property和keyword获取同义词列表: names of all checked organ elements.
        Map<String, Object> param = new HashMap<>();
        param.put("checkFlag", BaseContants.FLAG_YES);
        param.put("category", ORGAN_CATEGORY);
        // HashSet: the per-cell membership test below was O(n) per lookup on a List.
        Set<String> organNames = new HashSet<>();
        for (VocabElementClassify classify : vocabElementClassifyDao.retrieveList(param)) {
            organNames.add(classify.getName());
        }

        StringBuilder elementErr = new StringBuilder(); // 记录不在器官小词中的错误数据
        StringBuilder parentErr = new StringBuilder();  // 记录上级冲突的小词
        Map<String, String> parentMap = new HashMap<>();
        List<String> lineList = new ArrayList<>();

        // 获取文件流，解析文件 (closed even when parsing throws).
        try (InputStream inputStream = file.getInputStream();
             Workbook workbook = new XSSFWorkbook(inputStream)) {
            Sheet sheet = workbook.getSheetAt(0);
            for (Row row : sheet) {
                String prev = "";
                StringBuilder line = new StringBuilder();
                for (Cell cell : row) {
                    String element = cell.getStringCellValue().trim();
                    if (element.isEmpty()) {
                        continue;
                    }
                    // 校验小词是否在系统中
                    if (!organNames.contains(element)) {
                        elementErr.append(element).append(",");
                    } else {
                        if (!prev.isEmpty()) {
                            // 校验上级小词是否有冲突
                            if (parentMap.get(element) != null && !parentMap.get(element).equals(prev)) {
                                // NOTE(review): this records the parent name (prev), yet the
                                // error message promises the word that has multiple parents
                                // (element) — confirm which was intended.
                                parentErr.append(prev).append(",");
                            } else {
                                parentMap.put(element, prev);
                            }
                        }
                        prev = element;
                    }
                    line.append(element).append("-");
                }
                // Keep only rows that contain at least two words (a real hierarchy line).
                if (line.length() > 1 && line.substring(0, line.length() - 1).indexOf("-") > -1) {
                    lineList.add(line.toString());
                }
            }
        }

        String error = "";
        if (elementErr.length() > 0) {
            elementErr.insert(0, "以下小词不是器官词：");
            error += elementErr + "\n";
        }
        if (parentErr.length() > 0) {
            parentErr.insert(0, "以下小词有多个不同的上级：");
            error += parentErr + "\n";
        }
        if (!error.isEmpty()) {
            ReturnCode rc = ReturnCode.DYNAMIC_DESC_EXCEPTION;
            rc.setDesc(error);
            return ReturnUtil.fail(rc);
        }

        for (Map.Entry<String, String> entry : parentMap.entrySet()) {
            vocabElementClassifyDao.updateParentByElement(entry.getKey(), entry.getValue());
        }

        vocabElementClassifyDao.deleteOrganLines();
        vocabElementClassifyDao.insertOrganLines(lineList);

        return ReturnUtil.success();
    }

    /**
     * Deletes a classification. An organ element that is still the parent of
     * other elements cannot be deleted.
     */
    public ReturnData deleteClassify(String id) {
        // 判断该name是不是一个上级若为上级不能删除
        VocabElementClassify value = vocabElementClassifyDao.retrieveObjectById(Long.parseLong(id));
        if (value != null) {
            if (ORGAN_CATEGORY.equals(value.getCategory())) {
                Map<String, Object> param = new HashMap<>();
                param.put("parent", value.getName());
                List<VocabElementClassify> children = vocabElementClassifyDao.retrieveList(param);
                if (children != null && children.size() > 0) {
                    ReturnCode rc = ReturnCode.DYNAMIC_DESC_EXCEPTION;
                    rc.setDesc("该器官是一个一级！不允许删除");
                    return ReturnUtil.fail(rc);
                }
            }
            vocabElementClassifyDao.delete(value);
        }

        return ReturnUtil.success();
    }
}
