package com.netease.controller;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.netease.entity.DataElementTable;
import com.netease.entity.IndexDictionary;
import com.netease.entity.Word;
import com.netease.service.impl.StandFieldServiceImpl;
import com.netease.utils.*;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

@Api(value = "标准字段接口", tags = {"标准字段相关接口"})
@RequestMapping("standField")
@RestController
public class StandFieldGenerationController {

    private static final Logger logger = LoggerFactory.getLogger(StandFieldGenerationController.class);

    /** File that batch-generation results are appended to. */
    private static final String FILE_PATH = "/root" + File.separator + "dataElement.csv";

    // Segmenter is shared across requests.
    // NOTE(review): assumes JiebaSegmenter is thread-safe — confirm before heavy concurrent use.
    private final JiebaSegmenter segmenter = new JiebaSegmenter();

    @Autowired
    private StandFieldServiceImpl standFieldService;

    /**
     * Builds the underscore-joined English root name from a word's abbreviation string.
     *
     * <p>Only the first "|"-separated candidate is ever used — the original loops in both
     * endpoints broke out of the candidate loop on the first iteration, so this preserves
     * that behavior. A phrase candidate (contains spaces) has its space-separated parts
     * joined with underscores.
     *
     * @param engAbbreviation raw abbreviation string, candidates separated by "|"
     * @return the root name derived from the first candidate
     */
    private static String buildRootName(String engAbbreviation) {
        String first = engAbbreviation.split("\\|")[0];
        if (!StringUtils.isPhrase(first)) {
            return first;
        }
        // Phrase: replace the space separators with underscores.
        return String.join("_", first.split(" "));
    }

    /**
     * Generates standard English field names for one or more space-separated Chinese comments.
     *
     * <p>Output format per field: {@code 中文注释|分词参考|标准英文名}, fields separated by
     * {@code <BR />}.
     *
     * @param text space-separated Chinese field comments
     * @return formatted result string, or an error message when the input is empty
     */
    @ApiOperation(value = "标准字段生成", notes = "请输入要生产的标准字段中文注释，多个标准字段注释请用空格符进行分割，例如：数据仓库标准 词频统计", httpMethod = "GET")
    @GetMapping("/generate/{text}")
    public String generate(@ApiParam(name = "text", value = "字段注释", required = true) @PathVariable String text) {

        // Load the user dictionary before segmenting.
        FileLoadUtils.userDictLoad();

        // BUG FIX: the original built an error result but discarded it and fell through.
        if (text == null || text.isEmpty()) {
            return "请输入要生成的标准字段注释！";
        }

        // Multiple field comments are separated by single spaces.
        String[] fields = text.split(" ");
        StringBuilder result = new StringBuilder();

        for (int i = 0; i < fields.length; i++) {
            // Segmentation reference ("中文:英文缩写" pairs) and the final English name.
            StringBuilder wordSgmntRfrnc = new StringBuilder();
            StringBuilder stdEngName = new StringBuilder();

            IndexDictionary indicators = new IndexDictionary();
            indicators.setIndexName(fields[i]);

            // Segment, translate and abbreviate the Chinese comment into Word objects.
            ArrayList<Word> words = standFieldService.builderStandField(fields[i], segmenter);
            indicators.setWords(words);

            for (int w = 0; w < words.size(); w++) {
                Word word = words.get(w);
                stdEngName.append(buildRootName(word.getEngAbbreviation()));
                wordSgmntRfrnc.append(word.getName()).append(":").append(word.getEngAbbreviation());
                // Separators only between roots, and only for non-English input.
                if (w < words.size() - 1 && !StringUtils.isEnglish(text)) {
                    stdEngName.append("_");
                    wordSgmntRfrnc.append(",");
                }
                logger.info(word.toString());
            }

            indicators.setDataElementName(stdEngName.toString());

            result.append(fields[i])
                    .append("|")
                    .append(wordSgmntRfrnc)
                    .append("|")
                    .append(stdEngName);
            // BUG FIX: was `i < fields.length` (always true) — appended a stray
            // trailing <BR /> after the last field.
            if (i < fields.length - 1) {
                result.append("<BR />");
            }
        }

        return result.toString();
    }

    /**
     * Batch-generates standard field names from an uploaded CSV and appends the results
     * ({@code 原词,英文名} per line) to {@link #FILE_PATH}.
     *
     * @param file uploaded CSV, one Chinese field comment per row
     * @return ok result on success, error result when the file is empty or the write fails
     */
    @ApiOperation(value = "标准字段批量生成", notes = "标准字段批量生成", httpMethod = "POST")
    @PostMapping("/generate")
    public JsonResult generateByFile(@RequestParam("csvFile") MultipartFile file) {
        // Original words parsed out of the CSV file.
        List<String> originalWords = CsvUtils.readCsvFile(file);

        // Null-safe emptiness check (the original redundantly tested size()==0 && isEmpty()).
        if (originalWords == null || originalWords.isEmpty()) {
            // No words — the file content may be empty.
            return JsonResult.errorMsg(null);
        }

        StringBuilder output = new StringBuilder();

        for (String ow : originalWords) {
            StringBuilder joinedRoots = new StringBuilder();
            // Segment, translate and abbreviate the comment into Word objects.
            ArrayList<Word> words = standFieldService.builderStandField(ow, segmenter);
            for (int w = 0; w < words.size(); w++) {
                String rootName = buildRootName(words.get(w).getEngAbbreviation());
                // Coarse duplicate suppression: skip roots already present in the result.
                if (joinedRoots.indexOf(rootName) < 0) {
                    joinedRoots.append(rootName);
                    if (w < words.size() - 1 && !StringUtils.isEnglish(ow)) {
                        joinedRoots.append("_");
                    }
                }
                logger.info(words.get(w).toString());
            }
            output.append(ow).append(",").append(joinedRoots).append(System.lineSeparator());
        }

        // BUG FIX: try-with-resources closes the writer on all paths (the original leaked it
        // on exception) and a failed write is now reported instead of silently returning ok.
        // The `true` flag appends rather than truncates.
        try (FileWriter fw = new FileWriter(new File(FILE_PATH), true)) {
            fw.write(output.toString());
        } catch (IOException e) {
            logger.error("Failed to append batch results to {}", FILE_PATH, e);
            return JsonResult.errorMsg("结果文件写入失败！");
        }

        return JsonResult.ok("已完成！");
    }

    /**
     * Generates table-name / data-element / word-root / translation rows from an uploaded CSV.
     *
     * <p>Each row is {@code 表名,字段名,词根,英文名,英文缩写}; rows are separated by {@code &}.
     *
     * @param csvFile uploaded CSV describing data-element tables
     * @return ok result carrying the joined rows, error result when the file is empty
     */
    @ApiOperation(value = "表名&数据元&词根&翻译生成接口", notes = "表名&数据元&词根&翻译生成接口", httpMethod = "POST")
    @PostMapping("/generateRoot")
    public JsonResult generateRootByCSV(@RequestParam("csvFile") MultipartFile csvFile) {
        // Table descriptions parsed out of the CSV file.
        List<DataElementTable> originalWords = CsvUtils.readCsvFile1(csvFile);

        // Null-safe emptiness check (the original redundantly tested size()==0 && isEmpty()).
        if (originalWords == null || originalWords.isEmpty()) {
            // No words — the file content may be empty.
            return JsonResult.errorMsg(null);
        }

        StringBuilder sb = new StringBuilder();

        for (DataElementTable dataElementTable : originalWords) {
            List<DataElementTable> dataElementTables =
                    standFieldService.segmentWordTranslation(dataElementTable, segmenter);
            for (DataElementTable elementTable : dataElementTables) {
                // BUG FIX: was System.out.println(dataElementTable) — printed the OUTER loop
                // object via stdout; log the element actually being processed instead.
                logger.debug("{}", elementTable);
                List<Word> words = elementTable.getWords();
                for (int i = 0; i < words.size(); i++) {
                    Word word = words.get(i);
                    sb.append(elementTable.getTableName()).append(",")
                            .append(elementTable.getFieldName()).append(",")
                            .append(word.getName()).append(",")
                            .append(word.getEngName()).append(",")
                            .append(word.getEngAbbreviation());
                    // BUG FIX: was `i < words.size()` (always true) — appended a stray
                    // trailing "&" after the last row.
                    if (i < words.size() - 1) {
                        sb.append("&");
                    }
                }
            }
        }

        return JsonResult.ok(sb.toString());
    }

}
