package cn.com.cfae.iras.doc.analyze.parse.tokenizer;

import cn.com.cfae.iras.doc.analyze.parse.kv.KVText;
import cn.com.cfae.iras.doc.analyze.parse.kv.TextLayout;
import cn.com.cfae.iras.doc.analyze.parse.model.ExtractBizModel;
import cn.com.cfae.iras.doc.analyze.parse.model.ExtractItem;
import cn.com.cfae.iras.doc.analyze.parse.model.WordItem;
import com.hankcs.hanlp.corpus.document.sentence.Sentence;
import com.hankcs.hanlp.corpus.document.sentence.word.IWord;
import com.hankcs.hanlp.tokenizer.NLPTokenizer;
import org.apache.commons.lang3.ArrayUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.function.Predicate;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * 授信
 */
public class SXTokenizer extends BaseTokenizer {

    private static SXTokenizer instance = new SXTokenizer();


    public static SXTokenizer getInstance() {
        return instance;
    }

    private SXTokenizer() {
    }

    private static Logger logger = LoggerFactory.getLogger(SXTokenizer.class);

    public void splitWords5(ExtractBizModel extractBizModel, String text) {
        logger.info("开始分析授信业务模型......", extractBizModel.getModelName());
        long t1 = System.currentTimeMillis();
        Sentence sentence = NLPTokenizer.ANALYZER.analyze(text);
        ExtractItem extractItem = null;
        List<IWord> wordList = sentence.wordList;
        IWord iWord = null;
        Map<String, ExtractItem> extractItemMap = new HashMap<>();
        logger.info("开始分析授信业务模型，文本：{}，切词列表：{}。", extractBizModel.getModelName(), text, wordList);
        int offset = -1;
        for (int i = 0; i < wordList.size(); i++) {
            iWord = wordList.get(i);
            String word = iWord.getValue();
            if (extractBizModel.getSynonyms().contains(word)) {
                offset = i;
                extractItem = extractBizModel.getExtractItemBySynonym(word);
                if (extractItem == null) {
                    continue;
                }
                if (!extractItemMap.containsKey(extractItem.getItemName())) {
                    extractItemMap.put(extractItem.getItemName(), extractItem);
                    extractItem.setItemValue(iWord.getValue());
                    logger.info("分析授信业务模型，分析定位到业务模型数据，业务指标名称：{}，ItemName：{}，索引位置：{}。", extractBizModel.getModelName(), word, extractItem.getItemName(), offset);
                    continue;
                }
            }
        }

        if (offset < 0) {
            return;
        }
        List<WordItem> wordItemList = null;
        for (Iterator<String> it = extractItemMap.keySet().iterator(); it.hasNext(); ) {
            wordItemList = new ArrayList<>();
            String key = it.next();
            ExtractItem var1 = extractItemMap.get(key);
            String prefixRegex = null;
            if (var1.getItemName().equalsIgnoreCase("SXZE")) {
                prefixRegex = "(("+joinSynonyms(extractItem.getSynonyms())+")|((([0-9]{0,3}\\,{1,}){0,}([0-9]{1,3}){1,}\\.{0,}[0-9]{0,2})\\s{0,}(亿元|万元){1}))";
//                prefixRegex = "((授信额度|授信总额|授信总额度|授信限额|银行授信)|((([0-9]{0,3}\\,{1,}){0,}([0-9]{1,3}){1,}\\.{0,}[0-9]{0,2})\\s{0,}(亿元|万元){1}))";
            } else if (var1.getItemName().equalsIgnoreCase("YSYSX")) {
                prefixRegex = "(("+joinSynonyms(extractItem.getSynonyms())+")|((([0-9]{0,3}\\,{1,}){0,}([0-9]{1,3}){1,}\\.{0,}[0-9]{0,2})\\s{0,}(亿元|万元){1}))";
//                prefixRegex = "((已使用授信|借款已使用|已使用额度)|((([0-9]{0,3}\\,{1,}){0,}([0-9]{1,3}){1,}\\.{0,}[0-9]{0,2})\\s{0,}(亿元|万元){1}))";

            }
            Pattern pattern = Pattern.compile(prefixRegex);
            Matcher matcher = pattern.matcher(text);
            WordItem wordItem;
            while (matcher.find()) {
                wordItem = new WordItem();
                wordItem.setValue(matcher.group());
                wordItem.setOffset(matcher.start());
                wordItemList.add(wordItem);
            }

            Collections.sort(wordItemList);
            logger.info("分析授信业务模型，输出提取的业务指标内容，extractItem：{}，指标名称：{}。", var1.getItemName(), var1.getItemValue());
            wordItemList.forEach(e -> {
                logger.info("分析授信业务模型，分析定位到业务模型数据文本信息，分析文本：{}，偏移位置：{}。", e.getValue(), e.getOffset());
            });

            for (int i = 0; i < wordItemList.size(); i++) {
                WordItem wordItem1 = wordItemList.get(i);
                if (ArrayUtils.contains(var1.getSynonyms(), wordItem1.getValue())) {
                    KVText kText = var1.getKey();
                    if (var1.getKey() == null) {
                        kText = new KVText();
                        TextLayout kTextLayout = new TextLayout();
                        var1.setKey(kText);
                        kText.add(kTextLayout);
                    }
                    kText.setText(wordItem1.getValue());
                    var1.getKey().getTextLayout().setText(var1.getKey().getText());
                }

                if (isAmt(wordItem1.getValue())) {
                    if(var1.getKey()==null){
                        continue;
                    }
                    KVText vText = new KVText();
                    vText.setText(wordItem1.getValue());
                    TextLayout vTextLayout = new TextLayout();
                    vTextLayout.setText(vText.getText());
                    var1.setValue(vText);
                    vText.add(vTextLayout);
                    break;
                }
            }
        }
        long t2 = System.currentTimeMillis();
        logger.info("分析授信业务模型完成，耗时：{}毫秒。", (t2 - t1));
    }

    public boolean splitWords(ExtractItem extractItem, String text) {
        Sentence sentence = NLPTokenizer.ANALYZER.analyze(text);
        List<IWord> wordList = sentence.wordList;
        logger.info("开始分析抽取授信业务模型数据，指标名称：{}，文本：{}，分词列表：{}。", extractItem.getItemName(), text, wordList);
        IWord iWord = null;
        int offset = -1;
        boolean isCandicate = false;
        for (int i = 0; i < wordList.size(); i++) {
            iWord = wordList.get(i);
            String word = iWord.getValue();
            if (ArrayUtils.contains(extractItem.getSynonyms(), word.trim())) {
                KVText kText = new KVText();
                kText.setText(iWord.getValue());
                extractItem.setKey(kText);
                isCandicate = true;
                logger.info("抽取授信业务模型数据，分析到抽取授信业务模型数，指标名称：{}，文本内容：{}，索引位置：{}。", extractItem.getItemName(), word, offset);
                break;
            }
        }
        return isCandicate;
    }

    /**
     * 授信总额
     *
     * @param extractItem
     * @param extractText
     * @return
     */
    public boolean extractSX_SXZE(ExtractItem extractItem, String extractText) {
        Sentence sentence = NLPTokenizer.ANALYZER.analyze(replaceAmtComma(extractText));
        List<IWord> wordList = sentence.wordList;
        logger.info("开始分析抽取授信业务模型数据，指标名称：{}，文本：{}，分词列表：{}。", extractItem.getItemName(), extractText, wordList);
        IWord iWord = null;
        int offset = -1;
        for (int i = 0; i < wordList.size(); i++) {
            iWord = wordList.get(i);
            String word = iWord.getValue();
            if (ArrayUtils.contains(extractItem.getSynonyms(), word)) {
                KVText kText = null;
                if (extractItem.getKey() == null) {
                    kText = new KVText();
                    extractItem.setKey(kText);
                } else {
                    kText = extractItem.getKey();
                }
                offset = i;
                kText.setText(iWord.getValue());
                logger.info("抽取授信业务模型数据，分析到抽取授信业务模型数，指标名称：{}，文本内容：{}，索引位置：{}。", extractItem.getItemName(), word, offset);
                break;
            }
        }

        if (offset == -1) {
            return false;
        }
        boolean isCandicate = false;
        for (int i = offset + 1; i < wordList.size(); i++) {
            IWord var1 = wordList.get(i);
            if (isSX_SXZE(var1.getValue().trim())) {
                KVText vText = new KVText();
                vText.setText(var1.getValue());
                extractItem.setValue(vText);
                isCandicate = true;
                break;
            }
        }
        return isCandicate;
    }

    /**
     * 已使用授信
     *
     * @param extractItem
     * @param extractText
     * @return
     */
    public boolean extractSX_YSYSX(ExtractItem extractItem, String extractText) {
        Sentence sentence = NLPTokenizer.ANALYZER.analyze(replaceAmtComma(extractText));
        List<IWord> wordList = sentence.wordList;
        logger.info("开始分析抽取授信业务模型数据，指标名称：{}，文本：{}，分词列表：{}。", extractItem.getItemName(), extractText, wordList);
        IWord iWord = null;
        int offset = -1;
        for (int i = 0; i < wordList.size(); i++) {
            iWord = wordList.get(i);
            String word = iWord.getValue();
            if (ArrayUtils.contains(extractItem.getSynonyms(), word)) {
                KVText kText = null;
                if (extractItem.getKey() == null) {
                    kText = new KVText();
                    extractItem.setKey(kText);
                } else {
                    kText = extractItem.getKey();
                }
                offset = i;
                kText.setText(iWord.getValue());
                logger.info("抽取授信业务模型数据，指标名称：{}，分析到抽取授信业务模型数，文本内容：{}，索引位置：{}。", extractItem.getItemName(), word, offset);
                break;
            }
        }

        if (offset == -1) {
            return false;
        }
        boolean isCandicate = false;
        for (int i = offset + 1; i < wordList.size(); i++) {
            IWord var1 = wordList.get(i);
            if (isSX_YSYSX(var1.getValue().trim())) {
                KVText vText = new KVText();
                vText.setText(var1.getValue());
                extractItem.setValue(vText);
                isCandicate = true;
                break;
            }
        }
        return isCandicate;
    }
}
