package cn.com.cfae.iras.doc.analyze.pdf.training;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.corpus.io.IOUtil;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.mining.word2vec.DocVectorModel;
import com.hankcs.hanlp.mining.word2vec.Word2VecTrainer;
import com.hankcs.hanlp.mining.word2vec.WordVectorModel;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.SpeedTokenizer;
import org.apache.commons.lang.StringUtils;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.io.*;
import java.util.*;

/**
 * Created by zouxun on 2018/10/28
 */
@Service
public class PDFTrainingServiceImpl implements PDFTrainingService {

    private static final Logger logger = LoggerFactory.getLogger(PDFTrainingServiceImpl.class);

    // Injected locations: PDF source root, segmented-corpus output file,
    // trained model output file, and the user-dictionary ("deactivate") file.
    @Value("${pdf.file.path}")
    private String PDF_FILE_PATH;
    @Value("${train.file.name}")
    private String TRAIN_FILE_NAME;
    @Value("${model.file.name}")
    private String MODEL_FILE_NAME;
    @Value("${deactivate.file.name}")
    private String deactivateFileName;

    // Word2vec models loaded once in the static initializer below; either may
    // remain null when the file is absent or -DloadSwitch=true is not set.
    private static WordVectorModel newWordVectorModel = null;
    private static WordVectorModel oldWordVectorModel = null;

    private static final String NEW_MODEL_FILE_NAME = "/data/Training/pdf/word-vector_model.txt";
    private static final String OLD_MODEL_FILE_NAME = "/data/Training/pdf/word-vector_model_old.txt";

    static {
        try {
            long t1 = System.currentTimeMillis();
            logger.info("开始加载训练模型......");

            // Models are only loaded when the JVM is started with -DloadSwitch=true,
            // so tooling runs (e.g. training-only) skip the expensive load.
            String loadSwitch = System.getProperties().getProperty("loadSwitch");
            if ("true".equals(loadSwitch)) {
                if (new File(NEW_MODEL_FILE_NAME).exists()) {
                    newWordVectorModel = new WordVectorModel(NEW_MODEL_FILE_NAME);
                }
                if (new File(OLD_MODEL_FILE_NAME).exists()) {
                    oldWordVectorModel = new WordVectorModel(OLD_MODEL_FILE_NAME);
                }
            }
            long t2 = System.currentTimeMillis();
            logger.info("加载训练模型完成，耗时：{}毫秒。", (t2 - t1));
        } catch (IOException e) {
            // Swallowed deliberately: a missing/corrupt model must not prevent
            // class initialization; similarityAnalyze will fail later instead.
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * Walks every sub-directory of {@code PDF_FILE_PATH}, extracts the text of
     * each {@code .pdf} file, cleans and segments it with HanLP's SpeedTokenizer,
     * and appends the space-separated terms (one PDF line per corpus line) to
     * {@code TRAIN_FILE_NAME} as word2vec training material.
     */
    @Override
    public void PDFTraining() {
        try {
            logger.info("开始读取PDF文件,PDF源文件目录为：{}", PDF_FILE_PATH);

            File pdfSourceDir = new File(PDF_FILE_PATH);
            File[] pdfFileGroupDir = pdfSourceDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return new File(dir, name).isDirectory();
                }
            });
            // listFiles() returns null when the path does not exist or is unreadable.
            if (pdfFileGroupDir == null) {
                logger.error("PDF源文件目录不存在或不可读：{}", PDF_FILE_PATH);
                return;
            }

            // Load the user dictionary (one term per line) so domain phrases such
            // as "负债合计" are kept as single tokens by the segmenter.
            // NOTE(review): FileReader uses the platform default charset — confirm
            // it matches the dictionary file's encoding.
            try (BufferedReader br = new BufferedReader(new FileReader(deactivateFileName))) {
                String line;
                while ((line = br.readLine()) != null) {
                    CustomDictionary.add(line);
                }
            }

            // Global, loop-invariant tokenizer settings: set once instead of per line.
            HanLP.Config.ShowTermNature = false;
            SpeedTokenizer.SEGMENT.enableCustomDictionaryForcing(true);

            File targetFile = new File(TRAIN_FILE_NAME);
            if (!targetFile.exists()) {
                targetFile.createNewFile();
            }

            // Open the corpus once in append mode; try-with-resources guarantees
            // it is closed even when a PDF fails mid-run.
            try (BufferedWriter output = new BufferedWriter(new FileWriter(targetFile, true))) {
                for (File pdfFileGroup : pdfFileGroupDir) {
                    logger.info("当前操作路径：{}，操作目录：{}", pdfFileGroup.getAbsolutePath(), pdfFileGroup.getName());

                    File[] pdfFileArray = pdfFileGroup.listFiles(new FilenameFilter() {
                        @Override
                        public boolean accept(File dir, String name) {
                            return name.endsWith(".pdf");
                        }
                    });
                    if (pdfFileArray == null) {
                        continue;
                    }
                    for (File pdfFile : pdfFileArray) {
                        appendSegmentedText(pdfFile, output);
                    }
                }
            }
        } catch (Exception e) {
            // Boundary catch: log and return rather than propagate into callers.
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * Extracts the text of one PDF, segments each cleaned line, filters stop
     * words, and writes the remaining terms (space-separated) to {@code output}.
     *
     * @throws IOException if PDF loading, text extraction, or writing fails
     */
    private void appendSegmentedText(File pdfFile, BufferedWriter output) throws IOException {
        logger.info("当前操作的PDF文件是：{}，当前文件路径：{}", pdfFile.getName(), pdfFile.getAbsolutePath());
        long t1 = System.currentTimeMillis();

        String result;
        // try-with-resources: the document is closed even if extraction throws.
        try (PDDocument document = PDDocument.load(pdfFile)) {
            result = new PDFTextStripper().getText(document);
        }

        for (String str : result.split("\n")) {
            str = cleanLine(str);
            List<Term> terms = SpeedTokenizer.segment(str);
            CoreStopWordDictionary.apply(terms);
            for (Term term : terms) {
                output.write(term.word);
                output.write(" ");
            }
            output.newLine();
            output.flush();
        }

        long t2 = System.currentTimeMillis();
        logger.info("耗时{}毫秒，PDF文件 {} 切词完成", (t2 - t1), pdfFile.getName());
    }

    /**
     * Normalizes one line of extracted PDF text: removes all whitespace,
     * square brackets and dashes, then strips punctuation and digits, leaving
     * only the characters to be segmented.
     */
    private static String cleanLine(String str) {
        str = str.replaceAll("\\s+", "");
        // String.replace is already a no-op when the target is absent.
        str = str.replace("[", "").replace("]", "").replace("-", "");
        // Replace punctuation/digits with a space, then squeeze the spaces back out.
        str = str.replaceAll("[+|：|:|（|）|(|)|《|》|<|>|“|”|，|,|、|。|【|】|%|×|〔|〕|/|；|;|.|0|1|2|3|4|5|6|7|8|9|－|‰|*|％]", " ");
        return str.replaceAll("\\s+", "");
    }

    /**
     * Trains a word2vec model from {@code TRAIN_FILE_NAME} unless a model file
     * already exists at {@code MODEL_FILE_NAME}.
     *
     * @return the freshly trained model, or {@code null} when the model file
     *         already exists (training is skipped)
     */
    @Override
    public WordVectorModel wordVectorModelTraining() {
        if (!IOUtil.isFileExisted(MODEL_FILE_NAME)) {
            if (!IOUtil.isFileExisted(TRAIN_FILE_NAME)) {
                logger.info("语料不存在，请阅读文档了解语料获取与格式：https://github.com/hankcs/HanLP/wiki/word2vec");
                // NOTE(review): System.exit in a Spring service kills the whole JVM;
                // consider throwing IllegalStateException instead — kept for
                // backward compatibility with existing callers.
                System.exit(1);
            }
            Word2VecTrainer trainerBuilder = new Word2VecTrainer();
            return trainerBuilder.train(TRAIN_FILE_NAME, MODEL_FILE_NAME);
        }

        return null;
    }

    /**
     * Compares two documents {@code params["a"]} and {@code params["b"]} under
     * both the new and old word-vector models, returning the similarity score
     * and the nearest-word groups of each document under each model.
     *
     * NOTE(review): throws NPE if the models were not loaded at startup
     * (requires -DloadSwitch=true and existing model files) — confirm callers
     * only invoke this after a successful load.
     */
    @Override
    public Map<String, String> similarityAnalyze(Map params) {
        Map<String, String> result = new HashMap<>();

        DocVectorModel docVectorModel = new DocVectorModel(newWordVectorModel);
        logger.info("Map：{}", params);
        String a = (String) params.get("a");
        String b = (String) params.get("b");
        Float num = docVectorModel.similarity(a, b);
        logger.info("相似度：{}", num);
        result.put("new_similarity", String.valueOf(num));

        List<Map<String, String>> new_a_word_group = getWordGroup(newWordVectorModel, a);
        result.put("new_a_word_group", new_a_word_group.toString());

        List<Map<String, String>> new_b_word_group = getWordGroup(newWordVectorModel, b);
        result.put("new_b_word_group", new_b_word_group.toString());

        docVectorModel = new DocVectorModel(oldWordVectorModel);
        num = docVectorModel.similarity(a, b);

        result.put("old_similarity", String.valueOf(num));

        List<Map<String, String>> old_a_word_group = getWordGroup(oldWordVectorModel, a);
        result.put("old_a_word_group", old_a_word_group.toString());

        List<Map<String, String>> old_b_word_group = getWordGroup(oldWordVectorModel, b);
        result.put("old_b_word_group", old_b_word_group.toString());

        return result;
    }

    /**
     * Collects the nearest neighbours of {@code word} in the given model as a
     * list of single-entry maps {neighbour -> similarity-score-as-string}.
     */
    private List<Map<String, String>> getWordGroup(WordVectorModel wordVectorModel, String word) {
        List<Map<String, String>> wordGroupList = new ArrayList<>();
        for (Map.Entry<String, Float> entry : wordVectorModel.nearest(word)) {
            Map<String, String> map = new HashMap<>();
            map.put(entry.getKey(), entry.getValue().toString());
            wordGroupList.add(map);
        }
        return wordGroupList;
    }

}
