package com.samp.solr.file2solr;

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import org.apache.commons.lang3.StringUtils;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;

/**
 * Converts raw, un-segmented CSV data into "segmented-tokens&lt;TAB&gt;intent" training
 * files. For every file in the source directory it:
 * <ol>
 *   <li>skips the header row,</li>
 *   <li>de-duplicates rows by the first column (clid),</li>
 *   <li>segments the text column with HanLP (custom dictionary enabled),</li>
 *   <li>normalizes tokens via a synonym map,</li>
 *   <li>writes one "tokens\tsemantic" line per kept row to the destination directory.</li>
 * </ol>
 *
 * NOTE(review): not thread-safe — {@link #synonymMap} is lazily initialized without
 * synchronization and HanLP dictionaries are mutated at startup; intended as a
 * single-threaded batch tool.
 */
public class DataExtract {

    /** Segmenter shared by all tokenize() calls; custom dictionary lookups enabled. */
    private static Segment segment = HanLP.newSegment().enableCustomDictionary(true);

    /** Maps each synonym variant to its canonical (first-listed) word; lazily loaded. */
    private static Map<String, String> synonymMap = new HashMap<>();

    /**
     * Reads one raw CSV file and writes the segmented result file.
     *
     * Expected columns per row: clid, &lt;unused&gt;, text, semantic. Rows with fewer
     * than 4 columns, duplicate clids, or text that segments to nothing are skipped.
     *
     * @param rawPath        source file path (UTF-8)
     * @param resultFilePath destination file path (UTF-8, overwritten)
     * @throws Exception if the result file cannot be written
     */
    private static void extract(String rawPath, String resultFilePath) throws Exception {
        Set<String> clidSet = new HashSet<>();
        List<String> resultList = new ArrayList<>();
        // try-with-resources closes the reader on every exit path
        try (BufferedReader bufferReader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            bufferReader.readLine(); // skip the header row
            String line;
            int index = 1;
            long start = System.currentTimeMillis();
            while ((line = bufferReader.readLine()) != null) {
                String[] words = line.split(",");
                // guard: malformed rows would otherwise throw ArrayIndexOutOfBoundsException below
                if (words.length < 4) {
                    continue;
                }
                String clid = words[0].trim();
                // de-duplicate by clid: Set.add returns false when already present
                if (!clidSet.add(clid)) {
                    continue;
                }
                String text = words[2];
                String semantic = words[3];
                // segment the text; drop rows whose segmentation produced nothing
                String tokenText = tokenize(text);
                if (StringUtils.isBlank(tokenText)) {
                    continue;
                }
                resultList.add(tokenText + "\t" + semantic);
                if (index % 10000 == 0) {
                    long cost = System.currentTimeMillis() - start;
                    // BUGFIX: message previously claimed "1000 lines" although it fires every 10000
                    System.out.println("process 10000 lines.current index =" + index + ", cost:" + cost);
                    start = System.currentTimeMillis();
                }
                index++;
            }
            saveResultToFile(resultList, resultFilePath);
            System.out.println("data process finished.");
        } catch (IOException e) {
            e.printStackTrace();
        }
        System.out.println("all done!");
    }

    /**
     * Segments a document with HanLP and applies synonym normalization.
     *
     * @param doc raw input text
     * @return space-joined tokens after synonym replacement; empty string for empty input
     */
    public static String tokenize(String doc) {
        List<String> tokenizeList = new ArrayList<>();
        List<Term> termList = segment.seg(doc);
        for (Term term : termList) {
            // BUGFIX: Term.word is the surface form; the old term.toString().split("/")[0]
            // truncated any token that itself contained a '/' character.
            tokenizeList.add(term.word);
        }
        // replace each token with its canonical synonym where one is defined
        tokenizeList = replaceSynonym(tokenizeList);
        return StringUtils.join(tokenizeList.iterator(), " ");
    }

    /**
     * Loads the user-supplied custom dictionary and all synonym words into HanLP's
     * CustomDictionary. Every synonym word must be in the custom dictionary so the
     * segmenter can emit it as a single token.
     */
    private static void loadCustomDict() {
        String rawPath = "D:\\gcc-omcp\\new\\nlp\\customercutfile.txt";
        String similarFile = "D:\\gcc-omcp\\new\\nlp\\similarwords.txt";
        // BUGFIX: the old code reassigned one reader variable without closing the
        // first reader, leaking it; each file now gets its own try-with-resources.
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            String line;
            int count = 0; // BUGFIX: counter started at 1, over-reporting by one
            while ((line = reader.readLine()) != null) {
                CustomDictionary.add(line);
                count++;
            }
            System.out.println("加载自定义词个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(similarFile), StandardCharsets.UTF_8)) {
            String line;
            int count = 0;
            while ((line = reader.readLine()) != null) {
                for (String word : line.split(",")) {
                    CustomDictionary.add(word);
                    count++;
                }
            }
            System.out.println("同义词生成自定义个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Loads stop words into HanLP's CoreStopWordDictionary.
     * Currently unused (the call in main is disabled); kept for optional filtering.
     */
    private static void loadStopWords() {
        String rawPath = "D:\\gcc-omcp\\new\\nlp\\customerstopfile_small.txt";
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            String line;
            int count = 0; // BUGFIX: counter started at 1, over-reporting by one
            while ((line = reader.readLine()) != null) {
                CoreStopWordDictionary.add(line);
                count++;
            }
            System.out.println("加载停词个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Replaces every token that has a canonical synonym with that synonym.
     * The synonym map is lazily loaded on first use from a comma-separated file
     * where the first word on each line is the canonical form.
     *
     * @param tokenizeList tokens to normalize; may be null or empty
     * @return a new list with synonyms substituted (empty for null/empty input)
     */
    private static List<String> replaceSynonym(List<String> tokenizeList) {
        if (synonymMap == null || synonymMap.isEmpty()) {
            System.out.println("initial synonymMap");
            String rawPath = "D:\\gcc-omcp\\new\\nlp\\similarwords.txt";
            try (BufferedReader reader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
                String line;
                while ((line = reader.readLine()) != null) {
                    String[] synonyms = line.split(",");
                    if (synonyms.length > 1) {
                        // map every variant (index >= 1) to the canonical first word
                        for (int i = 1; i < synonyms.length; i++) {
                            synonymMap.put(synonyms[i], synonyms[0]);
                        }
                    }
                }
                System.out.println("加载同义词大小：" + synonymMap.size());
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        List<String> resultList = new ArrayList<>();
        if (tokenizeList != null) {
            for (String token : tokenizeList) {
                String synonymWord = synonymMap.get(token);
                resultList.add(synonymWord != null ? synonymWord : token);
            }
        }
        return resultList;
    }

    /**
     * Writes each result line to the given file (UTF-8), one per line.
     *
     * @param resultList     lines to write
     * @param resultFilePath destination path (overwritten)
     * @throws Exception if closing the writer fails
     */
    private static void saveResultToFile(List<String> resultList, String resultFilePath) throws Exception {
        // try-with-resources closes (and flushes) the writer on every exit path
        try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(resultFilePath), StandardCharsets.UTF_8)) {
            int index = 0; // primitive counter: avoids Integer autoboxing in the loop
            for (String str : resultList) {
                writer.write(str);
                writer.newLine();
                index++;
            }
            System.out.println("save record: " + index);
        } catch (IOException e1) {
            e1.printStackTrace();
        }
    }

    /**
     * Entry point: converts every raw file in the source directory into the
     * segmented-tokens/intent format in the destination directory.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        loadCustomDict();
//        loadStopWords();
        String sourceDir = "D:\\gcc-omcp\\new\\zjk\\";
        String distDir = "D:\\gcc-omcp\\new\\trans06\\";
        Path dir = Paths.get(sourceDir);
        // iterate the source directory via Files.newDirectoryStream
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path path : stream) {
                if (!Files.isDirectory(path)) {
                    System.out.println("process........" + path.getFileName());
                    String rawPath = sourceDir + path.getFileName();
                    String outPath = distDir + path.getFileName();
                    extract(rawPath, outPath);
                    // BUGFIX: removed extract(rawPath, rawPath) — it reprocessed the
                    // file and OVERWROTE the raw source with the segmented output.
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
