package com.samp.solr.file2solr;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import com.samp.util.CnStringUtils;
import com.samp.util.MapUtils;
import org.apache.commons.lang3.StringUtils;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;

public class ErrorIntentionExtract {

    // Shared HanLP segmenter with the custom dictionary enabled; used by tokenize().
    private static Segment segment= HanLP.newSegment().enableCustomDictionary(true);

    // word -> canonical synonym; lazily loaded from similarwords.txt inside replaceSynonym().
    private static Map<String, String> synonymMap = new HashMap<>();

    // "canonicalWord_originalWord" -> number of times that replacement happened.
    private static Map<String, Integer> synonymUserStatMap = new HashMap<>();

    // canonical corpus text -> (intention -> occurrence count); filled by staticWordsDistribInFile().
    private static Map<String,Map<String,Integer>> recordMap = new HashMap<>();

    // order-insensitive normal form (transferText2) -> first raw token string seen with that form.
    private static Map<String, String> wordsTransMap = new HashMap<>();

    /**
     * Reads a raw CSV corpus file (clid,?,text,semantic,...), tokenizes each unique
     * record and writes "tokens \t intention[\t originalText]" lines to resultFilePath.
     * Rows are de-duplicated by clid; one-character texts, empty tokenizations and
     * pure-English tokenizations are skipped. For each kept row an extra synthetic
     * line derived from the intention itself is appended when available.
     *
     * @param rawPath        source CSV file; the first line is a header and is skipped
     * @param resultFilePath destination file for the processed corpus
     * @param addOriText     when true, append the original raw text as a third column
     */
    private static void extract(String rawPath, String resultFilePath, boolean addOriText) throws Exception {
        Path filePath = Paths.get(rawPath);
        Set<String> clidSet = new HashSet<>();
        List<String> resultList = new ArrayList<>();
        try (BufferedReader bufferReader = Files.newBufferedReader(filePath, StandardCharsets.UTF_8)) {
            int index = 1;
            bufferReader.readLine(); // skip the CSV header line
            long start = System.currentTimeMillis();
            String line;
            while ((line = bufferReader.readLine()) != null) {
                String[] words = line.split(",");
                // malformed rows (fewer than 4 columns) previously threw ArrayIndexOutOfBoundsException
                if (words.length < 4) {
                    continue;
                }
                String clid = words[0].trim();
                if (!clidSet.add(clid)) { // clid already processed
                    continue;
                }
                String text = words[2];
                String semantic = words[3];
                // drop texts that are a single character before tokenizing
                if (text.trim().length() == 1) {
                    continue;
                }
                String tokenText = tokenize(text);
                // drop rows whose tokenization is empty or a single character
                if (StringUtils.isBlank(tokenText) || tokenText.trim().length() == 1) {
                    continue;
                }
                // drop rows that tokenize to pure English
                if (CnStringUtils.isEnglish(tokenText)) {
                    continue;
                }
                String intention = removeLastSlot(semantic);
                resultList.add(addOriText
                        ? tokenText + "\t" + intention + "\t" + text
                        : tokenText + "\t" + intention);
                // derive one additional training line from the intention itself
                String newWords = intentionToWord(semantic);
                if (StringUtils.isNotBlank(newWords)) {
                    resultList.add(addOriText
                            ? newWords + "\t" + intention + "\t" + newWords
                            : newWords + "\t" + intention);
                }
                if (index % 10000 == 0) {
                    long cost = System.currentTimeMillis() - start;
                    // message previously said "1000" although the reporting step is 10000
                    System.out.println("process 10000 lines.current index =" + index + ", cost:" + cost);
                    start = System.currentTimeMillis();
                }
                index++;
            }
            saveResultToFile(resultList, resultFilePath);
            System.out.println("data process finished.");
        } catch (IOException e) {
            e.printStackTrace();
        }
        System.out.println("all done!");
    }

    /**
     * Segments a document with HanLP, removes stop words, applies synonym
     * replacement and strips repeated word patterns, returning the tokens
     * joined by single spaces.
     *
     * @param doc raw input text
     * @return space-separated normalized token string (may be empty)
     */
    public static String tokenize(String doc) {
        List<Term> termList = segment.seg(doc);
        CoreStopWordDictionary.apply(termList);
        List<String> tokenizeList = new ArrayList<>(termList.size());
        for (Term term : termList) {
            // Use the term's word field directly instead of parsing toString()
            // ("word/nature"), which corrupts words that themselves contain '/'.
            tokenizeList.add(term.word);
        }
        // synonym substitution, then collapse repeats (AA -> A, ABAB -> AB)
        tokenizeList = replaceSynonym(tokenizeList);
        tokenizeList = removeContinuousRepeatWord(tokenizeList);
        tokenizeList = removeABAB(tokenizeList);
        return StringUtils.join(tokenizeList.iterator(), " ");
    }

    /**
     * Replaces each token by its canonical synonym (loaded lazily from
     * similarwords.txt) and records per-pair usage counts in synonymUserStatMap.
     *
     * @param tokenizeList tokens to normalize; may be null or empty
     * @return a new list with synonyms substituted (empty when input is null/empty)
     */
    public static List<String> replaceSynonym(List<String> tokenizeList) {
        if (synonymMap.isEmpty()) {
            loadSynonymMap();
        }
        List<String> resultList = new ArrayList<>();
        if (tokenizeList != null) {
            for (String token : tokenizeList) {
                String synonymWord = synonymMap.get(token);
                if (synonymWord != null) {
                    resultList.add(synonymWord);
                    // track how often each original word was rewritten to its synonym
                    synonymUserStatMap.merge(synonymWord + "_" + token, 1, Integer::sum);
                } else {
                    resultList.add(token);
                }
            }
        }
        return resultList;
    }

    /**
     * Loads similarwords.txt (CSV: canonical,alias1,alias2,...) into synonymMap.
     * Words appearing on more than one line are reported, because duplicates
     * make the alias -> canonical mapping ambiguous.
     */
    private static void loadSynonymMap() {
        System.out.println("initial synonymMap");
        String rawPath = "D:\\gcc-omcp\\new\\nlp\\similarwords.txt";
        try (BufferedReader bufferReader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            String line;
            Set<String> checkFile = new HashSet<>();
            while ((line = bufferReader.readLine()) != null) {
                String[] synonyms = line.split(",");
                // Set.add returns false when the word was already registered
                if (!checkFile.add(synonyms[0])) {
                    System.out.println("error similarword:" + synonyms[0]);
                }
                for (int i = 1; i < synonyms.length; i++) {
                    synonymMap.put(synonyms[i], synonyms[0]);
                    if (!checkFile.add(synonyms[i])) {
                        System.out.println("error similarword:" + synonyms[i]);
                    }
                }
            }
            System.out.println("加载同义词大小：" + synonymMap.size());
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Writes each string in resultList as one line (UTF-8) to resultFilePath,
     * then prints the number of records written. IOExceptions are printed and
     * swallowed, matching the original best-effort behavior.
     *
     * @param resultList     lines to write, in iteration order
     * @param resultFilePath destination file path (overwritten if it exists)
     */
    public static void saveResultToFile(Collection<String> resultList, String resultFilePath) throws Exception {
        // try-with-resources guarantees the writer is closed on every path
        try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(resultFilePath), StandardCharsets.UTF_8)) {
            int index = 0; // primitive counter avoids Integer boxing on every increment
            for (String str : resultList) {
                writer.write(str);
                writer.newLine();
                index++;
            }
            System.out.println("save record: " + index);
            writer.flush();
        } catch (IOException e1) {
            e1.printStackTrace();
        }
    }

    /**
     * Aggregates the corpus distribution of every file in sourceDir (via
     * staticWordsDistribInFile), then splits the aggregated records into:
     *  - outFileName:    ambiguous corpora (several intentions, no clear winner)
     *  - stringFileName: corpora with a dominant intention (>=50 hits and >80% share)
     *  - rightfile2:     corpora that only ever had a single intention
     *  - totalFile:      stringFileName records followed by rightfile2 records
     *
     * @throws Exception on unrecoverable file errors
     */
    public static void staticWordsDistrib(String sourceDir, String outFileName, String stringFileName,
                                          String rightfile2, String totalFile) throws Exception{
        Path dir = Paths.get(sourceDir);
        // walk the source directory and fold each regular file into recordMap
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path path : stream) {
                if (!Files.isDirectory(path)) {
                    System.out.println("process........" + path.getFileName());
                    staticWordsDistribInFile(sourceDir + path.getFileName());
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        List<String> rightList = new ArrayList<>(20000);
        List<String> rightList2 = new ArrayList<>(4000000);
        try {
            int index = 0;
            // try-with-resources fixes the original leak where the writer opened
            // for rightfile2 was never closed (its variable was reassigned).
            try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(outFileName), StandardCharsets.UTF_8)) {
                for (String key : recordMap.keySet()) {
                    Map<String, Integer> data = recordMap.get(key);
                    if (data.size() > 1) {
                        // pick the dominant intention: share > 80% and at least 50 hits
                        String right = choiceRight(key, data);
                        if (right == null) {
                            for (String intention : data.keySet()) {
                                writer.write(key + "\t" + intention + "\t" + data.get(intention));
                                writer.newLine();
                                index++;
                            }
                        } else {
                            rightList.add(key + "\t" + right);
                        }
                    } else {
                        for (String intention : data.keySet()) {
                            rightList2.add(key + "\t" + intention + "\t" + data.get(intention));
                        }
                    }
                }
                System.out.println("save record: " + index);
                writer.flush();
            }
            writeLines(stringFileName, rightList);
            writeLines(rightfile2, rightList2);
            // the total file is simply both "right" lists concatenated
            try (BufferedWriter totalWriter = Files.newBufferedWriter(Paths.get(totalFile), StandardCharsets.UTF_8)) {
                for (String rig : rightList) {
                    totalWriter.write(rig);
                    totalWriter.newLine();
                }
                for (String rig : rightList2) {
                    totalWriter.write(rig);
                    totalWriter.newLine();
                }
                totalWriter.flush();
            }
        } catch (IOException e1) {
            e1.printStackTrace();
        }
    }

    /** Writes each entry of {@code lines} as one UTF-8 line to {@code file}; the writer is always closed. */
    private static void writeLines(String file, List<String> lines) throws IOException {
        try (BufferedWriter w = Files.newBufferedWriter(Paths.get(file), StandardCharsets.UTF_8)) {
            for (String line : lines) {
                w.write(line);
                w.newLine();
            }
            w.flush();
        }
    }

    /**
     * Chooses the dominant intention for a corpus key: the intention with the
     * highest count, provided it has at least 50 occurrences and more than 80%
     * of the total.
     *
     * @param key  corpus text (kept for debugging context; not used in the decision)
     * @param data intention -> occurrence count
     * @return "intention\tcount" for the winner, or null when no intention dominates
     */
    private static String choiceRight(String key, Map<String, Integer> data) {
        // NOTE: the original computed MapUtils.sortedByValue(data) and never used
        // the result; that dead call has been removed.
        int total = 0;
        String firstIntention = null;
        int firstNum = 0;
        // single pass: track the max-count intention and the overall total
        for (Map.Entry<String, Integer> entry : data.entrySet()) {
            int count = entry.getValue();
            if (count > firstNum) {
                firstIntention = entry.getKey();
                firstNum = count;
            }
            total += count;
        }
        if (firstNum >= 50 && (float) firstNum / total > 0.8) {
            return firstIntention + "\t" + firstNum;
        }
        return null;
    }


    /**
     * Normalizes a space-separated token string by trimming each token and
     * sorting them, so that corpora differing only in word order compare equal.
     *
     * @param text space-separated tokens
     * @return the trimmed tokens in sorted order, joined by single spaces
     */
    public static String transferText2(String text) {
        List<String> list = new ArrayList<>();
        for (String str : text.split(" ")) {
            list.add(str.trim());
        }
        Collections.sort(list);
        // String.join produces the same output as the commons-lang StringUtils.join call
        return String.join(" ", list);
    }

    /**
     * Tallies the intention distribution of a single corpus file into the
     * shared recordMap. Each line is "tokens \t intention ..."; lines whose
     * intention contains "out-of-voca" are skipped. Corpora are grouped by
     * their order-insensitive normal form (transferText2), and the first raw
     * token string seen for each normal form is used as the canonical key.
     *
     * @param rawPath path of the corpus file to process
     */
    public static void staticWordsDistribInFile(String rawPath) throws Exception{
        Path filePath = Paths.get(rawPath);
        try (BufferedReader bufferReader = Files.newBufferedReader(filePath, StandardCharsets.UTF_8)) {
            String line;
            while ((line = bufferReader.readLine()) != null) {
                String[] words = line.split("\t");
                // guard against malformed lines without an intention column
                if (words.length < 2) {
                    continue;
                }
                // contains() also matches at index 0, which the original
                // indexOf(...) > 0 test silently missed
                if (words[1].contains("out-of-voca")) {
                    continue;
                }
                String transText = transferText2(words[0]);
                // remember the first raw form seen for this normal form
                wordsTransMap.putIfAbsent(transText, words[0]);
                String canonical = wordsTransMap.get(transText);
                Map<String, Integer> wordMap =
                        recordMap.computeIfAbsent(canonical, k -> new HashMap<>());
                wordMap.merge(words[1], 1, Integer::sum);
            }
            System.out.println("current size = " + recordMap.size());
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Replaces every "execute" with "default" in the intention string and,
     * when the intention has four or more '#'-separated slots, keeps only the
     * first three.
     *
     * @param text raw intention string, slots separated by '#'
     * @return the normalized intention
     */
    public static String removeLastSlot(String text){
        // plain replace() suffices: the pattern is a literal, no regex needed
        text = text.replace("execute", "default");
        String[] intentions = text.split("#");
        if (intentions.length < 4) {
            return text;
        }
        return intentions[0] + "#" + intentions[1] + "#" + intentions[2];
    }

    /**
     * Loads the custom segmentation dictionary: every line of
     * customercutfile.txt plus every word of every synonym line in
     * similarwords.txt is added to HanLP's CustomDictionary.
     */
    public static void loCustomDict() {
        String rawPath = "D:\\gcc-omcp\\new\\nlp\\customercutfile.txt";
        String similarFile = "D:\\gcc-omcp\\new\\nlp\\similarwords.txt";
        // Each reader gets its own try-with-resources: the original reassigned a
        // single reader variable and leaked the first stream.
        try (BufferedReader cutReader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            String line;
            int count = 0; // counts words actually added; the original started at 1 and over-reported by one
            while ((line = cutReader.readLine()) != null) {
                CustomDictionary.add(line);
                count++;
            }
            System.out.println("加载自定义词个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
        // synonym words must also be present in the custom dictionary
        try (BufferedReader simReader = Files.newBufferedReader(Paths.get(similarFile), StandardCharsets.UTF_8)) {
            String line;
            int count = 0;
            while ((line = simReader.readLine()) != null) {
                for (String word : line.split(",")) {
                    CustomDictionary.add(word);
                    count++;
                }
            }
            System.out.println("同义词生成自定义个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Loads custom stop words from customerstopfile_small.txt into HanLP's
     * CoreStopWordDictionary; optionally also loads the low-frequency stop
     * word list.
     *
     * @param addUnderTopN when true, additionally run loadFreqUnderTopNStopWords()
     */
    public static void loadStopWords(boolean addUnderTopN) {
        String rawPath = "D:\\gcc-omcp\\new\\nlp\\customerstopfile_small.txt";
        try (BufferedReader bufferReader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            String line;
            int count = 0; // counts lines actually added; the original started at 1 and over-reported by one
            while ((line = bufferReader.readLine()) != null) {
                CoreStopWordDictionary.add(line);
                count++;
            }
            System.out.println("加载停词个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
        if (addUnderTopN) {
            loadFreqUnderTopNStopWords();
        }
    }

    /**
     * Adds words whose usage frequency fell below a threshold (precomputed into
     * 95511_stop_words_under_100.txt) to HanLP's stop word dictionary.
     */
    public static void loadFreqUnderTopNStopWords(){
        String rawPath = "D:\\gcc-omcp\\trainning\\95511_stop_words_under_100.txt";
        try (BufferedReader bufferReader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            String line;
            int count = 0; // counts lines actually added; the original started at 1 and over-reported by one
            while ((line = bufferReader.readLine()) != null) {
                CoreStopWordDictionary.add(line);
                count++;
            }
            System.out.println("underTopN加载停词个数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Validates an aggregated corpus file ("tokens \t intention \t count"):
     * reports lines whose order-insensitive token form duplicates an earlier
     * line, reports malformed lines, and prints the total record count.
     *
     * @param rawFileName file to check
     */
    public static void checkFile(String rawFileName) throws Exception{
        Path filePath = Paths.get(rawFileName);
        try (BufferedReader bufferReader = Files.newBufferedReader(filePath, StandardCharsets.UTF_8)) {
            String line;
            int count = 0;
            Set<String> set = new HashSet<>();
            // NOTE: the original carried an always-empty debug "testSet" whose
            // branch could never fire; that dead scaffold has been removed.
            while ((line = bufferReader.readLine()) != null) {
                String[] splits = line.split("\t");
                if (splits.length == 3) {
                    String trans = transferText2(splits[0]);
                    // Set.add returns false when the normal form was already seen
                    if (!set.add(trans)) {
                        System.out.println("error:" + splits[0] + "=" + line);
                    }
                    count += Integer.parseInt(splits[2].trim());
                } else {
                    System.out.println("error data :" + line);
                }
            }
            System.out.println("总记录数：" + count);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Dumps the synonym usage statistics, sorted by value via
     * MapUtils.sortedByValue, to outFileName as "key\tcount" lines.
     *
     * @param outFileName destination file (UTF-8, overwritten if it exists)
     */
    public static void printSynStatis(String outFileName) throws Exception{
        Map<String, Integer> sorted = MapUtils.sortedByValue(synonymUserStatMap);
        BufferedWriter out = Files.newBufferedWriter(Paths.get(outFileName), StandardCharsets.UTF_8);
        try {
            for (Map.Entry<String, Integer> entry : sorted.entrySet()) {
                out.write(entry.getKey() + "\t" + entry.getValue());
                out.newLine();
            }
            out.flush();
        } finally {
            out.close();
        }
    }

    /**
     * Collapses consecutive duplicate words: "a a b a" becomes "a b a".
     * Equivalent to the original implementation but O(n): the original ran a
     * List.contains scan per word, and its contains/last-element logic reduces
     * exactly to "skip a word equal to the previous kept word".
     *
     * @param tokenizeList input tokens
     * @return list with runs of identical adjacent words collapsed to one
     */
    public static List<String> removeContinuousRepeatWord(List<String> tokenizeList){
        if (tokenizeList.size() == 1) {
            return tokenizeList;
        }
        List<String> noRepeatList = new ArrayList<>(tokenizeList.size());
        for (String word : tokenizeList) {
            if (noRepeatList.isEmpty() || !noRepeatList.get(noRepeatList.size() - 1).equals(word)) {
                noRepeatList.add(word);
            }
        }
        return noRepeatList;
    }


    /**
     * Removes "ABAB" oscillation patterns from a token sequence by building
     * the output incrementally: whenever the next word would complete an
     * A-B-A-B window, the trailing 'A' is dropped instead of appending the
     * word. Inputs of three or fewer tokens are returned untouched.
     */
    private static List<String> removeABAB(List<String> tokenizeList){
        if (tokenizeList.size() <= 3) {
            return tokenizeList;
        }
        List<String> filtered = new ArrayList<>();
        for (String token : tokenizeList) {
            addElementRemoveABAB(filtered, token);
        }
        return filtered;
    }

    /**
     * Appends {@code word} to {@code list} unless doing so would complete an
     * ABAB pattern over the last three elements; in that case the last element
     * is removed instead.
     *
     * @return the same list instance, for chaining
     */
    private static List<String> addElementRemoveABAB(List<String> list, String word){
        int size = list.size();
        if (size < 3) {
            list.add(word);
            return list;
        }
        int last = size - 1;
        boolean completesAbab =
                word.equals(list.get(last - 1)) && list.get(last).equals(list.get(last - 2));
        if (completesAbab) {
            list.remove(last);
        } else {
            list.add(word);
        }
        return list;
    }
    /**
     * Builds a synthetic corpus line from an intention string: each
     * '#'-separated slot of the form name=value contributes the tokenized
     * value, except for the placeholder values default/execute/chat/集外.
     * Returns null as soon as any slot lacks a '=value' part.
     *
     * @param rawIntention raw intention string, slots separated by '#'
     * @return space-joined tokenized slot values, or null for malformed intentions
     */
    public static String intentionToWord(String rawIntention){
        List<String> tokens = new ArrayList<>();
        for (String slot : rawIntention.trim().split("#")) {
            String[] pair = slot.split("=");
            if (pair.length < 2) {
                // malformed slot: reject the whole intention
                return null;
            }
            String value = pair[1];
            boolean placeholder = "default".equals(value) || "execute".equals(value)
                    || "chat".equals(value) || "集外".equals(value);
            if (!placeholder) {
                tokens.add(tokenize(value));
            }
        }
        return StringUtils.join(tokens.iterator(), " ");
    }

    /**
     * Entry point for the aggregation pass: loads the custom dictionary, then
     * aggregates the transformed corpora under trans_1_20 into
     * error/right/right2/total files and verifies the resulting total file.
     * Earlier pipeline stages (raw extraction via extract(), stop-word loading,
     * synonym statistics via printSynStatis()) are run by invoking those
     * methods here as needed; the previously commented-out scaffolding for
     * them has been removed.
     */
    public static void main(String[] args) {
        loCustomDict();
        try {
            String sourceDir = "D:\\gcc-omcp\\new\\trans_1_20\\";
            String outDir = "D:\\gcc-omcp\\new\\data\\";
            String outFileName = "027_total_words_error_all.txt";
            String rightFileName = "027_total_words_right_all.txt";
            String rightFileName2 = "027_total_words_right_all2.txt";
            String totalFile = "027_total_words.txt";
            staticWordsDistrib(sourceDir, outDir + outFileName, outDir + rightFileName,
                    outDir + rightFileName2, outDir + totalFile);
            checkFile(outDir + totalFile);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
