package com.samp.solr.solrlstm;


import com.samp.util.FilesUtils;
import com.samp.util.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.common.SolrInputDocument;

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;

/**
 * 从原始文件中生成solr所需要的文件格式，并导入到solr.
 */
public class GenerateSolrData {

    /** Utility class — all members are static; no instances. */
    private GenerateSolrData() {
    }

    /**
     * Generates the Solr corpus file from the raw training data.
     * <p>
     * Skips the header line, tokenizes column 1 of each tab-separated record
     * (expected 5 columns), derives the intention from column 3, and writes
     * de-duplicated {@code "tokens \t intention \t rawText"} lines to
     * {@code outPath}. For every intention one extra synthetic record is added
     * via {@link SolrlstmUtils#intentionToWord}.
     *
     * @param rawPath      raw tab-separated training data (UTF-8)
     * @param customPath   custom tokenizer dictionary
     * @param synonymPath  synonym dictionary
     * @param stopwodsPath stop-words dictionary (name kept as-is for caller compatibility)
     * @param outPath      output file for the generated Solr corpus
     * @throws Exception wrapping the underlying IOException with the offending line
     */
    private static void generateSolrData(String rawPath, String customPath, String synonymPath,
                                         String stopwodsPath, String outPath) throws Exception {
        String line = null;
        // try-with-resources replaces the original manual close() in finally.
        try (BufferedReader bufferReader = Files.newBufferedReader(Paths.get(rawPath), StandardCharsets.UTF_8)) {
            TokenizeUtils.loCustomDict(customPath);
            TokenizeUtils.loadStopwordsDict(stopwodsPath);
            TokenizeUtils.loadSynonymDict(synonymPath);
            List<String> resultList = new ArrayList<>();
            // Skip the header (first) line.
            bufferReader.readLine();
            // Tracks "tokens \t intention" pairs already emitted, to drop duplicates.
            Set<String> removeDulSet = new HashSet<>();
            while ((line = bufferReader.readLine()) != null) {
                if (StringUtils.isBlank(line)) {
                    continue;
                }
                String[] splits = line.split("\t");
                if (splits.length != 5) {
                    System.out.println("error data:" + line);
                    continue;
                }
                String text = splits[1];
                String intention = SolrlstmUtils.removeLastSlot(splits[3]);
                String tokenText = TokenizeUtils.tokenize(text);
                if (StringUtils.isBlank(tokenText)) {
                    continue;
                }
                String solrData = tokenText + "\t" + intention;
                // Set.add returns false on duplicates — replaces contains()+add().
                if (removeDulSet.add(solrData)) {
                    resultList.add(solrData + "\t" + text);
                }
                // Derive one additional corpus record from the intention itself.
                String newWords = SolrlstmUtils.intentionToWord(intention);
                text = "from intention";
                solrData = newWords + "\t" + intention;
                if (removeDulSet.add(solrData)) {
                    resultList.add(solrData + "\t" + text);
                }
            }
            FilesUtils.saveListToFile(resultList, outPath);
        } catch (IOException e) {
            // Keep the offending line in the message and preserve the cause.
            throw new Exception("error line:" + line, e);
        }
    }

    /**
     * Checks the generated Solr data file for anomalies and prints statistics.
     * <ul>
     *   <li>Reports token strings mapped to more than one intention.</li>
     *   <li>Prints the total number of intentions and their distribution.</li>
     *   <li>Writes the cleaned (single-intention) records to
     *       {@code processedDataFile} and the per-token frequency distribution
     *       to {@code wordsDistributionFile}.</li>
     * </ul>
     *
     * @param rawFileName           generated Solr data file (3 tab-separated columns)
     * @param processedDataFile     output: records whose tokens map to exactly one intention
     * @param wordsDistributionFile output: "token:count" lines sorted by count
     */
    private static void checkSolrDataFile(String rawFileName, String processedDataFile, String wordsDistributionFile) {
        Path filePath = Paths.get(rawFileName);
        try (BufferedReader bufferReader = Files.newBufferedReader(filePath, StandardCharsets.UTF_8)) {
            String line;
            Map<String, Set<String>> wordsMap = new HashMap<>();
            Map<String, Integer> intentionsMap = new HashMap<>();
            // Per-token frequency distribution.
            Map<String, Integer> wordsDistributionMap = new HashMap<>();
            int index = 0;
            Map<String, String> dataMap = new HashMap<>();
            while ((line = bufferReader.readLine()) != null) {
                String[] splits = line.split("\t");
                if (splits.length != 3) {
                    System.out.println("error data:" + line);
                    // BUGFIX: the original fell through and indexed splits[1]/splits[2],
                    // throwing ArrayIndexOutOfBoundsException on malformed lines.
                    continue;
                }
                wordsMap.computeIfAbsent(splits[0], k -> new HashSet<>()).add(splits[1]);
                dataMap.put(splits[0] + "_" + splits[1], splits[2]);
                intentionsMap.merge(splits[1], 1, Integer::sum);

                for (String s : splits[0].split(" ")) {
                    wordsDistributionMap.merge(s, 1, Integer::sum);
                }
                index++;
            }
            System.out.println(">>>>>>>>print error solr data start>>>>>>>>>");
            List<String> removeDulWordsList = new ArrayList<>();
            for (Map.Entry<String, Set<String>> entry : wordsMap.entrySet()) {
                String key = entry.getKey();
                Set<String> value = entry.getValue();
                if (value.size() > 1) {
                    // Same token string mapped to multiple intentions — report each pair.
                    for (String intention : value) {
                        String mapkey = key + "_" + intention;
                        if (dataMap.get(mapkey) != null) {
                            System.out.println(mapkey + ", rawwords=" + dataMap.get(mapkey));
                        } else {
                            System.out.println(mapkey + " not found in map");
                        }
                    }
                } else if (value.size() == 1) {
                    for (String intention : value) {
                        removeDulWordsList.add(key + "\t" + intention);
                    }
                }
            }
            if (wordsMap.size() != index) {
                System.out.println("文件语料大小:" + index + ", wordsSet大小：" + wordsMap.size());
            } else {
                System.out.println("文件语料大小和wordsSet大小一致");
            }
            // Save the cleaned Solr data (tokens with exactly one intention).
            FilesUtils.saveListToFile(removeDulWordsList, processedDataFile);
            System.out.println("<<<<<<<<<print error solr data end<<<<<<<<<");
            System.out.println(">>>>>>>>print intention info start>>>>>>>>>");
            System.out.println("共有意图：" + intentionsMap.size() + ",分布如下：");
            Map<?, ?> sortMap = MapUtils.sortedByValue(intentionsMap);
            for (Map.Entry<?, ?> entry : sortMap.entrySet()) {
                System.out.println(entry.getKey() + ":" + entry.getValue());
            }
            System.out.println("<<<<<<<<<print intention info end<<<<<<<<<");
            List<String> wordsDistributionList = new ArrayList<>();
            Map<?, ?> sortWordsDistributionMap = MapUtils.sortedByValue(wordsDistributionMap);
            for (Map.Entry<?, ?> entry : sortWordsDistributionMap.entrySet()) {
                wordsDistributionList.add(entry.getKey() + ":" + entry.getValue());
            }
            FilesUtils.saveListToFile(wordsDistributionList, wordsDistributionFile);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Uploads the corpus file to the given Solr collection.
     * <p>
     * Each line must contain two tab-separated columns: tokenized content and
     * intention. Documents are committed in batches of 50,000 and once more at
     * the end of the file.
     *
     * @param rawPath        corpus file ("tokens \t intention" per line, UTF-8)
     * @param collectionName target Solr collection
     * @param delete         when true, deletes all existing documents first
     * @throws Exception on Solr client failures
     */
    private static void uploadFileToSolr(String rawPath, String collectionName, boolean delete) throws Exception {
        Path filePath = Paths.get(rawPath);
        // try-with-resources replaces the original manual close() in finally.
        try (BufferedReader bufferReader = Files.newBufferedReader(filePath, StandardCharsets.UTF_8)) {
            String solrUrl = SolrlstmUtils.BASE_SOLR_URL + collectionName;
            SolrClient solrClient = SolrlstmUtils.getUpdateSolrClient(solrUrl);
            if (delete) {
                solrClient.deleteByQuery(SolrlstmUtils.SOLR_FIELD_CONTENT + ":*");
            }
            int i = 0;
            long start = System.currentTimeMillis();
            String line;
            while ((line = bufferReader.readLine()) != null) {
                String[] words = line.split("\t");
                if (words.length != 2) {
                    System.out.println("error line:" + line);
                    continue;
                }
                SolrInputDocument document = new SolrInputDocument();
                document.addField(SolrlstmUtils.SOLR_FIELD_ID, UUID.randomUUID().toString().toUpperCase());
                document.addField(SolrlstmUtils.SOLR_FIELD_CONTENT, words[0]);
                document.addField(SolrlstmUtils.SOLR_FIELD_CONTENT_FULL_MATCH, words[0]);
                document.addField(SolrlstmUtils.SOLR_FIELD_INTENTION, words[1]);
                solrClient.add(document);
                i++;
                if (i % 50000 == 0) {
                    // BUGFIX: the original only logged here and never committed the batch,
                    // so "commit doc every 50000 docs." was printed without any commit.
                    solrClient.commit();
                    System.out.println("commit doc every 50000 docs.");
                    System.out.println(".......cost:" + (System.currentTimeMillis() - start));
                    start = System.currentTimeMillis();
                }
            }
            solrClient.commit();
            System.out.println("last commit.");
        } catch (IOException e) {
            // Best-effort behavior kept from the original: log and return.
            e.printStackTrace();
        }
    }


    /**
     * Entry point. Selects which step(s) to run via {@code callMethod}:
     * "generateSolrData", "checkSolrDataFile", "uploadFileToSolr", or "all".
     */
    public static void main(String[] args) {
        String callMethod = "uploadFileToSolr";
        String collectionDirName = "lifeareanavi";
        String collectionFullPath = SolrlstmUtils.BASE_PATH + collectionDirName + "\\";
        String solrFilePath = collectionFullPath + "solr_data_02.txt";
        String processedSolrFilePath = collectionFullPath + "solr_data_02_xx.txt";
        try {
            if ("generateSolrData".equals(callMethod) || "all".equals(callMethod)) {
                String rawPath = collectionFullPath + "raw_training_data.txt";
                String customPath = collectionFullPath + "tokenize_custom_xx.txt";
                String synonymPath = collectionFullPath + "tokenize_synonym_xx.txt";
                String stopWordsPath = collectionFullPath + "stopwords.txt";
                generateSolrData(rawPath, customPath, synonymPath, stopWordsPath, solrFilePath);
            }
            if ("checkSolrDataFile".equals(callMethod) || "all".equals(callMethod)) {
                String wordsDistributionFile = collectionFullPath + "words_distribution.txt";
                checkSolrDataFile(solrFilePath, processedSolrFilePath, wordsDistributionFile);
            }
            if ("uploadFileToSolr".equals(callMethod) || "all".equals(callMethod)) {
                String collectionName = "GCC-OMCP_NLPLifeAreaExtensionNavi";
                boolean deleteExists = true;
                uploadFileToSolr(processedSolrFilePath, collectionName, deleteExists);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        System.out.println("done all!!!");
    }
}
