package com.seanLab.tool.TagBat;

import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.ansj.library.DicLibrary;
import org.ansj.library.StopLibrary;
import org.ansj.lucene6.AnsjAnalyzer;
import org.ansj.util.MyStaticValue;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealVector;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.mlt.MoreLikeThis;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.logging.Logger;

/***
 * Deduplicates near-identical articles in a JSON-lines dataset by indexing
 * them with Lucene/Ansj and comparing term-frequency cosine similarity.
 * Superseded by the Deduplication tool in the Data_Process project, hence
 * {@code @Deprecated}.
 */
@Deprecated
public class TextDeduplication {
    private static final Logger LOG = Logger.getLogger(TextDeduplication.class.getName());
    // Root working directory holding the dictionary and stop-word resources.
    private static final String directory = "workingDir";
    private static final String dicDir = directory + File.separator + "dict";
    // Input datasets; both are overridden by command-line arguments in main().
    private static String articleDataset = "chens.article_20180306_500的副本.json";
    private static String imageDataset = "chens.validImage_20180306_5000.json";
    private static int threadNum = 1;
    // Log progress every verboseStep processed documents.
    private static int verboseStep = 100;
    // Cosine-similarity cutoff above which two articles count as duplicates.
    private static double threshold  = 0.95;
    private static final FieldType TYPE = new FieldType();

    // Global progress counter shared by worker threads; guarded by tagCountLock.
    private static int tagCount = 0;
    private static final Object tagCountLock = new Object();

    // Configuration passed to every AnsjAnalyzer instance.
    private static final Map<String, String> ansjMap;

    static {
        // Analyzed text fields are stored, tokenized and carry term vectors so
        // that getTermFrequencies() can rebuild per-document term counts.
        TYPE.setOmitNorms(true);
        TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
        TYPE.setStoreTermVectors(true);
        TYPE.setStored(true);
        TYPE.setTokenized(true);
        TYPE.freeze();

        // Register the user dictionary and the three stop-word lists with Ansj.
        MyStaticValue.putLibrary(DicLibrary.DEFAULT, dicDir);
        MyStaticValue.reloadLibrary(DicLibrary.DEFAULT);
        MyStaticValue.putLibrary(StopLibrary.DEFAULT + "_cn", "workingDir/stop/chinese_stop_word.txt");
        MyStaticValue.putLibrary(StopLibrary.DEFAULT + "_en", "workingDir/stop/stopwords_en.txt");
        MyStaticValue.putLibrary(StopLibrary.DEFAULT + "_sina", "workingDir/stop/chinese_stop_word_sina.txt");
        MyStaticValue.reloadLibrary(StopLibrary.DEFAULT + "_cn");
        MyStaticValue.reloadLibrary(StopLibrary.DEFAULT + "_en");
        MyStaticValue.reloadLibrary(StopLibrary.DEFAULT + "_sina");

        ansjMap = new HashMap<>();
        ansjMap.put("type", AnsjAnalyzer.TYPE.query_ansj.name());
        ansjMap.put("dic", DicLibrary.DEFAULT);
        ansjMap.put("stop", StopLibrary.DEFAULT + "_cn" + "," + StopLibrary.DEFAULT + "_en" + "," + StopLibrary.DEFAULT + "_sina");
    }

    /**
     * Entry point. Dispatches to index building, duplicate detection,
     * consistency check / statistics, and dataset rewriting.
     *
     * @param args articleDataset, imageDataset, action, verboseStep, threadNum, threshold
     * @throws IOException if any dataset or index file cannot be accessed
     */
    public static void main(String[] args) throws IOException{
        LOG.info("Remove duplicate article!");
        String usage = "Usage: java -jar xx.jar $articleDataset $imageDataset $action[index|remove|check|rewrite|auto] $verboseStep $threadNum $threshold";
        if (args.length != 6) {
            System.out.println(usage);
            return;
        }
        articleDataset = args[0];
        imageDataset = args[1];
        String action = args[2];
        try {
            verboseStep = Integer.parseInt(args[3]);
            threadNum = Integer.parseInt(args[4]);
            threshold = Double.parseDouble(args[5]);
        } catch (NumberFormatException e) {
            // Malformed numeric argument: show usage instead of crashing.
            System.out.println(usage);
            return;
        }
        LOG.info("article: " + articleDataset);
        LOG.info("image: " + imageDataset);
        LOG.info("step: " + verboseStep);
        LOG.info("thread: " + threadNum);
        LOG.info("threshold: " + threshold);
        switch (action) {
            case "index":
                new TextDeduplication().buildArticleIndex();
                break;
            case "remove":
                new TextDeduplication().autoRemove();
                break;
            case "statics": // undocumented legacy alias of "check"
            case "check":
                new TextDeduplication().check();
                break;
            case "rewrite":
                new TextDeduplication().rewrite();
                break;
            case "auto":
                // Full pipeline: index, detect duplicates, verify, rewrite.
                TextDeduplication dedup = new TextDeduplication();
                dedup.buildArticleIndex();
                dedup.autoRemove();
                dedup.check();
                dedup.rewrite();
                break;
            default:
                System.out.println(usage);
        }
    }

    /** Directory name of the Lucene index, derived from the article dataset path. */
    private String getIndexName() {
        String indexName = articleDataset.replace(".json", "_Term_Index");
        return indexName;
    }

    /** Path of the "tmp" directory placed next to the article dataset. */
    private String getTmpDir() {
        int cut = articleDataset.lastIndexOf(File.separator) + 1;
        return articleDataset.substring(0, cut) + "tmp";
    }

    /** Bare file name of the article dataset (path prefix stripped). */
    private String getArticleFileName() {
        int sep = articleDataset.lastIndexOf(File.separator);
        return articleDataset.substring(sep + 1);
    }

    /** Per-thread duplication result file inside the tmp directory. */
    private String getDuplicationFile(String threadName) {
        String fileName = getArticleFileName().replace(".json", "_duplication_" + threadName + ".json");
        return getTmpDir() + File.separator + fileName;
    }

    /** CSV statistics file name, derived from the article dataset path. */
    private String getCsvResultFile() {
        String csvName = articleDataset.replace(".json", "_result.csv");
        return csvName;
    }

    /** Output file name for the deduplicated copy of {@code dataset}. */
    private String getFinalResultJson(String dataset) {
        String resultName = dataset.replace(".json", "_deduplicate.json");
        return resultName;
    }

    /** Opens the on-disk Lucene index and wraps it in a searcher. */
    private IndexSearcher loadIndex() throws IOException {
        Directory dir = FSDirectory.open(Paths.get(getIndexName()));
        return new IndexSearcher(DirectoryReader.open(dir));
    }

    /**
     * Builds a Lucene term-vector index over the article dataset: one document
     * per JSON line with fields articleId, channel, title and content.
     *
     * @throws IOException if the dataset cannot be read or the index written
     */
    public void buildArticleIndex() throws IOException{
        long start = System.currentTimeMillis();
        File modelDir = new File(getIndexName());
        modelDir.mkdirs();

        IndexWriterConfig config = new IndexWriterConfig(new AnsjAnalyzer(ansjMap));
        // Use CREATE (not the default CREATE_OR_APPEND) so re-running replaces
        // any stale index instead of appending duplicate documents; the old
        // File.delete() call could not remove a non-empty directory anyway.
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);

        JsonParser jsonParser = new JsonParser();
        int count = 0;
        try (Directory indexDir = FSDirectory.open(Paths.get(modelDir.getPath()));
             IndexWriter indexWriter = new IndexWriter(indexDir, config);
             // Datasets contain Chinese text: read as UTF-8 explicitly instead
             // of relying on the platform default charset.
             BufferedReader reader = new BufferedReader(new InputStreamReader(
                     new FileInputStream(articleDataset), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                JsonObject object = jsonParser.parse(line).getAsJsonObject();
                String articleId = object.get("_id").getAsString();
                String title = object.get("title").getAsString();
                String content = object.get("content").getAsString();
                String channel = object.get("channel").getAsString();

                Document doc = new Document();
                doc.add(new StringField("articleId", articleId, Field.Store.YES));
                doc.add(new StringField("channel", channel, Field.Store.YES));
                // title/content use TYPE so term vectors are stored for
                // later cosine-similarity comparison.
                doc.add(new Field("title", title, TYPE));
                doc.add(new Field("content", content, TYPE));
                indexWriter.addDocument(doc);

                if (++count % verboseStep == 0) {
                    LOG.info("index article " + count);
                }
            }
        }
        LOG.info("total index article " + count);
        long end = System.currentTimeMillis();
        LOG.info("Build Index: " + String.valueOf(end - start) + "ms");
    }

    /**
     * Scans the whole index with MoreLikeThis and records pairs of
     * near-duplicate articles (cosine similarity above {@code threshold})
     * into per-thread duplication files under the tmp directory.
     *
     * @throws IOException if the index or result files cannot be accessed
     */
    public void autoRemove() throws IOException {
        IndexSearcher searcher = loadIndex();
        IndexReader reader = searcher.getIndexReader();
        AnsjAnalyzer ansjAnalyzer = new AnsjAnalyzer(ansjMap);

        // Recreate the tmp directory. File.delete() cannot remove a non-empty
        // directory, so delete stale per-thread result files individually —
        // otherwise leftovers from a previous run with more threads would
        // pollute check()/rewrite().
        File tmpFile = new File(getTmpDir());
        if (tmpFile.exists()) {
            File[] stale = tmpFile.listFiles();
            if (stale != null) {
                for (File f : stale) {
                    f.delete();
                }
            }
        }
        tmpFile.mkdirs();

        // Shared across worker threads; a plain HashSet is unsafe when
        // threadNum > 1 (risk of structural corruption under concurrent add).
        Set<Integer> removed = Collections.synchronizedSet(new HashSet<>());
        Set<Integer> protect = Collections.synchronizedSet(new HashSet<>());
        LOG.info("total doc: " + reader.maxDoc());

        ExecutorService threadPool = Executors.newFixedThreadPool(threadNum);
        List<Callable<Object>> todo = new ArrayList<>(threadNum);
        for (int i = 0; i < threadNum; i++) {
            // Each worker handles a contiguous slice of the document-id space.
            int start = i * reader.maxDoc() / threadNum;
            int end = (i + 1) * reader.maxDoc() / threadNum;
            String threadName = "Thread-" + i;
            todo.add(new Callable<Object>() {
                @Override
                public Object call() throws IOException {
                    LOG.info("start " + threadName + " from " + start + " to " + end);
                    int count = 0;
                    try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                            new FileOutputStream(getDuplicationFile(threadName)),
                            StandardCharsets.UTF_8))) {
                        for (int docNum = start; docNum < end; docNum++) {
                            if (++count % verboseStep == 0) {
                                synchronized (tagCountLock) {
                                    tagCount += count;
                                    count = 0;
                                    LOG.info(threadName + " processed article " + tagCount);
                                    System.out.println("raw: " + protect.size());
                                    System.out.println("duplicate: " + removed.size());
                                }
                            }
                            try {
                                if (removed.contains(docNum)) {
                                    continue;
                                }
                                protect.add(docNum);
                                Map<String, Integer> tf1 = getTermFrequencies(reader, docNum);
                                MoreLikeThis mlt = new MoreLikeThis(reader);
                                mlt.setMaxQueryTerms(100);
                                mlt.setBoost(true);
                                mlt.setFieldNames(new String[]{"content"});
                                mlt.setAnalyzer(ansjAnalyzer);
                                Query query = mlt.like(docNum);
                                TopDocs topDocs = searcher.search(query, 30);
                                for (ScoreDoc sdoc : topDocs.scoreDocs) {
                                    // Skip the document itself and anything already classified.
                                    if (sdoc.doc == docNum || protect.contains(sdoc.doc) || removed.contains(sdoc.doc)) {
                                        continue;
                                    }
                                    Map<String, Integer> tf2 = getTermFrequencies(reader, sdoc.doc);
                                    double similarity = cosineSimilarity(tf1, tf2);
                                    if (similarity > threshold) {
                                        writeResult(writer, reader, docNum, sdoc.doc);
                                        removed.add(sdoc.doc);
                                    }
                                }
                            } catch (Exception e) {
                                // Best effort: a malformed document (e.g. one without a
                                // term vector) must not abort the whole scan.
                                continue;
                            }
                        }
                        synchronized (tagCountLock) {
                            tagCount += count;
                            LOG.info(threadName + " processed article " + tagCount);
                            System.out.println("raw: " + protect.size());
                            System.out.println("duplicate: " + removed.size());
                        }
                    }
                    return null;
                }
            });
        }

        try {
            threadPool.invokeAll(todo);
            LOG.info("Main processed " + tagCount + " articles.(Main)");
        } catch (InterruptedException e) {
            LOG.info("failed.");
            // Restore the interrupt flag for callers higher up the stack.
            Thread.currentThread().interrupt();
        } finally {
            threadPool.shutdown();
        }

        System.out.println("raw: " + protect.size());
        System.out.println("duplicate: " + removed.size());
        reader.close();
    }

    /**
     * Appends one JSON line {"rawId": ..., "duplicateId": ...} mapping a kept
     * article to one of its detected duplicates.
     */
    private void writeResult(BufferedWriter writer, IndexReader reader, int docRaw, int docNum) throws IOException{
        String rawId = reader.document(docRaw).get("articleId");
        String duplicatedId = reader.document(docNum).get("articleId");
        JsonObject pair = new JsonObject();
        pair.addProperty("rawId", rawId);
        pair.addProperty("duplicateId", duplicatedId);
        writer.write(pair.toString());
        writer.newLine();
    }

    /**
     * Loads all per-thread duplication files, sanity-checks that no article is
     * marked both kept and removed, and writes per-channel raw/removed counts
     * to a CSV file next to the article dataset.
     *
     * @throws IOException if any result or dataset file cannot be accessed
     */
    public void check() throws IOException {
        Set<String> protect = new HashSet<>();
        Set<String> remove = new HashSet<>();
        Map<String, Integer> typeRaw = new HashMap<>();
        Map<String, Integer> typeRemove = new HashMap<>();
        JsonParser jsonParser = new JsonParser();
        String line;
        for (int i = 0; i < threadNum; i++) {
            LOG.info("loading result " + i);
            // Read results as UTF-8 and close the reader even on parse errors.
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                    new FileInputStream(getDuplicationFile("Thread-" + i)), StandardCharsets.UTF_8))) {
                while ((line = reader.readLine()) != null) {
                    JsonObject object = jsonParser.parse(line).getAsJsonObject();
                    protect.add(object.get("rawId").getAsString());
                    remove.add(object.get("duplicateId").getAsString());
                }
            }
        }
        LOG.info("check diff");
        // An id in both sets means a kept article was also flagged as duplicate.
        for (String item : protect) {
            if (remove.contains(item)) {
                System.out.println("Fuck " + item);
            }
        }
        LOG.info("statics");
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                new FileInputStream(articleDataset), StandardCharsets.UTF_8))) {
            while ((line = reader.readLine()) != null) {
                JsonObject object = jsonParser.parse(line).getAsJsonObject();
                String articleId = object.get("_id").getAsString();
                String channel = object.get("channel").getAsString();
                typeRaw.put(channel, typeRaw.getOrDefault(channel, 0) + 1);
                if (remove.contains(articleId)) {
                    typeRemove.put(channel, typeRemove.getOrDefault(channel, 0) + 1);
                }
            }
        }
        // CSV row: channel, total articles, removed articles.
        try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                new FileOutputStream(getCsvResultFile()), StandardCharsets.UTF_8))) {
            for (String channel : typeRaw.keySet()) {
                writer.write(channel + "," + typeRaw.get(channel) + "," + typeRemove.getOrDefault(channel, 0));
                writer.newLine();
            }
        }
    }

    /**
     * Merges the duplicate ids found by all worker threads and rewrites both
     * datasets without the duplicated articles and their images.
     *
     * @throws IOException if any result or dataset file cannot be accessed
     */
    public void rewrite() throws IOException {
        Set<String> remove = new HashSet<>();
        JsonParser jsonParser = new JsonParser();
        for (int i = 0; i < threadNum; i++) {
            LOG.info("loading result " + i);
            // Read results as UTF-8 and close the reader even on parse errors.
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                    new FileInputStream(getDuplicationFile("Thread-" + i)), StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    JsonObject object = jsonParser.parse(line).getAsJsonObject();
                    remove.add(object.get("duplicateId").getAsString());
                }
            }
        }
        // Articles carry the id as "_id"; images link back via "articleID".
        rewriteDataset(articleDataset, "_id", remove);
        rewriteDataset(imageDataset, "articleID", remove);
    }

    /**
     * Copies {@code dataset} line by line to its "_deduplicate" counterpart,
     * dropping every JSON line whose {@code key} value is in {@code removed}.
     *
     * @param dataset path of the JSON-lines input file
     * @param key     name of the id attribute to filter on
     * @param removed ids of duplicated articles to drop
     * @throws IOException if the input cannot be read or the output written
     */
    private void rewriteDataset(String dataset, String key, Set<String> removed) throws IOException{
        JsonParser jsonParser = new JsonParser();
        // Datasets contain Chinese text: force UTF-8 on both ends instead of
        // relying on the platform default charset.
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(
                     new FileInputStream(dataset), StandardCharsets.UTF_8));
             BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                     new FileOutputStream(getFinalResultJson(dataset)), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                JsonObject object = jsonParser.parse(line).getAsJsonObject();
                if (removed.contains(object.get(key).getAsString())) {
                    continue;
                }
                writer.write(line);
                writer.newLine();
            }
        }
    }

    /**
     * Debug helper: runs a MoreLikeThis query for document 0 and prints the
     * top matches with their cosine similarity, title and Lucene score.
     */
    public void search() throws IOException{
        int docNum = 0;
        IndexSearcher searcher = loadIndex();
        IndexReader reader = searcher.getIndexReader();

        Map<String, Integer> referenceTf = getTermFrequencies(reader, docNum);

        MoreLikeThis mlt = new MoreLikeThis(reader);
        mlt.setMaxQueryTerms(100);
        mlt.setBoost(true);
        mlt.setFieldNames(new String[] {"content"});
        mlt.setAnalyzer(new AnsjAnalyzer(ansjMap));
        Query query = mlt.like(docNum);

        System.out.println(reader.document(docNum));
        System.out.println(query.toString());

        TopDocs topDocs = searcher.search(query, 50);
        for (ScoreDoc hit : topDocs.scoreDocs) {
            // Skip the reference document itself.
            if (hit.doc == docNum) {
                continue;
            }
            Map<String, Integer> candidateTf = getTermFrequencies(reader, hit.doc);
            Document doc = reader.document(hit.doc);
            System.out.println(hit.doc);
            System.out.println(cosineSimilarity(referenceTf, candidateTf));
            System.out.println(doc.get("title"));
            System.out.println(hit.score);
            System.out.println();
        }

    }

    /**
     * Reads the stored term vector of the "content" field for one document and
     * returns a term -&gt; frequency map.
     * NOTE(review): throws NullPointerException when the document has no term
     * vector for "content"; callers in autoRemove() rely on catching that.
     */
    private Map<String, Integer> getTermFrequencies(IndexReader reader, int docId)
            throws IOException {
        Map<String, Integer> frequencies = new HashMap<>();
        Terms vector = reader.getTermVector(docId, "content");
        TermsEnum termsEnum = vector.iterator();
        for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
            frequencies.put(term.utf8ToString(), (int) termsEnum.totalTermFreq());
        }
        return frequencies;
    }

    /**
     * Projects a term-frequency map onto a dense vector over the given term
     * ordering and L1-normalizes the result.
     */
    private RealVector toRealVector(Map<String, Integer> map, Set<String> terms) {
        double[] entries = new double[terms.size()];
        int i = 0;
        for (String term : terms) {
            Integer freq = map.get(term);
            entries[i++] = (freq == null) ? 0 : freq;
        }
        RealVector vector = new ArrayRealVector(entries);
        return vector.mapDivide(vector.getL1Norm());
    }

    /** Cosine similarity of two vectors: dot product over the product of norms. */
    private double cosineSimilarity(RealVector v1, RealVector v2) {
        double dot = v1.dotProduct(v2);
        return dot / (v1.getNorm() * v2.getNorm());
    }

    /**
     * Cosine similarity of two term-frequency maps, computed over the union of
     * both vocabularies.
     */
    private double cosineSimilarity(Map<String, Integer> tf1, Map<String, Integer> tf2) {
        Set<String> vocabulary = new HashSet<>(tf1.keySet());
        vocabulary.addAll(tf2.keySet());
        return cosineSimilarity(toRealVector(tf1, vocabulary), toRealVector(tf2, vocabulary));
    }

    /** Manual smoke test: runs the MoreLikeThis search for document 0. */
    private static class TestIndex {
        public static void main(String[] args) throws IOException{
            new TextDeduplication().search();
        }
    }
}
