package com.infosys.newSimpleInvertedIndex.engine;

import java.util.*;
import java.util.concurrent.*;

import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.MinMaxPriorityQueue;

import com.infosys.newSimpleInvertedIndex.models.*;

/**
 * An inverted index over a {@link Portfolio} of parsed documents that supports
 * TF-IDF cosine-similarity search. All index structures are built once in the
 * constructor and are immutable thereafter, so concurrent {@link #search}
 * calls are safe.
 */
public class InvertedIndex implements TextSearchInterface{
    /** Upper bound on worker threads used per search (at least 1). */
    private static final int LOGICAL_CORE_NUM =
            Math.max(Runtime.getRuntime().availableProcessors(), 1);

    private final Portfolio portfolio;

    /** term -> set of documents containing that term (the inverted index proper). */
    private ImmutableMap<String, Set<ParsedDoc>> termToParsedDocs;

    /** document -> its precomputed TF-IDF metrics. */
    private ImmutableMap<ParsedDoc, ParsedDocMetrics> parsedDocsToMetrics;

    private final DocumentParser documentParser;

    /**
     * Builds the index over every parsed document in {@code portfolio}.
     *
     * @param portfolio the document collection to index
     */
    public InvertedIndex(Portfolio portfolio){
        this.portfolio = portfolio;

        init();

        this.documentParser = new DocumentParser();
    }

    /** Builds the term->documents map and the per-document metrics map. */
    private void init(){
        // Map each term to the set of documents that contain it.
        Map<String, Set<ParsedDoc>> tempTermToParsedDocs = new HashMap<>();

        for (ParsedDoc parsedDoc : this.portfolio.getParsedDocuments()){
            for (String word : parsedDoc.getUniqueWords()){
                // computeIfAbsent avoids the containsKey/put double lookup.
                tempTermToParsedDocs.computeIfAbsent(word, k -> new HashSet<>()).add(parsedDoc);
            }
        }

        this.termToParsedDocs = ImmutableMap.copyOf(tempTermToParsedDocs);

        // Precompute TF-IDF metrics for every indexed document.
        Map<ParsedDoc, ParsedDocMetrics> tempMetricMap = new HashMap<>();
        for (ParsedDoc parsedDoc : this.portfolio.getParsedDocuments()){
            tempMetricMap.put(parsedDoc, new ParsedDocMetrics(this.portfolio, parsedDoc, this.termToParsedDocs));
        }
        this.parsedDocsToMetrics = ImmutableMap.copyOf(tempMetricMap);
    }

    @Override
    public int numDocuments(){ return this.portfolio.size(); }

    @Override
    public int numTerms() {
        return this.termToParsedDocs.size();
    }

    /**
     * Searches the index for documents relevant to {@code searchTerm}, ranked
     * by descending cosine similarity.
     *
     * @param searchTerm the raw query text
     * @param maxResults maximum number of results to return
     * @return the top results (highest score first) plus timing and total-hit count
     */
    @Override
    public SearchResultCollection search(String searchTerm, int maxResults) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Document searchDoc = new Document("search", searchTerm);
        ParsedDoc parsedSearchDoc = this.documentParser.parseDocument(searchDoc);
        Set<ParsedDoc> relevantDocs = getRelevantDocuments(parsedSearchDoc);
        if (relevantDocs.isEmpty() || parsedSearchDoc.isEmpty()){
            return new SearchResultCollection(Collections.emptyList(), stopwatch, 0);
        }

        // Thread-safe sink for the per-thread scoring results.
        final Collection<SearchResult> searchResults = new ConcurrentLinkedDeque<>();
        List<ParsedDoc> relevantDocsList = new ArrayList<>(relevantDocs);
        ParsedDocMetrics parsedSearchDocMetrics =
                new ParsedDocMetrics(portfolio, parsedSearchDoc, this.termToParsedDocs);

        // BUG FIX: Lists.partition's second argument is the SIZE of each sublist,
        // not the number of sublists. The old code chunked into groups of
        // LOGICAL_CORE_NUM docs, spawning size/cores threads (unbounded for large
        // result sets). Use a chunk size that yields at most LOGICAL_CORE_NUM threads.
        int chunkSize = Math.max(1,
                (relevantDocsList.size() + LOGICAL_CORE_NUM - 1) / LOGICAL_CORE_NUM);

        List<Thread> threads = new ArrayList<>();
        for (final List<ParsedDoc> partition : Lists.partition(relevantDocsList, chunkSize)){
            Thread thread = new Thread(() -> {
                // Each thread scores its own slice of the candidate documents.
                for (ParsedDoc parsedDoc : partition){
                    double cosine = calcCosine(parsedSearchDocMetrics, parsedDoc);
                    searchResults.add(new SearchResult(parsedDoc.getId(), cosine));
                }
            });
            threads.add(thread);
            thread.start();
        }
        for (Thread thread : threads){
            try {
                thread.join();
            } catch (InterruptedException e) {
                // Restore the interrupt flag instead of swallowing it.
                Thread.currentThread().interrupt();
            }
        }

        int heapSize = Math.min(searchResults.size(), maxResults);

        // Bounded heap keeping the top-N results by descending relevance.
        // Double-based Comparator replaces the old hand-written one, which never
        // returned 0 and was asymmetric for equal scores (contract violation).
        // With the reversed ordering, poll() yields the highest score first,
        // matching the original output order.
        MinMaxPriorityQueue<SearchResult> maxHeap = MinMaxPriorityQueue
                .<SearchResult>orderedBy(
                        Comparator.comparingDouble(SearchResult::getRelevanceScore).reversed())
                .maximumSize(heapSize)
                .expectedSize(heapSize)
                .create(searchResults);

        List<SearchResult> rankedResults = new ArrayList<>(heapSize);
        while (!maxHeap.isEmpty()){
            rankedResults.add(maxHeap.poll());
        }

        return new SearchResultCollection(rankedResults, stopwatch, searchResults.size());
    }

    /** Union of all indexed documents containing at least one query term. */
    private Set<ParsedDoc> getRelevantDocuments(ParsedDoc parsedSearchDoc){
        Set<ParsedDoc> relevantDocs = new HashSet<>();
        for (String word : parsedSearchDoc.getUniqueWords()){
            Set<ParsedDoc> docsForWord = this.termToParsedDocs.get(word);
            if (docsForWord != null){
                relevantDocs.addAll(docsForWord);
            }
        }
        return relevantDocs;
    }

    /**
     * Cosine similarity between the query and a candidate document: the dot
     * product of their magnitude-normalized TF-IDF vectors. Iterates over the
     * smaller of the two word sets; terms absent from either side contribute 0.
     *
     * @param parsedSearchDocMetrics metrics of the parsed query
     * @param parsedDoc the candidate indexed document
     * @return cosine similarity score
     */
    private double calcCosine(ParsedDocMetrics parsedSearchDocMetrics, ParsedDoc parsedDoc){
        Set<String> wordSet = parsedSearchDocMetrics.getParsedDoc().getUniqueWords();
        if (parsedDoc.getUniqueWords().size() < wordSet.size()){
            wordSet = parsedDoc.getUniqueWords();
        }
        // Hoist loop-invariant lookups/computations out of the loop; the old code
        // re-fetched the metrics and recomputed both magnitudes every iteration.
        // (Assumes calcMagnitude() is deterministic for a fixed document, which
        // its repeated in-loop use in the original already required.)
        ParsedDocMetrics docMetrics = parsedDocsToMetrics.get(parsedDoc);
        double searchMagnitude = parsedSearchDocMetrics.calcMagnitude();
        double docMagnitude = docMetrics.calcMagnitude();

        double dotProduct = 0;
        for (String word : wordSet){
            dotProduct += (parsedSearchDocMetrics.calcTfidf(word) / searchMagnitude)
                    * (docMetrics.calcTfidf(word) / docMagnitude);
        }
        return dotProduct;
    }

}
