package cs429_project.clustering;

import java.io.PrintStream;

import java.math.BigDecimal;
import java.math.RoundingMode;

import java.util.*;

/**
 * Class that performs k-means clustering for a group of documents, represented
 * as instances of the {@link cs429_project.clustering.DocVec} class.
 * @author Peter Ferrans
 */
public class KMeansClustering {

    /** Smoothing weight (Jelinek-Mercer style) used when mixing a cluster's
     * maximum likelihood estimate with the whole-collection estimate. */
    private static final BigDecimal LAMBDA = BigDecimal.ONE;
    /** Hard cap on reassignment passes, guarding against documents that
     * oscillate between clusters and never settle. */
    private static final int MAX_ITERATIONS = 1000;
    /** Number of clusters to be used. */
    private final Integer k;
    /** All of the clusters used in this clustering. */
    private DocCluster[] docClusters;
    /** Inverse document frequencies for each term that appears in a document
     * in these clusters. */
    private TreeMap<String, BigDecimal> invDocFreqs;
    /** Number of documents in these clusters. */
    private final BigDecimal numDocs;

    /**
     * Creates a k-means clustering but does not sort documents into their
     * final clusters.  Users should call
     * {@link KMeansClustering#createClusters(java.lang.Integer, java.util.Set)}
     * instead of this.
     * @param k
     *    Number of clusters.
     * @param docs
     *    Documents to cluster.
     */
    private KMeansClustering(Integer k, Set<DocVec> docs) {
        this.k = k;
        // Use the exact long overload; the previous (double) conversion was
        // both lossy in principle and produced a scale-1 BigDecimal.
        this.numDocs = BigDecimal.valueOf(docs.size());
        setUpMembers(docs);
    }

    /**
     * Given a value for <em>k</em> and a set of document vectors, creates a
     * k-means clustering for the documents.  The clustering that it returns
     * has all of its documents sorted into their final clusters.
     * @param k
     *    The number of clusters to use in this clustering.
     * @param docs
     *    Documents to cluster.
     * @return
     *    Clustering of all the documents, placed into their final clusters.
     */
    public static KMeansClustering createClusters(Integer k, Set<DocVec> docs) {
        KMeansClustering kmc = new KMeansClustering(k, docs);
        kmc.makeClusters();

        return kmc;
    }

    /**
     * Ranks the clusters according to their language-model score for the
     * specified query (see {@link #clusterRating(int, java.lang.String[])}).
     * @param query
     *    Query to compare with the clusters.  This query should be broken up
     *    by term in an array.
     * @return
     *    Map from rating to cluster, iterated highest rating first.  Note
     *    that clusters with <em>identical</em> ratings collapse into a single
     *    entry (the later-indexed cluster wins), because ratings are the map
     *    keys.
     */
    public NavigableMap<BigDecimal, DocCluster> rankClusters(String[] query) {
        TreeMap<BigDecimal, DocCluster> rankMap =
                new TreeMap<BigDecimal, DocCluster>();

        for (int i = 0; i < this.docClusters.length; i++) {
            rankMap.put(clusterRating(i, query), this.docClusters[i]);
        }

        return rankMap.descendingMap();
    }

    /**
     * Retrieves the document clusters in this clustering.
     * @return
     *    Document clusters in this clustering.
     */
    public DocCluster[] getDocClusters() {
        return this.docClusters;
    }

    /**
     * Prints the clusters' data to a print stream, one numbered section per
     * cluster.
     * @param stream
     *    Stream to print the cluster data.
     */
    public void printClusters(PrintStream stream) {
        for (int i = 0; i < this.docClusters.length; i++) {
            stream.println(i + ":");
            this.docClusters[i].printDocs(stream);
            stream.println();
        }
    }

    /**
     * Computes the total number of words that appear in documents in this
     * clustering.
     * @return
     *    Total number of words in this clustering.
     */
    public BigDecimal numTotalWords() {
        BigDecimal numTotalWords = BigDecimal.ZERO;
        for (DocCluster cluster : this.docClusters) {
            numTotalWords = numTotalWords.add(cluster.numTotalWords());
        }

        return numTotalWords;
    }

    /**
     * Computes the frequency of a word across every cluster in this
     * clustering.
     * @param word
     *    Word whose frequency we want to find.
     * @return
     *    The frequency of the word.
     */
    public BigDecimal freqInCollection(String word) {
        BigDecimal freq = BigDecimal.ZERO;
        for (DocCluster cluster : this.docClusters) {
            freq = freq.add(cluster.freqInCluster(word));
        }

        return freq;
    }

    /**
     * Computes the rating for the cluster with the specified ID for the given
     * query: the product of the smoothed per-term probabilities.  A helper
     * method for {@link KMeansClustering#rankClusters(java.lang.String[])}.
     * An empty query yields a rating of {@link BigDecimal#ONE}.
     * @param clusterId
     *    ID of the cluster to be scored.
     * @param query
     *    Query to use in scoring the cluster.
     * @return
     *    The score of the cluster.
     */
    private BigDecimal clusterRating(int clusterId, String[] query) {
        BigDecimal rating = BigDecimal.ONE;
        for (String term : query) {
            rating = rating.multiply(clusterWordProb(clusterId, term));
        }

        return rating;
    }

    /**
     * Smoothed probability of {@code word} under the cluster's language
     * model: a weighted mix of the cluster MLE and the collection MLE, with
     * weights {@code size/(size+LAMBDA)} and {@code LAMBDA/(size+LAMBDA)}.
     * @param clusterId
     *    ID of the cluster whose model is consulted.
     * @param word
     *    Word to score.
     * @return
     *    The smoothed word probability.
     */
    private BigDecimal clusterWordProb(int clusterId, String word) {
        DocCluster cluster = this.docClusters[clusterId];
        BigDecimal clustSize = cluster.numTotalWords();
        // Denominator is clustSize + LAMBDA, never zero since LAMBDA = 1.
        BigDecimal denom = clustSize.add(LAMBDA);

        BigDecimal clustMleWt = clustSize.divide(denom,
                DocVec.DEFAULT_PRECISION, RoundingMode.UP);
        BigDecimal collMleWt = LAMBDA.divide(denom,
                DocVec.DEFAULT_PRECISION, RoundingMode.UP);

        BigDecimal weightedClustMle = clustMleWt.multiply(
                cluster.maxLikeEst(word));
        BigDecimal weightedCollMle = collMleWt.multiply(
                collMaxLikeEst(word));

        return weightedClustMle.add(weightedCollMle);
    }

    /**
     * Maximum likelihood estimate of {@code word} over the whole collection:
     * its frequency divided by the total word count.
     * @param word
     *    Word to estimate.
     * @return
     *    The collection MLE, or {@link BigDecimal#ZERO} for an empty
     *    collection.
     */
    private BigDecimal collMaxLikeEst(String word) {
        BigDecimal collLength = numTotalWords();
        // Guard against an empty collection; dividing by zero here would
        // throw ArithmeticException.
        if (collLength.compareTo(BigDecimal.ZERO) == 0) {
            return BigDecimal.ZERO;
        }

        return freqInCollection(word).divide(collLength,
                DocVec.DEFAULT_PRECISION, RoundingMode.UP);
    }

    /**
     * Runs reassignment passes until the clustering converges.  Standard
     * k-means convergence is "no document changed cluster in a full pass";
     * the previous condition stopped when two consecutive passes moved the
     * SAME number of documents, which could declare convergence while
     * documents were still changing clusters.  {@link #MAX_ITERATIONS} caps
     * the loop in case assignments oscillate forever.
     */
    private void makeClusters() {
        int iterations = 0;
        while (moveClusters() > 0 && ++iterations < MAX_ITERATIONS) {
            // Keep reassigning until a pass moves nothing.
        }
    }

    /**
     * Initializes the clusters and deals the documents into them round-robin
     * as the starting assignment.
     * @param docs
     *    Documents to cluster.
     */
    private void setUpMembers(Set<DocVec> docs) {
        TreeSet<String> termSet = getAllTerms(docs);
        docs = prepareForClustering(docs, termSet);
        this.invDocFreqs = getIdfs(docs, termSet);
        this.docClusters = new DocCluster[this.k];
        for (int i = 0; i < this.k; i++) {
            docClusters[i] = new DocCluster();
        }

        // Round-robin initial assignment: doc 0 -> cluster 0, doc 1 ->
        // cluster 1, ..., wrapping around after k documents.
        int counter = 0;
        for (DocVec doc : docs) {
            if (counter >= this.k) {
                counter = 0;
            }
            this.docClusters[counter].addDocVec(doc);
            counter++;
        }
    }

    /**
     * Computes the inverse document frequency of every term across the given
     * documents.
     * @param docVecs
     *    Documents to scan.
     * @param termSet
     *    Every term appearing in those documents.
     * @return
     *    Map from term to its IDF.
     */
    private TreeMap<String, BigDecimal> getIdfs(Set<DocVec> docVecs,
            Set<String> termSet) {
        TreeMap<String, BigDecimal> idfMap = emptyFreqMap(termSet);

        for (String term : termSet) {
            BigDecimal docFreq = BigDecimal.ZERO;
            for (DocVec dv : docVecs) {
                if (dv.containsNonZeroFreqTerm(term)) {
                    docFreq = docFreq.add(BigDecimal.ONE);
                }
            }
            idfMap.put(term, calcIdf(docFreq));
        }

        return idfMap;
    }

    /**
     * Performs one full reassignment pass: recomputes each cluster's mean,
     * then moves every document to the cluster whose mean it is most similar
     * to.
     * @return
     *    Number of documents that changed cluster in this pass.
     */
    private int moveClusters() {
        int numMoves = 0;
        for (int i = 0; i < this.docClusters.length; i++) {
            this.docClusters[i].resetMean();
        }

        for (int i = 0; i < this.docClusters.length; i++) {
            Iterator<DocVec> dvIt =
                    this.docClusters[i].getDocVecSet().iterator();
            while (dvIt.hasNext()) {
                DocVec docVec = dvIt.next();
                int nearestIndex = nearestClusterMean(docVec);
                if (nearestIndex != i) {
                    this.docClusters[i].removeDocVec(docVec);
                    this.docClusters[nearestIndex].addDocVec(docVec);
                    // Restart iteration: the backing set was just modified,
                    // so continuing with the old iterator would throw
                    // ConcurrentModificationException.
                    dvIt = this.docClusters[i].getDocVecSet().iterator();
                    numMoves++;
                }
            }
        }

        return numMoves;
    }

    /**
     * Finds the cluster whose current mean is most similar (by cosine
     * similarity of the tf-idf weighted vector) to the given document.
     * @param docVec
     *    Document to place.
     * @return
     *    Index of the nearest cluster; defaults to 0 if no cluster yields a
     *    usable similarity.
     */
    private int nearestClusterMean(DocVec docVec) {
        DocVec tfIdfDocVec = docVec.tfIdfWeighted(this.invDocFreqs);
        int currentIndex = 0;
        // Cosine similarity is >= -1, so any real similarity beats this.
        BigDecimal currentSimilarity = BigDecimal.valueOf(-1);
        for (int i = 0; i < this.docClusters.length; i++) {
            BigDecimal thisSimilarity = tfIdfDocVec.cosineSimilarity(
                    this.docClusters[i].getCurrentMean());
            // cosineSimilarity may return null (e.g. a degenerate mean —
            // see DocVec); skip such clusters.
            if (thisSimilarity != null
                    && thisSimilarity.compareTo(currentSimilarity) > 0) {
                currentIndex = i;
                currentSimilarity = thisSimilarity;
            }
        }

        return currentIndex;
    }

    /**
     * Computes an inverse document frequency from a document frequency as
     * {@code numDocs / docFreq}.
     * @param docFreq
     *    Number of documents containing the term.
     * @return
     *    The IDF, or {@link BigDecimal#ZERO} when the term appears nowhere.
     */
    private BigDecimal calcIdf(BigDecimal docFreq) {
        if (docFreq.compareTo(BigDecimal.ZERO) == 0) {
            return BigDecimal.ZERO;
        }

        return this.numDocs.divide(docFreq, DocVec.DEFAULT_PRECISION,
                RoundingMode.UP);
    }

    /**
     * Builds a frequency map with every given term mapped to zero.
     * @param termSet
     *    Terms to seed the map with.
     * @return
     *    Map from each term to {@link BigDecimal#ZERO}.
     */
    private static TreeMap<String, BigDecimal> emptyFreqMap(Set<String> termSet) {
        TreeMap<String, BigDecimal> freq = new TreeMap<String, BigDecimal>();
        for (String term : termSet) {
            freq.put(term, BigDecimal.ZERO);
        }

        return freq;
    }

    /**
     * Collects every term that appears in any of the given documents.
     * @param docVecs
     *    Documents to scan.
     * @return
     *    Sorted set of all terms.
     */
    private static TreeSet<String> getAllTerms(Set<DocVec> docVecs) {
        TreeSet<String> termSet = new TreeSet<String>();
        for (DocVec docVec : docVecs) {
            termSet.addAll(docVec.getTerms());
        }

        return termSet;
    }

    /**
     * Expands every document vector to cover the full term set so that all
     * vectors share the same dimensions.  NOTE: mutates the supplied
     * {@code DocVec} instances in place via {@code expand}.
     * @param docVecs
     *    Documents to prepare.
     * @param termSet
     *    Full vocabulary of the collection.
     * @return
     *    Sorted set of the (expanded) document vectors.
     */
    private static TreeSet<DocVec> prepareForClustering(Set<DocVec> docVecs,
            Set<String> termSet) {
        TreeSet<DocVec> preparedDocVecs = new TreeSet<DocVec>();
        for (DocVec docVec : docVecs) {
            docVec.expand(termSet);
            preparedDocVecs.add(docVec);
        }

        return preparedDocVecs;
    }
}
