﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using DocumentMining.Topic;
using System.Text.RegularExpressions;

namespace TextMining.Service
{
    // NOTE(review): empty placeholder — nothing visible in this file uses it.
    // The actual k-means implementation lives in DocumnetClustering below;
    // consider removing this type or moving that logic here.
    public class KMeans
    {
    }

    #region Centroid
    /// <summary>
    /// Represents a single k-means cluster. The clustering code stores the
    /// centroid's position in the vector space of
    /// <c>GroupedDocument[0].VectorSpace</c> (see
    /// <c>DocumnetClustering.CalculateMeanPoints</c>); the list as a whole
    /// holds the documents currently assigned to this cluster.
    /// </summary>
    public class Centroid
    {
        /// <summary>Documents currently assigned to this cluster.</summary>
        public List<DocumentVector> GroupedDocument { get; set; }
    }
    #endregion

    #region DocumentCollection

    /// <summary>
    /// A minimal corpus wrapper: the raw text of every document that is to be
    /// represented in vector space and clustered.
    /// </summary>
    class DocumentCollection
    {
        /// <summary>Raw content of each document in the corpus.</summary>
        public List<string> DocumentList { get; set; }
    }
    #endregion

    #region DocumentVector

    /// <summary>
    /// A document (or any other object to be clustered) together with its
    /// tf-idf representation in vector space.
    /// </summary>
    public class DocumentVector
    {
        /// <summary>The raw text of the document being clustered.</summary>
        public string Content { get; set; }

        /// <summary>tf*idf weight of each distinct corpus term for this document.</summary>
        public float[] VectorSpace { get; set; }

        /// <summary>Key words attached to this document by the clustering code.</summary>
        public List<string> KeyInDoc { get; set; }

        /// <summary>Titles attached to this document.
        /// NOTE(review): the "Dosc" typo is part of the public interface and is
        /// kept so existing callers keep compiling.</summary>
        public List<string> TitleInDosc { get; set; }

        /// <summary>The website this document originated from.</summary>
        public Website MyWebsite { get; set; }
    }
    #endregion

    #region StopWordsHandler

    /// <summary>
    /// Filters stop words out of the term stream before the vector space is
    /// built (see VectorSpaceModel.ProcessDocumentCollection).
    /// </summary>
    public class StopWordsHandler
    {
        // You can define other stop words here. Matching is case-sensitive,
        // so by default only the exact token "The" is filtered — "the" is not.
        public static string[] stopWordsList = new string[] { "The" };

        /// <summary>
        /// Returns true when <paramref name="word"/> is a stop word.
        /// NOTE(review): the "Stotp" typo is part of the public interface and
        /// is kept so existing callers keep compiling.
        /// </summary>
        public static Boolean IsStotpWord(string word)
        {
            // Contains already yields the boolean we need; the original
            // wrapped it in a redundant if/else. Ordinal, case-sensitive
            // comparison — identical semantics to the original.
            return stopWordsList.Contains(word);
        }
    }
    #endregion

    #region VectorSpaceModel
    /// <summary>
    /// Builds tf-idf vector-space representations for a document corpus.
    /// NOTE: not thread-safe — corpus state is held in static fields that are
    /// reset on every call to ProcessDocumentCollection.
    /// </summary>
    class VectorSpaceModel
    {
        private static HashSet<string> distinctTerms;
        private static List<string> documentCollection;
        // Capturing split: the delimiter characters are returned as tokens
        // too, hence the punctuation clean-up in ProcessDocumentCollection.
        private static Regex r = new Regex("([ \\t{}():;. \n])");
        // idf depends only on the term and the corpus, so it is computed once
        // per term. The original recomputed it for every (document, term)
        // pair, making the whole build O(D^2 * T).
        private static Dictionary<string, float> idfCache;

        /// <summary>
        /// Prepares a collection of document in vector space
        /// </summary>
        /// <param name="collection">Document collection/corpus</param>
        /// <returns>List of, document in vector space</returns>
        public static List<DocumentVector> ProcessDocumentCollection(DocumentCollection collection)
        {
            distinctTerms = new HashSet<string>();
            documentCollection = collection.DocumentList;
            idfCache = new Dictionary<string, float>();

            /*
             * Finds out the total no of distinct terms in the whole corpus so that it will be easy
             * to represent the document in the vector space. The dimension of the vector space will
             * be equal to the total no of distinct terms.
             */
            foreach (string documentContent in collection.DocumentList)
            {
                foreach (string term in r.Split(documentContent))
                {
                    if (!StopWordsHandler.IsStotpWord(term))
                        distinctTerms.Add(term);
                }
            }

            // The capturing split emits the delimiters themselves as tokens;
            // remove those from the term set.
            List<string> removeList = new List<string>() { "\r", "\n", "(", ")", "[", "]", "{", "}", "", ".", " " };
            foreach (string s in removeList)
            {
                distinctTerms.Remove(s);
            }

            List<DocumentVector> documentVectorSpace = new List<DocumentVector>();
            foreach (string document in documentCollection)
            {
                // Tokenize each document exactly once; the original re-split
                // the document twice per term.
                string[] tokens = r.Split(document);

                float[] space = new float[distinctTerms.Count];
                int count = 0;
                foreach (string term in distinctTerms)
                {
                    // tf*idf weight of term t in document d.
                    space[count] = FindTermFrequency(tokens, term) * FindInverseDocumentFrequency(term);
                    count++;
                }

                DocumentVector documentVector = new DocumentVector();
                documentVector.Content = document;
                documentVector.VectorSpace = space;
                documentVectorSpace.Add(documentVector);
            }

            return documentVectorSpace;
        }

        #region Calculate TF-IDF

        // Ratio of the no of occurrences of the term in the token list to the
        // total no of tokens in the document (case-insensitive match).
        private static float FindTermFrequency(string[] tokens, string term)
        {
            string upperTerm = term.ToUpper();
            int count = tokens.Count(s => s.ToUpper() == upperTerm);
            return (float)count / (float)tokens.Length;
        }

        // log of the ratio of total no of documents in the collection to the
        // no of documents containing the term; cached per term. For terms that
        // came out of the corpus itself the containing count is at least 1,
        // so the divide-by-zero case does not arise here. We could also use
        // Math.Log(count / (1 + documentCollection.Count)) to guard it.
        private static float FindInverseDocumentFrequency(string term)
        {
            float idf;
            if (idfCache.TryGetValue(term, out idf))
                return idf;

            string upperTerm = term.ToUpper();
            int count = documentCollection.Where(s => r.Split(s.ToUpper()).Contains(upperTerm)).Count();
            idf = (float)Math.Log((float)documentCollection.Count / (float)count);
            idfCache[term] = idf;
            return idf;
        }
        #endregion
    }
    #endregion

    #region SimilarityMatrics
    /// <summary>
    /// Vector similarity / distance measures used by the clustering code.
    /// NOTE(review): the "Matrics" typo is part of the public type name and is
    /// kept so existing callers keep compiling.
    /// </summary>
    public class SimilarityMatrics
    {
        #region Cosine Similarity
        /// <summary>
        /// Cosine similarity: dot(A,B) / (|A| * |B|).
        /// Returns 0 when either vector is all-zero (0/0 would be NaN).
        /// </summary>
        public static float FindCosineSimilarity(float[] vecA, float[] vecB)
        {
            float dotProduct = DotProduct(vecA, vecB);
            float magnitudeOfA = Magnitude(vecA);
            float magnitudeOfB = Magnitude(vecB);
            float result = dotProduct / (magnitudeOfA * magnitudeOfB);
            //when 0 is divided by 0 it shows result NaN so return 0 in such case.
            return float.IsNaN(result) ? 0 : result;
        }
        #endregion

        /// <summary>Dot product of two equal-length vectors.</summary>
        public static float DotProduct(float[] vecA, float[] vecB)
        {
            float dotProduct = 0;
            for (var i = 0; i < vecA.Length; i++)
            {
                dotProduct += vecA[i] * vecB[i];
            }

            return dotProduct;
        }

        /// <summary>
        /// Magnitude of the vector: the square root of the dot product of the
        /// vector with itself.
        /// </summary>
        public static float Magnitude(float[] vector)
        {
            return (float)Math.Sqrt(DotProduct(vector, vector));
        }

        #region Euclidean Distance
        /// <summary>
        /// Computes the similarity between two documents as the distance
        /// between their point representations. Is translation invariant.
        /// </summary>
        public static float FindEuclideanDistance(int[] vecA, int[] vecB)
        {
            float euclideanDistance = 0;
            for (var i = 0; i < vecA.Length; i++)
            {
                euclideanDistance += (float)Math.Pow(vecA[i] - vecB[i], 2);
            }

            return (float)Math.Sqrt(euclideanDistance);
        }
        #endregion

        #region Extended Jaccard
        /// <summary>
        /// Extended Jaccard (Tanimoto) coefficient, which combines properties
        /// of both cosine similarity and Euclidean distance:
        ///     dot(A,B) / (|A|^2 + |B|^2 - dot(A,B)).
        /// BUG FIX: the original divided by (|A| + |B| - dot), i.e. used the
        /// magnitudes instead of their squares, so identical vectors did not
        /// score 1. Returns 0 when both vectors are all-zero (0/0 case).
        /// </summary>
        public static float FindExtendedJaccard(float[] vecA, float[] vecB)
        {
            float dotProduct = DotProduct(vecA, vecB);
            // |v|^2 == dot(v, v); avoids a needless sqrt followed by squaring.
            float squaredMagnitudeOfA = DotProduct(vecA, vecA);
            float squaredMagnitudeOfB = DotProduct(vecB, vecB);

            float result = dotProduct / (squaredMagnitudeOfA + squaredMagnitudeOfB - dotProduct);
            return float.IsNaN(result) ? 0 : result;
        }
        #endregion
    }
    #endregion

    #region DocumnetClustering
    // K-means clustering over tf-idf document vectors.
    // NOTE(review): the "Documnet" typo is part of the public type name;
    // renaming it would break external callers.
    public static class DocumnetClustering
    {

        // Number of CheckStoppingCriteria calls in the current run; reset at
        // the top of PrepareDocumentCluster.
        private static int globalCounter = 0;
        // Mirrors globalCounter so the final value can be handed back through
        // PrepareDocumentCluster's ref parameter.
        private static int counter;
        /// <summary>
        /// Prepares the document cluster, Grouping of similar 
        /// type of text document is done here
        /// </summary>
        /// <param name="k">initial cluster center</param>
        /// <param name="documentCollection">document corpus</param>
        /// <param name="MyDocument">per-document key words; copied onto each DocumentVector's KeyInDoc during assignment</param>
        /// <param name="_counter">receives the number of iterations performed</param>
        /// <returns>the final cluster assignment, one Centroid per cluster</returns>


        public static List<Centroid> PrepareDocumentCluster(int k, List<DocumentVector> documentCollection, MyDocumentCollection MyDocument, ref int _counter)
        {
            globalCounter = 0;
            //prepares k initial centroid and assign one object randomly to each centroid
            List<Centroid> centroidCollection = new List<Centroid>();
            Centroid c;

            /*
             * Avoid repeation of random number, if same no is generated more than once same document is added to the next cluster 
             * so avoid it using HasSet collection
             */
            HashSet<int> uniqRand = new HashSet<int>();
            GenerateRandomNumber(ref uniqRand, k, documentCollection.Count);

            foreach (int pos in uniqRand)
            {
                c = new Centroid();
                c.GroupedDocument = new List<DocumentVector>();
                c.GroupedDocument.Add(documentCollection[pos]);
                centroidCollection.Add(c);
            }

            Boolean stoppingCriteria;
            List<Centroid> resultSet;
            List<Centroid> prevClusterCenter;

            InitializeClusterCentroid(out resultSet, centroidCollection.Count);

            do
            {
                // Keeps a reference (not a deep copy) to the previous centroid
                // list; centroidCollection itself is replaced below, but the
                // DocumentVector objects inside may still be shared.
                // NOTE(review): CalculateMeanPoints mutates VectorSpace arrays
                // in place, so prev/new may alias the same data — confirm the
                // stopping test still sees genuinely "previous" positions.
                prevClusterCenter = centroidCollection;
                int indexKey = 0;
                foreach (DocumentVector obj in documentCollection)
                {
                    // Assign each document to its most-similar centroid.
                    int index = FindClosestClusterCenter(centroidCollection, obj);
                    // KeyInDoc is recreated on every iteration, so only the
                    // entry added below survives.
                    obj.KeyInDoc = new List<string>();


                    #region Calculate Max words

                    if (MyDocument.KeyInDoc.Count > 0)
                    {
                        // indexKey tracks the document's position in
                        // documentCollection; assumes MyDocument.KeyInDoc is
                        // parallel to it — TODO confirm against caller.
                        obj.KeyInDoc.Add(MyDocument.KeyInDoc[indexKey]);
                    }

                    #endregion
                    resultSet[index].GroupedDocument.Add(obj);
                    indexKey += 1;
                }

                // NOTE(review): this fresh list is immediately overwritten by
                // the CalculateMeanPoints result on the next line, so the
                // initialization appears redundant.
                InitializeClusterCentroid(out centroidCollection, centroidCollection.Count());
                // Repositions centroids by writing each cluster's mean into
                // GroupedDocument[0].VectorSpace (in place — see the NOTE on
                // CalculateMeanPoints) and returns resultSet itself.
                centroidCollection = CalculateMeanPoints(resultSet);
                stoppingCriteria = CheckStoppingCriteria(prevClusterCenter, centroidCollection);
                if (!stoppingCriteria)
                {
                    //initialize the result set for next iteration
                    InitializeClusterCentroid(out resultSet, centroidCollection.Count);
                }


            } while (stoppingCriteria == false);

            _counter = counter;
            return resultSet;

        }

        /// <summary>
        /// Generates unique random numbers and also ensures the generated random number 
        /// lies with in a range of total no. of document
        /// </summary>
        /// <param name="uniqRand">receives the distinct document indices</param>
        /// <param name="k">requested number of cluster seeds</param>
        /// <param name="docCount">total number of documents; generated indices are in [0, docCount)</param>

        private static void GenerateRandomNumber(ref HashSet<int> uniqRand, int k, int docCount)
        {

            Random r = new Random();

            // NOTE(review): when k > docCount this fills the set with ALL
            // docCount indices, i.e. the caller gets fewer than k centroids
            // (one per document). Also loops forever if docCount == 0.
            if (k > docCount)
            {
                do
                {
                    int pos = r.Next(0, docCount);
                    uniqRand.Add(pos);

                } while (uniqRand.Count != docCount);
            }
            else
            {
                do
                {
                    int pos = r.Next(0, docCount);
                    uniqRand.Add(pos);

                } while (uniqRand.Count != k);
            }
        }

        /// <summary>
        /// Initialize the result cluster centroid for the next iteration, that holds the result to be returned
        /// </summary>
        /// <param name="centroid">receives a fresh list of count empty Centroids</param>
        /// <param name="count">number of clusters</param>
        private static void InitializeClusterCentroid(out List<Centroid> centroid, int count)
        {
            Centroid c;
            centroid = new List<Centroid>();
            for (int i = 0; i < count; i++)
            {
                c = new Centroid();
                c.GroupedDocument = new List<DocumentVector>();
                centroid.Add(c);
            }

        }

        /// <summary>
        /// Check the stopping criteria for the iteration, if centroid do not move their position it meets the criteria
        /// or if the global counter exist its predefined limit(minimum iteration threshold) than iteration terminates
        /// </summary>
        /// <param name="prevClusterCenter">centroids from the previous iteration</param>
        /// <param name="newClusterCenter">centroids after the latest reposition</param>
        /// <returns>true when iteration should stop</returns>
        private static Boolean CheckStoppingCriteria(List<Centroid> prevClusterCenter, List<Centroid> newClusterCenter)
        {

            globalCounter++;
            counter = globalCounter;
            // Hard iteration cap: stop unconditionally after 11000 checks.
            if (globalCounter > 11000)
            {
                return true;
            }

            else
            {
                Boolean stoppingCriteria;
                int[] changeIndex = new int[newClusterCenter.Count()]; //1 = centroid has moved 0 == centroid do not moved its position

                int index = 0;
                do
                {
                    // count = number of vector components that are unchanged
                    // for cluster `index`.
                    int count = 0;
                    if (newClusterCenter[index].GroupedDocument.Count == 0 && prevClusterCenter[index].GroupedDocument.Count == 0)
                    {
                        // Both empty: treated as unmoved (changeIndex stays 0).
                        index++;
                    }
                    else if (newClusterCenter[index].GroupedDocument.Count != 0 && prevClusterCenter[index].GroupedDocument.Count != 0)
                    {
                        // Compare the centroid positions component by
                        // component. Only GroupedDocument[0] is compared —
                        // that element carries the centroid's position.
                        for (int j = 0; j < newClusterCenter[index].GroupedDocument[0].VectorSpace.Count(); j++)
                        {
                            // NOTE(review): exact float == comparison; any
                            // rounding jitter keeps the loop running until
                            // the 11000-iteration cap.
                            if (newClusterCenter[index].GroupedDocument[0].VectorSpace[j] == prevClusterCenter[index].GroupedDocument[0].VectorSpace[j])
                            {
                                count++;
                            }

                        }

                        if (count == newClusterCenter[index].GroupedDocument[0].VectorSpace.Count())
                        {
                            changeIndex[index] = 0;
                        }
                        else
                        {
                            changeIndex[index] = 1;
                        }
                        index++;
                    }
                    else
                    {
                        // Exactly one side empty: skipped, changeIndex stays 0
                        // (counts as unmoved).
                        index++;
                        continue;

                    }


                } while (index < newClusterCenter.Count());

                // if changeIndex contains a 1 (some centroid moved), stopping criteria is set to false
                if (changeIndex.Where(s => (s != 0)).Select(r => r).Any())
                {
                    stoppingCriteria = false;
                }
                else
                    stoppingCriteria = true;

                return stoppingCriteria;
            }


        }

        // Returns the index of the closest cluster centroid by cosine
        // similarity; ties keep the lowest index. Only GroupedDocument[0]
        // is consulted — that element carries the centroid's position.
        private static int FindClosestClusterCenter(List<Centroid> clusterCenter, DocumentVector obj)
        {

            float[] similarityMeasure = new float[clusterCenter.Count()];

            for (int i = 0; i < clusterCenter.Count(); i++)
            {

                similarityMeasure[i] = SimilarityMatrics.FindCosineSimilarity(clusterCenter[i].GroupedDocument[0].VectorSpace, obj.VectorSpace);

            }

            int index = 0;
            float maxValue = similarityMeasure[0];
            for (int i = 0; i < similarityMeasure.Count(); i++)
            {
                //if document is similar assign the document to the lowest index cluster center to avoid the long loop
                if (similarityMeasure[i] > maxValue)
                {
                    maxValue = similarityMeasure[i];
                    index = i;

                }
            }
            return index;

        }

        // Reposition each centroid to the mean of its member documents.
        // NOTE(review): the mean is written into GroupedDocument[0].VectorSpace
        // IN PLACE. That DocumentVector is one of the corpus documents shared
        // with the caller's documentCollection, so this overwrites a real
        // document's vector — confirm this destructive update is intended.
        private static List<Centroid> CalculateMeanPoints(List<Centroid> _clusterCenter)
        {

            for (int i = 0; i < _clusterCenter.Count(); i++)
            {

                if (_clusterCenter[i].GroupedDocument.Count() > 0)
                {

                    for (int j = 0; j < _clusterCenter[i].GroupedDocument[0].VectorSpace.Count(); j++)
                    {
                        float total = 0;

                        foreach (DocumentVector vSpace in _clusterCenter[i].GroupedDocument)
                        {

                            total += vSpace.VectorSpace[j];

                        }

                        //reassign new calculated mean on each cluster center, It indicates the reposition of centroid
                        _clusterCenter[i].GroupedDocument[0].VectorSpace[j] = total / _clusterCenter[i].GroupedDocument.Count();

                    }

                }

            }

            return _clusterCenter;

        }
        /// <summary>
        /// Find Residual sum of squares it measures how well a cluster centroid represents the member of their cluster
        /// We can use the RSS value as stopping criteria of k-means algorithm when decreses in RSS value falls below a 
        /// threshold t for small t we can terminate the algorithm.
        /// </summary>
        private static void FindRSS(List<Centroid> newCentroid, List<Centroid> _clusterCenter)
        {
            //TODO:
        }

    }
    #endregion

}