﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;

namespace scomA3proj
{
    public class DocumentKeywordFinder_James : IDocumentKeywordFinder_James
    {
        /// <summary>
        /// splitTokens - all non letter/number characters used to tokenize the document content.
        /// Also used later to filter separator artifacts out of the final keyword list.
        /// </summary>
        private static string[] splitTokens = { " ", "-", "<", ">", "&", "*", "_", "{", "}", "[", "]", "`", "~", ":", ";", "!", "\\", "|", "@", "#", "$", "%", "^", "(", ")", "+", "=", ",", ".", "/", "\"", "\t", "\n", "\r" };

        /// <summary>
        /// This web service function takes a set of documents and determines the possible keywords for
        /// the document using simple document analysis techniques. The strategy is as follows:
        /// 1. For each url, fetch the html and reduce it to CONTENT only:
        ///     1a. Using Regex, the head tag and its contents are removed, as well as all
        ///     script and style tags and their content. Finally all other html tags are
        ///     removed, leaving behind primarily content.
        ///     1b. The content is then converted to all lower case and tokenized by all special characters and whitespace.
        ///     1c. The tokens are then stored in an inverted index.
        /// 2. Compute the inverted index, see <seealso cref="ConstructInvertedIndex"/>. This is useful for obtaining
        /// metrics that take into account all documents in the set.
        /// 3. For each document, compute the term frequency * inverse document frequency (tf-idf) for each token in the
        /// document.
        ///     3a. tf is term frequency - the number of times the word occurs in a document,
        ///     but capped off when tf is large to avoid giving more weight to very common terms:
        ///     if(tf > 7)
        ///         tf = 7 + (int)Math.Log(tf - 7);
        ///     3b. idf is inverse document frequency (idf) - this is defined as:
        ///     idf = Math.Log((numDocsInCorpus + 1.0) / numDocsInCorpusWithTerm)
        ///     The goal is to give a lower value to terms that are present in many documents.
        ///     3c. tf*idf allows for finding terms that are frequent in a document, but not frequent in many
        ///     documents.
        /// </summary>
        /// <param name="documentUrls">The document url list.</param>
        /// <param name="MaxNumKeywords">Maximum number of keywords to return per document.</param>
        /// <returns>A list of DocumentResults, 1 result per the original documents submitted.</returns>
        public List<DocumentResult> FindKeywords(List<string> documentUrls, int MaxNumKeywords)
        {
            //Final result.
            List<DocumentResult> res = new List<DocumentResult>();

            //mapping of document url to its tokens.
            Dictionary<string, List<string>> docUrlToContentMap = new Dictionary<string, List<string>>();

            //mapping of document url to its tag-stripped content (kept for date extraction).
            Dictionary<string, string> docContent = new Dictionary<string, string>();

            //Get all document responses in one batch.
            MultithreadedHttpRequests_James mthr = new MultithreadedHttpRequests_James();
            List<string> origContent = mthr.getHttpResponses(documentUrls, 1);

            //One tokenizer instance suffices for every document (hoisted out of the loop).
            Tokenizer_James tok = new Tokenizer_James();
            for (int i = 0; i < documentUrls.Count; i++)
            {
                //Parse out any html to get content only, then tokenize the document.
                string currDocContent = getContentHTMLDoc(origContent[i]);
                List<string> currDocTokens = tok.Tokenize(currDocContent, splitTokens.ToList(), true, true, true, true);
                //Add the document to the hash table and content list.
                docContent.Add(documentUrls[i], currDocContent);
                docUrlToContentMap.Add(documentUrls[i], currDocTokens);
            }

            //Construct the inverted index based on the given content(tokenized).
            Dictionary<string, DocIndexInfo> invIndex = ConstructInvertedIndex(docUrlToContentMap);

            //Corpus size is loop-invariant.
            int numDocs = documentUrls.Count;

            //Find the keywords based off tf-idf.
            foreach (string url in documentUrls)
            {
                //Document retrieval error.
                //NOTE(review): relies on an "error" sentinel key presumably produced upstream —
                //confirm against getHttpResponses / caller behaviour.
                if (docUrlToContentMap.Count == 1 && docUrlToContentMap.ContainsKey("error"))
                {
                    res.Add(new DocumentResult(url, DateTime.Now, new List<string>(), DocumentResult.ResultStatus.UrlNotFound));
                    continue;
                }

                List<string> tokens = docUrlToContentMap[url];
                List<KeywordWeight> weights = new List<KeywordWeight>();
                HashSet<string> usedTokens = new HashSet<string>();

                //iterate over all tokens in the document and find the tf-idf for each distinct term.
                foreach (string token in tokens)
                {
                    //Normalize exactly like indexDocument does, so the index lookup below
                    //cannot miss (bug fix: the raw token was previously used as the key).
                    string key = token.ToLower().Trim();

                    //make sure the term has not already been weighted.
                    if (!usedTokens.Add(key)) continue;

                    DocIndexInfo termInfo;
                    if (!invIndex.TryGetValue(key, out termInfo)) continue;

                    //Corpus attribute: number of documents containing the term.
                    int numDocsWithTerm = termInfo.Documents.Count;

                    //find this document's entry for the term.
                    foreach (DocInfo doc in termInfo.Documents)
                    {
                        if (doc.Url == url)
                        {
                            int tf = doc.NumOccurances;
                            //cap tf so very common terms do not dominate.
                            if (tf > 7) tf = 7 + (int)Math.Log(tf - 7);
                            //Bug fix: force floating point division; the old integer division
                            //truncated the idf ratio (often down to 1, making idf zero).
                            double tfidf = Math.Log((numDocs + 1.0) / numDocsWithTerm) * tf;
                            //Add it to the weights list.
                            weights.Add(new KeywordWeight(token, tfidf));
                            break;
                        }
                    }
                }

                //sort the terms by weight (KeywordWeight sorts descending) and take the
                //top MaxNumKeywords, skipping any tokenizer separator artifacts.
                weights.Sort();
                List<string> keywords = new List<string>();
                for (int i = 0; i < weights.Count && keywords.Count < MaxNumKeywords; i++)
                {
                    if (!splitTokens.Contains(weights[i].Keyword))
                    {
                        keywords.Add(weights[i].Keyword);
                    }
                }

                //add result to res.
                res.Add(new DocumentResult(url, parseDateTime(docContent[url]), keywords, DocumentResult.ResultStatus.Ok));
            }

            return res;
        }

        /// <summary>
        /// 1. The inverted index is composed of the following elements(with example)
        ///     {"word1"}-->{ NumberDocumentsWithTerm(2), TotalOccurances(8)
        ///                     { document1(www.d1.com), NumOccurances(5) }
        ///                     { document2(www.d2.com), NumOccurances(3) }
        ///                 }
        ///     {"word2"}-->{ NumberDocumentsWithTerm(1), TotalOccurances(4)
        ///                     { document1(www.d1.com), NumOccurances(4) }
        ///                 }
        ///     1a. By constructing the inverted index, we allow for computing metrics that depend on the
        ///     entire document set, which can vastly improve processing results and allows for extensions
        ///     such as search later on.
        /// </summary>
        /// <param name="docUrlToContentMap">The document url mapped to the tokenized document content.</param>
        /// <returns>The inverted index: term --> corpus-wide information for that term.</returns>
        public Dictionary<string, DocIndexInfo> ConstructInvertedIndex(Dictionary<string, List<string>> docUrlToContentMap)
        {
            Dictionary<string, DocIndexInfo> invIndex = new Dictionary<string, DocIndexInfo>();

            foreach (KeyValuePair<string, List<string>> entry in docUrlToContentMap)
            {
                indexDocument(entry.Key, entry.Value, ref invIndex);
            }
            return invIndex;
        }

        /// <summary>
        /// This function indexes a single document into an inverted index and merges with the corpus index.
        /// </summary>
        /// <param name="docUrl">The document url.</param>
        /// <param name="tokens">The document tokens.</param>
        /// <param name="InvIndex">The inverted index for the entire document corpus(ref).</param>
        private void indexDocument(string docUrl, List<string> tokens, ref Dictionary<string, DocIndexInfo> InvIndex)
        {
            //Per-document index: each entry holds exactly one DocInfo (this document).
            Dictionary<string, DocIndexInfo> newIndex = new Dictionary<string, DocIndexInfo>();

            //foreach token, add to index if not exists, otherwise increment the number of occurances.
            foreach (string s in tokens)
            {
                string newS = s.ToLower().Trim();
                DocIndexInfo doc1;
                if (newIndex.TryGetValue(newS, out doc1))
                {
                    doc1.Documents[0].incrementNumOccurances();
                }
                else
                {
                    DocIndexInfo dii = new DocIndexInfo();
                    dii.addDocument(new DocInfo(docUrl, 1));
                    newIndex.Add(newS, dii);
                }
            }

            //Merge with document corpus inverted index.
            foreach (KeyValuePair<string, DocIndexInfo> entry in newIndex)
            {
                DocIndexInfo invIndexDoc;
                if (!InvIndex.TryGetValue(entry.Key, out invIndexDoc))
                {
                    invIndexDoc = new DocIndexInfo();
                    InvIndex.Add(entry.Key, invIndexDoc);
                }
                invIndexDoc.addDocument(entry.Value.Documents[0]);
            }
        }

        /// <summary>
        /// Parses out all the html tags that are not part of the page content.
        /// </summary>
        /// <param name="htmldoc">The html document to parse.</param>
        /// <returns>The content only.</returns>
        private string getContentHTMLDoc(string htmldoc)
        {
            //Flatten whitespace so the non-greedy patterns below can span former line breaks.
            string docContent = htmldoc.Replace('\n', ' ').Replace('\r', ' ').Replace('\t', ' ');

            //Strip non-content sections first, then any remaining tags.
            //Bug fix: match case-insensitively; previously only an uppercase <SCRIPT>
            //variant was special-cased and <HEAD>/<STYLE>/<Script> etc. slipped through.
            docContent = Regex.Replace(docContent, "<head.*?</head>", " ", RegexOptions.IgnoreCase);
            docContent = Regex.Replace(docContent, "<style.*?</style>", " ", RegexOptions.IgnoreCase);
            docContent = Regex.Replace(docContent, "<script.*?</script>", " ", RegexOptions.IgnoreCase);
            docContent = Regex.Replace(docContent, "<.*?>", " ");

            return docContent;
        }

        /// <summary>
        /// Parses the date times out of a document.
        /// Currently this function returns the first successfully parsed date encountered,
        /// or new DateTime(0) when no date is found.
        /// </summary>
        /// <param name="content">The document content.</param>
        /// <returns>The first date encountered, or new DateTime(0) if none.</returns>
        private DateTime parseDateTime(string content)
        {
            //f0: numeric dates, treated as month/day/year (groups 1=month, 2=day, 3=year).
            string f0 = "(?:(\\d{1,2})/(\\d{1,2})/(\\d{2,4}))";
            //f1: "12 january 2005" style (groups 4=day, 5=month, 6=year).
            string f1 = "(?:(\\s\\d{1,2})\\s+(jan(?:uary){0,1}\\.{0,1}|feb(?:ruary){0,1}\\.{0,1}|mar(?:ch){0,1}\\.{0,1}|apr(?:il){0,1}\\.{0,1}|may\\.{0,1}|jun(?:e){0,1}\\.{0,1}|jul(?:y){0,1}\\.{0,1}|aug(?:ust){0,1}\\.{0,1}|sep(?:tember){0,1}\\.{0,1}|oct(?:ober){0,1}\\.{0,1}|nov(?:ember){0,1}\\.{0,1}|dec(?:ember){0,1}\\.{0,1})\\s+(\\d{2,4}))";
            //f2: "january 12, 2005" style (groups 7=month, 8=day, 9=year).
            string f2 = "(?:(jan(?:uary){0,1}\\.{0,1}|feb(?:ruary){0,1}\\.{0,1}|mar(?:ch){0,1}\\.{0,1}|apr(?:il){0,1}\\.{0,1}|may\\.{0,1}|jun(?:e){0,1}\\.{0,1}|jul(?:y){0,1}\\.{0,1}|aug(?:ust){0,1}\\.{0,1}|sep(?:tember){0,1}\\.{0,1}|oct(?:ober){0,1}\\.{0,1}|nov(?:ember){0,1}\\.{0,1}|dec(?:ember){0,1}\\.{0,1})\\s+([0-9]{1,2})[\\s,]+(\\d{2,4}))";

            //sentinel: no date found yet.
            DateTime firstDate = new DateTime(0);

            MatchCollection mc = Regex.Matches(content, f0 + "|" + f1 + "|" + f2, RegexOptions.IgnoreCase | RegexOptions.IgnorePatternWhitespace);

            foreach (Match m in mc)
            {
                try
                {
                    //Exactly one of the three alternatives matched; pick its groups.
                    string year, day, month;
                    if (!m.Groups[4].Value.Equals(""))
                    {
                        day = m.Groups[4].Value;
                        month = m.Groups[5].Value;
                        year = m.Groups[6].Value;
                    }
                    else if (!m.Groups[7].Value.Equals(""))
                    {
                        month = m.Groups[7].Value;
                        day = m.Groups[8].Value;
                        year = m.Groups[9].Value;
                    }
                    else
                    {
                        month = m.Groups[1].Value;
                        day = m.Groups[2].Value;
                        year = m.Groups[3].Value;
                    }

                    int dayint = Convert.ToInt32(day);
                    int yearint = Convert.ToInt32(year);
                    //two/three digit years are assumed to be 19xx (matches previous behaviour).
                    if (yearint < 1000) yearint = yearint + 1900;
                    int monthint = monthToInt(month);

                    //An invalid combination (e.g. month 13) throws and is skipped below.
                    firstDate = new DateTime(yearint, monthint, dayint);
                    break; //first valid date wins.
                }
                catch (Exception)
                {
                    //Best effort: a candidate that fails to parse is skipped, not fatal.
                }
            }

            return firstDate;
        }

        /// <summary>
        /// Converts a month name ("jan", "January.", ...) or a numeric month string to 1-12.
        /// </summary>
        /// <param name="month">Month name or number as matched by the date regexes.</param>
        /// <returns>The month number 1-12, or throws FormatException for unparseable input.</returns>
        private int monthToInt(string month)
        {
            string m = month.ToLower().Trim();
            if (m.StartsWith("jan")) return 1;
            if (m.StartsWith("feb")) return 2;
            if (m.StartsWith("mar")) return 3;
            if (m.StartsWith("apr")) return 4;
            if (m.StartsWith("may")) return 5;
            if (m.StartsWith("jun")) return 6;
            if (m.StartsWith("jul")) return 7;
            if (m.StartsWith("aug")) return 8;
            if (m.StartsWith("sep")) return 9;
            if (m.StartsWith("oct")) return 10;
            if (m.StartsWith("nov")) return 11;
            if (m.StartsWith("dec")) return 12;
            //not a name: the numeric month from the mm/dd/yyyy format.
            return Convert.ToInt32(month);
        }
    }

    /// <summary>
    /// Associates a keyword with its computed tf-idf weight.
    /// Implements IComparable so that List.Sort() orders keywords from
    /// heaviest to lightest (descending weight).
    /// </summary>
    public class KeywordWeight : IComparable
    {
        /// <summary>The keyword text.</summary>
        public string Keyword { get; private set; }

        /// <summary>The weight (tf-idf score) assigned to the keyword.</summary>
        public double Weight { get; private set; }

        public KeywordWeight(string keyword, double weight)
        {
            Keyword = keyword;
            Weight = weight;
        }

        /// <summary>
        /// Descending comparison: a heavier keyword sorts before a lighter one.
        /// </summary>
        /// <param name="obj">The other KeywordWeight to compare against.</param>
        /// <returns>-1 when this is heavier, 1 when lighter, 0 when equal.</returns>
        public int CompareTo(object obj)
        {
            KeywordWeight that = (KeywordWeight)obj;
            if (that.Weight > Weight) return 1;
            if (that.Weight < Weight) return -1;
            return 0;
        }
    }

    /// <summary>
    /// Holds the statistics of a single term within a single document.
    /// Associated with the term via the inverted-index dictionary.
    /// </summary>
    public class DocInfo
    {
        /// <summary>Url of the document containing the term.</summary>
        public string Url { get; private set; }

        /// <summary>How many times the term occurs in this document.</summary>
        public int NumOccurances { get; private set; }

        /// <param name="url">The document url.</param>
        /// <param name="numoccur">Initial occurance count for the term.</param>
        public DocInfo(string url, int numoccur)
        {
            Url = url;
            NumOccurances = numoccur;
        }

        /// <summary>
        /// Bumps the occurance count of the term by one.
        /// </summary>
        public void incrementNumOccurances() => NumOccurances++;
    }

    /// <summary>
    /// This class holds the global corpus metrics for a given term.
    /// This is associated with a term through the Dictionary inverted index.
    /// </summary>
    public class DocIndexInfo
    {
        /// <summary>
        /// Total number of occurances of the term across all listed documents.
        /// Bug fix: this is now computed live from the document list. The old
        /// snapshot counter was only updated inside addDocument, so increments
        /// made later via DocInfo.incrementNumOccurances (as indexDocument does
        /// per token) left it stale.
        /// </summary>
        public int TotalOccurances
        {
            get
            {
                int total = 0;
                foreach (DocInfo d in Documents)
                {
                    total += d.NumOccurances;
                }
                return total;
            }
        }

        /// <summary>
        /// The list of documents containing the term.
        /// </summary>
        public List<DocInfo> Documents;

        public DocIndexInfo()
        {
            Documents = new List<DocInfo>();
        }

        /// <summary>
        /// Adds a document entry for this term; its occurances are
        /// automatically reflected in TotalOccurances.
        /// </summary>
        /// <param name="d">The per-document term information to add.</param>
        public void addDocument(DocInfo d)
        {
            Documents.Add(d);
        }
    }


}
