﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using DocumentsTagger.DM;
using DocumentsTagger.TextMiningUtils;

namespace DocumentsTagger.MatchingAlgorithms
{
    /// <summary>
    /// Language-agnostic binary text classifier based on Bayesian inference
    /// with a twist intended to overcome language barriers and text variety:
    /// documents are learned into two groups -- "good" and "bad" -- and a new
    /// document is graded by how strongly its words correlate with the good
    /// group versus the bad group.
    /// For more information on the algorithm see http://documenttagger.codeplex.com
    /// </summary>
    class BetterBayesian : AMatchingAlgorithm
    {
        private readonly List<Document> r_GoodDocuments = new List<Document>();
        private readonly List<Document> r_BadDocuments = new List<Document>();

        // Global histogram of every valid (sanitized) word seen so far, with
        // separate occurrence counts for good and bad documents.
        private readonly Dictionary<string, WordHistogram> r_WordsHist = new Dictionary<string, WordHistogram>();

        // Words whose grade falls in the extreme 10% (either end); only these
        // contribute to a document's grade.
        private readonly List<string> r_ImportentWords = new List<string>();

        // Lowest grade observed among the learned good documents; a document
        // matches when its grade reaches this threshold.
        private float m_MinimalMatchScore = 0;

        // NOTE(review): these totals advance once per *distinct* word per learned
        // document (including words that fail IsValidWord), while the per-word
        // counters accumulate full occurrence counts. Preserved as-is, but worth
        // confirming against the algorithm description.
        private int m_TotalWordsInGoodDocs = 0;
        private int m_TotalWordsInBadDocs = 0;

        private readonly List<string> r_GoodWords = new List<string>();
        private readonly List<string> r_BadWords = new List<string>();

        /// <summary>Words most strongly correlated with the bad group.</summary>
        public List<string> BadWords
        {
            get { return r_BadWords; }
        }

        /// <summary>Words most strongly correlated with the good group.</summary>
        public List<string> GoodWords
        {
            get { return r_GoodWords; }
        }

        /// <summary>
        /// Returns true when the document's grade reaches the minimal score
        /// observed among the learned good documents.
        /// </summary>
        public override bool IsMatch(Document doc)
        {
            return calculateGrade(doc) >= m_MinimalMatchScore;
        }

        /// <summary>
        /// Learns a batch of good and bad documents, then recomputes the
        /// important-words list and the match threshold. Documents that were
        /// already learned (in either category) are skipped.
        /// </summary>
        public override void Learn(List<Document> GoodDocuments, List<Document> BadDocuments)
        {
            foreach (Document doc in GoodDocuments)
            {
                // BUG FIX: this was "return", which silently aborted the whole
                // batch as soon as one already-learned document was encountered.
                if (r_GoodDocuments.Contains(doc) || r_BadDocuments.Contains(doc))
                {
                    continue;
                }

                LearnGoodDocument(doc, TextMiningUtil.GetWordsFromFile(doc.FullPath));
            }

            foreach (Document doc in BadDocuments)
            {
                // BUG FIX: same "return" -> "continue" as above.
                if (r_GoodDocuments.Contains(doc) || r_BadDocuments.Contains(doc))
                {
                    continue;
                }

                LearnBadDocument(doc, TextMiningUtil.GetWordsFromFile(doc.FullPath));
            }

            findImportentWords();
            CalculateMinimalGrade();
        }

        /// <summary>
        /// Learns a single document into the given category, then recomputes the
        /// important-words list and the match threshold. Already-learned
        /// documents are ignored.
        /// </summary>
        public override void Learn(Document doc, AMatchingAlgorithm.Catagory catagory)
        {
            if (r_GoodDocuments.Contains(doc) || r_BadDocuments.Contains(doc))
            {
                return; // already learned -- nothing to do
            }

            Dictionary<string, int> wordsHist = TextMiningUtil.GetWordsFromFile(doc.FullPath);

            switch (catagory)
            {
                case AMatchingAlgorithm.Catagory.GOOD:
                    LearnGoodDocument(doc, wordsHist);
                    break;
                case AMatchingAlgorithm.Catagory.BAD:
                    LearnBadDocument(doc, wordsHist);
                    break;
                default:
                    break;
            }

            findImportentWords();
            CalculateMinimalGrade();
        }

        // Records the document as "bad" and folds its words into the histogram.
        private void LearnBadDocument(Document doc, Dictionary<string, int> wordsHist)
        {
            r_BadDocuments.Add(doc);
            learnWords(wordsHist, false);
        }

        // Records the document as "good" and folds its words into the histogram.
        private void LearnGoodDocument(Document doc, Dictionary<string, int> wordsHist)
        {
            r_GoodDocuments.Add(doc);
            learnWords(wordsHist, true);
        }

        // Shared body of LearnGoodDocument/LearnBadDocument (was duplicated):
        // sanitizes each word and accumulates its occurrence count into the
        // good or bad side of the global histogram.
        private void learnWords(Dictionary<string, int> wordsHist, bool isGood)
        {
            foreach (KeyValuePair<string, int> pair in wordsHist)
            {
                string senitizedWord = TextMiningUtil.SenityzeWord(pair.Key);

                if (isGood)
                {
                    m_TotalWordsInGoodDocs++;
                }
                else
                {
                    m_TotalWordsInBadDocs++;
                }

                WordHistogram hist;
                if (!r_WordsHist.TryGetValue(senitizedWord, out hist))
                {
                    // Only valid words enter the histogram; invalid ones are
                    // skipped (they still advance the totals above, preserving
                    // the original behavior).
                    if (!TextMiningUtil.IsValidWord(senitizedWord))
                    {
                        continue;
                    }

                    hist = new WordHistogram(senitizedWord);
                    r_WordsHist.Add(senitizedWord, hist);
                }

                if (isGood)
                {
                    hist.GoodCount += pair.Value;
                }
                else
                {
                    hist.BadCount += pair.Value;
                }
            }
        }

        // Re-derives the important-words list: words whose grades fall in the
        // lowest 10% of distinct grades (bad indicators) or the highest 10%
        // (good indicators). Does nothing when fewer than 3 distinct grades
        // exist -- the extremes would not be meaningful.
        private void findImportentWords()
        {
            r_GoodWords.Clear();
            r_BadWords.Clear();
            r_ImportentWords.Clear();

            Dictionary<float, List<WordHistogram>> wordsGradesDictionary = new Dictionary<float, List<WordHistogram>>();
            List<float> gradesList = new List<float>();

            // Bucket every known word by its grade.
            foreach (string word in r_WordsHist.Keys)
            {
                float grade = gradeWord(word);
                List<WordHistogram> bucket;
                if (!wordsGradesDictionary.TryGetValue(grade, out bucket))
                {
                    bucket = new List<WordHistogram>();
                    wordsGradesDictionary.Add(grade, bucket);
                    gradesList.Add(grade);
                }

                bucket.Add(r_WordsHist[word]);
            }

            if (gradesList.Count < 3)
            {
                return;
            }

            // Ascending sort: index 0 holds the most "bad" grade, the last
            // index holds the most "good" grade. (The original comments had
            // the two loops labeled the wrong way around.)
            gradesList.Sort();
            int amountOfWords = Math.Max(gradesList.Count / 10, 1);

            // Lowest 10% of grades -> bad indicators.
            for (int i = 0; i < amountOfWords; i++)
            {
                foreach (WordHistogram item in wordsGradesDictionary[gradesList[i]])
                {
                    r_ImportentWords.Add(item.Word);
                    r_BadWords.Add(item.Word);
                }
            }

            // Highest 10% of grades -> good indicators.
            for (int i = 0; i < amountOfWords; i++)
            {
                foreach (WordHistogram item in wordsGradesDictionary[gradesList[gradesList.Count - 1 - i]])
                {
                    r_ImportentWords.Add(item.Word);
                    r_GoodWords.Add(item.Word);
                }
            }
        }

        // Grades a document: the sum, over every important word it contains,
        // of (occurrence count * word grade), rounded up to an int.
        private int calculateGrade(Document doc)
        {
            Dictionary<string, int> wordsHist = TextMiningUtil.GetWordsFromFile(doc.FullPath);

            // BUG FIX: learning keys the histogram by *sanitized* words, so
            // r_ImportentWords holds sanitized words -- the lookup below must be
            // keyed the same way. The original built this dictionary from raw
            // words (a pure identity copy), so sanitized important words whose
            // raw form differed could never match.
            Dictionary<string, int> hist = new Dictionary<string, int>();
            foreach (KeyValuePair<string, int> pair in wordsHist)
            {
                string senitizedWord = TextMiningUtil.SenityzeWord(pair.Key);
                int count;
                hist.TryGetValue(senitizedWord, out count);
                hist[senitizedWord] = count + pair.Value;
            }

            float grade = 0;
            foreach (string word in r_ImportentWords)
            {
                int count;
                if (hist.TryGetValue(word, out count))
                {
                    grade += count * gradeWord(word);
                }
            }

            return (int)Math.Ceiling(grade);
        }

        // A word's grade: its relative frequency among good documents minus its
        // relative frequency among bad documents. Positive -> good indicator,
        // negative -> bad indicator. Guards against division by zero (which
        // previously produced Infinity/NaN grades) before any document of a
        // category has been learned.
        private float gradeWord(string word)
        {
            WordHistogram hist = r_WordsHist[word];
            float goodRatio = m_TotalWordsInGoodDocs == 0 ? 0f : (float)hist.GoodCount / m_TotalWordsInGoodDocs;
            float badRatio = m_TotalWordsInBadDocs == 0 ? 0f : (float)hist.BadCount / m_TotalWordsInBadDocs;
            return goodRatio - badRatio;
        }

        // Sets the match threshold to the lowest grade among the learned good
        // documents (0 when no good documents have been learned yet).
        private void CalculateMinimalGrade()
        {
            int minGrade = 0;
            bool first = true;
            foreach (Document doc in r_GoodDocuments)
            {
                int tempGrade = calculateGrade(doc);
                if (first || tempGrade < minGrade)
                {
                    minGrade = tempGrade;
                    first = false;
                }
            }

            m_MinimalMatchScore = minGrade;
        }


        /// <summary>
        /// Per-word occurrence counters, split by document category.
        /// </summary>
        internal class WordHistogram
        {
            private readonly string r_Word;

            public int GoodCount { get; set; }
            public int BadCount { get; set; }
            public string Word { get { return r_Word; } }

            public WordHistogram(string Name)
            {
                r_Word = Name;
            }
        }
    }
}
