using System;
using System.Collections;
using System.IO;
using System.Text;
using Slaks.DocumentSerialization;
using Slaks.DocumentSerialization.Document;
using Slaks.DocumentSerialization.Document.Wordnet;
using Slaks.DocumentSerialization.ParsedFiles;
using Slaks.DocumentSerialization.TermIdInfo;
using Slaks.Readers;
using Slaks.TextParsing;
using Slaks.Web.Parser;

namespace TextParsing
{
    /// <summary>
    /// Drives parsing of a set of HTML files: extracts terms from title, meta
    /// names, body text and anchors, assigns each distinct term a global id,
    /// accumulates term/document frequencies, and serializes the results as XML.
    /// </summary>
    public class StructureConstruction
    {
        /// <summary>Raised to report per-file parsing progress and errors.</summary>
        public event Slaks.ParserUtil.ParserHandler ReportProgressEvent;

        private BufferReader m_bufferReaderCore = null;
        private string m_outputPath;
        private Encoding m_encoding;
        private StreamWriter m_unknownTermWriter;

        #region Xml
        /// <summary>
        /// Term-to-id mapper: presence of a term means it was encountered during
        /// parsing and is stored in the global hash.
        /// </summary>
        private TermIdInfoMap m_term2id = null;

        /// <summary>
        /// Global id counter; incremented whenever a term not yet present in
        /// m_term2id is encountered.
        /// </summary>
        private long m_idCounter = 0;

        /// <summary>
        /// Indicates the last term type encountered during parsing until the end
        /// of sentence punctuation mark was fed.
        /// NOTE(review): not referenced anywhere in this class — confirm whether
        /// it is still needed.
        /// </summary>
        enum LastTermType { StopWord = 0, RegularTerm = 1 };

        /// <summary>
        /// Holds information about term document frequency (term id -> documents).
        /// </summary>
        private Id2DocumentFrequencyMap m_id2DocumentFrequency;

        #endregion

        /// <summary>
        /// Creates the construction driver.
        /// </summary>
        /// <param name="bufferReaderCore">Tokenizer used to walk text buffers.</param>
        /// <param name="outputPath">Directory where result XML files are written.</param>
        /// <param name="encoding">Encoding used to read the input HTML files.</param>
        /// <param name="useStopWords">
        /// NOTE(review): currently unused — kept only for signature compatibility.
        /// </param>
        public StructureConstruction(BufferReader bufferReaderCore, string outputPath, Encoding encoding, bool useStopWords)
        {
            m_bufferReaderCore = bufferReaderCore;
            m_outputPath = outputPath;
            m_encoding = encoding;
            m_id2DocumentFrequency = new Id2DocumentFrequencyMap();
            // Hard-coded WordNet 2.1 dictionary location; the WnLexicon lookups
            // below depend on this path being valid.
            Wnlib.WNCommon.path = "c:\\Program Files\\WordNet\\2.1\\dict\\";
            // Terms that WordNet cannot classify are appended to this file.
            m_unknownTermWriter = new StreamWriter("unknown_term_list.txt");
        }

        /// <summary>
        /// Parses every file in <paramref name="filesToParse"/>, reporting progress
        /// through <see cref="ReportProgressEvent"/>, then writes the parsed-file
        /// list, the term/id map and the term document frequencies as XML into the
        /// output directory. Per-file failures are reported and skipped.
        /// </summary>
        /// <param name="filesToParse">List of HTML file paths (strings).</param>
        public void ProceedParsing(IList filesToParse)
        {
            DocumentHtmlParser parser = null;
            DocumentHtmlParser.IsSaveLinkAsText = false;
            Slaks.ParserUtil.ParseEventArgs parsingArgs = new Slaks.ParserUtil.ParseEventArgs();
            Slaks.Web.Parser.Utils.LocalPageDownload.IsEncodingDetectionForced = true;

            // Reset the global term hash and id counter for this run.
            m_term2id = new TermIdInfoMap();
            m_idCounter = 0;

            ParsedFiles parsedFiles = new ParsedFiles();

            try
            {
                foreach (string file in filesToParse)
                {
                    try
                    {
                        parser = new DocumentHtmlParser(file, m_encoding);
                        parser.ParseMe();
                        string parsedFile = SaveParsedFile(m_outputPath, file, parser, new Document());
                        parsingArgs.Message = file + " was parsed successfully at time " + DateTime.Now.ToString();
                        RaiseReportProgress(parsingArgs);

                        parsedFiles.Files.Add(parsedFile);
                    }
                    catch (Exception ex)
                    {
                        // A single bad file must not abort the whole batch; report and continue.
                        parsingArgs.Message = "error occurred while parsing " + file + " file at time " + DateTime.Now.ToString() + " " + ex.Message + " " + ex.Source;
                        RaiseReportProgress(parsingArgs);
                    }
                }

                TermIdInfo termIdInfo = new TermIdInfo();
                termIdInfo.TermIdDictionary = m_term2id;

                // Path.Combine handles both "dir" and "dir\" forms, so no manual
                // trailing-separator branching is needed.
                parsedFiles.XmlWrite(Path.Combine(m_outputPath, "parsed_file_list.xml"));
                termIdInfo.XmlWrite(Path.Combine(m_outputPath, "term_id_info.xml"));
                TermDocumentFrequencySerialization.WriteXml(Path.Combine(m_outputPath, "term_document_frequency_info.xml"), m_id2DocumentFrequency.Values.GetEnumerator());
            }
            finally
            {
                // Ensure the unknown-term log is flushed even when an unexpected
                // exception escapes the run.
                m_unknownTermWriter.Close();
            }
        }

        /// <summary>
        /// Raises <see cref="ReportProgressEvent"/> safely (no-op when there are
        /// no subscribers).
        /// </summary>
        private void RaiseReportProgress(Slaks.ParserUtil.ParseEventArgs args)
        {
            Slaks.ParserUtil.ParserHandler handler = ReportProgressEvent;
            if (handler != null) handler(this, args);
        }

        /// <summary>
        /// Extracts terms from every section of an already-parsed document, fills
        /// in the document statistics (sentence/term counts, most-frequent term
        /// per section) and serializes the document to XML.
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// Thrown when parsing produced no nodes at all.
        /// </exception>
        private void ParseDocument(Document document, DocumentHtmlParser parser)
        {
            int sentenceCount = 0;
            int totalTermCount = 0;
            // Per-document id -> Node map; distinct from the run-global m_term2id.
            Hashtable id2NodeLocal = new Hashtable();
            ReadTitle(document, parser.Header, ref sentenceCount, ref totalTermCount, id2NodeLocal);
            ReadMetaNames(document, parser.Header, ref sentenceCount, ref totalTermCount, id2NodeLocal);
            ReadStructures(document, parser.RootStructure, ref sentenceCount, ref totalTermCount, id2NodeLocal);
            ReadAnchors(document, parser.RootStructure, ref sentenceCount, ref totalTermCount, id2NodeLocal);

            if (document.Nodes.Count > 0)
            {
                document.TotalSentenceCount = sentenceCount;
                document.UniqueTermCount = document.Nodes.Count;
                document.TotalTermCount = totalTermCount;

                document.MostFrequencyInfo.TotalMostFrequentId = this.GetTotalMostFrequentTermId(document);

                document.MostFrequencyInfo.SectionType2IdMap.Add(
                    Grammar.SectionType.TextTypeAttribute,
                    this.GetMostFrequentTermIdBySection(document, Section.SectionType.Text));

                document.MostFrequencyInfo.SectionType2IdMap.Add(
                    Grammar.SectionType.LinkTypeAttribute,
                    this.GetMostFrequentTermIdBySection(document, Section.SectionType.Link));

                document.MostFrequencyInfo.SectionType2IdMap.Add(
                    Grammar.SectionType.TitleTypeAttribute,
                    this.GetMostFrequentTermIdBySection(document, Section.SectionType.Title));

                Document2Xml.WriteXml(document, document.FileName, System.Text.Encoding.UTF8);
            }
            else throw new InvalidOperationException(document.FileName + " was parsed but no nodes where constructed ");
        }

        /// <summary>
        /// Returns the id of the node with the highest total term frequency in the
        /// document, or -1 when the document has no nodes.
        /// </summary>
        private long GetTotalMostFrequentTermId(Document document)
        {
            long id = -1;
            long maxFrequency = long.MinValue;

            foreach (Node node in document.Nodes)
            {
                if (node.TotalTermFrequency > maxFrequency)
                {
                    maxFrequency = node.TotalTermFrequency;
                    id = node.Id;
                }
            }

            return id;
        }

        /// <summary>
        /// Returns the id of the node with the highest term frequency within the
        /// given section type, or -1 when no node has such a section.
        /// </summary>
        private long GetMostFrequentTermIdBySection(Document document, Section.SectionType sectionType)
        {
            long id = -1;
            long maxFrequency = long.MinValue;

            foreach (Node node in document.Nodes)
            {
                Section section = node.GetSectionByType(sectionType);
                if (section == null) continue;
                if (section.TermFrequency > maxFrequency)
                {
                    maxFrequency = section.TermFrequency;
                    id = node.Id;
                }
            }

            return id;
        }

        /// <summary>
        /// Derives the parsed-document output filename ("name_ext_parsed.xml" in
        /// the output directory), runs the full document parse, and returns the
        /// filename. Parse failures propagate to the caller.
        /// </summary>
        private string SaveParsedFile(string outputPath, string originalFile, DocumentHtmlParser parser, Document document)
        {
            // "page.html" -> "page_html_parsed.xml"
            string filename = Path.GetFileName(originalFile).Replace('.', '_') + "_parsed.xml";
            document.FileName = Path.Combine(outputPath, filename);

            this.ParseDocument(document, parser);

            return document.FileName;
        }

        /// <summary>
        /// Core term-extraction loop shared by all Read* methods: consumes the
        /// buffer currently assigned to m_bufferReaderCore, dispatching terms to
        /// ProcessTerm and tracking sentence boundaries.
        /// </summary>
        /// <remarks>
        /// <paramref name="state"/> and <paramref name="currentNode"/> are passed
        /// by reference on purpose: when a caller feeds several buffers in a row
        /// (meta names, anchors) both values deliberately carry over between
        /// buffers, matching the original single-method behavior.
        /// </remarks>
        private void ConsumeCurrentBuffer(Document document,
                                          Section.SectionType sectionType,
                                          Hashtable id2Node,
                                          ref ReaderState state,
                                          ref Node currentNode,
                                          ref int sentenceCount,
                                          ref int totalTermCount)
        {
            int place = 0; // position of the next term inside the current sentence

            while (!m_bufferReaderCore.Eof())
            {
                string nextTerm = m_bufferReaderCore.NextTerm(ref state);
                if (state == ReaderState.Term)
                {
                    this.ProcessTerm(nextTerm, sentenceCount, ref place, ref currentNode, sectionType, id2Node, ref totalTermCount, document);
                }
                else if (state == ReaderState.EndOfSentencePunctuation)
                {
                    place = 0;
                    ++sentenceCount;
                    currentNode = null;
                }
                else if (state == ReaderState.MiddleSentencePunctuation)
                {
                    // Mid-sentence punctuation breaks the adjacency chain but not the sentence.
                    currentNode = null;
                }
            }

            // A buffer that does not end with sentence punctuation still counts
            // as (the start of) a sentence.
            if (state != ReaderState.EndOfSentencePunctuation) ++sentenceCount;
        }

        /// <summary>
        /// Extracts terms from the body text of an HTML structure into the
        /// document's Text section.
        /// </summary>
        public void ReadStructures(Document document, HtmlStructure structure, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
        {
            if (structure == null) return;

            ReaderState state = ReaderState.None;
            Node currentNode = null;

            m_bufferReaderCore.AssingNewBuffer(structure.TextArray);
            ConsumeCurrentBuffer(document, Section.SectionType.Text, id2Node, ref state, ref currentNode, ref sentenceCount, ref totalTermCount);
        }

        /// <summary>
        /// Extracts terms from the page title into the document's Title section.
        /// </summary>
        public void ReadTitle(Document document, HtmlHeader header, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
        {
            if (header == null) return;
            if (String.IsNullOrEmpty(header.Title)) return;

            ReaderState state = ReaderState.None;
            Node currentNode = null;

            m_bufferReaderCore.AssingNewBuffer(header.Title);
            ConsumeCurrentBuffer(document, Section.SectionType.Title, id2Node, ref state, ref currentNode, ref sentenceCount, ref totalTermCount);
        }

        /// <summary>
        /// Extracts terms from the header meta names.
        /// NOTE(review): meta-name terms are filed under the Title section type,
        /// same as the page title — confirm this is intended and not a copy-paste
        /// of the title handling.
        /// </summary>
        public void ReadMetaNames(Document document, HtmlHeader header, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
        {
            if (header == null) return;

            ReaderState state = ReaderState.None;
            Node currentNode = null;

            IList metanameList = header.Metaname;

            for (int i = 0; i < metanameList.Count; i++)
            {
                m_bufferReaderCore.AssingNewBuffer(metanameList[i].ToString());
                // state/currentNode intentionally carry over between meta names.
                ConsumeCurrentBuffer(document, Section.SectionType.Title, id2Node, ref state, ref currentNode, ref sentenceCount, ref totalTermCount);
            }
        }

        /// <summary>
        /// Extracts terms from the anchor texts of an HTML structure into the
        /// document's Link section.
        /// </summary>
        public void ReadAnchors(Document document, HtmlStructure structure, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
        {
            // Guard added for consistency with ReadStructures, which already
            // tolerates a null structure.
            if (structure == null) return;

            IList anchorList = structure.Anchors;
            if (anchorList == null) return;

            ReaderState state = ReaderState.None;
            Node currentNode = null;

            for (int i = 0; i < anchorList.Count; i++)
            {
                string link = ((HtmlAnchor)anchorList[i]).Text;
                if (link == null) continue;

                m_bufferReaderCore.AssingNewBuffer(link);
                // state/currentNode intentionally carry over between anchors.
                ConsumeCurrentBuffer(document, Section.SectionType.Link, id2Node, ref state, ref currentNode, ref sentenceCount, ref totalTermCount);
            }
        }

        /// <summary>
        /// Processes a single term: normalizes it via WordNet and a light stemmer,
        /// records stop words as connecting words on the current sentence, and for
        /// regular terms creates/updates the global id mapping, the per-document
        /// node, the document-frequency info, and the sentence adjacency chain.
        /// Terms WordNet cannot classify are logged and discarded.
        /// </summary>
        /// <param name="term">Raw term as produced by the buffer reader.</param>
        /// <param name="sentenceNumber">Index of the sentence being read.</param>
        /// <param name="currentPlace">Position within the sentence; incremented for regular terms only.</param>
        /// <param name="currentNode">Node of the previous regular term in this sentence (null at sentence start).</param>
        /// <param name="sectionType">Section (Text/Title/Link) the term belongs to.</param>
        /// <param name="id2NodeLocal">Per-document id -> Node map.</param>
        /// <param name="totalTermCount">Running count of regular terms in the document.</param>
        /// <param name="document">Document being built.</param>
        private void ProcessTerm(string term,
                                 int sentenceNumber,
                                 ref int currentPlace,
                                 ref Node currentNode,
                                 Section.SectionType sectionType,
                                 Hashtable id2NodeLocal,
                                 ref int totalTermCount,
                                 Document document)
        {
            // Find the term in the WordNet dictionary with morphological changes.
            WnLexicon.WordInfo wordinfo = WnLexicon.Lexicon.FindWordInfo(term, true);
            if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown)
            {
                m_unknownTermWriter.WriteLine(term);
                return;
            }
            if (wordinfo.text != String.Empty) term = wordinfo.text;

            string tempTerm = MakeBasicAdditionalStemming(term);
            if (tempTerm != null) term = tempTerm;

            Section currentSection = (currentNode == null) ? null : currentNode.GetSectionByType(sectionType);
            Sentence sentence = (currentSection == null) ? null : ((currentPlace > 0) ? currentSection.GetSentence(sentenceNumber, currentPlace - 1) : null);

            if (MDIDoc.ISStopword(term))
            {
                // A stop word with no preceding regular term has nothing to attach to.
                if (currentNode == null) return;
                if (sentence == null) return;
                sentence.AddConnectingWord(term);
                return;
            }

            ++totalTermCount;

            Node node;

            TermIdInfoElement termIdInfoElement = m_term2id.Contains(term) ? m_term2id[term] : null;
            if (termIdInfoElement == null) // term not yet in the global hash
            {
                ++m_idCounter;
                termIdInfoElement = new TermIdInfoElement(term, m_idCounter, 1);
                m_term2id.Add(term, termIdInfoElement);

                node = CreateNodeForTerm(m_idCounter, term, wordinfo);

                id2NodeLocal.Add(m_idCounter, node);
                document.AddNode(node.Id, node);

                // First sighting anywhere: start its document-frequency record.
                TermDocumentFrequencyElement element = new TermDocumentFrequencyElement(node.Id);
                element.AddDocument(document.FileName);
                m_id2DocumentFrequency.Add(node.Id, element);
            }
            else // the term is already in the global hash
            {
                termIdInfoElement.Frequency += 1;

                m_id2DocumentFrequency[termIdInfoElement.Id].AddDocument(document.FileName);

                long id = termIdInfoElement.Id;

                node = id2NodeLocal.ContainsKey(id) ? (Node)id2NodeLocal[id] : null;

                if (node == null) // known globally, but first time in this document
                {
                    node = CreateNodeForTerm(id, term, wordinfo);

                    id2NodeLocal.Add(id, node);
                    document.AddNode(node.Id, node);
                }
            }

            // Chain this term to the previous one in the same sentence.
            if (sentence != null)
            {
                sentence.NextId = node.Id;
            }

            currentNode = node;

            this.AddSentenceToNode(node, sentenceNumber, currentPlace, sectionType);
            ++currentPlace;
        }

        /// <summary>
        /// Creates a Node for a term and builds its WordNet sense counts.
        /// </summary>
        private Node CreateNodeForTerm(long id, string term, WnLexicon.WordInfo wordinfo)
        {
            Node node = Node.CreateNewNode();
            node.Id = id;
            node.Term = term;

            // NOTE(review): the senses object is built but never attached to the
            // node, matching the original code — confirm whether it should be.
            Senses senses = BuildSenses(wordinfo);

            return node;
        }

        /// <summary>
        /// Copies the per-part-of-speech sense counts from a WordNet lookup into
        /// a Senses object.
        /// </summary>
        private Senses BuildSenses(WnLexicon.WordInfo wordinfo)
        {
            Senses senses = new Senses();

            Wnlib.PartsOfSpeech[] enums = (Wnlib.PartsOfSpeech[])Enum.GetValues(typeof(Wnlib.PartsOfSpeech));
            for (int i = 0; i < enums.Length; i++)
            {
                Wnlib.PartsOfSpeech pos = enums[i];

                // skip "Unknown"
                if (pos == Wnlib.PartsOfSpeech.Unknown) continue;
                if (pos == Wnlib.PartsOfSpeech.Noun) senses.Nouns = wordinfo.senseCounts[i];
                else if (pos == Wnlib.PartsOfSpeech.Verb) senses.Verbs = wordinfo.senseCounts[i];
                else if (pos == Wnlib.PartsOfSpeech.Adj) senses.Adjectives = wordinfo.senseCounts[i];
                else if (pos == Wnlib.PartsOfSpeech.Adv) senses.Adverbs = wordinfo.senseCounts[i];
            }

            return senses;
        }

        /// <summary>
        /// Records an occurrence of the node at (sentenceNumber, termPlace) in the
        /// given section, creating the section on the node if necessary.
        /// </summary>
        private void AddSentenceToNode(Node node, int sentenceNumber, int termPlace, Section.SectionType sectionType)
        {
            Section currentSection = node.GetSectionByType(sectionType);
            if (currentSection == null)
            {
                currentSection = Section.CreateNewSection();
                currentSection.Type = sectionType;
                node.AddSection(sectionType, currentSection);
            }

            Sentence sentence = Sentence.CreateNewSentence();
            sentence.Number = sentenceNumber;
            sentence.Place = termPlace;

            currentSection.Sentences.Add(sentence);
        }

        /// <summary>
        /// Tries a few suffix-stripping heuristics ("ed", "ing", "s") beyond
        /// WordNet's own morphology, keeping a candidate only when WordNet still
        /// recognizes the stripped form.
        /// </summary>
        /// <returns>The stemmed term, or null when no valid stem was found.</returns>
        private string MakeBasicAdditionalStemming(string term)
        {
            // Too short to strip a suffix from safely.
            if (term.Length <= 3) return null;

            string newTerm = term;
            int termLength = term.Length;

            WnLexicon.WordInfo wordinfo;
            if (term.EndsWith("ed"))
            {
                newTerm = term.Substring(0, termLength - 2);
                wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
                if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown)
                {
                    // e.g. "baked" -> "bak" failed; try restoring the 'e' -> "bake".
                    newTerm += 'e';
                    wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
                    if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return null;
                }
            }
            else if (term.EndsWith("ing"))
            {
                newTerm = term.Substring(0, termLength - 3);
                wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
                if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return null;
            }
            else if (term.EndsWith("s"))
            {
                newTerm = term.Substring(0, termLength - 1);
                wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
                if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return null;
            }

            return newTerm;
        }
    }
}
