using System;
using System.Collections;
using System.IO;
using System.Text;
using Slaks.DocumentSerialization;
using Slaks.DocumentSerialization.Document;
using Slaks.DocumentSerialization.ParsedFiles;
using Slaks.DocumentSerialization.TermIdInfo;
using Slaks.Readers;
using Slaks.Web.Parser;

namespace TextParsing
{
    /// <summary>
    /// Drives parsing of a batch of HTML files into graph-document XML files.
    /// Each document becomes a set of term nodes (one per unique stemmed term)
    /// whose sentence entries record where - and next to which other term - the
    /// term occurred. A global term-to-id map and per-term document-frequency
    /// statistics are accumulated across the batch and serialized at the end.
    /// </summary>
    public class StructureConstruction
    {
        // Porter stemmer used to normalize every non-stop-word term.
        private PorterStemmerAlgorithm.PorterStemmer m_stemmer = new PorterStemmerAlgorithm.PorterStemmer();

        /// <summary>
        /// Raised once per input file with a success or failure message.
        /// </summary>
        public event Slaks.ParserUtil.ParserHandler ReportProgressEvent;

        // Tokenizer shared by all Read* methods; a new buffer is assigned per text fragment.
        private BufferReader m_bufferReaderCore = null;

        // Directory that receives all generated XML files.
        private string m_outputPath;

        // Encoding used to read the input HTML files.
        private Encoding m_encoding;

        // NOTE(review): stored but never read in this class - stop words are
        // always filtered via MDIDoc.ISStopword regardless of this flag; confirm intent.
        private bool m_useStopWords;

        // BUGFIX: this field was referenced by the constructor and ProceedParsing
        // but was never declared, so the class did not compile. Declared here.
        // NOTE(review): nothing in this class ever writes to it - confirm intended use.
        private StreamWriter m_unknownTermWriter;

        /// <summary>
        /// Global map from (stemmed) term text to its id/frequency record.
        /// A term present here has been encountered at least once during the batch.
        /// </summary>
        private TermIdInfoMap m_term2id = null;

        /// <summary>
        /// Global id counter; incremented whenever a term absent from m_term2id is met.
        /// </summary>
        private long m_idCounter = 0;

        /// <summary>
        /// Kind of the last term encountered before an end-of-sentence punctuation mark.
        /// NOTE(review): currently unused inside this class.
        /// </summary>
        enum LastTermType { StopWord = 0, RegularTerm = 1 };

        /// <summary>
        /// Map from term id to the set of documents containing that term.
        /// </summary>
        private Id2DocumentFrequencyMap m_id2DocumentFrequency;

        /// <summary>
        /// Creates the parser driver and opens the unknown-term log file.
        /// </summary>
        /// <param name="bufferReaderCore">Tokenizer used to split text buffers into terms.</param>
        /// <param name="outputPath">Directory where generated XML files are written.</param>
        /// <param name="encoding">Encoding of the input HTML files.</param>
        /// <param name="useStopWords">Stored for later use (see field note).</param>
        public StructureConstruction(BufferReader bufferReaderCore, string outputPath, Encoding encoding, bool useStopWords)
        {
            m_bufferReaderCore = bufferReaderCore;
            m_outputPath = outputPath;
            m_encoding = encoding;
            m_useStopWords = useStopWords;
            m_id2DocumentFrequency = new Id2DocumentFrequencyMap();
            m_unknownTermWriter = new StreamWriter("unknown_term_list.txt");
        }

        /// <summary>
        /// Parses every file in <paramref name="filesToParse"/>, writing one
        /// "*_parsed.xml" document per input file, then the batch-level file
        /// list, term-id map and term document-frequency XML files into the
        /// output directory. A failure in one file is reported through
        /// ReportProgressEvent and does not abort the batch.
        /// </summary>
        /// <param name="filesToParse">List of HTML file paths (strings).</param>
        public void ProceedParsing(IList filesToParse)
        {
            DocumentHtmlParser.IsSaveLinkAsText = false;
            Slaks.Web.Parser.Utils.LocalPageDownload.IsEncodingDetectionForced = true;

            // Fresh global term -> id map for this run.
            m_term2id = new TermIdInfoMap();
            m_idCounter = 0;

            ParsedFiles parsedFiles = new ParsedFiles();

            try
            {
                foreach (string file in filesToParse)
                {
                    try
                    {
                        DocumentHtmlParser parser = new DocumentHtmlParser(file, m_encoding);
                        parser.ParseMe();
                        string parsedFile = SaveParsedFile(m_outputPath, file, parser, new Document());
                        OnReportProgress(file + " was parsed successfully at time " + DateTime.Now.ToString());
                        parsedFiles.Files.Add(parsedFile);
                    }
                    catch (Exception ex)
                    {
                        // One bad file must not stop the batch; report and continue.
                        OnReportProgress("error occurred while parsing " + file + " file at time " + DateTime.Now.ToString() + " " + ex.Message + " " + ex.Source);
                    }
                }

                TermIdInfo termIdInfo = new TermIdInfo();
                termIdInfo.TermIdDictionary = m_term2id;

                parsedFiles.XmlWrite(CombinePath(m_outputPath, "parsed_file_list.xml"));
                termIdInfo.XmlWrite(CombinePath(m_outputPath, "term_id_info.xml"));
                TermDocumentFrequencySerialization.WriteXml(CombinePath(m_outputPath, "term_document_frequency_info.xml"), m_id2DocumentFrequency.Values.GetEnumerator());
            }
            finally
            {
                // BUGFIX: close the log even when serialization above throws.
                m_unknownTermWriter.Close();
            }
        }

        /// <summary>
        /// Raises ReportProgressEvent with the given message.
        /// BUGFIX: the event was previously invoked without a null check and
        /// threw NullReferenceException when no subscriber was attached.
        /// </summary>
        private void OnReportProgress(string message)
        {
            Slaks.ParserUtil.ParserHandler handler = ReportProgressEvent;
            if (handler == null) return;

            Slaks.ParserUtil.ParseEventArgs args = new Slaks.ParserUtil.ParseEventArgs();
            args.Message = message;
            handler(this, args);
        }

        /// <summary>
        /// Joins a directory and a file name with a single backslash separator,
        /// mirroring the original inline logic (Windows-style paths).
        /// </summary>
        private static string CombinePath(string directory, string fileName)
        {
            if (directory.Length > 0 && directory[directory.Length - 1] == '\\') return directory + fileName;
            return directory + "\\" + fileName;
        }

        /// <summary>
        /// Builds the node graph for one parsed document - title, meta names,
        /// body text and anchor texts - computes per-section most-frequent-term
        /// ids, and serializes the document to document.FileName as UTF-8 XML.
        /// </summary>
        /// <exception cref="InvalidOperationException">When parsing produced no nodes.</exception>
        private void ParseDocument(Document document, DocumentHtmlParser parser)
        {
            int sentenceCount = 0;
            int totalTermCount = 0;
            Hashtable id2NodeLocal = new Hashtable();   // term id -> Node (this document only)
            // term text -> Node for this document. Kept because the Schenker
            // stemming variant (GetSchenkerStemmedWord) probes the +s/+es/+ing
            // flavours of a word against terms already seen locally and picks
            // the most frequent spelling.
            Hashtable term2LocalNode = new Hashtable();

            ReadTitle(document, parser.Header, ref sentenceCount, ref totalTermCount, id2NodeLocal, term2LocalNode);
            ReadMetaNames(document, parser.Header, ref sentenceCount, ref totalTermCount, id2NodeLocal, term2LocalNode);
            ReadStructures(document, parser.RootStructure, ref sentenceCount, ref totalTermCount, id2NodeLocal, term2LocalNode);
            ReadAnchors(document, parser.RootStructure, ref sentenceCount, ref totalTermCount, id2NodeLocal, term2LocalNode);

            if (document.Nodes.Count == 0)
            {
                throw new InvalidOperationException(document.FileName + " was parsed but no nodes were constructed ");
            }

            document.TotalSentenceCount = sentenceCount;
            document.UniqueTermCount = document.Nodes.Count;
            document.TotalTermCount = totalTermCount;

            document.MostFrequencyInfo.TotalMostFrequentId = this.GetTotalMostFrequentTermId(document);
            document.MostFrequencyInfo.SectionType2IdMap.Add(Grammar.SectionType.TextTypeAttribute, this.GetMostFrequentTermIdBySection(document, Section.SectionType.Text));
            document.MostFrequencyInfo.SectionType2IdMap.Add(Grammar.SectionType.LinkTypeAttribute, this.GetMostFrequentTermIdBySection(document, Section.SectionType.Link));
            document.MostFrequencyInfo.SectionType2IdMap.Add(Grammar.SectionType.TitleTypeAttribute, this.GetMostFrequentTermIdBySection(document, Section.SectionType.Title));

            Document2Xml.WriteXml(document, document.FileName, System.Text.Encoding.UTF8);
        }

        /// <summary>
        /// Returns the id of the node with the highest total term frequency,
        /// or -1 when the document has no nodes.
        /// </summary>
        private long GetTotalMostFrequentTermId(Document document)
        {
            long id = -1;
            long maxFrequency = long.MinValue;

            foreach (Node node in document.Nodes)
            {
                if (node.TotalTermFrequency > maxFrequency)
                {
                    maxFrequency = node.TotalTermFrequency;
                    id = node.Id;
                }
            }

            return id;
        }

        /// <summary>
        /// Returns the id of the node with the highest term frequency inside
        /// the given section type, or -1 when no node has such a section.
        /// </summary>
        private long GetMostFrequentTermIdBySection(Document document, Section.SectionType sectionType)
        {
            long id = -1;
            long maxFrequency = long.MinValue;

            foreach (Node node in document.Nodes)
            {
                Section section = node.GetSectionByType(sectionType);
                if (section == null) continue;
                if (section.TermFrequency > maxFrequency)
                {
                    maxFrequency = section.TermFrequency;
                    id = node.Id;
                }
            }

            return id;
        }

        /// <summary>
        /// Derives the output filename "&lt;name&gt;_parsed.xml" (dots in the source
        /// name replaced by underscores), runs ParseDocument, and returns the
        /// full output path. Exceptions from parsing propagate to the caller.
        /// </summary>
        private string SaveParsedFile(string outputPath, string originalFile, DocumentHtmlParser parser, Document document)
        {
            int index = originalFile.LastIndexOf('\\');

            string filename = originalFile.Substring(index + 1);
            filename = filename.Replace('.', '_');
            filename += "_parsed.xml";
            filename = CombinePath(outputPath, filename);

            document.FileName = filename;

            // The original wrapped this in a catch-all that only rethrew;
            // the useless try/catch has been removed.
            this.ParseDocument(document, parser);

            return filename;
        }

        /// <summary>
        /// Tokenizes the buffer currently assigned to m_bufferReaderCore and
        /// feeds every term into ProcessTerm for the given section type.
        /// End-of-sentence punctuation closes a sentence; middle-of-sentence
        /// punctuation only breaks the phrase chain; a buffer not terminated by
        /// end-of-sentence punctuation still counts as one more sentence.
        /// BUGFIX: reader state and the phrase chain are now reset per buffer,
        /// so one anchor/metaname's trailing state no longer leaks into the next.
        /// </summary>
        private void ScanCurrentBuffer(Document document, Section.SectionType sectionType, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node, Hashtable term2LocalNode)
        {
            ReaderState state = ReaderState.None;
            Node currentNode = null;
            int place = 0;

            while (!m_bufferReaderCore.Eof())
            {
                string nextTerm = m_bufferReaderCore.NextTerm(ref state);
                if (state == ReaderState.Term)
                {
                    this.ProcessTerm(nextTerm, sentenceCount, ref place, ref currentNode, sectionType, id2Node, term2LocalNode, ref totalTermCount, document);
                }
                else if (state == ReaderState.EndOfSentencePunctuation)
                {
                    place = 0;
                    ++sentenceCount;
                    currentNode = null; // sentence ended: break the phrase chain
                }
                else if (state == ReaderState.MiddleSentencePunctuation)
                {
                    currentNode = null; // commas etc. break the phrase chain only
                }
            }

            // Trailing text without closing punctuation is still a sentence.
            if (state != ReaderState.EndOfSentencePunctuation) ++sentenceCount;
        }

        /// <summary>
        /// Feeds the structure's body text into the graph as the Text section.
        /// </summary>
        public void ReadStructures(Document document, HtmlStructure structure, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node, Hashtable term2LocalNode)
        {
            if (structure == null) return;

            m_bufferReaderCore.AssingNewBuffer(structure.TextArray);
            ScanCurrentBuffer(document, Section.SectionType.Text, ref sentenceCount, ref totalTermCount, id2Node, term2LocalNode);
        }

        /// <summary>
        /// Feeds the document title into the graph as the Title section.
        /// BUGFIX: a null title is now skipped (previously only "" was checked,
        /// so a null title reached the buffer reader).
        /// </summary>
        public void ReadTitle(Document document, HtmlHeader header, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node, Hashtable term2LocalNode)
        {
            if (header == null) return;
            if (string.IsNullOrEmpty(header.Title)) return;

            m_bufferReaderCore.AssingNewBuffer(header.Title);
            ScanCurrentBuffer(document, Section.SectionType.Title, ref sentenceCount, ref totalTermCount, id2Node, term2LocalNode);
        }

        /// <summary>
        /// Feeds every meta-name string of the header into the graph.
        /// Meta names are indexed under the Link section type (as in the
        /// original implementation).
        /// </summary>
        public void ReadMetaNames(Document document, HtmlHeader header, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node, Hashtable term2LocalNode)
        {
            if (header == null) return;

            IList metanameList = header.Metaname;
            for (int i = 0; i < metanameList.Count; i++)
            {
                m_bufferReaderCore.AssingNewBuffer(metanameList[i].ToString());
                ScanCurrentBuffer(document, Section.SectionType.Link, ref sentenceCount, ref totalTermCount, id2Node, term2LocalNode);
            }
        }

        /// <summary>
        /// Feeds the visible text of every anchor into the graph as the Link
        /// section. Anchors with null text are skipped.
        /// </summary>
        public void ReadAnchors(Document document, HtmlStructure structure, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node, Hashtable term2LocalNode)
        {
            if (structure == null) return; // guard added for consistency with ReadStructures

            IList anchorList = structure.Anchors;
            if (anchorList == null) return;

            for (int i = 0; i < anchorList.Count; i++)
            {
                string link = ((HtmlAnchor)anchorList[i]).Text;
                if (link == null) continue;

                m_bufferReaderCore.AssingNewBuffer(link);
                ScanCurrentBuffer(document, Section.SectionType.Link, ref sentenceCount, ref totalTermCount, id2Node, term2LocalNode);
            }
        }

        /// <summary>
        /// Handles one token produced by the buffer reader.
        /// Stop words are attached to the current sentence as connecting words
        /// (or discarded when there is no sentence context). Regular terms are
        /// Porter-stemmed, registered in the global term/id and
        /// document-frequency maps, materialized as a Node in this document
        /// (reusing the node when the term already occurred here), and chained
        /// to the previous term's sentence entry via Sentence.NextId.
        /// </summary>
        /// <param name="term">Raw token text.</param>
        /// <param name="sentenceNumber">Index of the sentence being read.</param>
        /// <param name="currentPlace">Zero-based position inside the sentence; incremented for regular terms.</param>
        /// <param name="currentNode">Node of the previous term, or null when the phrase chain was broken.</param>
        /// <param name="sectionType">Section (Text/Link/Title) the term belongs to.</param>
        /// <param name="id2NodeLocal">Per-document map: term id -> Node.</param>
        /// <param name="term2LocalNode">Per-document map: term text -> Node.</param>
        /// <param name="totalTermCount">Running count of non-stop-word terms; incremented here.</param>
        /// <param name="document">Document receiving newly created nodes.</param>
        private void ProcessTerm(string term,
                                 int sentenceNumber,
                                 ref int currentPlace,
                                 ref Node currentNode,
                                 Section.SectionType sectionType,
                                 Hashtable id2NodeLocal,
                                 Hashtable term2LocalNode,
                                 ref int totalTermCount,
                                 Document document)
        {
            // Sentence entry of the PREVIOUS term (currentPlace - 1); null at sentence start.
            Section currentSection = (currentNode == null) ? null : currentNode.GetSectionByType(sectionType);
            Sentence sentence = (currentSection == null) ? null : ((currentPlace > 0) ? currentSection.GetSentence(sentenceNumber, currentPlace - 1) : null);

            if (Slaks.TextParsing.MDIDoc.ISStopword(term))
            {
                // A stop word only matters as a connecting word between two
                // regular terms; without context it is thrown away.
                if (currentNode == null) return;
                if (sentence == null) return;
                sentence.AddConnectingWord(term);
                return;
            }

            ++totalTermCount;

            // Porter stemming; GetSchenkerStemmedWord below is the retired alternative.
            term = m_stemmer.StemTerm(term);

            Node node;
            TermIdInfoElement termIdInfoElement = m_term2id.Contains(term) ? m_term2id[term] : null;
            if (termIdInfoElement == null)
            {
                // First global occurrence: assign a fresh id and create the node.
                ++m_idCounter;
                termIdInfoElement = new TermIdInfoElement(term, m_idCounter, 1);
                m_term2id.Add(term, termIdInfoElement);

                node = Node.CreateNewNode();
                node.Id = m_idCounter;
                node.Term = term;

                id2NodeLocal.Add(m_idCounter, node);
                term2LocalNode.Add(term, node);
                document.AddNode(node.Id, node);

                // Start tracking which documents contain this term.
                TermDocumentFrequencyElement element = new TermDocumentFrequencyElement(node.Id);
                element.AddDocument(document.FileName);
                m_id2DocumentFrequency.Add(node.Id, element);
            }
            else
            {
                // Known term: bump global frequency and its document set.
                // NOTE(review): assumes AddDocument dedupes repeat occurrences
                // within the same document - verify.
                termIdInfoElement.Frequency += 1;
                long id = termIdInfoElement.Id;
                m_id2DocumentFrequency[id].AddDocument(document.FileName);

                node = id2NodeLocal.ContainsKey(id) ? (Node)id2NodeLocal[id] : null;
                if (node == null)
                {
                    // Known globally, but first occurrence in THIS document.
                    node = Node.CreateNewNode();
                    node.Id = id;
                    node.Term = term;

                    id2NodeLocal.Add(id, node);
                    term2LocalNode.Add(term, node);
                    document.AddNode(node.Id, node);
                }
            }

            // Chain the previous term's sentence entry to this term's node id.
            if (sentence != null) sentence.NextId = node.Id;

            currentNode = node;
            this.AddSentenceToNode(node, sentenceNumber, currentPlace, sectionType);
            ++currentPlace;
        }

        /// <summary>
        /// Records an occurrence of the node's term at (sentenceNumber, termPlace)
        /// inside the given section, creating the section on first use.
        /// </summary>
        private void AddSentenceToNode(Node node, int sentenceNumber, int termPlace, Section.SectionType sectionType)
        {
            Section currentSection = node.GetSectionByType(sectionType);
            if (currentSection == null)
            {
                currentSection = Section.CreateNewSection();
                currentSection.Type = sectionType;
                node.AddSection(sectionType, currentSection);
            }

            Sentence sentence = Sentence.CreateNewSentence();
            sentence.Number = sentenceNumber;
            sentence.Place = termPlace;

            currentSection.Sentences.Add(sentence);
        }

        /// <summary>
        /// Schenker-style stemming: among the +s/+es/+ing inflections (and the
        /// -s / -e+ing de-inflections) of <paramref name="oldTerm"/>, returns
        /// the variant with the highest total frequency among terms already
        /// seen in this document, preferring the original on ties.
        /// NOTE(review): currently unused (ProcessTerm uses the Porter stemmer),
        /// and the comparison chain is not exhaustive - the "-s" variant can
        /// never be returned and the "-e+ing" branch never compares against the
        /// original's frequency. Preserved as-is for reference.
        /// </summary>
        private string GetSchenkerStemmedWord(string oldTerm, Hashtable term2LocalNode)
        {
            string newTerm;
            string newTermWithS = oldTerm + "s";
            string newTermWithES = oldTerm + "es";
            string newTermWithING = oldTerm + "ing";

            string newTermWithoutS = String.Empty;
            if (oldTerm.Length > 1 && oldTerm[oldTerm.Length - 1] == 's') newTermWithoutS = oldTerm.Substring(0, oldTerm.Length - 1);

            string newTermWithoutEAndWithIng = String.Empty;
            if (oldTerm.Length > 1 && oldTerm[oldTerm.Length - 1] == 'e')
            {
                newTermWithoutEAndWithIng = oldTerm.Substring(0, oldTerm.Length - 1);
                newTermWithoutEAndWithIng += "ing";
            }

            // Hashtable lookups return null for unseen variants (including the
            // String.Empty placeholders above), which leaves their frequency 0.
            Node nodeOld = (Node)term2LocalNode[oldTerm];
            Node nodeWithS = (Node)term2LocalNode[newTermWithS];
            Node nodeWithES = (Node)term2LocalNode[newTermWithES];
            Node nodeWithING = (Node)term2LocalNode[newTermWithING];
            Node nodeWithoutS = (Node)term2LocalNode[newTermWithoutS];
            Node nodeWithoutEAndWithIng = (Node)term2LocalNode[newTermWithoutEAndWithIng];

            long freqOld = 0;
            long freqS = 0;
            long freqES = 0;
            long freqING = 0;
            long freqWithoutS = 0;
            long freqWithoutEAndWithIng = 0;

            if (nodeOld != null) freqOld = nodeOld.TotalTermFrequency;
            if (nodeWithS != null) freqS = nodeWithS.TotalTermFrequency;
            if (nodeWithES != null) freqES = nodeWithES.TotalTermFrequency;
            if (nodeWithING != null) freqING = nodeWithING.TotalTermFrequency;
            if (nodeWithoutS != null) freqWithoutS = nodeWithoutS.TotalTermFrequency;
            if (nodeWithoutEAndWithIng != null) freqWithoutEAndWithIng = nodeWithoutEAndWithIng.TotalTermFrequency;

            if (freqOld >= freqS && freqOld >= freqES && freqOld >= freqING && freqOld >= freqWithoutS && freqOld >= freqWithoutEAndWithIng)
            {
                newTerm = oldTerm;
            }
            else if (freqS >= freqES && freqS >= freqING && freqS >= freqOld && freqS >= freqWithoutS && freqS >= freqWithoutEAndWithIng)
            {
                newTerm = newTermWithS;
            }
            else if (freqES >= freqS && freqES >= freqING && freqES >= freqOld && freqES >= freqWithoutS && freqES >= freqWithoutEAndWithIng)
            {
                newTerm = newTermWithES;
            }
            else if (freqWithoutEAndWithIng >= freqS && freqWithoutEAndWithIng >= freqES && freqWithoutEAndWithIng >= freqING && freqWithoutEAndWithIng >= freqWithoutS)
            {
                newTerm = newTermWithoutEAndWithIng;
            }
            else
            {
                newTerm = newTermWithING;
            }

            return newTerm;
        }
    }
}
