using System;
using System.Collections;
using System.Text;
using Slaks.DocumentSerialization;
using Slaks.DocumentSerialization.Document;
using Slaks.DocumentSerialization.Document.Wordnet;
using Slaks.DocumentSerialization.ParsedFiles;
using Slaks.DocumentSerialization.TermIdInfo;
using Slaks.Readers;
using Slaks.TextParsing;
using Slaks.Web.Parser;
using Wnlib;
using PartOfSpeech = Slaks.DocumentSerialization.Document.Wordnet.PartOfSpeech;
using Synonym = Slaks.DocumentSerialization.Document.Wordnet.Synonym;

namespace TextParsing
{
	/// <summary>
	/// Parses a list of HTML files, builds a Wordnet-annotated document structure
	/// (terms, sentences, sections, senses, synonyms) for each file, serializes each
	/// document to XML, and maintains global term-id and term-document-frequency maps
	/// that are written out after all files are processed.
	/// </summary>
	public class StructureConstruction
	{
		/// <summary>Raised once per file with a success or error message.</summary>
		public event Slaks.ParserUtil.ParserHandler ReportProgressEvent;

		private BufferReader m_bufferReaderCore = null;
		private string m_outputPath;
		private Encoding m_encoding;
		// NOTE(review): stored but never read anywhere in this class — confirm whether
		// stop-word filtering was meant to be conditional on this flag.
		private bool m_useStopWords;

		// Wordnet search option for the noun query
		// "Synonyms/Hypernyms (Ordered by Estimated Frequency): brief".
		// Resolved once in the static constructor; may stay null if the option table
		// does not contain that entry (FindSynonyms guards against this).
		private static Opt synonymsOpt = null;

		#region Xml
		/// <summary>
		/// term to id mapper: a term present here was encountered during parsing and is stored in the global hash
		/// </summary>
		private TermIdInfoMap m_term2id = null;

		/// <summary>
		/// global id counter (if the term is not present in m_term2id then the counter is incremented)
		/// </summary>
		private long m_idCounter = 0;

		/// <summary>
		/// indicates the last term type that was encountered during parsing until the end-of-sentence
		/// punctuation mark was fed. NOTE(review): currently unused in this class.
		/// </summary>
		enum LastTermType { StopWord = 0, RegularTerm = 1 };

		/// <summary>
		/// holds information about term document frequency
		/// </summary>
		private Id2DocumentFrequencyMap m_id2DocumentFrequency;

		#endregion

		/// <summary>
		/// Scans the Wnlib option table for the noun synonym/hypernym search option
		/// used by <see cref="FindSynonyms"/>.
		/// </summary>
		static StructureConstruction()
		{
			Wnlib.PartOfSpeech pos = Wnlib.PartOfSpeech.of("noun");

			for (int i = 0; i < Opt.Count; i++)
			{
				Opt opt = Opt.at(i);
				if (opt.pos == pos && opt.label == "Synonyms/Hypernyms (Ordered by Estimated Frequency): brief")
				{
					synonymsOpt = opt;
					break;
				}
			}
		}

		/// <summary>
		/// Creates a new structure builder.
		/// </summary>
		/// <param name="bufferReaderCore">tokenizer used to pull terms out of text buffers</param>
		/// <param name="outputPath">directory where the per-file and summary XML files are written</param>
		/// <param name="encoding">encoding used when reading the input HTML files</param>
		/// <param name="useStopWords">see note on m_useStopWords — currently unused</param>
		public StructureConstruction(BufferReader bufferReaderCore, string outputPath, Encoding encoding, bool useStopWords)
		{
			m_bufferReaderCore = bufferReaderCore;
			m_outputPath = outputPath;
			m_encoding = encoding;
			m_useStopWords = useStopWords;
			m_id2DocumentFrequency = new Id2DocumentFrequencyMap();
			// NOTE(review): hard-coded WordNet 2.1 dictionary location; consider making
			// this configurable.
			Wnlib.WNCommon.path = "c:\\Program Files\\WordNet\\2.1\\dict\\";
		}

		/// <summary>
		/// Parses every file in <paramref name="filesToParse"/>, writing one parsed XML
		/// document per file plus three summary XML files (file list, term-id map,
		/// term-document-frequency map) into the output directory. Errors on individual
		/// files are reported through <see cref="ReportProgressEvent"/> and do not stop
		/// the batch.
		/// </summary>
		/// <param name="filesToParse">list of HTML file paths (strings)</param>
		public void ProceedParsing(IList filesToParse)
		{
			DocumentHtmlParser.IsSaveLinkAsText = false;
			Slaks.ParserUtil.ParseEventArgs parsingArgs = new Slaks.ParserUtil.ParseEventArgs();
			Slaks.Web.Parser.Utils.LocalPageDownload.IsEncodingDetectionForced = true;

			// global term -> id hash, shared by all documents of this run
			m_term2id = new TermIdInfoMap();
			m_idCounter = 0;

			ParsedFiles parsedFiles = new ParsedFiles();

			foreach (string file in filesToParse)
			{
				try
				{
					DocumentHtmlParser parser = new DocumentHtmlParser(file, m_encoding);
					parser.ParseMe();
					string parsedFile = SaveParsedFile(m_outputPath, file, parser, new WordnetDocument());
					parsingArgs.Message = file + " was parsed successfully at time " + DateTime.Now.ToString();
					ReportProgress(parsingArgs);

					parsedFiles.Files.Add(parsedFile);
				}
				catch (Exception ex)
				{
					parsingArgs.Message = "error occurred while parsing " + file + " file at time " + DateTime.Now.ToString() + " " + ex.Message + " " + ex.Source;
					ReportProgress(parsingArgs);
				}
			}

			TermIdInfo termIdInfo = new TermIdInfo();
			termIdInfo.TermIdDictionary = m_term2id;

			parsedFiles.XmlWrite(CombinePath(m_outputPath, "parsed_file_list.xml"));
			termIdInfo.XmlWrite(CombinePath(m_outputPath, "term_id_info.xml"));
			TermDocumentFrequencySerialization.WriteXml(CombinePath(m_outputPath, "term_document_frequency_info.xml"), m_id2DocumentFrequency.Values.GetEnumerator());
		}

		/// <summary>
		/// Raises <see cref="ReportProgressEvent"/> safely: the original code invoked the
		/// event directly and threw NullReferenceException when no handler was attached.
		/// </summary>
		private void ReportProgress(Slaks.ParserUtil.ParseEventArgs args)
		{
			Slaks.ParserUtil.ParserHandler handler = ReportProgressEvent;
			if (handler != null) handler(this, args);
		}

		/// <summary>
		/// Joins a directory and a file name with a backslash, tolerating a trailing
		/// backslash (and an empty directory, which previously crashed the indexer check).
		/// </summary>
		private static string CombinePath(string directory, string filename)
		{
			if (directory.Length > 0 && directory[directory.Length - 1] == '\\') return directory + filename;
			return directory + "\\" + filename;
		}

		/// <summary>
		/// Fills <paramref name="document"/> from the parsed HTML (title, meta names,
		/// body text, anchors), computes document-level statistics and most-frequent-term
		/// ids per section, and serializes the document to its XML file.
		/// </summary>
		/// <exception cref="InvalidOperationException">the parse produced no nodes</exception>
		private void ParseDocument(WordnetDocument document, DocumentHtmlParser parser)
		{
			int sentenceCount = 0;
			int totalTermCount = 0;
			// per-document id -> node map (the global map in m_term2id spans all documents)
			Hashtable id2NodeLocal = new Hashtable();
			ReadTitle(document, parser.Header, ref sentenceCount, ref totalTermCount, id2NodeLocal);
			ReadMetaNames(document, parser.Header, ref sentenceCount, ref totalTermCount, id2NodeLocal);
			ReadStructures(document, parser.RootStructure, ref sentenceCount, ref totalTermCount, id2NodeLocal);
			ReadAnchors(document, parser.RootStructure, ref sentenceCount, ref totalTermCount, id2NodeLocal);

			if (document.Nodes.Count == 0)
				throw new InvalidOperationException(document.FileName + " was parsed but no nodes where constructed ");

			document.TotalSentenceCount = sentenceCount;
			document.UniqueTermCount = document.Nodes.Count;
			document.TotalTermCount = totalTermCount;

			document.MostFrequencyInfo.TotalMostFrequentId = this.GetTotalMostFrequentTermId(document);

			long id = this.GetMostFrequentTermIdBySection(document, Section.SectionType.Text);
			document.MostFrequencyInfo.SectionType2IdMap.Add(Grammar.SectionType.TextTypeAttribute, id);

			id = this.GetMostFrequentTermIdBySection(document, Section.SectionType.Link);
			document.MostFrequencyInfo.SectionType2IdMap.Add(Grammar.SectionType.LinkTypeAttribute, id);

			id = this.GetMostFrequentTermIdBySection(document, Section.SectionType.Title);
			document.MostFrequencyInfo.SectionType2IdMap.Add(Grammar.SectionType.TitleTypeAttribute, id);

			WordnetDocument2Xml.WriteXml(document, document.FileName, System.Text.Encoding.UTF8);
		}

		/// <summary>
		/// Returns the id of the node with the highest total term frequency,
		/// or -1 when the document has no nodes.
		/// </summary>
		private long GetTotalMostFrequentTermId(WordnetDocument document)
		{
			long id = -1;
			long maxFrequency = long.MinValue;

			foreach (WordnetNode node in document.Nodes)
			{
				if (node.TotalTermFrequency > maxFrequency)
				{
					maxFrequency = node.TotalTermFrequency;
					id = node.Id;
				}
			}

			return id;
		}

		/// <summary>
		/// Returns the id of the node with the highest term frequency within the given
		/// section type, or -1 when no node has such a section.
		/// </summary>
		private long GetMostFrequentTermIdBySection(WordnetDocument document, Section.SectionType sectionType)
		{
			long id = -1;
			long maxFrequency = long.MinValue;

			foreach (WordnetNode node in document.Nodes)
			{
				Section section = node.GetSectionByType(sectionType);
				if (section == null) continue;
				if (section.TermFrequency > maxFrequency)
				{
					maxFrequency = section.TermFrequency;
					id = node.Id;
				}
			}

			return id;
		}

		/// <summary>
		/// Derives the parsed-output file name from the original file name
		/// ("dir\page.html" -> "page_html_parsed.xml" in outputPath), runs the document
		/// parse, and returns the output file name. Exceptions from parsing propagate
		/// to the caller (the original wrapped them in a no-op try/catch-rethrow).
		/// </summary>
		private string SaveParsedFile(string outputPath, string originalFile, DocumentHtmlParser parser, WordnetDocument document)
		{
			int index = originalFile.LastIndexOf('\\');

			// strip the directory, then turn '.' into '_' so the extension survives in the name
			string filename = originalFile.Substring(index + 1, originalFile.Length - index - 1);
			filename = filename.Replace('.', '_');
			filename += "_parsed.xml";

			filename = CombinePath(outputPath, filename);
			document.FileName = filename;

			this.ParseDocument(document, parser);

			return filename;
		}

		/// <summary>
		/// Tokenizes the buffer currently loaded into m_bufferReaderCore, feeding terms
		/// into ProcessTerm and tracking sentence boundaries. <paramref name="state"/> and
		/// <paramref name="currentNode"/> are passed by ref so callers that read several
		/// buffers in a row (meta names, anchors) keep the exact carry-over behavior of
		/// the original inline loops.
		/// </summary>
		private void ReadCurrentBuffer(WordnetDocument document, Section.SectionType sectionType, ref ReaderState state, ref WordnetNode currentNode, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
		{
			int place = 0; // 0-based position of the next term inside the current sentence

			while (!m_bufferReaderCore.Eof())
			{
				string nextTerm = m_bufferReaderCore.NextTerm(ref state);
				if (state == ReaderState.Term)
				{
					this.ProcessTerm(nextTerm, sentenceCount, ref place, ref currentNode, sectionType, id2Node, ref totalTermCount, document);
				}
				else if (state == ReaderState.EndOfSentencePunctuation)
				{
					place = 0;
					++sentenceCount;
					currentNode = null;
				}
				else if (state == ReaderState.MiddleSentencePunctuation)
				{
					// a comma/semicolon breaks the term chain but not the sentence
					currentNode = null;
				}
			}

			// a buffer that did not end with sentence punctuation still counts as a sentence
			if (state != ReaderState.EndOfSentencePunctuation) ++sentenceCount;
		}

		/// <summary>
		/// Reads the body text of an HTML structure into the document (Text section).
		/// </summary>
		public void ReadStructures(WordnetDocument document, HtmlStructure structure, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
		{
			if (structure == null) return;

			ReaderState state = ReaderState.None;
			WordnetNode currentNode = null;

			m_bufferReaderCore.AssingNewBuffer(structure.TextArray);
			ReadCurrentBuffer(document, Section.SectionType.Text, ref state, ref currentNode, ref sentenceCount, ref totalTermCount, id2Node);
		}

		/// <summary>
		/// Reads the page title into the document (Title section).
		/// </summary>
		public void ReadTitle(WordnetDocument document, HtmlHeader header, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
		{
			if (header == null) return;
			if (header.Title == null || header.Title == String.Empty) return;

			ReaderState state = ReaderState.None;
			WordnetNode currentNode = null;

			m_bufferReaderCore.AssingNewBuffer(header.Title);
			ReadCurrentBuffer(document, Section.SectionType.Title, ref state, ref currentNode, ref sentenceCount, ref totalTermCount, id2Node);
		}

		/// <summary>
		/// Reads the meta-name entries into the document. NOTE(review): meta names are
		/// indexed under SectionType.Title, matching the original code — confirm this is
		/// intentional rather than a copy/paste of the title loop.
		/// </summary>
		public void ReadMetaNames(WordnetDocument document, HtmlHeader header, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
		{
			if (header == null) return;

			ReaderState state = ReaderState.None;
			WordnetNode currentNode = null; // carries over between meta names, as in the original

			IList metanameList = header.Metaname;

			for (int i = 0; i < metanameList.Count; i++)
			{
				m_bufferReaderCore.AssingNewBuffer(metanameList[i].ToString());
				ReadCurrentBuffer(document, Section.SectionType.Title, ref state, ref currentNode, ref sentenceCount, ref totalTermCount, id2Node);
			}
		}

		/// <summary>
		/// Reads the anchor (link) texts into the document (Link section).
		/// </summary>
		public void ReadAnchors(WordnetDocument document, HtmlStructure structure, ref int sentenceCount, ref int totalTermCount, Hashtable id2Node)
		{
			if (structure == null) return; // the original dereferenced structure.Anchors without this guard

			IList anchorList = structure.Anchors;
			if (anchorList == null) return;

			ReaderState state = ReaderState.None;
			WordnetNode currentNode = null; // carries over between anchors, as in the original

			for (int i = 0; i < anchorList.Count; i++)
			{
				string link = ((HtmlAnchor)anchorList[i]).Text;
				if (link == null) continue;

				m_bufferReaderCore.AssingNewBuffer(link);
				ReadCurrentBuffer(document, Section.SectionType.Link, ref state, ref currentNode, ref sentenceCount, ref totalTermCount, id2Node);
			}
		}

		/// <summary>
		/// Processes one term: normalizes it against WordNet, files stop words as
		/// connecting words of the current sentence, and for regular terms updates the
		/// global term-id/frequency maps, creates or reuses the document node, links the
		/// previous term's sentence to this node, and records the term occurrence.
		/// Terms WordNet does not recognize are discarded.
		/// </summary>
		private void ProcessTerm(string term,
								 int sentenceNumber,
								 ref int currentPlace,
								 ref WordnetNode currentNode,
								 Section.SectionType sectionType,
								 Hashtable id2NodeLocal,
								 ref int totalTermCount,
								 WordnetDocument document)
		{
			// find the term in the wordnet dictionary with morphological changes
			WnLexicon.WordInfo wordinfo = WnLexicon.Lexicon.FindWordInfo(term, true);
			if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return;
			if (wordinfo.text != String.Empty) term = wordinfo.text;

			Wnlib.PartsOfSpeech partOfSpeech = wordinfo.partOfSpeech;
			string tempTerm = MakeBasicAdditionalStemming(term, ref partOfSpeech);
			if (tempTerm != null) term = tempTerm;

			Section currentSection = (currentNode == null) ? null : currentNode.GetSectionByType(sectionType);
			Sentence sentence = (currentSection == null) ? null : ((currentPlace > 0) ? currentSection.GetSentence(sentenceNumber, currentPlace - 1) : null);

			if (MDIDoc.ISStopword(term))
			{
				// a stop word matters only as a connecting word inside an already-started chain
				if (currentNode == null) return;
				if (sentence == null) return;
				sentence.AddConnectingWord(term);
				return;
			}

			++totalTermCount;

			WordnetNode node;

			TermIdInfoElement termIdInfoElement = (m_term2id.Contains(term) == true) ? m_term2id[term] : null;
			if (termIdInfoElement == null) // this term is not present in the global hash
			{
				++m_idCounter;
				termIdInfoElement = new TermIdInfoElement(term, m_idCounter, 1);
				m_term2id.Add(term, termIdInfoElement);

				// first global occurrence: also register its document-frequency element
				node = CreateDocumentNode(m_idCounter, term, wordinfo, partOfSpeech, id2NodeLocal, document, true);
			}
			else // the term was found in the global hash
			{
				termIdInfoElement.Frequency += 1;

				// info about term document frequency (absent for ids allocated via AddSynonym)
				if (m_id2DocumentFrequency.Contains(termIdInfoElement.Id))
					m_id2DocumentFrequency[termIdInfoElement.Id].AddDocument(document.FileName);

				long id = termIdInfoElement.Id;
				node = (id2NodeLocal.ContainsKey(id)) ? (WordnetNode)id2NodeLocal[id] : null;

				// known globally but first occurrence in this document
				if (node == null)
					node = CreateDocumentNode(id, term, wordinfo, partOfSpeech, id2NodeLocal, document, false);
			}

			// chain: previous term's sentence entry points at this node
			if (sentence != null) sentence.NextId = node.Id;

			currentNode = node;

			this.AddSentenceToNode(node, sentenceNumber, currentPlace, sectionType);
			++currentPlace;
		}

		/// <summary>
		/// Creates a new document node for the given id/term, registers it in the local
		/// map and the document, fills its sense counts and (for nouns) its synonyms.
		/// When <paramref name="registerDocumentFrequency"/> is true a fresh
		/// document-frequency element is created for the id as well.
		/// </summary>
		private WordnetNode CreateDocumentNode(long id, string term, WnLexicon.WordInfo wordinfo, Wnlib.PartsOfSpeech partOfSpeech, Hashtable id2NodeLocal, WordnetDocument document, bool registerDocumentFrequency)
		{
			WordnetNode node = WordnetNode.CreateNewNode();
			node.Id = id;
			node.Term = term;

			id2NodeLocal.Add(id, node);
			document.AddNode(node.Id, node);

			if (registerDocumentFrequency)
			{
				TermDocumentFrequencyElement element = new TermDocumentFrequencyElement(node.Id);
				element.AddDocument(document.FileName);
				m_id2DocumentFrequency.Add(node.Id, element);
			}

			node.SenseInfo = BuildSenses(wordinfo);

			if (partOfSpeech == Wnlib.PartsOfSpeech.Noun)
				this.FindSynonyms(node, ref m_idCounter);

			return node;
		}

		/// <summary>
		/// Copies the per-part-of-speech sense counts from a WordNet lookup into a
		/// Senses record (the loop was duplicated twice in the original ProcessTerm).
		/// </summary>
		private static Senses BuildSenses(WnLexicon.WordInfo wordinfo)
		{
			Senses senses = new Senses();
			Wnlib.PartsOfSpeech[] enums = (Wnlib.PartsOfSpeech[])Enum.GetValues(typeof(Wnlib.PartsOfSpeech));
			for (int i = 0; i < enums.Length; i++)
			{
				Wnlib.PartsOfSpeech pos = enums[i];

				if (pos == Wnlib.PartsOfSpeech.Unknown) continue;
				if (pos == Wnlib.PartsOfSpeech.Noun) senses.Nouns = wordinfo.senseCounts[i];
				else if (pos == Wnlib.PartsOfSpeech.Verb) senses.Verbs = wordinfo.senseCounts[i];
				else if (pos == Wnlib.PartsOfSpeech.Adj) senses.Adjectives = wordinfo.senseCounts[i];
				else if (pos == Wnlib.PartsOfSpeech.Adv) senses.Adverbs = wordinfo.senseCounts[i];
			}
			return senses;
		}

		/// <summary>
		/// Records one occurrence of the node at (sentenceNumber, termPlace) in the
		/// given section, creating the section on the node if necessary.
		/// </summary>
		private void AddSentenceToNode(WordnetNode node, int sentenceNumber, int termPlace, Section.SectionType sectionType)
		{
			Section currentSection = node.GetSectionByType(sectionType);
			if (currentSection == null)
			{
				currentSection = Section.CreateNewSection();
				currentSection.Type = sectionType;
				node.AddSection(sectionType, currentSection);
			}

			Sentence sentence = Sentence.CreateNewSentence();
			sentence.Number = sentenceNumber;
			sentence.Place = termPlace;

			currentSection.Sentences.Add(sentence);
		}

		/// <summary>
		/// Tries to strip a simple English suffix ("ed", "ing", "s") from the term and
		/// validates the stem against WordNet. Returns the stemmed term (updating
		/// <paramref name="pos"/> to the stem's part of speech) on success, the term
		/// unchanged when no suffix applies, or null when the stem is not a known word.
		/// BUGFIX: the original assigned Unknown to <paramref name="pos"/> before
		/// returning null, which made the caller skip synonym lookup for a word WordNet
		/// had already recognized; pos is now only updated on a successful stem.
		/// </summary>
		private string MakeBasicAdditionalStemming(string term, ref Wnlib.PartsOfSpeech pos)
		{
			if (term.Length <= 3) return null;

			WnLexicon.WordInfo wordinfo;

			if (term.EndsWith("ed"))
			{
				string newTerm = term.Substring(0, term.Length - 2);
				wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
				if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown)
				{
					// e.g. "used" -> "us" is unknown, retry with the trailing 'e' restored ("use")
					newTerm += 'e';
					wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
					if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return null;
				}
				pos = wordinfo.partOfSpeech;
				return newTerm;
			}

			if (term.EndsWith("ing"))
			{
				string newTerm = term.Substring(0, term.Length - 3);
				wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
				if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return null;
				pos = wordinfo.partOfSpeech;
				return newTerm;
			}

			if (term.EndsWith("s"))
			{
				string newTerm = term.Substring(0, term.Length - 1);
				wordinfo = WnLexicon.Lexicon.FindWordInfo(newTerm, true);
				if (wordinfo.partOfSpeech == Wnlib.PartsOfSpeech.Unknown) return null;
				pos = wordinfo.partOfSpeech;
				return newTerm;
			}

			// no recognized suffix: keep the term (and pos) as-is
			return term;
		}

		/// <summary>
		/// Runs the WordNet synonym/hypernym search for the node's term and attaches a
		/// Synonym entry per sense whose word list contributed at least one synonym id.
		/// Synonym terms get ids allocated through AddSynonym.
		/// </summary>
		private void FindSynonyms(WordnetNode node, ref long idCounter)
		{
			// the option table may not have contained the expected entry (see static ctor)
			if (synonymsOpt == null) return;

			Wnlib.Search search = new Wnlib.Search(node.Term, true, synonymsOpt.pos, synonymsOpt.sch, 0);

			if (search.senses == null) return;
			foreach (SynSet senses in search.senses)
			{
				foreach (Lexeme word in senses.words)
				{
					Synonym syn = new Synonym(node.Id, node);
					if (word.word != node.Term) this.AddSynonym(word.word, syn, ref idCounter); // root of the synonyms
					if (senses.senses == null)
					{
						if (syn.SynonymIdList().Count > 0) node.SynonymList.Add(syn);
						continue;
					}
					foreach (SynSet senses2 in senses.senses)
					{
						foreach (Lexeme word2 in senses2.words) // synonyms of one of the categories
						{
							if (word2.word != node.Term) this.AddSynonym(word2.word, syn, ref idCounter);
						}
					}

					if (syn.SynonymIdList().Count > 0) node.SynonymList.Add(syn);
				}
			}
		}

		/// <summary>
		/// Adds a synonym term to the Synonym entry, allocating a global id for it if
		/// the term has never been seen (with frequency 0 — it was not encountered in
		/// a document, only via WordNet). Very short, underscored or hyphenated
		/// compound terms are skipped.
		/// </summary>
		private void AddSynonym(string term, Synonym synonym, ref long idCounter)
		{
			if (term.Length <= 2) return;
			if (term.IndexOf("_") >= 0) return;
			if (term.IndexOf("-") >= 0) return;
			term = term.ToLower();

			TermIdInfoElement termIdInfoElement = (m_term2id.Contains(term) == true) ? m_term2id[term] : null;

			if (termIdInfoElement == null) // this term is not present in the global hash
			{
				++idCounter;
				termIdInfoElement = new TermIdInfoElement(term, idCounter, 0);
				m_term2id.Add(term, termIdInfoElement);
				synonym.AddSynonymId(idCounter);
			}
			else
			{
				synonym.AddSynonymId(termIdInfoElement.Id);
			}
		}
	}
}
