﻿#region ... Copyright Notice ...
/*
   Copyright 2008 Tyler Jensen

	Author: Tyler Jensen    http://www.tsjensen.com/blog

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Web;

namespace Atrax.Html
{
	internal class ContentExtractor
	{
		/// <summary>
		/// Runs the full extraction pipeline over a parsed document: slices each BODY
		/// into a content tree, then classifies, evaluates, groups and associates the
		/// slices. Returns the populated ExtractedContent.
		/// </summary>
		internal ExtractedContent Extract(HtmlDocument doc, string url)
		{
			string pageTld = GetTLD(url); //used later to tell internal links from external ones
			ExtractedContent extract = new ExtractedContent();
			Collection<HtmlTag> bodyTags = doc.GetList(DtdElement.BODY);
			foreach (HtmlTag btag in bodyTags)
			{
				if (btag.EndTag) continue; //only an opening BODY tag starts a slice
				ContentSlice body = GetContentSlice(btag, doc, pageTld, 0, null);
				if (body.Content.Count != 0 || body.Text.Trim().Length != 0) extract.Content.Add(body); //only add non-empty
			}

			//FIX: a document with no BODY tag (or an entirely empty body) adds no
			//slices; the pipeline below indexes Content[0] and previously threw
			//ArgumentOutOfRangeException. Return the empty extract instead.
			if (extract.Content.Count == 0) return extract;

			//NOTE(review): only the first body slice is processed by the pipeline;
			//confirm multi-BODY documents are not expected.
			Classify(extract, extract.Content[0]);
			Evaluate(extract);
			Group(extract);
			Associate(extract);
			return extract;
		}

		/// <summary>
		/// Walks the relevant slices in document order and partitions them into
		/// sequential blocks wherever the class-path pattern changes, then applies
		/// block-level filters (short lists, very short lines, duplicate lines) to
		/// drop boilerplate. Results land in extract.ContentBlocks.
		/// </summary>
		void Associate(ExtractedContent extract)
		{
			//first pass creates sequential list of paths? follow each until path changes and then add that path to the list
			pathSequence = new List<string>();
			contentSequence = new List<ContentSlice>();

			MapPathSequence(extract.Content[0]);
			extract.ClassPathsSequence = pathSequence;
			extract.ContentSequence = contentSequence;

			//determine where paths repeat/group, break on those
			/* 
			 * A A A
			 * AB AB AB
			 * ABC ABC ABC
			 * AB AB ABC ABC AB AB
			 * 
			 * pattern block rule
			 * if prev is same, keep in same block
			 * if prev is different and current exists in prev block but not in the current, new block (AB AB)
			 * if prev is different and current does not exist in prev block, keep in same block, unless Level is less than prev Level
			 */

			Dictionary<int, List<ContentSlice>> blocks = new Dictionary<int, List<ContentSlice>>();
			int currentBlockIndex = 0;
			blocks.Add(currentBlockIndex, new List<ContentSlice>());
			ContentSlice prevSlice = null; //last RELEVANT slice considered (irrelevant slices are skipped entirely)
			for (int i = 0; i < extract.ContentSequence.Count; i++)
			{
				ContentSlice currSlice = extract.ContentSequence[i];

				if (currSlice.Relevant)
				{
					List<ContentSlice> currBlock = blocks[currentBlockIndex];
					List<ContentSlice> prevBlock = currentBlockIndex > 0 ? blocks[currentBlockIndex - 1] : new List<ContentSlice>();
					//ContentSlice prevSlice = i > 0 ? extract.ContentSequence[i - 1] : null;

					string prevPath = prevSlice == null ? string.Empty : prevSlice.Path;
					string currPath = currSlice.Path;
					int prevLevel = prevSlice == null ? 0 : prevSlice.Level;
					int currLevel = currSlice.Level;

					//decide whether currSlice continues the current block or starts a new one
					//NOTE(review): the second rule below tests the CURRENT block, while the
					//comment block above says "exists in prev block" - confirm which is intended.
					bool keepInBlock = false;
					if (currPath == prevPath) 
						keepInBlock = true;
					else if (ExistsIn(currPath, currBlock))
						keepInBlock = false; //path already seen in this block: repetition, so break
					else if (!ExistsIn(currPath, prevBlock) && currLevel < prevLevel)
						keepInBlock = false; //new path but popping back up the tree: break
					else
						keepInBlock = true;

					if (keepInBlock)
					{
						blocks[currentBlockIndex].Add(currSlice);
					}
					else
					{
						currentBlockIndex++;
						blocks.Add(currentBlockIndex, new List<ContentSlice>());
						blocks[currentBlockIndex].Add(currSlice);
					}
					prevSlice = currSlice; //set previously considered slice
				}
			}

			//TODO - evaluate blocks for filter rules like lists, etc.
			//implement these type of rules for the block - marking the block as 	
			//         else if (max < 11 && avg + 2 > max && avg - 2 < min)  //eliminate short lists
				//   relevant = false;
				//else if (max < 11 && (avg < ((max - min) / 2))) //eliminate lists where avg less than the mid point of max and min
				//   relevant = false;

			//if (s.WordCount < 3 && hasProcessedFirstWithText) //dump all two word or less lines that are not the 
			//   s.Relevant = false;

			foreach (KeyValuePair<int, List<ContentSlice>> item in blocks)
			{
				//word-count statistics over the relevant slices of this block
				int itemCount = item.Value.Count;
				int sum = 0;
				int max = 0;
				int min = 99999999; //sentinel; reset to 0 below when no relevant slice was seen
				foreach (ContentSlice s in item.Value)
				{
					if (s.Relevant)
					{
						sum += s.WordCount;
						if (s.WordCount > max) max = s.WordCount;
						if (s.WordCount < min) min = s.WordCount;
					}
				}
				if (min == 99999999) min = 0;
				//NOTE(review): avg divides by the TOTAL slice count while sum/max/min only
				//cover relevant slices - confirm that mix is intended.
				double avg = item.Value.Count > 0 ? (double)sum / item.Value.Count : 0.0;

				string failureCode = string.Empty;
				bool relevant = true;
				if (itemCount > 3 && max < 11 && avg + 2 > max && avg - 2 < min)  //eliminate short lists
				{
					relevant = false;
					failureCode = "short list (130)";
				}
				else if (itemCount > 3 && max < 11 && (avg < ((max - min) / 2))) //eliminate lists where avg less than the mid point of max and min
				{
					relevant = false;
					failureCode = "avg less than mid point (135)";
				}

				if (!relevant)
				{
					//whole block failed a list filter: flag every slice in it
					foreach (ContentSlice s in item.Value)
					{
						s.Relevant = false;
						s.FailureCode = failureCode;
					}
				}
				else
				{
					for (int i = 0; i < item.Value.Count; i++)
					{
						//drop very short lines, except the first line of the block
						//NOTE(review): condition is WordCount < 4 but the failure message
						//says "< 3 words" - one of the two is off by one.
						if (i > 0 && item.Value[i].WordCount < 4)
						{
							item.Value[i].Relevant = false;
							item.Value[i].FailureCode = "< 3 words (152)";
						}

						if (item.Value[i].Relevant)
						{
							//check for 3 or more lines that are duplicates - make those lines irrelevant
							string t = item.Value[i].Text;
							int dupeCount = 0;
							foreach (ContentSlice s in item.Value)
							{
								if (s.Text == t) dupeCount++;
							}
							if (dupeCount > 2)
							{
								item.Value[i].Relevant = false;
								item.Value[i].FailureCode = "dupeCount > 2 (168)";
							}
						}
					}
				}
			}
			extract.ContentBlocks = blocks;			
		}

		/// <summary>
		/// True when any slice in the given block already carries this class path.
		/// </summary>
		bool ExistsIn(string currPath, List<ContentSlice> prevBlock)
		{
			//LINQ Any() replaces the hand-rolled search loop (System.Linq is already imported)
			return prevBlock.Any(s => s.Path == currPath);
		}

		//flattened, in-document-order views of the slice tree; rebuilt by Associate()
		List<string> pathSequence = new List<string>();
		List<ContentSlice> contentSequence = new List<ContentSlice>();

		//pre-order walk: records every slice, plus the path of each relevant one
		void MapPathSequence(ContentSlice slice)
		{
			if (slice.Relevant)
			{
				pathSequence.Add(slice.Path);
			}
			contentSequence.Add(slice);
			foreach (ContentSlice child in slice.Content)
			{
				MapPathSequence(child);
			}
		}

		//map of unique class paths discovered during the last Group() run
		Dictionary<string, PathToken> paths = new Dictionary<string, PathToken>();

		//builds the unique class-path map for the extract (relevant slices only)
		void Group(ExtractedContent extract)
		{
			Dictionary<string, PathToken> freshMap = new Dictionary<string, PathToken>();
			paths = freshMap; //MapPaths fills the instance field as it recurses
			MapPaths(extract.Content[0], string.Empty, 0);
			extract.ClassPaths = freshMap;
		}

		/// <summary>
		/// Recursively builds a "|"-separated class path (one ELEMENT+class segment per
		/// relevant ancestor, including the slice itself) down to every leaf slice,
		/// registers each unique path in the paths map, and stamps the leaf's path onto
		/// the leaf and all of its ancestors.
		/// </summary>
		void MapPaths(ContentSlice slice, string slicePath, int relevantCount)
		{
			//create list of unique tree paths to use for grouping
			if (slice.Relevant) //only add to the slice path if it's relevant (this creates a relevant only group of slices)
			{
				string c = slice.ContainerDtdElement.ToString() + slice.Class;
				slicePath += c + "|";
				relevantCount++;
			}
			if (slice.Content.Count > 0)
			{
				foreach (ContentSlice s in slice.Content)
				{
					MapPaths(s, slicePath, relevantCount);
				}
			}
			else
			{
				//leaf slice: register the accumulated path
				slicePath = slicePath.TrimEnd('|');
				if (!paths.ContainsKey(slicePath)) paths.Add(slicePath, new PathToken() { Count = 0, RelevantCount = 0 });
				paths[slicePath].Count++;
				//NOTE(review): RelevantCount is overwritten (not accumulated) for every
				//leaf sharing this path - confirm last-writer-wins is intended.
				paths[slicePath].RelevantCount = relevantCount;
				paths[slicePath].Path = slicePath;

				//add slice path to this slice and parents as Group
				//NOTE(review): each leaf overwrites Path on ALL of its ancestors, so an
				//interior slice ends up holding the path of its last-visited leaf.
				slice.Path = slicePath;
				ContentSlice parentSlice = slice.Parent;
				while (parentSlice != null)
				{
					parentSlice.Path = slicePath;
					parentSlice = parentSlice.Parent;
				}
			}
		}

		/// <summary>
		/// First pass: per class group, computes word-count metrics (max/min/sum and
		/// count of slices with words). Second pass: uses those metrics plus per-slice
		/// heuristics (date text share, caption markers, no-alpha text, peg-word share)
		/// to mark groups and individual slices as not relevant.
		/// </summary>
		void Evaluate(ExtractedContent extract)
		{
			//initialize metrics
			foreach (KeyValuePair<string, List<ContentSlice>> item in extract.ClassifiedContent)
			{
				//initialize per-class accumulators on first sight of this class key
				if (!extract.ClassMaxWordCount.ContainsKey(item.Key)) extract.ClassMaxWordCount.Add(item.Key, 0);
				if (!extract.ClassMinWordCount.ContainsKey(item.Key)) extract.ClassMinWordCount.Add(item.Key, 0);
				if (!extract.ClassWordSum.ContainsKey(item.Key)) extract.ClassWordSum.Add(item.Key, 0);
				if (!extract.ClassWithWordsCount.ContainsKey(item.Key)) extract.ClassWithWordsCount.Add(item.Key, 0);
				if (!extract.ClassRelevant.ContainsKey(item.Key)) extract.ClassRelevant.Add(item.Key, true);

				int maxWordCount = 0;
				int minWordCount = 99999999; //sentinel; reset to 0 below if nothing qualified
				int wordSum = 0;
				int withWordsCount = 0;
				foreach (ContentSlice s in item.Value)
				{
					string text = s.Text.Trim();
					if (text.Length > 0 && InAllowed(s.ContainerDtdElement))
					{
						MatchCollection mc = regCount.Matches(s.Text); //word-token count
						int wordCount = mc.Count;
						s.WordCount = wordCount;
						if (wordCount > maxWordCount) maxWordCount = wordCount;
						if (wordCount < minWordCount) minWordCount = wordCount;
						wordSum += wordCount;
						withWordsCount++;
					}
					else
					{
						//NOTE(review): TABLE/TR container slices land here too (InAllowed
						//returns false for them) even when their text is non-empty, yet
						//the failure code still says "empty text".
						s.Relevant = false; //empty text is not relevant
						s.FailureCode = "empty text (274)";
					}
				}
				if (minWordCount == 99999999) minWordCount = 0;
				extract.ClassWithWordsCount[item.Key] = withWordsCount;
				extract.ClassMaxWordCount[item.Key] = maxWordCount;
				extract.ClassMinWordCount[item.Key] = minWordCount;
				extract.ClassWordSum[item.Key] = wordSum;
			}

			//evaluate metrics and tag slices as NOT Relevant if the metrics fall into a specific
			//failure pattern (empty group, no-variance repeats, short lists)
			foreach (KeyValuePair<string, List<ContentSlice>> item in extract.ClassifiedContent)
			{
				int itemCount = item.Value.Count;
				int max = extract.ClassMaxWordCount[item.Key];
				int min = extract.ClassMinWordCount[item.Key];
				int sum = extract.ClassWordSum[item.Key];
				int count = extract.ClassWithWordsCount[item.Key];
				double avg = count == 0 ? 0.0 : (double)sum / count;

				string failureCode = string.Empty;
				bool relevant = true;
				if (sum == 0) //EMPTY
				{
					relevant = false;
					failureCode = "sum == 0 (298)";
				}
				else if (itemCount > 2 && avg == max && avg == min) //REPEATS or NOVAR
				{
					relevant = false;
					failureCode = "itemCount > 2 && avg == max && avg == min (303)";
				}
				else if (itemCount > 2 && max < 11 && avg + 2 > max && avg - 2 < min)  //eliminate short lists
				{
					relevant = false;
					failureCode = "itemCount > 2 && max < 11 && avg + 2 > max && avg - 2 < min (308)";
				}
				else if (itemCount > 2 && max < 11 && (avg < ((max - min) / 2))) //eliminate lists where avg less than the mid point of max and min
				{
					relevant = false;
					failureCode = "itemCount > 2 && max < 11 && (avg < ((max - min) / 2)) (313)";
				}

				if (!relevant)
				{
					extract.ClassRelevant[item.Key] = false; //set metadata value
					//now flag each slice
					foreach (ContentSlice s in item.Value)
					{
						s.Relevant = false;
						s.FailureCode = failureCode;
					}
				}
				else
				{
					//check for relevance based on data pulled from content slices using attributes of each slice
					foreach (ContentSlice s in item.Value)
					{
						//1. check for date patterns and extract date - add date to 
						if (s.Text.Length > 0)
						{
							Match dm = regDateTimePatterns.Match(s.Text);
							if (dm.Success)
							{
								if (dm.Value.Length < 100) //throw out if month.year match has too much in between
								{
									s.DateText = dm.Value; //set the datetext value with any date pattern found in the text for latter analysis
									MatchCollection dw = regCount.Matches(dm.Value);
									int dtcount = dw.Count;
									//mark not relevant if date words are 15% or more of total words for this slice
									double pct = s.WordCount > 0 ? (double)dtcount / s.WordCount : 0.99;
									if (pct >= 0.15)
									{
										s.Relevant = false; //if date is >= 15% of the words, kill it
										s.FailureCode = "pct >= 0.15  (347)";
									}
								}
							}
						}

						if (s.Relevant)
						{
							//image caption pattern, e.g.: # Stanley Kamel (© Jeffrey Mayer/WireImage.com) #
							if (s.Text.StartsWith("#") && s.Text.EndsWith("#") && s.WordCount < 6)
							{
								s.Relevant = false;
								s.FailureCode = "# # (359)";
							}
							else
							{

								//[^a-z]  (when there are no alpha characters, just numbers and punctuation)
								Match m = regAnyAlpha.Match(s.Text);
								if (!m.Success)
								{
									s.Relevant = false;
									s.FailureCode = "no alpha chars (369)";
								}
								else
								{
									//check for 10%+ peg words 
									MatchCollection pmc = regPegWords.Matches(s.Text);
									int pcount = pmc.Count;
									double pegpct = s.WordCount > 0 ? (double)pcount / s.WordCount : 0.99;
									if (pegpct >= 0.10)
									{
										s.Relevant = false;
										s.FailureCode = "pegpct >= 0.10 (380)";
									}
								}
							}
						}
					}
				}
			}
		}

		//words that suggest navigation/boilerplate rather than article content;
		//slices whose text is 10%+ peg words are discarded in Evaluate()
		static string pegwords = @"(\b(click|also|help|email|stories|download|add|edit|delete|city|state|zip|section|figure|before|after|next|previous|prev|back|forward|start|beginning|beg|end|finish)\b)";
		static Regex regPegWords = new Regex(pegwords, RegexOptions.Compiled | RegexOptions.IgnoreCase);

		//month names, abbreviated and full, as whole words; NOTE this pattern contains
		//a TOP-LEVEL alternation, so it must be wrapped in a group when concatenated
		static string months = @"(\b(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|sept)\b)"
			+ @"|(\b(january|february|march|april|may|june|july|august|september|october|november|december)\b)";

		static Regex regMonths = new Regex(months, RegexOptions.Compiled | RegexOptions.IgnoreCase);

		//common date layouts.
		//FIX: "months" was previously concatenated without a wrapping group, so its
		//top-level "|" split the day/month/year alternatives in half (a bare month
		//abbreviation matched as a "date"); it is now wrapped in (?: ... ).
		//FIX: the "28 june 2002" and "june 2002" alternatives had no whitespace
		//between the day/month/year tokens, so the very examples in the comments
		//could never match those branches; \s+ added.
		static Regex regDateTimePatterns = new Regex(@"\b[0-9]{2}/[0-9]{2}/[12][0-9]{3}\b"  //12/31/1965 or 31/12/1965
			+ @"|\b[0-9]{2}/[0-9]{2}/[0-9]{2}\b" //12/31/65
			+ @"|\b[0-9]{2}\\[0-9]{2}\\[12][0-9]{3}\b"  //12\31\1965 or 31\12\1965
			+ @"|\b[0-9]{2}\\[0-9]{2}\\[0-9]{2}\b" //12\31\65
			+ @"|\b[0-9]{2}\s+(?:" + months + @")\s+[12][0-9]{3}\b" //28 june 2002
			+ @"|(?:" + months + @")\s+[12][0-9]{3}\b" //june 2002
			+ @"|(?:" + months + @").+?[12][0-9]{3}\b" //june of 2002  or december . 1999
			+ @"|(?:" + months + @").[0-9]{2},.[12][0-9]{3}\b" //April 09, 2008
			, RegexOptions.Compiled | RegexOptions.IgnoreCase);
		
		//at least one ASCII letter anywhere in the text
		static Regex regAnyAlpha = new Regex(@"[a-zA-Z]", RegexOptions.Compiled);

		//containers whose raw text must not contribute to the word metrics;
		//equivalent to the original switch: only TABLE and TR are disallowed
		private bool InAllowed(DtdElement dtdElement)
		{
			bool excluded = dtdElement == DtdElement.TABLE || dtdElement == DtdElement.TR;
			return !excluded;
		}

		//word-token counter used throughout the metric calculations
		static Regex regCount = new Regex(@"\b\S+?\b", RegexOptions.Compiled);

		/// <summary>
		/// Depth-first walk that buckets every slice into extract.ClassifiedContent,
		/// keyed by container element name + class attribute (e.g. "DIVstory").
		/// </summary>
		void Classify(ExtractedContent extract, ContentSlice slice)
		{
			string key = slice.ContainerDtdElement.ToString() + slice.Class;
			//single TryGetValue lookup instead of ContainsKey + Add + indexer (three lookups)
			List<ContentSlice> bucket;
			if (!extract.ClassifiedContent.TryGetValue(key, out bucket))
			{
				bucket = new List<ContentSlice>();
				extract.ClassifiedContent.Add(key, bucket);
			}
			bucket.Add(slice);
			foreach (ContentSlice child in slice.Content)
			{
				Classify(extract, child);
			}
		}

		/// <summary>
		/// Builds a ContentSlice for the given open tag: accumulates its text (PCDATA,
		/// image alt/title markers), counts links/images/scripts/bold runs and BRs,
		/// and recurses into nested content elements (see ContentDtds).
		/// </summary>
		private ContentSlice GetContentSlice(HtmlTag tag, HtmlDocument doc, string pageTld, int parentLevel, ContentSlice parent)
		{
			StringBuilder sb = new StringBuilder();
			ContentSlice cs = new ContentSlice() { ContainerDtdElement = tag.Element, Level = parentLevel + 1, Parent = parent };

			//class attribute is preferred as the slice class, then id, then style
			Dictionary<string, string> atts = doc.GetAttributes(tag);
			if (atts.ContainsKey("class"))
				cs.Class = atts["class"];
			else if (atts.ContainsKey("id"))
				cs.Class = atts["id"];
			else if (atts.ContainsKey("style"))
				cs.Class = atts["style"];

			HtmlTag next = doc.Tags.Next(tag);
			int endCount = 1;
			while (next != null)
			{
				if (next.Element == tag.Element && next.EndTag)
				{
					endCount--;
					if (endCount == 0) break; //we've reached the close of this tag
				}
				//FIX: removed an unreachable "else if" here whose condition duplicated
				//the one above; nested open tags of the same element are consumed by the
				//recursive ContentDtds branch below (which skips past their close tags),
				//so no explicit endCount increment is required.

				//process if not an end tag
				if (!next.EndTag)
				{
					if (ContentDtds.Contains(next.Element))
					{
						HtmlTag nestTag = next;
						ContentSlice nestedSlice = GetContentSlice(nestTag, doc, pageTld, cs.Level, cs);  //recursive call if the next tag is a new content tag
						if (nestedSlice.Content.Count != 0 || nestedSlice.Text.Trim().Length != 0) cs.Content.Add(nestedSlice); //only add non-empty
						//move to the end tag of this new content tag
						int nestCount = 0;
						next = doc.Tags.Next(nestTag);
						while (next != null)
						{
							if (next.Element == nestTag.Element && !next.EndTag)
								nestCount++;
							else if (next.Element == nestTag.Element && next.EndTag)
							{
								if (nestCount == 0)
									break; //close tag found
								else
									nestCount--; //closed a nested tag
							}
							next = doc.Tags.Next(next);
						}
						if (next == null) break; //break out if close tag is null because there was no close tag
					}
					else if (next.Element == DtdElement.NOSCRIPT)
					{
						next = doc.GetCloseTag(next); //skip no script content
						if (next == null) break;
					}
					else
					{
						if (next.Element == DtdElement.PCDATA)
						{
							string text = HttpUtility.HtmlDecode(doc.ReadSlice(next.Slice));
							text = regUniformSpaces.Replace(text, " "); //normalize exotic unicode spaces to a simple \u0020
							if (text.Length > 0)
							{
								sb.Append(text); //only add lines with text - each line is considered to be a paragraph
							}
						}
						else if (next.Element == DtdElement.BR)
						{
							//FIX: doc.Tags.Next can return null when <br> is the last tag
							//in the document; previously n was dereferenced unconditionally.
							HtmlTag n = doc.Tags.Next(next);
							if (n != null && n.Element == DtdElement.BR)
								sb.Append(Environment.NewLine); //double <br> acts as a paragraph break
							else
								sb.Append(" ");

							cs.BRCount++;
						}
						else if (next.Element == DtdElement.A && !next.EndTag)
						{
							//increment external or internal link count
							atts = doc.GetAttributes(next);
							if (atts.ContainsKey("href"))
							{
								try
								{
									string href = Uri.EscapeUriString(Uri.UnescapeDataString(atts["href"]));
									string linkTld = GetTLD(href);
									if (linkTld == pageTld)
										cs.InternalLinkCount++;
									else
										cs.ExternalLinkCount++;
								}
								catch
								{
									cs.InternalLinkCount++; //give credit to internal
								}
							}

							//get title text if next tag is an image
							//FIX: guard against a null next tag (anchor at end of document)
							HtmlTag n = doc.Tags.Next(next);
							if (n != null && n.Element == DtdElement.IMG)
							{
								if (atts.ContainsKey("title"))
								{
									if (atts["title"].Length > 0)
										sb.Append("# " + HttpUtility.HtmlDecode(atts["title"]) + " # ");
								}
							}
						}
						else if (next.Element == DtdElement.IMG && !next.EndTag)
						{
							//get alt or title text
							atts = doc.GetAttributes(next);
							if (atts.ContainsKey("alt"))
							{
								if (atts["alt"].Length > 0)
									sb.Append("# " + HttpUtility.HtmlDecode(atts["alt"]) + " # ");
							}
							else if (atts.ContainsKey("title"))
							{
								if (atts["title"].Length > 0)
									sb.Append("# " + HttpUtility.HtmlDecode(atts["title"]) + " # ");
							}

							cs.ImageCount++; //increment count
						}
						else if (next.Element == DtdElement.SCRIPT && !next.EndTag)
						{
							cs.ScriptCount++;
						}
						else if ((next.Element == DtdElement.B || next.Element == DtdElement.STRONG) && !next.EndTag)
						{
							cs.BoldCount++;
						}
					}
				}
				next = doc.Tags.Next(next);
			}
			//collapse every run of whitespace to a single space
			string all = regWhiteSpace.Replace(sb.ToString().Trim(), " ");
			cs.Text = all.Trim();
			return cs;
		}
		static Regex regWhiteSpace = new Regex(@"\s+", RegexOptions.Compiled);
		//NOTE(review): regColonSep is referenced only by previously commented-out code;
		//kept for now rather than deleting a field others may expect.
		static Regex regColonSep = new Regex(@"(?<=([a-zA-Z])):(?=([a-zA-Z0-9]))", RegexOptions.Compiled);

		/// <summary>
		/// Returns the registrable domain of an absolute URL, e.g. "example.com" or
		/// "sparxsystems.com.au"; returns null for IP-address hosts and for URLs that
		/// cannot be parsed as absolute URIs.
		/// </summary>
		string GetTLD(string url)
		{
			try
			{
				string host = new Uri(url, UriKind.Absolute).Host.ToLower();
				string[] labels = host.Split('.');
				int last = labels.Length - 1;

				//a numeric final label means the host is an IP address - no TLD
				int numeric = 0;
				if (int.TryParse(labels[last], out numeric)) return null;

				//two labels or fewer: the host itself is the registrable domain
				//(e.g. http://mydomain.com/jdjdjd/ == mydomain.com)
				if (labels.Length <= 2) return host;

				//country-code style such as sparxsystems.com.au: keep three labels
				bool countryStyle = labels[last].Length == 2
					&& (labels[last - 1] == "com" || labels[last - 1] == "net" || labels[last - 1] == "org")
					&& labels[last - 1] != labels[last];
				if (countryStyle)
					return labels[last - 2] + "." + labels[last - 1] + "." + labels[last];

				//generic style such as www.sparxsystems.com: keep the final two labels
				return labels[last - 1] + "." + labels[last];
			}
			catch
			{
				//relative or malformed URLs cannot be parsed as absolute URIs
				return null;
			}
		}
		//exotic unicode space / zero-width characters that are replaced with a plain
		//\u0020 before PCDATA text is accumulated (see GetContentSlice)
		static Regex regUniformSpaces = new Regex(@"\u205f|\u3000|\u1680|\u2002|\u2003|\u2004|\u2005|\u2006|\u2007|\u2008|\u2009|\u200b|\ufeff", RegexOptions.Compiled);

		//elements that start a new nested ContentSlice when encountered inside another
		//slice (block-level content containers)
		static List<DtdElement> ContentDtds = new List<DtdElement>(new DtdElement[] { 
			DtdElement.P, 
			DtdElement.DIV, 
			DtdElement.PRE, 
			DtdElement.TABLE, 
			DtdElement.TR, 
			DtdElement.TD, 
			DtdElement.BLOCKQUOTE,
			DtdElement.H1, 
			DtdElement.H2, 
			DtdElement.H3, 
			DtdElement.H4, 
			DtdElement.H5, 
			DtdElement.H6, 
			DtdElement.LI });
	}

	/// <summary>
	/// Aggregate statistics for one unique class path discovered by MapPaths.
	/// </summary>
	public class PathToken
	{
		//the "ELEMENTclass|ELEMENTclass|..." path this token describes
		public string Path { get; set; }
		//number of leaf slices that produced this exact path
		public int Count { get; set; }
		//relevant-ancestor count recorded for the path (assigned, not accumulated, by MapPaths)
		public int RelevantCount { get; set; }
	}
}
