﻿using System;
using System.IO;
using System.Net;
using System.Text.RegularExpressions;

 // post key/value pairs 



// This file implements classes to simplify screen scraping.

namespace Outfish {

	/// <summary>
	/// A web scraping utility designed to simplify web scraping, parsing, and validation.
	/// Primary abilities are: 
	///    - getting a WebResponse
	///    - getting a WebPage
	///    - saving a webpage to a file.
	///    - generating an event when transport or parse/validation exception occurs.
	/// ExceptionEvents and exceptions will all contain the REQUEST and CONTENT in the Data dictionary
	/// </summary>
	public class WebScraper : IWebScraper {

		#region constructor

		/// <summary>
		/// Creates a new WebScraper with the default timeout of 10 seconds.
		/// </summary>
		public WebScraper() {
			this.DefaultTimeout = 10000; // 10 seconds
		}

		/// <summary>
		/// Creates a new WebScraper.
		/// </summary>
		/// <param name="defaultTimeout">the timeout (in milliseconds) to use when one isn't provided with the request</param>
		public WebScraper(int defaultTimeout) {
			this.DefaultTimeout = defaultTimeout;
		}
		
		#endregion

		#region public properties
		
		/// <summary>
		/// Use the default .NET redirection mechanism (which misses some cookies).
		/// When this value is false (default), the scraper performs the redirection itself, updating the cookies.
		/// </summary>
		public bool UseFrameworkRedirect {get; set;}

		/// <summary>
		/// The cookies for the session.
		/// </summary>
		public CookieContainer CookieJar {
			get { return this._cookieJar; }
		}

		/// <summary>
		/// Clears all cookies.
		/// </summary>
		public void ClearCookies() {
			this._cookieJar = new CookieContainer();
		}

		/// <summary>Set to something like System.Net.CredentialCache.DefaultCredentials</summary>
		public ICredentials Credentials { get; set; }
		
		/// <summary>Whether requests ask the server to keep the connection open.  Default is true.</summary>
		public bool KeepAlive {
			get { return this._KeepAlive; }
			set { this._KeepAlive = value; }
		}

		/// <summary>Reader used to turn response bodies into strings.  Lazily created on first access.</summary>
		public ResponseReader ResponseReader{ 
			get{ if( this._responseReader == null ){ this._responseReader = new ResponseReader(); }
				return this._responseReader;
			}
			set{ this._responseReader = value; }
		}

		/// <summary>HTTP protocol version string, e.g. "1.0" or "1.1".  Default is "1.1".</summary>
		public string ProtocolVersion {
			get { return this._ProtocolVersion; }
			set { this._ProtocolVersion = value; }
		}

		/// <summary>
		/// If true, a WebException that carries a response swallows the exception and just returns the response text.
		/// If false, throws the WebException with Data["CONTENT"] and Data["REQUEST"] populated.
		/// </summary>
		public bool ReturnWebExceptionResponse{
			get{ return this._returnWebExceptionResponse; }
			set{ this._returnWebExceptionResponse =value; }
		}
		
		/// <summary>
		/// Get/Set the maximum forwards/redirections allowed before we throw an exception.
		/// </summary>
		public int MaxForwards {
			get { return this._MaxForwards; }
			set { this._MaxForwards = value; }
		}

		/// <summary>
		/// Gets the last web page requested.  Used as the Referer for the next request.
		/// For overriding referrer for a particular request, set it in the Request object.
		/// </summary>
		public string LastPage { get; private set; }

		/// <summary>Timeout in mS.  Default is 10000. (10 seconds)</summary>
		public int DefaultTimeout{ get; set; }
		
		#endregion

		#region public GetPage methods

		/// <summary>
		/// Gets web page where content type is not known, or we don't care.
		/// </summary>
		/// <returns>an HtmlPage, or a JavaScriptPage when the response content type contains "javascript"</returns>
		public WebPage GetPage(IScrapeRequest request){
			// BUGFIX: cast to WebPage, not HtmlPage - GetPage_Internal returns a
			// JavaScriptPage for javascript content types, so the old (HtmlPage)
			// cast would have thrown InvalidCastException for those responses.
			return (WebPage)this.GetPage_Internal( request, typeof(WebPage) );
		}

		/// <summary>
		/// Gets an object that parses and validates the response from an http request.
		/// </summary>
		public Page GetPage<Page>(IScrapeRequest request) where Page : WebPage, new() {
			return (Page)GetPage_Internal( request, typeof(Page) );
		}

		#endregion

		#region public Get Stream/File methods

		/// <summary>Writes the Web Response to an output stream.</summary>
		/// <param name="request">what to fetch; run through Coerce first</param>
		/// <param name="timeout">milliseconds before timeout</param>
		/// <param name="oStream">destination stream the response body is copied to</param>
		/// <param name="contentTypeRegex">expression to test the returned content type, or null for no testing</param>
		/// <param name="actionDescription">caller-supplied description; not used in this implementation - TODO confirm it can be removed</param>
		/// <returns>content type</returns>
		public string GetWebStream(IScrapeRequest request, int timeout, Stream oStream, Regex contentTypeRegex, string actionDescription ){
			request = this.Coerce( request );

			HttpWebResponse response = null;
			try {
				response = this.GetWebResponse(request, timeout);

				// validate content type before copying anything to the output stream
				if(		contentTypeRegex != null 
					&&	contentTypeRegex.IsMatch(response.ContentType) == false
				) {
					throw new LoadContentException("Content type did not match.");
				}

				using(System.IO.Stream responseStream = response.GetResponseStream()) {

					byte[] buf = new byte[4096];
					int bytesRead;
					while((bytesRead = responseStream.Read(buf, 0, buf.Length)) > 0) {
						oStream.Write( buf, 0, bytesRead );
					}

					return response.ContentType;
				}
					
			}
			catch(WebException wex){
				string content = wex.LoadResponseContent( this.ResponseReader );
			
				// if they want the exception content, give it to them
				// (only when there actually is content - consistent with GetHtmlString_Internal)
				if( content != null && this._returnWebExceptionResponse ) {
					return content;
				}

				AttachRequestResponse( wex, request, content );
				this.OnScrapeFailure( new ScrapeEventArgs( request, content, wex ) );
				throw;
			}
			finally {
				if(response != null) {
					response.Close();
				}
			}

		}

		/// <summary>
		/// Saves the document retrieved from the web response to a file.
		/// </summary>
		/// <param name="request">what to fetch; its Timeout is used when set, otherwise DefaultTimeout</param>
		/// <param name="fileName">Filename to save file as.</param>
		/// <param name="contentTypeRegex">Expression to test the returned content type, or null for no testing.</param>
		/// <param name="actionDescription">description passed through to GetWebStream</param>
		/// <returns>HTML formatted Content type (image/jpeg, application/xml, etc)</returns>
		/// <example>GetWebFile(request, "C:\\temp\\temp.html", new Regex("www.google.com"), null );</example>
		public string GetWebFile(IScrapeRequest request, string fileName, Regex contentTypeRegex, string actionDescription) {
			request = this.Coerce( request );
			
			int timeout = request.Timeout.HasValue ? request.Timeout.Value : this.DefaultTimeout;
			
			try{
				// BUGFIX: FileMode.Create (was Append) - "save file as" should overwrite,
				// not concatenate onto whatever a previous run left behind.
				using( System.IO.FileStream fileStream = new FileStream(fileName, FileMode.Create) ){
					return this.GetWebStream( request, timeout, fileStream, contentTypeRegex, actionDescription );
				}
			}
			catch(WebException wex){
				// NOTE(review): GetWebStream also raises OnScrapeFailure for WebExceptions,
				// so a transport failure fires the event twice here - confirm that is intended.
				string content = wex.LoadResponseContent( this.ResponseReader );
				AttachRequestResponse( wex, request, content );
				this.OnScrapeFailure( new ScrapeEventArgs( request, content, wex ) );
				throw;
			}
		}

		#endregion

		#region public events
		
		/// <summary>
		/// Occurs when Page cannot be scraped.
		/// Transport/Connection problems will contain System.Net.WebException.
		/// Parsing/Validate problems will contain LoadContentException.
		/// </summary>
		public event Action<object,ScrapeEventArgs> ScrapeFailure;

		/// <summary>
		/// Occurs for every string-type scraping event.
		/// Handlers may modify content to change the data returned from the scraper.
		/// </summary>
		/// <remarks>Any exceptions in event handlers will bubble up through calls to .GetPage</remarks>
		public event Action<object,ScrapeEventArgs> ScrapeStringEvent;

		/// <summary>Key under which the request text is stored in an exception's Data dictionary.</summary>
		public const string REQUEST_KEY = "REQUEST";

		/// <summary>Key under which the response source is stored in an exception's Data dictionary.</summary>
		public const string CONTENT_KEY = "RESPONSE_SOURCE";

		#endregion

		#region private static

		/// <summary>
		/// Collects cookies from the raw Set-Cookie header that HttpWebResponse failed to process properly.
		/// </summary>
		/// <remarks>Some sites (e.g. Countrywide) use cookies that HttpWebRequest fails to detect,
		/// so we parse the raw header manually.</remarks>
		/// <param name="response">the response whose Set-Cookie header is scanned</param>
		/// <returns>unexpired cookies present in the header but missing from response.Cookies</returns>
		private static CookieCollection GetMissingCookies(HttpWebResponse response) {

			// stores cookies that are not detected by response
			CookieCollection cc = new CookieCollection();

			string cookies_str = response.GetResponseHeader("Set-Cookie");

			// example header entry:
			//   res_countrywide_com=anakincookieookie;  domain=.countrywide.com; expires=Wed, 29-Mar-2006 21:01:06 GMT; path=/,
			MatchCollection matches = Regex.Matches(cookies_str, @"(.*?)=(.*?);\s?domain=(.*?);\s?expires=(.*?);\s?path=([^,]*),?");

			foreach(Match match in matches) {
				string name = match.Groups[1].Value;
				string value = match.Groups[2].Value;
				string domain = match.Groups[3].Value;
				string expires = match.Groups[4].Value;
				string path = match.Groups[5].Value;

				Cookie c = new Cookie(name, value, path, domain);
				// NOTE(review): DateTime.Parse will throw on unconventional expires formats - confirm acceptable
				c.Expires = DateTime.Parse(expires);

				if(response.Cookies[c.Name] == null && !c.Expired) {
					cc.Add(c);
				}
			}

			return cc;
		}

		[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Performance", "CA1810:InitializeReferenceTypeStaticFieldsInline", Justification = "We don't declare this static object so we can't initialize it there.")]
		static WebScraper() {
			ServicePointManager.ServerCertificateValidationCallback = new System.Net.Security.RemoteCertificateValidationCallback( ValidateServerCertificate );
		}

		/// <summary>
		/// invoked by the RemoteCertificateValidationDelegate to accept all certificates
		/// </summary>
		/// <remarks>SECURITY: this disables SSL certificate validation process-wide,
		/// allowing man-in-the-middle attacks.  Deliberate for scraping misconfigured
		/// sites, but flagged for review.</remarks>
		static bool ValidateServerCertificate(
			  object sender,
			  System.Security.Cryptography.X509Certificates.X509Certificate certificate,
			  System.Security.Cryptography.X509Certificates.X509Chain chain,
			  System.Net.Security.SslPolicyErrors sslPolicyErrors) {

			return true; // accept all

		}

		#endregion

		#region protected events
		
		/// <summary>
		/// Triggers the ScrapeFailure event.
		/// </summary>
		/// <param name="args">describes the request, content, and exception of the failed scrape</param>
		virtual protected void OnScrapeFailure( ScrapeEventArgs args ){
		
			// Not catching/swallowing exceptions because it is the handler's responsibility to not throw any.
			// If handler code does throw, then caller can deal with the mess.
			if( this.ScrapeFailure != null ){
				this.ScrapeFailure( this, args );
			}
			
		}
		
		/// <summary>
		/// Adds content and request to the exception's Data dictionary so they survive the throw.
		/// </summary>
		void AttachRequestResponse(Exception ex, IScrapeRequest request, string responseSource){
			ex.Data[REQUEST_KEY] = request.ToString();  // Request is not serializeable so convert it to something that is
			ex.Data[CONTENT_KEY] = responseSource;	// it should already be here for WebExceptions but other exception types still need it.
		}
		
		/// <summary>
		/// Allows host application to log/monitor any GetHtmlString type requests
		/// without building logging directly into the class (like log4net).
		/// </summary>
		/// <param name="request">the request that produced the content; must not be null</param>
		/// <param name="content">the scraped content; must not be null</param>
		/// <param name="pageType">the page type being scraped, or null when not applicable</param>
		/// <returns>content (possibly replaced by handlers) to make it easier to insert inline</returns>
		virtual protected string OnScrapeStringEvent( IScrapeRequest request, string content, Type pageType ){
			// its ok to throw exception here because we are not handling any exception
			if( request == null ){ throw new ArgumentNullException("request"); }
			if( content == null ){ throw new ArgumentNullException("content"); }
			
			ScrapeEventArgs args = new ScrapeEventArgs(request,content,pageType);
			
			// I have decided to not catch/swallow any exceptions because it is the event handler's responsibility to behave amiably.
			// If calling code throws the exception in the event handler, then calling code can deal with the consequences.
			if( this.ScrapeStringEvent != null ){
				this.ScrapeStringEvent( this, args );
			}
			// use whatever handlers replaced content with
			// can't think of a use case but might be good if we ever need to intercept...
			return args.Content;
		}

		/// <summary>
		/// Override this to provide automatic redirection.
		/// </summary>
		/// <param name="request">the request about to be executed</param>
		/// <example>replace http: with https:</example>
		/// <example>prepend https proxy such as psiphon host/fetch...</example>
		/// <returns>the request to actually execute (default: unchanged)</returns>
		virtual protected IScrapeRequest OnCoerce(IScrapeRequest request){
			return request;
		}
		
		private IScrapeRequest Coerce( IScrapeRequest request ){
			// wrap OnCoerce in a try{} so we can easily identify any exceptions user code causes.
			try{
				return this.OnCoerce( request );
			}
			catch(Exception ex){
				throw new Exception("Unable to Coerce request [" + request.ToString() + "]", ex );
			}
		}
		
		#endregion

		#region private methods

		// gets an html string and does all the exception stuff
		// regarding WebException
		string GetHtmlString_Internal(IScrapeRequest request, Type pageType, out string contentType) {
			request = this.Coerce( request );

			int timeout = request.Timeout.HasValue ? request.Timeout.Value : this.DefaultTimeout;

			try {
				HttpWebResponse response = this.GetWebResponse(request, timeout);
				contentType = response.ContentType;
				// if user's event handler has exception, it will bubble up here.
				// BUGFIX: pass pageType through (was hard-coded null) so string-event
				// handlers see which page type triggered the scrape.
				return this.OnScrapeStringEvent( request
					, this.ResponseReader.ReadAllText( response )
					, pageType 
				); 
			}
			catch(WebException wex){
				string content = wex.LoadResponseContent( this.ResponseReader );
				if( content != null && this._returnWebExceptionResponse ){
					contentType = "text/html"; // seems the most likely type.  Can I determine this from somewhere???
					return content;
				}
				// bad - trigger event and rethrow exception
				AttachRequestResponse( wex, request, content );
				this.OnScrapeFailure( new ScrapeEventArgs( request, content, pageType, wex ) );
				throw;
			}
		}

		/// <summary>
		/// Constructs an object that derives from web page and calls LoadContent on it.
		/// </summary>
		/// <param name="request">the request to execute</param>
		/// <param name="pageType">type that derives from WebPage</param>
		/// <returns>the constructed and loaded page</returns>
		WebPage GetPage_Internal(IScrapeRequest request, Type pageType){
			
			// Try to retrieve content
			// (internal will trigger Web-exception event)
			string contentType;
			string content = this.GetHtmlString_Internal(request,pageType, out contentType);
			
			// if type is WebPage(abstract class), pick concrete class
			if( pageType == typeof(WebPage) ){
				pageType = contentType.Contains("javascript")
					? typeof(JavaScriptPage)
					: typeof(HtmlPage);
			}
			
			WebPage webPage = (WebPage)System.Activator.CreateInstance( pageType );
			
			try{
				webPage.LoadContent( content );
			}
			catch(Exception ex){
				var vex = new LoadContentException( ex, pageType, request, content );
				// BUGFIX: attach to vex (the exception actually thrown, was attached to
				// the inner ex) so catchers get REQUEST/CONTENT in Data as the class promises.
				// No need to attach page type because that is part of the LoadContentException message.
				AttachRequestResponse( vex, request, content );
				this.OnScrapeFailure( new ScrapeEventArgs( request, content, pageType, vex ) );
				throw vex; 				
			}
			
			return webPage;
		}
		
		/// <summary>
		/// Executes the request, following redirects manually (unless UseFrameworkRedirect)
		/// and accumulating cookies, until a non-redirect response or MaxForwards is reached.
		/// </summary>
		HttpWebResponse GetWebResponse(IScrapeRequest scrapeRequest, int timeout) {

			int breakout = this._MaxForwards; // number of forwards before we die

			Uri uri = scrapeRequest.Uri; // used for manual forwarding
			IPostData postData = scrapeRequest.PostData;
			
			do {
				// build next request
				HttpWebRequest request = (HttpWebRequest)WebRequest.Create(uri);	// new request
				request.ProtocolVersion = new Version(this.ProtocolVersion);
				request.AllowAutoRedirect = this.UseFrameworkRedirect;	// we can do this manually
				request.CookieContainer = this._cookieJar;
				request.Accept = "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5";
				request.Headers["Accept-Language"] = "en-us,en;q=0.5";
				request.Expect = null;
				request.KeepAlive = this._KeepAlive;
				
				// Causes some pages to be unreadable	request.Headers["Accept-Encoding"] = "gzip,deflate";
				request.Headers["Accept-Charset"] = "ISO-8859-1,utf-8;q=0.7,*;q=0.7";
				request.UserAgent = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.3) Gecko/20070309 Firefox/2.0.0.3";
			
				// probably not all needed but doesn't hurt to be thorough
				request.Timeout = timeout;
				// Referrer - if null, fill in from last page requested
				request.Referer = scrapeRequest.Referrer 
					?? this.LastPage;  
				
				// Credentials
				if( this.Credentials != null ){ request.Credentials = this.Credentials; }
				
				// convert it to a post message if needed
				if(postData != null) {
					byte[] postBytes = postData.PostBytes;

					// set request headers as appropriate
					request.Method = "POST";
					request.ContentLength = postBytes.Length;
					request.ContentType = postData.ContentType;

					// feed post data into the request
					using(System.IO.Stream requestStream = request.GetRequestStream()) {
						requestStream.Write(postBytes, 0, postBytes.Length);
					}
				}

				// get response
				HttpWebResponse response = null;
				try {
					// get response
					response = (HttpWebResponse)request.GetResponse(); // could get WebException - timeout
					this.LastPage = response.ResponseUri.AbsoluteUri;
					// save cookies (for authentication)
					if(response.Cookies != null) {
						foreach(Cookie c in response.Cookies) {
							if(!c.Expired) {
								this._cookieJar.Add(c);
							}
						}
					}

					this._cookieJar.Add(WebScraper.GetMissingCookies(response));

					// if we are done forwarding, return final destination document
					if(scrapeRequest.IgnoreRedirection || response.Headers["Location"] == null) {
						return response;
					}

					// store next location (relative to the current uri)
					uri = new Uri(uri, response.Headers["Location"]);
					response.Close();
					response = null;
					postData = null; // redirects are GETs; don't re-post

				}
				catch {
					// cleanup response if this is an exception only
					if(response != null) {
						response.Close();
						response = null;
					}
					throw;
				}
				// don't close response in finally{} because we need it open for calling method

				// prevent lockup by bad forwarding pages
				// BUGFIX: compare with > 0 (was != 0) so a MaxForwards of zero or less
				// throws promptly instead of looping until the int wraps around.
			} while(--breakout > 0);

			throw new System.Net.WebException("Too many page forwarding");
		}
		
		#endregion

		#region private fields

		// stores cookies needed for this site
		CookieContainer _cookieJar = new CookieContainer();

		// HTTP protocol version; "1.1" is the best guess default
		string _ProtocolVersion = "1.1";

		// ask servers to keep connections open by default
		bool _KeepAlive = true;

		// redirect limit before GetWebResponse throws
		int _MaxForwards = 10;

		// false = throw the WebException instead of returning its response text
		bool _returnWebExceptionResponse = false;
		
		// lazily created by the ResponseReader property
		ResponseReader _responseReader;
		
		#endregion

	}



}