﻿using System;
using System.Net;
using System.Text.RegularExpressions;

namespace Outfish {

	
	/// <summary>
	/// Contract for a web scraper that issues requests (described by <see cref="IScrapeRequest"/>),
	/// tracks cookies and the last visited page, and returns parsed <see cref="WebPage"/> objects
	/// or saves raw responses to disk.
	/// </summary>
	public interface IWebScraper {
	
		/// <summary>
		/// Saves the document retrieved from the web response to a file, using the default timeout.
		/// </summary>
		/// <param name="request">Describes the web request to issue (Uri, method, any post data).</param>
		/// <param name="fileName">Filename to save the retrieved content as.</param>
		/// <param name="contentTypeRegex">Expression to test the returned content type against, or null for no testing.</param>
		/// <param name="actionDescription">Description of the action being performed — presumably used in logging or error messages; confirm with implementation.</param>
		/// <returns>The content type of the response (image/jpeg, application/xml, etc).</returns>
		string GetWebFile( IScrapeRequest request, string fileName, Regex contentTypeRegex, string actionDescription );

		/// <summary>
		/// Gets the html page and loads it into a generic WebPage that is handy-dandy for extracting info from.
		/// </summary>
		/// <param name="request">Describes the web request to issue.</param>
		/// <returns>The retrieved page wrapped in a <see cref="WebPage"/>.</returns>
		WebPage GetPage(IScrapeRequest request);
		
		/// <summary>
		/// Gets an object that represents a parsed and validated web page.
		/// The page's .LoadContent(source) is automatically called.
		/// </summary>
		/// <typeparam name="Page">The <see cref="WebPage"/> subclass to construct and load.</typeparam>
		/// <param name="request">Describes the web request to issue.</param>
		/// <returns>The constructed and loaded page.</returns>
		/// <exception cref="WebException">Thrown if an exception occurs during transport.</exception>
		/// <exception cref="LoadContentException">Thrown if an exception occurs during the .LoadContent(source) call.</exception>
		Page GetPage<Page>(IScrapeRequest request) where Page : WebPage, new();

		/// <summary>
		/// Maximum number of forwards — presumably redirects the scraper will follow per request; confirm with implementation.
		/// </summary>
		int MaxForwards { get; set; }
		
		/// <summary>
		/// HTTP protocol version to use for requests — NOTE(review): exact accepted values (e.g. "1.0"/"1.1") depend on the implementation; verify.
		/// </summary>
		string ProtocolVersion { get; set; }

		/// <summary>
		/// Gets the last web page requested.  
		/// Used for the Request.Referer if none is provided in the request.
		/// </summary>
		string LastPage { get; }
		
		/// <summary>
		/// Whether to request persistent (keep-alive) connections — presumably maps to HttpWebRequest.KeepAlive; confirm with implementation.
		/// </summary>
		bool KeepAlive { get; set; }
		
		/// <summary>Accesses the scraper's Cookies</summary>
		CookieContainer CookieJar { get; }
		
		/// <summary>Handy way to remove all the Cookies from the scraper's Cookie Jar</summary>
		void ClearCookies();

	}

}
