﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using MIL.Html;
using System.Net;
using System.IO;
using Common;
using System.Text.RegularExpressions;
using System.Threading;
using HttpHelper;
using HtmlAgilityPack;
using Amib.Threading;
using System.Collections;
using Rss;

namespace Crawling
{

    /// <summary>
    /// Event payload carrying a downloaded HTML page together with the
    /// crawl metadata record of the URL it came from.
    /// </summary>
    public class HtmlDownloadEventArgs : EventArgs
    {
        /// <summary>Raw HTML content of the downloaded page.</summary>
        public string HtmlPage { get; set; }

        /// <summary>Crawl bookkeeping record for the downloaded URL.</summary>
        public CrawlUrl CrawlUrl { get; set; }

        /// <summary>Creates the event args from a page and its crawl record.</summary>
        public HtmlDownloadEventArgs(string htmlPage, CrawlUrl crawlUrl)
        {
            CrawlUrl = crawlUrl;
            HtmlPage = htmlPage;
        }
    }
    /// <summary>
    /// This class is used to get all relevant links.
    /// 
    /// </summary>
	public class Crawler
	{
        IWorkItemsGroup _workGroup;
        private Hashtable _resultHash = new Hashtable();
        private Hashtable _visitedHash = new Hashtable();
        private object _lockVisitedLinks = new object();
        private object _lockResultLinks = new object();
        private List<string> _seeds;
        private int _maxDepth;
        //What kind of 
        private Common.CrawlUrl.CrawlUrlType _crawlUrlType;
        //Plug in a function to determine whether this is a relevant result.
        public delegate bool RelevantResultDelegate(string url);
        public delegate void OnHtmlDownloadedDelegate(HtmlDownloadEventArgs e);
        public OnHtmlDownloadedDelegate _onHtmlDownloadedFunction;
        public RelevantResultDelegate _relevantResultFunction;
        string _encoding; 
        public event EventHandler<HtmlDownloadEventArgs> HtmlDownloadedEvent;


        // Wrap event invocations inside a protected virtual method
        // to allow derived classes to override the event invocation behavior
        protected virtual void OnHtmlDownloadedEvent(HtmlDownloadEventArgs e)
        {
            // Make a temporary copy of the event to avoid possibility of
            // a race condition if the last subscriber unsubscribes
            // immediately after the null check and before the event is raised.
            EventHandler<HtmlDownloadEventArgs> handler = HtmlDownloadedEvent;

            // Event will be null if there are no subscribers
            if (handler != null)
            {
                // Format the string to send inside the CustomEventArgs parameter
                // Use the () operator to raise the event.
                handler(this, e);
            }
        }

        public Crawler(IWorkItemsGroup workGroup, int maxDepth, int maxThreads, string encoding, Common.CrawlUrl.CrawlUrlType cut)
        {
            
        }

        public Crawler(OnHtmlDownloadedDelegate ohdd, IWorkItemsGroup workGroup, int maxDepth, int maxThreads, string encoding,  Common.CrawlUrl.CrawlUrlType cut)
        {
            _workGroup = workGroup;
            _maxDepth = maxDepth;
            _relevantResultFunction = isRelevantFunction;
            _crawlUrlType = cut;
            _encoding = encoding;
            _onHtmlDownloadedFunction = ohdd;
        }

       /// <summary>
       /// 
       /// </summary>
       /// <param name="maxDepth"></param>
       /// <param name="maxThreads"></param>
       /// <param name="isRelevantResult">Bool func(string url) , should return true if the 
       /// URL is relevant, false if not</param>
        public Crawler(OnHtmlDownloadedDelegate ohdd, IWorkItemsGroup workGroup, int maxDepth, int maxThreads, string encoding, Common.CrawlUrl.CrawlUrlType cut, RelevantResultDelegate isRelevantResult)
        {
            _onHtmlDownloadedFunction = ohdd;
            _workGroup = workGroup;
            _maxDepth = maxDepth;
            _relevantResultFunction = isRelevantResult;
            _crawlUrlType = cut;
            _encoding = encoding;
        }


        private class CrawlArgs
        {
            public string Url;
            public int Depth;
            public bool AllowBadChars;

            public CrawlArgs(string url, int depth, bool allowBadChars)
            {
                Url = url;
                Depth = depth;
                AllowBadChars = allowBadChars;
            }
        }

        private bool isRelevantFunction(string url)
        {
            return true;
        }

        public void Seed(List<string> seedUrls)
        {
            _seeds = seedUrls;
        }

        /// <summary>
        /// 
        /// </summary>
        /// <returns></returns>
        public List<string> Crawl()
		{
            foreach (string url in _seeds)
            {
                Crawl(url, _maxDepth, false);
            }
           // _workGroup.WaitForIdle();
            return StringHashToList(_resultHash);
		}

        private List<string> StringHashToList(Hashtable hash)
        {
            List<string> res = new List<string>();
            foreach (DictionaryEntry de in hash)
            {
                res.Add(de.Key as string);
            }
            return res;
        }

        /// <summary>
        /// Inits the crawling process for some URL for some Depth and waits for it to finish.
        /// </summary>
        /// <param name="url"></param>
        /// <param name="depth"></param>
        /// <param name="allowBadChars"></param>
        private void Crawl(string url, int depth, bool allowBadChars)
        {
            CrawlMain(new CrawlArgs(url, depth, allowBadChars));
           
        }


		private object CrawlMain(object crawlArgs)
		{
            CrawlArgs args = crawlArgs as CrawlArgs;
            string url = args.Url;
            int depth = args.Depth;
            bool allowBadChars = args.AllowBadChars;
           
            //Check if malformed URL
			Uri tmpUri, uri;
            try
            {
                uri = new Uri(url);
            }
            catch //invalid URI.
            {
                Console.WriteLine("Crawler : Invalid URL = " + url);
                return null;
            }
            //Check if we have visited this page before.
            lock (_lockVisitedLinks)
            {
                if (_visitedHash.Contains(url))
                { return null; }
                _visitedHash.Add(url, url);
            }

            //Make sure URL is in correct form for download
            string baseAddress = uri.Scheme + "://" + uri.Host;
            string address = baseAddress;
			for (int i = 0; i < uri.Segments.Length; i++)
			{
				if (!uri.Segments[i].EndsWith("/")) break;
				address += uri.Segments[i];
			}   
            
			//Download Page
            string HTML = downloadHTML(uri);
            //Page contains no data/Error downloading page.
            if (HTML.Equals(string.Empty))
                return null;
            //Console.WriteLine("Page Downloaded : " + uri.ToString());
            //Html page was downloaded. Make sure somebody handles it.
            if (_onHtmlDownloadedFunction != null)
                _onHtmlDownloadedFunction(new HtmlDownloadEventArgs(HTML, new CrawlUrl(uri,
                "", DateTime.Now, DateTime.Now, CrawlUrl.CrawlUrlType.Story)));
            //OnHtmlDownloadedEvent(new HtmlDownloadEventArgs(HTML, new CrawlUrl(uri, 
             //   "", DateTime.Now, DateTime.Now, CrawlUrl.CrawlUrlType.Story)));

            //If depth < 0 then we do not want to harvest the links on this page.
            //Except if this is an XML (RSS) page and then we do.
            if (depth < 0 && !IsXml(HTML)) {
                return null; }

            //get all links from HTML/XML page
            var gathered = GetAllLinks(HTML);
            string slash = "/", javascript = "javascript";
			//Console.WriteLine("{0} link(s) found", gathered.Length);
            foreach (string link in gathered)
            {

                if (link == string.Empty) continue;
                if (link.ToLower().StartsWith(javascript)) continue;

                string nLink = link;

                if (nLink.StartsWith("\"") && nLink.EndsWith("\"")) nLink = nLink.Substring(1, link.Length - 2);
                if (!nLink.ToLower().StartsWith("http"))
                {
                    if (nLink.StartsWith(slash)) nLink = baseAddress + nLink;
                    else nLink = address + nLink;
                }
                //nLink = nLink.Replace("//", "/");
                //lock(resultLinks)
                //  if (links.Contains(nLink)) continue;
                if (!IsLink(nLink, allowBadChars)) continue;
                //Remove anything after # from string.
                nLink = (nLink.Contains("#")) ? nLink.Substring(0, nLink.IndexOf('#') - 1) : nLink;
                //if (!SameDomain(url, nLink)) continue;

                //If this is a relevant result according to plugged-in function 
                if (_relevantResultFunction(nLink))
                    // if we have this URL in resultlinks, don't add it again.
                    lock (_lockResultLinks)
                        if (!_resultHash.Contains(nLink))
                        {
                            _resultHash.Add(nLink, nLink);
                         
                        }

                //Crawl url, subtract one depth level.
                _workGroup.QueueWorkItem(new WorkItemCallback(CrawlMain), new CrawlArgs(nLink, depth - 1, allowBadChars));
            }
            return null;
		}

		private static bool IsLink(string link, bool allowBadChars)
		{
            char[] badChars = { };//'#' };
			string[] badSuffixes = { ".jpg", ".bmp", ".jpeg", ".mp3", ".avi", ".wav" };
			if (!allowBadChars)
			{
				foreach (char c in badChars)
				{
					if (link.Contains(c)) return false;
				}
			}
			foreach (string bad in badSuffixes)
			{
				if (link.ToLower().EndsWith(bad)) return false;
			}
			return true;
		}
        
        /*
		private static bool IsLink(string link)
		{
			char[] badChars = { '#' };
			string[] badSuffixes = { ".jpg", ".bmp", ".jpeg", ".mp3", ".avi", ".wav"};
			//if (!link.StartsWith("http://")) return false;
			foreach (char c in badChars)
			{
				if (link.Contains(c)) return false;
			}
			foreach (string bad in badSuffixes)
			{
				if (link.EndsWith(bad)) return false;
			}
			return true;
		}*/


        private List<string> GetRssFeedLinks(string XmlPage)
        {
            List<string> links = new List<string>();
            try
            {
                Rss.RssReader RssReader = new Rss.RssReader(new StringReader(XmlPage));
                while (true)
                {

                    RssElement element = RssReader.Read();
                    if (null == element) throw new Exception();
                    if (!(element is RssItem)) continue;
                    var item = element as RssItem;
                    links.Add(item.Link.ToString());
                }
            }
            catch (Exception e)
            {
              
                if (e is System.Net.WebException)
                     Console.WriteLine("GetRssLinks , {0}", e.Message);
                return links;
            }
           return links;
        
        }

        /// <summary>
        /// Downloads HTML pages.
        /// </summary>
        /// <param name="uri"></param>
        /// <returns>string.empty if page is empty or d/l failure.</returns>
        private string downloadHTML(Uri uri)
        {
            try
            {
                return HttpHelper.HttpDownloader.DownloadPage(uri, _encoding);
            }
            catch (Exception e)
            {
                Console.WriteLine("Crawler : Couldn't download " + uri.ToString() +
                    "Error : " + e.Message);
                return string.Empty;
            }
        }

        private bool IsXml(string html)
        {
            return html.StartsWith("<?xml");
        }
        /// <summary>
        /// downloads an HTML page from url and returns an array of all HREFs in it.
        /// </summary>
        /// <param name="url"></param>
        /// <returns></returns>
		private string[] GetAllLinks(string html)
		{
            try
            {
                List<string> links = new List<string>();
                if (IsXml(html))
                    return GetRssFeedLinks(html).ToArray();
                HtmlAgilityPack.HtmlDocument doc = new HtmlAgilityPack.HtmlDocument();
                doc.OptionReadEncoding = false;
                doc.LoadHtml(html);

                HtmlAgilityPack.HtmlNodeCollection hrefs = doc.DocumentNode.SelectNodes("//a[@href]");
                //No links here return empty list.
                if (hrefs == null)
                    return links.ToArray();

                foreach (HtmlAgilityPack.HtmlNode href in hrefs)
                {
                    links.Add(href.Attributes["href"].Value);

                }
              
                return links.ToArray();
            }
            catch (Exception e)
            {
                Console.WriteLine("Exception: {0}", e.Message);
                return new string[] { };
            }
		}

        private void AddToResultHash(CrawlUrl cu)
        {
            lock (_lockResultLinks)
                if (!_resultHash.Contains(cu.Uri.ToString()))
                    _resultHash.Add(cu.Uri.ToString(), cu);
        }

        private void AddToVisitedLinks(string url)
        {
            lock (_lockVisitedLinks)
                if (!_visitedHash.Contains(url))
                    _visitedHash.Add(url, url);
        }

       
	}

	
}
