﻿using System;
using System.Collections.Generic;
using System.Text;
using System.Text.RegularExpressions;

using ShootSearch.Util;
using ShootSearch.Common;

using HtmlAgilityPack;

namespace ShootSearch.Spiders
{
    /// <summary>
    /// Extracts candidate crawl URLs from an HTML page: harvests link-bearing
    /// attributes, resolves relative links against the page's base URI, and
    /// filters out malformed or non-HTTP(S) links.
    /// </summary>
    public class UrlParser : IUrlParser
    {
        /// <summary>
        /// Default URL-validation pattern: optional scheme (http/https/ftp/rtsp),
        /// optional user-info, an IPv4 address or domain name, optional port,
        /// and an optional path segment.
        /// </summary>
        public const string REGULAR_URL = 
            @"^((https|http|ftp|rtsp)?://)?(([0-9a-z_!~*'().&=+$%-]+: )?[0-9a-z_!~*'().&=+$%-]+@)?(([0-9]{1,3}\.){3}[0-9]{1,3}|([0-9a-z_!~*'()-]+\.)*([0-9a-z][0-9a-z-]{0,61})?[0-9a-z]\.[a-z]{2,6})(:[0-9]{1,4})?((/?)|(/[0-9a-z_!~*'().;?:@&=+$,%#-]+)+/?)";

        /// <summary>
        /// URL format checker. Absolute http/https links that do not match this
        /// pattern are dropped before <see cref="Uri"/> construction, reducing the
        /// number of UriFormatExceptions thrown (and thus CPU usage). May be set
        /// to null by callers to disable the pre-filter.
        /// </summary>
        public Regex RegexRegularUrl { get; set; }

        public UrlParser()
        {
            // IgnoreCase: URL schemes/hosts are case-insensitive but the default
            // pattern only lists lowercase classes — without it, valid uppercase
            // URLs were wrongly filtered out.
            // Compiled: the regex is applied to every harvested link.
            RegexRegularUrl = new Regex(REGULAR_URL, RegexOptions.IgnoreCase | RegexOptions.Compiled);
        }

        #region IUrlParser Members

        /// <summary>
        /// Parses an HTML page and returns the absolute URLs it links to.
        /// Duplicates are not removed; callers are expected to dedupe.
        /// </summary>
        /// <param name="url">Absolute URL the page was fetched from; used as the
        /// base for resolving relative links. Must be a valid absolute URI.</param>
        /// <param name="page">Raw HTML content of the page.</param>
        /// <returns>List of absolute http/https URLs found on the page.</returns>
        public List<string> ParsePage(string url, string page)
        {
            List<string> urls = new List<string>();
            Uri baseUri = new Uri(url);
            HtmlDocument doc = new HtmlDocument();
            doc.LoadHtml(page);

            // Pass 1: any element carrying a link-like attribute. ParseLink only
            // accepts "href" from <link> elements; <a href> is handled in pass 2.
            HtmlNodeCollection atts = doc.DocumentNode.SelectNodes("//*[@background or @lowsrc or @src or @href]");
            if (atts != null)
            {
                foreach (HtmlNode n in atts)
                {
                    ParseLink(n, "background", urls, baseUri);
                    ParseLink(n, "href", urls, baseUri);
                    ParseLink(n, "src", urls, baseUri);
                    ParseLink(n, "lowsrc", urls, baseUri);
                }
            }

            // Pass 2: anchor hrefs.
            HtmlNodeCollection hrefs = doc.DocumentNode.SelectNodes("//a[@href]");
            if (hrefs != null)
            {
                foreach (HtmlNode href in hrefs)
                {
                    EnQueue(href.Attributes["href"].Value, urls, baseUri);
                }
            }
            return urls;
        }

        #endregion

        #region Harvest a tag attribute value
        /// <summary>
        /// Enqueues the value of the named attribute, if present.
        /// </summary>
        /// <param name="node">HTML node to inspect.</param>
        /// <param name="name">Attribute name to read.</param>
        /// <param name="urls">Output list of accepted URLs.</param>
        /// <param name="baseUri">Base URI for resolving relative links.</param>
        private void ParseLink(HtmlNode node, string name, List<string> urls, Uri baseUri)
        {
            HtmlAttribute att = node.Attributes[name];
            if (att == null)
            {
                return;
            }
            // "href" is only harvested here from <link> elements; <a href> has a
            // dedicated pass in ParsePage. (Equivalent to the original condition
            // !(name == "href") || !(node.Name != "link"), without the double
            // negation.)
            if (name == "href" && node.Name != "link")
            {
                return;
            }
            EnQueue(att.Value, urls, baseUri);
        }
        #endregion

        #region Enqueue operation
        /// <summary>
        /// Normalizes, validates and resolves a link, then appends it to the list.
        /// Invalid, non-matching or non-HTTP(S) links are silently skipped.
        /// </summary>
        /// <param name="link">Raw link value (may be relative).</param>
        /// <param name="urls">Output list of accepted URLs.</param>
        /// <param name="baseUri">Base URI for resolving relative links.</param>
        private void EnQueue(string link, List<string> urls, Uri baseUri)
        {
            Uri uri;
            try
            {
                // Normalize the link (turns relative links into a canonical form).
                link = WebPage.FormatLink(link);

                // Pre-filter absolute http/https links with the regex before
                // constructing a Uri: malformed URLs like "http://ww\\\ww/" would
                // otherwise throw UriFormatException; filtering first lowers the
                // exception rate and therefore CPU usage.
                //
                // BUG FIX: the original null check only guarded the "https:"
                // branch, so a link containing "http:" while RegexRegularUrl was
                // null threw NullReferenceException at IsMatch. The guard now
                // covers both schemes.
                bool looksAbsolute =
                    link.IndexOf("http:", StringComparison.OrdinalIgnoreCase) > -1
                    || link.IndexOf("https:", StringComparison.OrdinalIgnoreCase) > -1;
                if (looksAbsolute && RegexRegularUrl != null && !RegexRegularUrl.IsMatch(link))
                {
                    return;
                }
                uri = new Uri(baseUri, link);
            }
            catch (UriFormatException)
            {
                // Invalid URI that slipped past the regex filter: skip it.
                //Log.Warn("Invalid URI:" + link);
                return;
            }

            // Only http/https links are crawled. Uri.Scheme is lower-case by
            // contract, but compare case-insensitively to be safe — and without
            // the ToLower() allocations of the original.
            if (!string.Equals(uri.Scheme, Uri.UriSchemeHttp, StringComparison.OrdinalIgnoreCase)
                && !string.Equals(uri.Scheme, Uri.UriSchemeHttps, StringComparison.OrdinalIgnoreCase))
            {
                return;
            }

            urls.Add(uri.ToString());
        }
        #endregion

        #region IConfigable Members

        public ShootSearch.Core.SpiderManConfig Config { get; set; }

        #endregion
    }
}
