﻿
namespace NWebCrawlerLib
{
    using System;
    using System.Collections.ObjectModel;
    using System.Linq;
    using System.Text.RegularExpressions;
    using NWebCrawlerLib.Common;
    using System.Collections.Generic;

    /// <summary>
    /// Page parser (foamliu, 2009/12/27).
    /// HTML in the wild is not syntax-checked the way program code is, and many pages are
    /// written by non-professionals, so a crawler's parser must be tolerant: it must not
    /// discard important pages over small markup errors.
    ///
    /// This simple implementation only extracts link information from a page.
    /// It WILL:
    /// 1. Find the relevant tags and read their href/src attribute values.
    /// It will NOT:
    /// 1. Distinguish static from dynamic pages, let alone construct query URLs on its own.
    /// </summary>
    public class Parser
    {
        // Compiled once and reused: these patterns run over entire pages on every call,
        // so rebuilding the Regex each time was pure waste.
        // Matches a complete <a ...>...</a> element with at least one character of content.
        // (Original pattern "[\s\S]*?[\s\S]" is equivalent to the lazy "+?" used here.)
        private static readonly Regex AnchorRegex =
            new Regex("<a[\\s\\S]*?>[\\s\\S]+?</a>", RegexOptions.Compiled);

        // Matches href/src attributes (either case) with a single- or double-quoted value.
        private static readonly Regex RefAttributeRegex =
            new Regex(@"(href|HREF|src|SRC)[ ]*=[ ]*[""'][^""'#>]+[""']", RegexOptions.Compiled);

        /// <summary>
        /// Returns every complete anchor element (&lt;a ...&gt;...&lt;/a&gt;) in
        /// <paramref name="strHtml"/> whose full markup contains <paramref name="strKeyWords"/>.
        /// </summary>
        /// <param name="strHtml">HTML source to scan; null/empty yields an empty list.</param>
        /// <param name="strKeyWords">Literal keyword that must appear inside the anchor markup.</param>
        /// <param name="strZblx">
        /// Tender-type filters. Currently unused — the type filtering was disabled before this
        /// revision; the parameter is kept so existing callers keep compiling.
        /// </param>
        /// <returns>The matching anchor elements, in document order; empty when nothing matches.</returns>
        public static List<string> matchUrl(String strHtml, string strKeyWords, List<string> strZblx)
        {
            List<string> lstReturn = new List<string>();
            if (string.IsNullOrEmpty(strHtml) || strKeyWords == null)
            {
                return lstReturn;
            }

            // Collapse line breaks and tabs so an anchor split across source lines still matches.
            strHtml = strHtml.Replace("\r\n", "").Replace("\t", "");

            foreach (Match match in AnchorRegex.Matches(strHtml))
            {
                // Ordinal comparison: the keyword is matched literally, not linguistically (CA1310).
                if (match.Value.IndexOf(strKeyWords, StringComparison.Ordinal) > -1)
                {
                    lstReturn.Add(match.Value);
                }
            }

            return lstReturn;
        }

        /// <summary>
        /// Extracts href/src targets from <paramref name="htmlcode"/> and resolves each one
        /// against the crawl location via <c>Utility.Normalize</c>.
        /// </summary>
        /// <param name="baseUri">Site root, e.g. www.baidu.com.</param>
        /// <param name="strFxUrl">Subdirectory under the site where analysis started, e.g. www.baidu.com/pic/upload.</param>
        /// <param name="htmlcode">HTML source to analyse.</param>
        /// <param name="k">
        /// Crawl depth indicator; when non-zero, one trailing path segment is stripped from
        /// <paramref name="strFxUrl"/> before resolving a link.
        /// </param>
        /// <returns>The resolved link targets; empty array when nothing was found or parsing failed.</returns>
        public static string[] ExtractLinks(string baseUri, string strFxUrl, string htmlcode, int k)
        {
            Collection<string> urls = new Collection<string>();
            string strRef2 = "";
            try
            {
                foreach (Match match in RefAttributeRegex.Matches(htmlcode))
                {
                    // Take everything after '=' and strip quotes, fragments and stray brackets.
                    string strRef = match.Value.Substring(match.Value.IndexOf('=') + 1).Trim('"', '\'', '#', ' ', '>');
                    try
                    {
                        if (IsGoodUri(strRef))
                        {
                            string[] sArray = strRef.Split('/');
                            if (sArray[0] == ".")
                            {
                                // "./..." relative link: resolve against the current directory.
                                if (k != 0)
                                {
                                    // NOTE(review): strFxUrl loses one path segment per MATCHED
                                    // link, not once per call — looks suspicious; confirm intent
                                    // before changing, behavior preserved here.
                                    strFxUrl = strFxUrl.Substring(0, strFxUrl.LastIndexOf("/"));
                                }
                                Utility.Normalize(baseUri, strFxUrl, ref strRef);
                            }
                            else
                            {
                                if (strRef.IndexOf("/") > -1)
                                {
                                    // Directory part of the link itself.
                                    strRef2 = strRef.Substring(0, strRef.LastIndexOf("/"));
                                }
                                if (k != 0)
                                {
                                    strFxUrl = strFxUrl.Substring(0, strFxUrl.LastIndexOf("/"));
                                }
                                if (strRef.IndexOf("/") > -1)
                                {
                                    // Remove the link's directory part from the base so it is
                                    // not duplicated after normalization.
                                    strFxUrl = strFxUrl.Replace(strRef2, "");
                                }
                                Utility.Normalize(baseUri, strFxUrl, ref strRef);
                            }
                            urls.Add(strRef);
                        }
                    }
                    catch (Exception)
                    {
                        // Best-effort by design: one malformed reference must not abort the page.
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }

            return urls.ToArray();
        }

        /// <summary>
        /// Extracts href/src targets from <paramref name="htmlcode"/>, keeping only absolute
        /// URIs, and normalizes each one via <c>Utility.Normalize</c>.
        /// </summary>
        /// <param name="strFxUrl">Subdirectory under the site where analysis started (currently unused here).</param>
        /// <param name="htmlcode">HTML source to analyse.</param>
        /// <returns>The normalized absolute link targets; empty array when none were found.</returns>
        public static string[] ExtractLinks(string strFxUrl, string htmlcode)
        {
            Collection<string> urls = new Collection<string>();
            try
            {
                foreach (Match match in RefAttributeRegex.Matches(htmlcode))
                {
                    string strRef = match.Value.Substring(match.Value.IndexOf('=') + 1).Trim('"', '\'', '#', ' ', '>');
                    try
                    {
                        if (IsGoodUri(strRef))
                        {
                            // Only absolute URIs are kept. Previously relative references made
                            // "new Uri(strRef)" throw and were silently skipped by the catch;
                            // the same filter is now explicit instead of exception-driven.
                            Uri parsed;
                            if (Uri.TryCreate(strRef, UriKind.Absolute, out parsed))
                            {
                                Utility.Normalize(ref strRef);
                                urls.Add(strRef);
                            }
                        }
                    }
                    catch (Exception)
                    {
                        // Best-effort by design: skip references that fail to normalize.
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }

            return urls.ToArray();
        }

        /// <summary>
        /// Filters out references that are not crawlable page links:
        /// javascript: pseudo-URLs and directory references ending in '/'.
        /// </summary>
        /// <param name="strUri">Raw attribute value extracted from the page.</param>
        /// <returns>true when the reference is worth keeping.</returns>
        static bool IsGoodUri(string strUri)
        {
            // Case-insensitive ordinal comparison instead of allocating with ToLower().
            if (strUri.StartsWith("javascript:", StringComparison.OrdinalIgnoreCase))
                return false;
            if (strUri.EndsWith("/", StringComparison.Ordinal))
                return false;
            return true;
        }

    }
}
