﻿using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;

namespace WinFormsSimpleCrawler
{
    public class Crawler
    {
        /// <summary>Raised exactly once when the crawl finishes (including the no-seed case).</summary>
        public event Action<Crawler> CrawlerStopped;

        /// <summary>Raised after every download attempt: (crawler, url, "success" | "fail").</summary>
        public event Action<Crawler, string, string> PageDownloaded;

        // URL -> bool map: false = queued (discovered, not yet downloaded), true = done.
        // Kept as Hashtable (not Dictionary) to preserve the public field type for existing callers.
        public Hashtable urls = new Hashtable();

        // Pages downloaded by the crawl loop; also used as the on-disk file name in Download().
        public int count = 0;

        public string startURL; // seed address the crawl starts from

        // Detects href attributes in HTML and captures the link as group "url".
        // BUG FIX: the original used "[]*" (a character class of ']','*','=','['), so
        // "href = '...'" with spaces around '=' never matched; "\s*" is the intended form.
        private readonly string urlDetectRegex = @"(href|HREF)\s*=\s*[""'](?<url>[^""'#>]+)[""']";

        // Marker used to decide whether downloaded text is an HTML document worth parsing.
        private readonly string urlHTML = "!DOCTYPE html";

        // Splits an absolute URL into site/host/file parts (used for host/file filtering).
        public static readonly string urlParseRegex = @"^(?<site>https?://(?<host>[\w.-]+)(:\d+)?($|/))(\w+/)*(?<file>[^#?]*)";

        public string HostFilter { get; set; } // host filter rule (regex); currently not applied, see Parse()
        public string FileFilter { get; set; } // file filter rule (regex); currently not applied, see Parse()

        /// <summary>
        /// Runs the crawl: downloads the seed page, queues the links it contains, then keeps
        /// downloading queued pages (discovering more links as it goes) until the queue is
        /// empty or the page limit (10 loop pages) is reached. Fires <see cref="PageDownloaded"/>
        /// per page and <see cref="CrawlerStopped"/> once at the end.
        /// </summary>
        public void Crawl()
        {
            count = 0;
            urls.Clear();
            if (string.IsNullOrEmpty(startURL))
            {
                // BUG FIX: the original returned silently here, so a UI waiting on
                // CrawlerStopped (e.g. to re-enable buttons) would hang forever.
                CrawlerStopped?.Invoke(this);
                return;
            }

            string html = Download(startURL);
            urls[startURL] = true; // mark the seed as done so the loop never re-downloads it
            PageDownloaded?.Invoke(this, startURL, html.Length == 0 ? "fail" : "success");
            if (Regex.IsMatch(html, urlHTML))
            {
                Parse(html, startURL); // queue the links found on the seed page
            }

            while (true)
            {
                // Pick any URL that has not been downloaded yet.
                string current = null;
                foreach (string url in urls.Keys)
                {
                    if ((bool)urls[url]) continue; // already downloaded
                    current = url;
                    break; // first pending link is enough; no need to scan the rest
                }
                if (current == null || count > 10) break; // nothing pending, or page limit hit

                // Increment BEFORE downloading so the saved file name ("1", "2", ...) never
                // collides with the seed page's "0" (the original overwrote file "0").
                count++;
                // BUG FIX: the original called Download(startURL) here, re-fetching the seed
                // page forever instead of the queued link.
                html = Download(current);
                PageDownloaded?.Invoke(this, current, html.Length == 0 ? "fail" : "success");
                urls[current] = true; // mark as done
                if (Regex.IsMatch(html, urlHTML))
                {
                    Parse(html, current); // keep discovering links from every crawled page
                }
            }

            CrawlerStopped?.Invoke(this);
        }

        /// <summary>
        /// Downloads <paramref name="url"/> as UTF-8 text, archives it to a local file named
        /// after the current <see cref="count"/>, and returns the page body.
        /// Returns "" on any failure (best-effort: failures are reported as "fail" upstream).
        /// </summary>
        public string Download(string url)
        {
            try
            {
                // WebClient is IDisposable; the original leaked it.
                // NOTE(review): HttpClient is preferred in new code, but switching would
                // change this class's synchronous contract.
                using (WebClient webClient = new WebClient())
                {
                    webClient.Encoding = Encoding.UTF8;
                    string html = webClient.DownloadString(url);
                    File.WriteAllText(count.ToString(), html, Encoding.UTF8);
                    return html;
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                return "";
            }
        }

        /// <summary>
        /// Extracts href links from <paramref name="html"/>, resolves relative ones against
        /// <paramref name="url"/> (the page they were found on), and queues each unseen
        /// absolute URL in <see cref="urls"/> with value false.
        /// </summary>
        /// <param name="html">HTML text to scan for links.</param>
        /// <param name="url">Absolute URL of the page the HTML came from (base for relative links).</param>
        public void Parse(string html, string url)
        {
            foreach (Match match in Regex.Matches(html, urlDetectRegex))
            {
                string linkUrl = match.Groups["url"].Value;
                if (string.IsNullOrEmpty(linkUrl)) continue;

                try
                {
                    // BUG FIX: the original resolved every link against startURL, ignoring
                    // the 'url' parameter, so relative links on sub-pages pointed to the
                    // wrong place. Resolve against the page that contains the link.
                    linkUrl = FixUrl(linkUrl, url);
                }
                catch (UriFormatException)
                {
                    continue; // skip unparseable links (e.g. malformed hrefs) instead of aborting the crawl
                }

                // NOTE(review): host/file filtering via HostFilter/FileFilter was disabled in
                // the original and is kept disabled here to preserve behavior:
                // Match m = Regex.Match(linkUrl, urlParseRegex);
                // if (!Regex.IsMatch(m.Groups["host"].Value, HostFilter)) continue;
                // if (!Regex.IsMatch(m.Groups["file"].Value, FileFilter)) continue;
                if (!urls.ContainsKey(linkUrl))
                {
                    urls[linkUrl] = false; // queue for download
                }
            }
        }

        /// <summary>
        /// Converts a possibly-relative URL into an absolute one.
        /// </summary>
        /// <param name="urlX">Relative or absolute URL, e.g. "../test.html".</param>
        /// <param name="baseUrl">Absolute base URL (must include the scheme, e.g. "http://...").</param>
        private string FixUrl(string urlX, string baseUrl)
        {
            Uri baseUri = new Uri(baseUrl);
            Uri absoluteUri = new Uri(baseUri, urlX); // handles both relative and absolute inputs
            return absoluteUri.ToString();
        }
    }
}
