﻿using System;
using System.Collections.Generic;
using System.Text ;
using System.IO;
using System.Net ;
using System.Collections;
using System.Text.RegularExpressions ;
using System.Threading;
/*h.改进教材p412例9 - 10的爬虫程序，要求如下：
1）只爬取初始网站上的网页。
2）只有当爬取的是HTML文本时，才解析并爬取下一级URL。
3）将相对地址转成绝对地址进行爬取。
4）使用WinForm来配置初始URL、启动爬虫，显示已经爬取的URL和错误的URL信息。
*/
public class Crawler
{
    // Frontier/visited map: key = absolute URL, value = true once downloaded.
    // (Hashtable retained from the original textbook example; a
    // Dictionary<string, bool> would be the modern choice.)
    private Hashtable urls = new Hashtable();
    private int count = 0;            // pages downloaded so far
    private string baseUrl;           // "scheme://host" of the start URL; the crawl never leaves this site
    private const int MaxPages = 10;  // download limit (original used "count > 10", which allowed 11 pages)

    static void Main(string[] args)
    {
        Crawler myCrawler = new Crawler();
        string startUrl = "http://www.cnblogs.com/dstang2000/";
        if (args.Length >= 1) startUrl = args[0];
        myCrawler.urls.Add(startUrl, false);       // seed the frontier with the start page
        myCrawler.baseUrl = GetBaseUrl(startUrl);  // remember which site we are allowed to crawl
        new Thread(myCrawler.Crawl).Start();       // start crawling on its own thread
    }

    // Returns the "scheme://host" prefix of an absolute URL.
    private static string GetBaseUrl(string url)
    {
        Uri uri = new Uri(url);
        return uri.Scheme + "://" + uri.Host;
    }

    // Main crawl loop: repeatedly pick a not-yet-downloaded URL, fetch it,
    // and — only when the content is HTML — extract further links, until the
    // frontier is empty or MaxPages pages have been downloaded.
    private void Crawl()
    {
        Console.WriteLine("开始爬行了....");

        while (true)
        {
            string current = null;
            foreach (string url in urls.Keys)  // find a link that has not been downloaded yet
            {
                if ((bool)urls[url]) continue; // already downloaded, skip

                current = url;
                break;
            }
            // FIX: ">= MaxPages" stops after exactly 10 downloads (was "> 10" -> 11).
            if (current == null || count >= MaxPages) break;
            Console.WriteLine("爬行" + current + "页面!");
            string html = DownLoad(current);  // download
            urls[current] = true;
            count++;
            if (IsHtmlContent(html))  // requirement 2: only parse HTML content
            {
                // Pass the current page so relative links can be resolved to
                // absolute ones (requirement 3). Off-site links are filtered
                // inside Parse (requirement 1), so the original post-hoc
                // removal loop over urls.Keys is no longer needed.
                Parse(html, current);
            }
        }
        Console.WriteLine("爬行结束");
    }

    // Downloads the page at `url` as UTF-8 text, saves a numbered copy to
    // disk, and returns the content; returns "" on any error.
    public string DownLoad(string url)
    {
        try
        {
            // FIX: WebClient is IDisposable and was never disposed.
            using (WebClient webClient = new WebClient())
            {
                webClient.Encoding = Encoding.UTF8;
                string html = webClient.DownloadString(url);
                string fileName = count.ToString();
                File.WriteAllText(fileName, html, Encoding.UTF8);
                return html;
            }
        }
        catch (Exception ex)
        {
            // Best-effort: log the failure and treat the page as empty so the
            // crawl continues with the next URL.
            Console.WriteLine(ex.Message);
            return "";
        }
    }

    // Heuristic HTML check: the content contains an "<html" tag.
    // FIX: null/empty-safe and case-insensitive (accepts "<HTML>" as well).
    public bool IsHtmlContent(string content)
    {
        return !string.IsNullOrEmpty(content)
            && content.IndexOf("<html", StringComparison.OrdinalIgnoreCase) >= 0;
    }

    // Extracts href targets from `html`, converts relative addresses to
    // absolute ones (resolved against `pageUrl` when given, else baseUrl),
    // and queues only links that belong to the start site. The optional
    // second parameter keeps the original Parse(string) signature working.
    public void Parse(string html, string pageUrl = null)
    {
        string strRef = @"(href|HREF)[]*=[]*[""'][^""'#>]+[""']";
        MatchCollection matches = new Regex(strRef).Matches(html);
        // Host the crawl is restricted to; null means "no restriction"
        // (e.g. Parse called directly before Main set baseUrl).
        string allowedHost = baseUrl == null ? null : new Uri(baseUrl).Host;
        foreach (Match match in matches)
        {
            // FIX: also trim single quotes — the regex accepts href='...'.
            string link = match.Value.Substring(match.Value.IndexOf('=') + 1)
                                     .Trim('"', '\'', '\\', '#', ' ', '>');
            if (link.Length == 0) continue;
            string absolute = ToAbsolute(link, pageUrl);
            if (absolute == null) continue;  // malformed URL, skip
            if (allowedHost != null)
            {
                // Requirement 1: only follow links on the initial site.
                // FIX: compare Uri.Host instead of the old substring check
                // key.Contains(host), which matched foreign URLs too.
                Uri absUri;
                if (!Uri.TryCreate(absolute, UriKind.Absolute, out absUri)) continue;
                if (!string.Equals(absUri.Host, allowedHost, StringComparison.OrdinalIgnoreCase)) continue;
            }
            if (urls[absolute] == null) urls[absolute] = false;
        }
    }

    // Requirement 3: turn a possibly-relative href into an absolute URL.
    // Returns null when the href cannot be parsed at all.
    private string ToAbsolute(string href, string pageUrl)
    {
        try
        {
            if (Uri.IsWellFormedUriString(href, UriKind.Absolute)) return href;
            string basePage = pageUrl != null ? pageUrl : baseUrl;
            if (basePage == null) return href;  // nothing to resolve against
            return new Uri(new Uri(basePage), href).AbsoluteUri;
        }
        catch (UriFormatException)
        {
            return null;
        }
    }
}