using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Windows.Forms;

namespace SimpleCrawler
{
    public partial class Form1 : Form
    {
        private Crawler myCrawler;

        public Form1()
        {
            InitializeComponent();
        }

        /// <summary>
        /// Starts a new crawl from the URL typed into <c>txtUrl</c>.
        /// A fresh <see cref="Crawler"/> is created on every click, so
        /// results from a previous run do not leak into the new one.
        /// </summary>
        private void btnStart_Click(object sender, EventArgs e)
        {
            string startUrl = txtUrl.Text;
            if (string.IsNullOrEmpty(startUrl)) return;

            myCrawler = new Crawler();
            myCrawler.UrlProcessed += MyCrawler_UrlProcessed;
            myCrawler.UrlError += MyCrawler_UrlError;

            myCrawler.urls.Add(startUrl, false); // seed the work list with the start page

            // Background thread: a still-running crawl must not keep the
            // process alive after the form is closed (the original used a
            // foreground thread, which does exactly that).
            new Thread(myCrawler.Crawl) { IsBackground = true }.Start();
        }

        // Raised on the crawler thread; marshal to the UI thread before
        // touching the ListBox. Guard against the form being disposed (or the
        // handle not yet created) while the crawl is still running — Invoke
        // would throw in either case.
        private void MyCrawler_UrlProcessed(string url)
        {
            if (IsDisposed || !IsHandleCreated) return;
            BeginInvoke(new Action(() => lbUrls.Items.Add(url)));
        }

        private void MyCrawler_UrlError(string url, string error)
        {
            if (IsDisposed || !IsHandleCreated) return;
            BeginInvoke(new Action(() => lbErrors.Items.Add($"{url} - {error}")));
        }
    }

    /// <summary>
    /// Minimal single-threaded web crawler: downloads pages starting from the
    /// URLs seeded into <see cref="urls"/>, saves each page to disk, and
    /// follows same-host links until the work list is exhausted or more than
    /// 10 pages have been fetched.
    /// </summary>
    public class Crawler
    {
        // Work list: key = absolute URL (string), value = bool "already processed".
        // Hashtable is kept (rather than Dictionary<string, bool>) so the public
        // field's type stays unchanged for existing callers.
        public Hashtable urls = new Hashtable();

        // Number of pages successfully downloaded; also used as the save-file name.
        private int count = 0;

        /// <summary>Raised after a page has been downloaded successfully.</summary>
        public event Action<string> UrlProcessed;

        /// <summary>Raised when fetching a page fails; arguments are (url, error message).</summary>
        public event Action<string, string> UrlError;

        /// <summary>
        /// Crawl loop: repeatedly picks an unprocessed URL from <see cref="urls"/>,
        /// downloads and parses it, until no unprocessed URL remains or more than
        /// 10 pages were fetched.
        /// </summary>
        public void Crawl()
        {
            while (true)
            {
                string current = null;
                foreach (string url in urls.Keys)
                {
                    if ((bool)urls[url]) continue;
                    current = url;
                    break;
                }

                if (current == null || count > 10) break;

                // Mark the URL as processed *before* downloading: the original
                // only flagged it on success, so a URL whose download threw was
                // re-selected forever and the loop never terminated.
                urls[current] = true;

                try
                {
                    string html = DownLoad(current);
                    count++;
                    UrlProcessed?.Invoke(current);
                    Parse(html, current); // enqueue newly discovered same-host links
                }
                catch (Exception ex)
                {
                    UrlError?.Invoke(current, ex.Message);
                }
            }
        }

        /// <summary>
        /// Downloads <paramref name="url"/> as UTF-8 text, verifies the response
        /// is HTML, and writes a copy to a file named after the current page count.
        /// </summary>
        /// <exception cref="Exception">Wraps any download or I/O failure.</exception>
        public string DownLoad(string url)
        {
            try
            {
                // WebClient is IDisposable; the original leaked one per page.
                using (WebClient webClient = new WebClient())
                {
                    webClient.Encoding = Encoding.UTF8;
                    string html = webClient.DownloadString(url);

                    // ResponseHeaders or the Content-Type entry may be absent;
                    // treat a missing header as non-HTML instead of throwing NRE.
                    string contentType = webClient.ResponseHeaders?["Content-Type"];
                    if (contentType == null || !contentType.Contains("text/html"))
                    {
                        throw new Exception("Non-HTML content");
                    }

                    string fileName = count.ToString();
                    File.WriteAllText(fileName, html, Encoding.UTF8);
                    return html;
                }
            }
            catch (Exception ex)
            {
                throw new Exception("Download Error: " + ex.Message);
            }
        }

        /// <summary>
        /// Extracts href targets from <paramref name="html"/>, resolves them
        /// against <paramref name="pageUrl"/>, and adds unseen same-host
        /// absolute URLs to the work list (marked unprocessed).
        /// </summary>
        private void Parse(string html, string pageUrl)
        {
            const string hrefPattern = @"(href|HREF)[ ]*=[ ]*[""'][^""'#>]+[""']";
            Uri baseUri = new Uri(pageUrl);

            foreach (Match match in Regex.Matches(html, hrefPattern))
            {
                // Take everything after '=', then strip surrounding quotes/junk.
                // The original trim set listed '"' twice and omitted the single
                // quote (which the pattern accepts) and whitespace, so
                // "href = 'x'" produced quoted garbage URLs.
                string link = match.Value.Substring(match.Value.IndexOf('=') + 1)
                                   .Trim(' ', '\t', '"', '\'', '#', '>');
                if (link.Length == 0) continue;

                // TryCreate instead of the throwing constructor: one malformed
                // href no longer aborts parsing of the rest of the page.
                Uri absoluteUri;
                if (!Uri.TryCreate(baseUri, link, out absoluteUri)) continue;

                // Stay on the start page's scheme + host.
                if (!absoluteUri.ToString().StartsWith(baseUri.Scheme + "://" + baseUri.Host)) continue;

                string key = absoluteUri.ToString();
                if (urls[key] == null) urls[key] = false;
            }
        }
    }
}
