﻿using System;
using System.Collections.Generic;
using System.Windows.Forms;
using System.Threading;
using h爬虫;
using System.IO;
using System.Net;
using System.Text.RegularExpressions;

namespace h爬虫
{
    public partial class Form1 : Form
    {
        // Background crawler that reports fetched URLs and errors back to this form.
        private readonly Crawler myCrawler;

        public Form1()
        {
            InitializeComponent();
            myCrawler = new Crawler();
        }

        /// <summary>
        /// Returns the base URL the user typed into the text box.
        /// Safe to call from any thread: when called off the UI thread the
        /// control read is marshalled via <see cref="Control.Invoke(Delegate)"/>.
        /// </summary>
        public string GetBaseUrl()
        {
            // The crawler's worker thread calls this; reading textBox1.Text
            // directly off the UI thread is a cross-thread control access bug.
            if (InvokeRequired)
            {
                return (string)Invoke((Func<string>)(() => textBox1.Text));
            }
            return textBox1.Text;
        }

        /// <summary>Adds a successfully fetched URL to the results list (thread-safe).</summary>
        public void AddFetchedUrl(string url)
        {
            Invoke((MethodInvoker)delegate {
                listBox1.Items.Add(url);
            });
        }

        /// <summary>Adds an error message to the error list (thread-safe).</summary>
        public void AddError(string message)
        {
            Invoke((MethodInvoker)delegate {
                listBox2.Items.Add(message);
            });
        }

        // Start button: clear any previous run's output, then kick off a new
        // crawl on a background thread starting from the entered base URL.
        private void button1_Click_1(object sender, EventArgs e)
        {
            listBox1.Items.Clear();
            listBox2.Items.Clear();
            myCrawler.StartCrawling(GetBaseUrl(), this);
        }
    }
}

public class Crawler
{
    // Compiled once for the whole crawl: captures the target of every
    // href="..." / href='...' attribute in a page.
    private static readonly Regex HrefPattern =
        new Regex(@"href\s*=\s*[""'](.*?)[""']", RegexOptions.Compiled);

    /// <summary>
    /// Starts crawling on a background thread, beginning at
    /// <paramref name="baseUrl"/> and following only links under it.
    /// Fetched URLs and errors are reported back to <paramref name="form"/>.
    /// </summary>
    public void StartCrawling(string baseUrl, Form1 form)
    {
        // Capture the base URL once so the worker never has to read UI state,
        // and share one visited set so cyclic links cannot recurse forever.
        var visited = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
        var worker = new Thread(() => Crawl(baseUrl, baseUrl, form, visited));
        worker.IsBackground = true; // don't keep the process alive after the form closes
        worker.Start();
    }

    /// <summary>
    /// Fetches <paramref name="url"/>, reports it to the form, and recursively
    /// follows every in-scope link found in the returned HTML.
    /// </summary>
    private void Crawl(string url, string baseUrl, Form1 form, HashSet<string> visited)
    {
        // HashSet.Add returns false for a repeat URL: this both deduplicates
        // work and breaks the infinite recursion a link cycle would cause.
        if (string.IsNullOrEmpty(url) || !visited.Add(url)) return;
        try
        {
            HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
            request.Method = "GET";
            using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
            {
                // Only parse successful HTML responses; skip images, PDFs, etc.
                if (response.StatusCode != HttpStatusCode.OK ||
                    !response.ContentType.StartsWith("text/html", StringComparison.OrdinalIgnoreCase))
                {
                    return;
                }
                using (StreamReader reader = new StreamReader(response.GetResponseStream()))
                {
                    string html = reader.ReadToEnd();
                    form.AddFetchedUrl(url);

                    // 解析HTML以寻找链接 — find links and follow those under the base URL.
                    foreach (Match match in HrefPattern.Matches(html))
                    {
                        string nextUrl = match.Groups[1].Value;
                        if (!nextUrl.StartsWith("http", StringComparison.OrdinalIgnoreCase))
                        {
                            // Resolve relative links against the current page.
                            nextUrl = new Uri(new Uri(url), nextUrl).AbsoluteUri;
                        }
                        if (nextUrl.StartsWith(baseUrl, StringComparison.OrdinalIgnoreCase))
                        {
                            Crawl(nextUrl, baseUrl, form, visited);
                        }
                    }
                }
            }
        }
        catch (Exception ex)
        {
            form.AddError($"Error crawling {url}: {ex.Message}");
        }
    }
}
