﻿using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Threading.Tasks;
using HtmlAgilityPack;

/// <summary>
/// Minimal breadth-first web crawler: starts from a seed URL, fetches pages,
/// extracts same-host links with HtmlAgilityPack, and crawls until the
/// frontier is exhausted. Single-threaded, no politeness delay or depth limit.
/// </summary>
class SimpleCrawler
{
    // One shared HttpClient for the whole crawl (per-request clients exhaust sockets).
    static HttpClient client = new HttpClient();

    // URLs that have been dequeued and attempted (successfully or not).
    // Marking failures as visited prevents endless re-fetching of broken links.
    static HashSet<string> visited = new HashSet<string>();

    // BFS frontier of URLs waiting to be fetched.
    static Queue<string> toVisit = new Queue<string>();

    // Mirror of everything ever enqueued, so the duplicate check is O(1)
    // instead of a linear scan of the queue.
    static HashSet<string> enqueued = new HashSet<string>();

    // Host of the start URL; the crawl never leaves this host.
    static Uri baseUri;

    // Extensions treated as web pages even when Content-Type is not text/html.
    static readonly string[] webPageExtensions = { ".htm", ".html", ".aspx", ".php", ".jsp" };

    static async Task Main(string[] args)
    {
        // Seed URL — change this to the site you want to crawl.
        string startUrl = "https://www.example.com/";
        baseUri = new Uri(startUrl);
        Enqueue(startUrl);

        while (toVisit.Count > 0)
        {
            string url = toVisit.Dequeue();

            // HashSet.Add returns false if already present; this both checks
            // and marks in one step, so even failed fetches are not retried.
            if (!visited.Add(url))
                continue;

            try
            {
                Console.WriteLine("Crawling: " + url);
                using (var response = await client.GetAsync(url))
                {
                    if (!response.IsSuccessStatusCode)
                        continue;

                    var contentType = response.Content.Headers.ContentType?.MediaType ?? "";

                    // A page is parseable when the server says it is HTML,
                    // or when the URL carries a known web-page extension.
                    if (contentType.Contains("text/html") || HasWebPageExtension(url))
                    {
                        string html = await response.Content.ReadAsStringAsync();
                        ExtractAndEnqueueLinks(url, html);
                    }
                    else
                    {
                        Console.WriteLine("Non-html content, skip parsing links: " + url);
                    }
                }
            }
            catch (Exception ex)
            {
                // Best-effort crawl: log and move on so one bad URL
                // cannot abort the whole run.
                Console.WriteLine("Error crawling " + url + ": " + ex.Message);
            }
        }

        Console.WriteLine($"Crawling finished. Total pages crawled: {visited.Count}");
    }

    // True when the URL ends with one of the whitelisted page extensions.
    static bool HasWebPageExtension(string url)
    {
        return Array.Exists(webPageExtensions,
            ext => url.EndsWith(ext, StringComparison.OrdinalIgnoreCase));
    }

    // Parses the HTML of <paramref name="pageUrl"/>, resolves every <a href>,
    // and enqueues links that stay on the starting host.
    static void ExtractAndEnqueueLinks(string pageUrl, string html)
    {
        var doc = new HtmlDocument();
        doc.LoadHtml(html);

        // SelectNodes returns null (not an empty collection) when nothing matches.
        var anchors = doc.DocumentNode.SelectNodes("//a[@href]");
        if (anchors == null)
            return;

        // BUGFIX: relative hrefs must be resolved against the page that
        // contains them — resolving against the crawl's start URL produces
        // wrong absolute URLs for every page below the root.
        var pageUri = new Uri(pageUrl);

        foreach (var link in anchors)
        {
            var href = link.GetAttributeValue("href", string.Empty).Trim();
            if (string.IsNullOrEmpty(href))
                continue;

            // Ignore non-navigational pseudo-links.
            if (href.StartsWith("mailto:", StringComparison.OrdinalIgnoreCase) ||
                href.StartsWith("javascript:", StringComparison.OrdinalIgnoreCase))
                continue;

            // This overload handles absolute, relative, and protocol-relative
            // hrefs in one call, resolving against the current page.
            Uri linkUri;
            if (!Uri.TryCreate(pageUri, href, out linkUri))
                continue; // malformed href — skip rather than throw

            // Only follow http(s) links, and only within the starting host.
            if (linkUri.Scheme != Uri.UriSchemeHttp && linkUri.Scheme != Uri.UriSchemeHttps)
                continue;
            if (linkUri.Host != baseUri.Host)
                continue;

            string absoluteUrl = linkUri.AbsoluteUri;

            // Strip the fragment (#...) so in-page anchors don't defeat de-duplication.
            int anchorIndex = absoluteUrl.IndexOf('#');
            if (anchorIndex >= 0)
                absoluteUrl = absoluteUrl.Substring(0, anchorIndex);

            if (!visited.Contains(absoluteUrl))
                Enqueue(absoluteUrl);
        }
    }

    // Adds a URL to the frontier unless it was already enqueued at some point.
    static void Enqueue(string url)
    {
        if (enqueued.Add(url))
            toVisit.Enqueue(url);
    }
}