﻿using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Security.Cryptography;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;

namespace SimpleCrawler
{
    /// <summary>
    /// A minimal parallel web crawler: starting from a seed URL it downloads pages,
    /// saves each one to <see cref="downloadPath"/> as "{n}.html", extracts href
    /// links and queues them, stopping once <see cref="MaxPages"/> pages have been
    /// downloaded or the frontier is exhausted.
    /// </summary>
    class SimpleCrawler
    {
        // Crawl budget; was hard-coded as the literal 10 in three separate places.
        private const int MaxPages = 10;

        // URL -> visited flag. ConcurrentDictionary so parallel workers can add and
        // mark entries without extra locking.
        private ConcurrentDictionary<string, bool> urls = new ConcurrentDictionary<string, bool>();

        private int count = 0; // pages successfully downloaded; also the file-name sequence
        private string downloadPath = "C:\\Users\\七七司十八\\Desktop\\worm"; // 下载路径
        private readonly object lockObject = new object(); // guards count / file-name generation

        static void Main(string[] args)
        {
            SimpleCrawler crawler = new SimpleCrawler();
            // BUG FIX: the original literal ended with a space, which produced a
            // malformed request URL.
            string startUrl = "http://www.cnblogs.com/dstang2000/"; // 替换为实际的起始URL
            if (args.Length >= 1) startUrl = args[0].Trim();
            if (string.IsNullOrWhiteSpace(crawler.downloadPath))
                crawler.downloadPath = Directory.GetCurrentDirectory();
            Directory.CreateDirectory(crawler.downloadPath); // no-op if it already exists
            crawler.urls.TryAdd(startUrl, false); // 加入初始页面
            crawler.Crawl();
        }

        /// <summary>
        /// Main crawl loop: repeatedly snapshots the unvisited URLs and downloads
        /// them in parallel. Exits when the page budget is reached OR no unvisited
        /// URLs remain (the original spun forever in that second case).
        /// </summary>
        private void Crawl()
        {
            Console.WriteLine("开始爬行了.... ");
            while (true)
            {
                if (Volatile.Read(ref count) >= MaxPages)
                    break;

                // Snapshot the pending frontier; Parse() may add more while we run.
                var pending = urls.Where(kv => !kv.Value).Select(kv => kv.Key).ToList();
                if (pending.Count == 0)
                    break; // frontier exhausted — nothing left to crawl

                Parallel.ForEach(pending, new ParallelOptions { MaxDegreeOfParallelism = 10 }, url =>
                {
                    if (Volatile.Read(ref count) >= MaxPages)
                        return;

                    // Mark visited up-front so a URL that fails to download is not
                    // retried forever. No lock around the download itself — the
                    // original held lockObject across the whole fetch, which
                    // serialized every worker and defeated Parallel.ForEach.
                    urls[url] = true;

                    Console.WriteLine("爬行" + url + "页面!");
                    string html = Download(url); // 下载
                    if (html != null)
                    {
                        // Download() already counted the page; the original
                        // incremented count here a second time, halving the
                        // effective budget.
                        Parse(html);
                        Console.WriteLine("爬行结束");
                    }
                });
            }
        }

        /// <summary>
        /// Downloads one page, writes it to disk as "{n}.html" and returns its HTML,
        /// or null when the download failed (the original returned "", which the
        /// caller's null-check treated as success).
        /// </summary>
        private string Download(string url)
        {
            try
            {
                string html;
                using (WebClient webClient = new WebClient()) // WebClient is IDisposable
                {
                    webClient.Encoding = Encoding.UTF8;
                    html = webClient.DownloadString(url);
                }

                string fileName;
                lock (lockObject)
                {
                    fileName = Path.Combine(downloadPath, $"{count}.html"); // 生成文件名
                    count++; // single increment per successful page
                }
                File.WriteAllText(fileName, html, Encoding.UTF8);
                return html;
            }
            catch (Exception ex) // 异常
            {
                Console.WriteLine(ex.Message);
                return null;
            }
        }

        /// <summary>
        /// Extracts href="..." / href='...' targets from the HTML and queues each
        /// one as unvisited. TryAdd leaves already-known URLs (and their visited
        /// flag) untouched.
        /// </summary>
        private void Parse(string html)
        {
            // BUG FIX: the original pattern used "[]*", an empty character class,
            // which makes the .NET Regex constructor throw ArgumentException.
            // The intent was "optional whitespace", i.e. \s*.
            string strRef = @"(href|HREF)\s*=\s*[""'][^""'#>]+[""']"; // 正则表达式
            MatchCollection matches = new Regex(strRef).Matches(html);
            foreach (Match match in matches)
            {
                // Take everything after '=', then strip quotes (the original passed
                // '"' twice and never trimmed the single quote its regex allows).
                string link = match.Value.Substring(match.Value.IndexOf('=') + 1)
                              .Trim(' ', '"', '\'', '#', '>');
                if (link.Length == 0) continue;
                urls.TryAdd(link, false);
            }
        }
    }
}

