﻿using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using static PoemCrawler.CrawlerEvent;

namespace PoemCrawler
{
    /// <summary>
    /// Crawls all poems of one author from a poetry site: walks the paged
    /// index, collects poem links, downloads each poem page and writes the
    /// extracted poems to a text file. Progress is reported through the
    /// <see cref="CrawStart"/>/<see cref="CrawEnd"/>/<see cref="CrawFail"/> events.
    /// </summary>
    internal class PoemCrawler
    {
        public event CrawStartHandler CrawStart;
        public event CrawEndHandler CrawEnd;
        public event CrawFailHandler CrawFail;

        private readonly string rootUrl;
        private readonly string relativeUrl;

        /// <summary>
        /// Creates a crawler for one author's poem index, e.g.
        /// rootUrl = "https://www.gushicimingju.com", relativeUrl = "/shiren/libai/".
        /// Default handlers report progress on the console; failures are
        /// additionally appended to the log file via <c>Logger</c>.
        /// </summary>
        /// <param name="rootUrl">Site root, prepended to every relative link found on index pages.</param>
        /// <param name="relativeUrl">Path of the author's index page, relative to <paramref name="rootUrl"/>.</param>
        public PoemCrawler(string rootUrl, string relativeUrl)
        {
            this.rootUrl = rootUrl;
            this.relativeUrl = relativeUrl;
            CrawStart += (sender, args) => Console.WriteLine($"开始爬取{args.Url}");
            CrawEnd += (sender, args) => Console.WriteLine($"爬取{args.Url}结束");
            CrawFail += (sender, args) => Console.WriteLine($"爬取{args.Url}失败");
            CrawFail += (sender, args) => Logger.LogToFile($"爬取{args.Url}失败\n{args.Reason}", true, true);
        }

        /// <summary>
        /// Crawls every poem of the author and writes the results to
        /// <paramref name="outputPath"/>. Downloads run sequentially (throttled
        /// inside <see cref="GetHtml"/>); HTML parsing is offloaded to tasks.
        /// </summary>
        /// <param name="outputPath">
        /// Target text file, one line per entry. Defaults to the historical
        /// hard-coded name so existing callers keep their behavior.
        /// </param>
        public void Craw(string outputPath = "libai_poems.txt")
        {
            string url = rootUrl + relativeUrl;

            // Step 1: fetch the first index page to learn the total page count.
            string? html = GetHtml(url);
            if (html == null) return;
            int pageNumber = HtmlParser.GetPoemPageCount(html);
            Console.WriteLine("总页数：" + pageNumber);

            // Index pages 2..N follow the ".../pageN/" URL convention.
            List<string> indexUrls = [];
            for (int i = 2; i <= pageNumber; i++)
                indexUrls.Add(rootUrl + relativeUrl + $"page{i}/");

            // Step 2: parse every index page for poem links; parsing runs on
            // background tasks while the next page is being downloaded.
            List<Task<WebPoems>> indexParseTasks = [Task.Run(() => HtmlParser.GetPoemUrls(html))];
            foreach (var indexUrl in indexUrls)
            {
                string? indexHtml = GetHtml(indexUrl);
                if (indexHtml == null) continue; // failure already reported via CrawFail
                indexParseTasks.Add(Task.Run(() => HtmlParser.GetPoemUrls(indexHtml)));
            }
            Task.WaitAll(indexParseTasks.ToArray());

            // Some index entries already contain the full poem; the rest are
            // relative links that still need a page fetch.
            List<string> poems = [];
            List<string> poemUrls = [];
            foreach (var indexParseTask in indexParseTasks)
            {
                WebPoems webPoems = indexParseTask.Result;
                poemUrls.AddRange(webPoems.urls);
                poems.AddRange(webPoems.poems);
            }

            // Step 3: download each poem page and extract its text.
            List<Task<string?>> poemParseTasks = [];
            foreach (var poemUrl in poemUrls)
            {
                string? poemHtml = GetHtml(rootUrl + poemUrl);
                if (poemHtml == null) continue;
                poemParseTasks.Add(Task.Run(() => HtmlParser.GetPoem(poemHtml)));
            }
            Task.WaitAll(poemParseTasks.ToArray());

            foreach (var poemParseTask in poemParseTasks)
            {
                string? poem = poemParseTask.Result;
                if (poem != null) poems.Add(poem);
            }

            File.WriteAllLines(outputPath, poems);
        }

        /// <summary>
        /// Downloads one page synchronously, raising <see cref="CrawStart"/> first
        /// and then either <see cref="CrawEnd"/> or <see cref="CrawFail"/>.
        /// </summary>
        /// <param name="url">Absolute URL to fetch.</param>
        /// <returns>The page HTML, or <c>null</c> when the download failed or returned nothing.</returns>
        public string? GetHtml(string url)
        {
            CrawStart?.Invoke(this, new CrawStartEventArgs(url));
            var task = DownloadString.DownloadStringAsync(url);
            Thread.Sleep(1000); // throttle requests to avoid getting IP-banned

            // Result blocks until completion, so no separate task.Wait() is needed.
            var taskRes = task.Result;
            if (taskRes.exception != null)
            {
                CrawFail?.Invoke(this, new CrawFailEventArgs(url, taskRes.exception.Message));
                return null;
            }
            if (taskRes.result == null)
            {
                CrawFail?.Invoke(this, new CrawFailEventArgs(url, "html返回结果为空"));
                return null;
            }
            CrawEnd?.Invoke(this, new CrawEndEventArgs(url));
            return taskRes.result;
        }
    }
}
