﻿using Abot.Crawler;
using Abot.Demo.CNBlogs;
using Abot.Poco;
using System;
using System.Collections.Generic;
//using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;

namespace Abot.Demo
{
    /// <summary>
    /// Crawls news articles from news.cnblogs.com ("博客园") using Abot.
    /// See: http://www.cnblogs.com/VectorZhang/p/5475663.html
    /// </summary>
    public class CNBlogsNewsSpider
    {
        /// <summary>
        /// Demo entry point: builds the crawler and crawls starting at <see cref="FeedUrl"/>.
        /// </summary>
        public static void MainEntry()
        {
            var crawler = GetManuallyConfiguredWebCrawler();
            var result = crawler.Crawl(FeedUrl);

            System.Console.WriteLine(result.ErrorException);
        }

        /// <summary>
        /// Seed URL for the crawl.
        /// </summary>
        public static readonly Uri FeedUrl = new Uri(@"https://news.cnblogs.com/");

        /// <summary>
        /// Matches a news detail page such as https://news.cnblogs.com/n/557526
        /// BUG FIX: the dots are now escaped (an unescaped '.' matches any character),
        /// and both http and https are accepted — the old https-only pattern was the
        /// likely reason some detail URLs were reported as "not recognized".
        /// </summary>
        public static Regex NewsUrlRegex = new Regex(@"^https?://news\.cnblogs\.com/n/\d+", RegexOptions.Compiled);

        /// <summary>
        /// Matches a pagination page such as https://news.cnblogs.com/n/page/2/
        /// </summary>
        public static Regex NewsPageRegex = new Regex(@"^https?://news\.cnblogs\.com/n/page/\d+/$", RegexOptions.Compiled);

        // The crawler runs callbacks on up to MaxConcurrentThreads threads, so all
        // shared mutable state below must only be touched while holding m_SyncRoot.
        private static readonly object m_SyncRoot = new object();

        // Parsed news items collected during the crawl.
        private static readonly IList<CNBlogsNews> m_DataList = new List<CNBlogsNews>();

        // Normalized (trailing-'/'-stripped) URLs already processed. Replaces the old
        // "remember only the last URL" check, which missed duplicates that were not
        // delivered back-to-back and was not thread-safe.
        private static readonly HashSet<string> m_SeenUrls = new HashSet<string>(StringComparer.OrdinalIgnoreCase);

        /// <summary>
        /// Builds a <c>PoliteWebCrawler</c> configured for news.cnblogs.com and wires
        /// up the crawl-decision delegates and event handlers.
        /// </summary>
        public static IWebCrawler GetManuallyConfiguredWebCrawler()
        {
            lock (m_SyncRoot)
            {
                m_DataList.Clear();
                m_SeenUrls.Clear();
            }

            CrawlConfiguration config = new CrawlConfiguration();
            config.CrawlTimeoutSeconds = 0;                       // 0 = no overall timeout
            config.DownloadableContentTypes = "text/html, text/plain";
            config.IsExternalPageCrawlingEnabled = false;
            config.IsExternalPageLinksCrawlingEnabled = false;
            config.IsRespectRobotsDotTextEnabled = false;
            config.IsUriRecrawlingEnabled = false;
            config.MaxConcurrentThreads = System.Environment.ProcessorCount;
            config.MaxPagesToCrawl = 30;
            config.MaxPagesToCrawlPerDomain = 0;                  // 0 = unlimited per domain
            config.MinCrawlDelayPerDomainMilliSeconds = 1000;     // be polite: 1s between requests

            var crawler = new PoliteWebCrawler(config, null, null, null, null, null, null, null, null);

            crawler.ShouldCrawlPage(ShouldCrawlPage);
            crawler.ShouldDownloadPageContent(ShouldDownloadPageContent);
            crawler.ShouldCrawlPageLinks(ShouldCrawlPageLinks);

            crawler.PageCrawlStartingAsync += crawler_ProcessPageCrawlStarting;

            // Callback invoked after each page has been fetched.
            crawler.PageCrawlCompletedAsync += crawler_ProcessPageCrawlCompletedAsync;
            crawler.PageCrawlDisallowedAsync += crawler_PageCrawlDisallowed;
            crawler.PageLinksCrawlDisallowedAsync += crawler_PageLinksCrawlDisallowed;

            return crawler;
        }

        /// <summary>
        /// Parses and stores a news item whenever a crawled page is a news detail
        /// page. May be invoked concurrently, hence the locking around shared state.
        /// </summary>
        public static void crawler_ProcessPageCrawlCompletedAsync(object sender, PageCrawlCompletedArgs e)
        {
            if (DuplicateURL(e))
            {
                return;
            }

            // Only news detail pages carry article content worth parsing.
            if (!NewsUrlRegex.IsMatch(e.CrawledPage.Uri.AbsoluteUri))
            {
                return;
            }

            CNBlogsNews news = new CNBlogsNews();
            news.Parse(e.CrawledPage.AngleSharpHtmlDocument);
            System.Diagnostics.Debug.WriteLine(news.ToString());

            lock (m_SyncRoot)
            {
                // File.AppendAllText is not safe for concurrent writers, so the file
                // write happens under the same lock as the list update.
                System.IO.File.AppendAllText("news.txt", news.ToString());
                m_DataList.Add(news);
            }
        }

        /// <summary>
        /// Returns true when this page's URL has already been processed. The crawler
        /// was observed delivering the same URL twice in a row; recording every seen
        /// URL in a set also catches duplicates that are not adjacent.
        /// </summary>
        private static bool DuplicateURL(PageCrawlCompletedArgs e)
        {
            string url = e.CrawledPage.Uri.AbsoluteUri.TrimEnd('/');
            lock (m_SyncRoot)
            {
                // HashSet<T>.Add returns false when the item is already present.
                return !m_SeenUrls.Add(url);
            }
        }

        /// <summary>
        /// Returns true when the string denotes a publish date of today,
        /// e.g. "发布于 2016-05-09 11:25" ("published at ...") => true on 2016-05-09.
        /// </summary>
        public static bool IsPublishToday(string str)
        {
            if (string.IsNullOrEmpty(str))
            {
                return false;
            }

            const string prefix = "发布于";
            int index = str.IndexOf(prefix, StringComparison.Ordinal);
            if (index >= 0)
            {
                // BUG FIX: the original used Substring(prefix.Length), which is only
                // correct when the prefix starts at position 0; cut after the prefix
                // wherever it actually occurs.
                str = str.Substring(index + prefix.Length).Trim();
            }

            DateTime date;
            return DateTime.TryParse(str, out date) && date.Date == DateTime.Today;
        }

        /// <summary>
        /// True for pages we care about: the feed root, a retry, a pagination page
        /// or a news detail page. Shared by the crawl/download decisions below so
        /// the two predicates cannot drift apart.
        /// </summary>
        private static bool IsTargetPage(PageToCrawl pageToCrawl)
        {
            return pageToCrawl.IsRoot
                || pageToCrawl.IsRetry
                || FeedUrl == pageToCrawl.Uri
                || NewsPageRegex.IsMatch(pageToCrawl.Uri.AbsoluteUri)
                || NewsUrlRegex.IsMatch(pageToCrawl.Uri.AbsoluteUri);
        }

        /// <summary>
        /// Crawl only the feed page, pagination pages and news detail pages.
        /// </summary>
        private static CrawlDecision ShouldCrawlPage(PageToCrawl pageToCrawl, CrawlContext context)
        {
            return IsTargetPage(pageToCrawl)
                ? new CrawlDecision { Allow = true }
                : new CrawlDecision { Allow = false, Reason = "Not match uri" };
        }

        /// <summary>
        /// Download content only for the same set of pages that we crawl.
        /// </summary>
        private static CrawlDecision ShouldDownloadPageContent(PageToCrawl pageToCrawl, CrawlContext crawlContext)
        {
            return IsTargetPage(pageToCrawl)
                ? new CrawlDecision { Allow = true }
                : new CrawlDecision { Allow = false, Reason = "Not match uri" };
        }

        /// <summary>
        /// Follow links only from internal root/pagination pages; news detail pages
        /// are leaves of the crawl.
        /// </summary>
        private static CrawlDecision ShouldCrawlPageLinks(CrawledPage crawledPage, CrawlContext crawlContext)
        {
            if (!crawledPage.IsInternal)
            {
                return new CrawlDecision { Allow = false, Reason = "We dont crawl links of external pages" };
            }

            if (crawledPage.IsRoot || crawledPage.IsRetry || crawledPage.Uri == FeedUrl
                || NewsPageRegex.IsMatch(crawledPage.Uri.AbsoluteUri))
            {
                return new CrawlDecision { Allow = true };
            }

            return new CrawlDecision { Allow = false, Reason = "We only crawl links of pagination pages" };
        }

        static void crawler_ProcessPageCrawlStarting(object sender, PageCrawlStartingArgs e)
        {
            PageToCrawl pageToCrawl = e.PageToCrawl;
            Console.WriteLine("About to crawl link {0} which was found on page {1}", pageToCrawl.Uri.AbsoluteUri, pageToCrawl.ParentUri.AbsoluteUri);
        }

        static void crawler_PageLinksCrawlDisallowed(object sender, PageLinksCrawlDisallowedArgs e)
        {
            CrawledPage crawledPage = e.CrawledPage;
            Console.WriteLine("Did not crawl the links on page {0} due to {1}", crawledPage.Uri.AbsoluteUri, e.DisallowedReason);
        }

        static void crawler_PageCrawlDisallowed(object sender, PageCrawlDisallowedArgs e)
        {
            PageToCrawl pageToCrawl = e.PageToCrawl;
            Console.WriteLine("Did not crawl page {0} due to {1}", pageToCrawl.Uri.AbsoluteUri, e.DisallowedReason);
        }
    }
}
