﻿using Abot.Crawler;
using Abot.Demo.CNBlogs;
using Abot.Poco;
using System;
using System.Collections.Generic;
//using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;

namespace Abot.Demo
{
    /// <summary>
    /// Weather-history crawler built on Abot.
    /// Example page: http://www.tianqihoubao.com/lishi/beijing/month/201101.html
    /// Requirement 1: crawl Beijing's full historical weather data for 2011-2013.
    /// Requirement 2: crawl historical data for the whole country.
    /// </summary>
    public class WeatherSpider
    {
        /// <summary>
        /// Seed URL the crawl starts from (Beijing history index page).
        /// </summary>
        public static readonly Uri FeedUrl = new Uri(@"http://www.tianqihoubao.com/lishi/beijing.html");

        // Matches monthly detail pages, e.g. .../lishi/beijing/month/201101.html.
        // The dots are escaped so '.' cannot match arbitrary characters
        // (the original pattern left them unescaped and was too permissive).
        // NOTE(review): this matches ANY yyyyMM, not just 2011-2013 — confirm
        // whether the crawl should be restricted to those years.
        private static readonly Regex WeatherUrlRegex =
            new Regex(@"http://www\.tianqihoubao\.com/lishi/beijing/month/\d{6}\.html", RegexOptions.Compiled);

        // Rows scraped so far. Access is guarded by s_Gate because Abot raises
        // the *Async events on worker threads.
        private static readonly IList<WeatherItem> m_DataList = new List<WeatherItem>();

        // Last URL handled by the completed callback; used to suppress the
        // back-to-back duplicate completions observed in practice.
        private static string lastAbsoluteUrl;

        // Guards m_DataList, lastAbsoluteUrl and the output file, since the
        // completed handler can run concurrently (MaxConcurrentThreads > 1).
        private static readonly object s_Gate = new object();

        /// <summary>
        /// Entry point: builds the crawler, runs the crawl from <see cref="FeedUrl"/>,
        /// and prints the crawl-level exception (null when the crawl succeeded).
        /// </summary>
        public static void MainEntry()
        {
            var crawler = GetManuallyConfiguredWebCrawler();
            var result = crawler.Crawl(FeedUrl);

            Console.WriteLine(result.ErrorException);
        }

        /// <summary>
        /// Creates a <see cref="PoliteWebCrawler"/> configured in code (no app.config),
        /// with the decision callbacks and event handlers wired up.
        /// </summary>
        /// <returns>A ready-to-run crawler instance.</returns>
        public static IWebCrawler GetManuallyConfiguredWebCrawler()
        {
            lock (s_Gate)
            {
                m_DataList.Clear();
            }

            CrawlConfiguration config = new CrawlConfiguration();
            config.CrawlTimeoutSeconds = 0;
            config.DownloadableContentTypes = "text/html, text/plain";
            config.IsExternalPageCrawlingEnabled = false;
            config.IsExternalPageLinksCrawlingEnabled = false;
            config.IsRespectRobotsDotTextEnabled = false;
            config.IsUriRecrawlingEnabled = false;
            config.MaxConcurrentThreads = System.Environment.ProcessorCount;
            // NOTE(review): 20 pages is not enough for three full years
            // (36 monthly pages + the seed) — raise this for requirement 1.
            config.MaxPagesToCrawl = 20;
            config.MaxPagesToCrawlPerDomain = 0;
            config.MinCrawlDelayPerDomainMilliSeconds = 1000;

            var crawler = new PoliteWebCrawler(config, null, null, null, null, null, null, null, null);
            crawler.ShouldCrawlPage(ShouldCrawlPage);
            crawler.ShouldDownloadPageContent(ShouldDownloadPageContent);
            crawler.ShouldCrawlPageLinks(ShouldCrawlPageLinks);
            crawler.PageCrawlStartingAsync += crawler_ProcessPageCrawlStarting;
            // Callback invoked after each page has been fetched and parsed.
            crawler.PageCrawlCompletedAsync += crawler_ProcessPageCrawlCompletedAsync;
            crawler.PageCrawlDisallowedAsync += crawler_PageCrawlDisallowed;
            crawler.PageLinksCrawlDisallowedAsync += crawler_PageLinksCrawlDisallowed;

            return crawler;
        }

        /// <summary>
        /// Scrapes one monthly detail page: rows 2..n of the #content table hold
        /// one day each, with 4 cells (date inside an anchor, weather, temperature,
        /// wind). Each complete row is appended to weather_bj.txt and m_DataList.
        /// Non-detail pages and duplicate completions are ignored.
        /// </summary>
        public static void crawler_ProcessPageCrawlCompletedAsync(object sender, PageCrawlCompletedArgs e)
        {
            if (DuplicateURL(e))
            {
                return;
            }

            // Only the monthly detail pages carry the data table we scrape.
            if (!WeatherUrlRegex.IsMatch(e.CrawledPage.Uri.AbsoluteUri))
            {
                return;
            }

            int trCount = e.CrawledPage.AngleSharpHtmlDocument.QuerySelectorAll("tr").Length;
            const int tdCount = 4;
            // Row 1 is the table header, so data rows start at 2.
            for (int tr = 2; tr <= trCount; tr++)
            {
                WeatherItem item = new WeatherItem();
                bool rowComplete = true;
                for (int td = 1; td <= tdCount; td++)
                {
                    string selector = string.Format(
                        "#content > table > tbody > tr:nth-child({0}) > td:nth-child({1})", tr, td);
                    if (td == 1)
                    {
                        // The date cell wraps its text in an anchor.
                        selector += " > a";
                    }

                    // Guard against rows that do not match the selector; the
                    // original code dereferenced the result unconditionally and
                    // threw NullReferenceException on any malformed row.
                    var cell = e.CrawledPage.AngleSharpHtmlDocument.QuerySelector(selector);
                    var firstChild = cell == null ? null : cell.FirstChild;
                    if (firstChild == null)
                    {
                        rowComplete = false;
                        break;
                    }

                    item[td] = firstChild.TextContent.Trim().Replace('\n', '#');
                }

                if (!rowComplete)
                {
                    continue;
                }

                // File append + list add are serialized: the completed handler
                // can run on multiple worker threads at once.
                lock (s_Gate)
                {
                    System.IO.File.AppendAllText("weather_bj.txt", item.ToString());
                    m_DataList.Add(item);
                }
            }
        }

        /// <summary>
        /// Returns true when this completion repeats the immediately preceding
        /// URL. Workaround: the crawler has been observed to report the exact
        /// same URL completed twice in a row.
        /// </summary>
        /// <param name="e">Completion event args carrying the crawled page.</param>
        /// <returns>True if the URL equals the last one handled.</returns>
        private static bool DuplicateURL(PageCrawlCompletedArgs e)
        {
            string url = e.CrawledPage.Uri.AbsoluteUri.TrimEnd('/');
            lock (s_Gate)
            {
                if (url.Equals(lastAbsoluteUrl, StringComparison.Ordinal))
                {
                    return true;
                }
                lastAbsoluteUrl = url;
                return false;
            }
        }

        /// <summary>
        /// Shared relevance predicate: the seed page, retries, root pages, and
        /// monthly detail pages are the only URLs this spider cares about.
        /// (CrawledPage derives from PageToCrawl, so link decisions use it too.)
        /// </summary>
        private static bool IsRelevantPage(PageToCrawl page)
        {
            return page.IsRoot
                || page.IsRetry
                || FeedUrl == page.Uri
                || WeatherUrlRegex.IsMatch(page.Uri.AbsoluteUri);
        }

        /// <summary>
        /// Only the feed page, pagination pages and detail pages are crawled.
        /// </summary>
        private static CrawlDecision ShouldCrawlPage(PageToCrawl pageToCrawl, CrawlContext context)
        {
            if (IsRelevantPage(pageToCrawl))
            {
                return new CrawlDecision { Allow = true };
            }
            return new CrawlDecision { Allow = false, Reason = "Not match uri" };
        }

        /// <summary>
        /// Only the feed page, pagination pages and detail pages are downloaded.
        /// </summary>
        private static CrawlDecision ShouldDownloadPageContent(PageToCrawl pageToCrawl, CrawlContext crawlContext)
        {
            if (IsRelevantPage(pageToCrawl))
            {
                return new CrawlDecision { Allow = true };
            }
            return new CrawlDecision { Allow = false, Reason = "Not match uri" };
        }

        /// <summary>
        /// Links are only followed from internal, relevant pages.
        /// </summary>
        private static CrawlDecision ShouldCrawlPageLinks(CrawledPage crawledPage, CrawlContext crawlContext)
        {
            if (!crawledPage.IsInternal)
                return new CrawlDecision { Allow = false, Reason = "We dont crawl links of external pages" };

            if (IsRelevantPage(crawledPage))
            {
                return new CrawlDecision { Allow = true };
            }
            return new CrawlDecision { Allow = false, Reason = "We only crawl links of pagination pages" };
        }

        /// <summary>Logs each page just before it is crawled.</summary>
        static void crawler_ProcessPageCrawlStarting(object sender, PageCrawlStartingArgs e)
        {
            PageToCrawl pageToCrawl = e.PageToCrawl;
            Console.WriteLine("About to crawl link {0} which was found on page {1}", pageToCrawl.Uri.AbsoluteUri, pageToCrawl.ParentUri.AbsoluteUri);
        }

        /// <summary>Logs pages whose links were skipped, with the reason.</summary>
        static void crawler_PageLinksCrawlDisallowed(object sender, PageLinksCrawlDisallowedArgs e)
        {
            CrawledPage crawledPage = e.CrawledPage;
            Console.WriteLine("Did not crawl the links on page {0} due to {1}", crawledPage.Uri.AbsoluteUri, e.DisallowedReason);
        }

        /// <summary>Logs pages that were skipped entirely, with the reason.</summary>
        static void crawler_PageCrawlDisallowed(object sender, PageCrawlDisallowedArgs e)
        {
            PageToCrawl pageToCrawl = e.PageToCrawl;
            Console.WriteLine("Did not crawl page {0} due to {1}", pageToCrawl.Uri.AbsoluteUri, e.DisallowedReason);
        }
    }
}
