﻿using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using HtmlAgilityPack; 

namespace SimpleCrawler.FromDemo
{
    public partial class Form1 : Form
    {
        #region Static Fields

        /// <summary>
        /// Crawler configuration shared by every run started from this form.
        /// </summary>
        private static readonly CrawlSettings Settings = new CrawlSettings();

        /// <summary>
        /// Bloom filter used to drop duplicate URLs before they are queued.
        /// About using a Bloom filter for URL de-duplication:
        /// http://www.cnblogs.com/heaad/archive/2011/01/02/1924195.html
        /// </summary>
        private static BloomFilter<string> filter;

        #endregion

        /// <summary>
        /// Guards <see cref="filter"/> and the data table: the crawler raises
        /// its events on worker threads, so the check-then-add on the filter
        /// and row insertion into the table must be serialized.
        /// </summary>
        private readonly object gate = new object();

        /// <summary>
        /// Accumulates one row per scraped blog page. Columns are created in
        /// the constructor; the original code never added them, so every
        /// <c>dr["..."]</c> assignment in <see cref="GetData"/> threw.
        /// </summary>
        private readonly DataTable dt = new DataTable();

        public Form1()
        {
            InitializeComponent();

            // GetData addresses these columns by name; without them every row
            // assignment throws ArgumentException ("column ... does not
            // belong to table").
            dt.Columns.Add("BlogTitle");
            dt.Columns.Add("BlogUrl");
            dt.Columns.Add("BlogAuthor");
            dt.Columns.Add("BlogTime");
            dt.Columns.Add("BlogMotto");
            dt.Columns.Add("BlogDepth");

            Load += Form1_Load;
        }

        /// <summary>
        /// Configures the crawler and starts the crawl when the form loads.
        /// </summary>
        void Form1_Load(object sender, EventArgs e)
        {
            filter = new BloomFilter<string>(200000);

            // Seed address for the crawl.
            Settings.SeedsAddress.Add("http://www.cnblogs.com/dudu/");

            // Number of crawler worker threads.
            Settings.ThreadCount = 5;

            // Maximum crawl depth.
            Settings.Depth = 7;

            // Link suffixes to skip while crawling; more can be added.
            Settings.EscapeLinks.Add(".jpg");

            // Automatic throttling (random 1-5 second delay) — disabled.
            Settings.AutoSpeedLimit = false;

            // When true, the host is locked: the sub-domain is stripped and
            // hosts with the same registered domain (e.g. mail.pzcast.com and
            // www.pzcast.com) are treated as one site.
            Settings.LockHost = false;

            // Settings.UserAgent and Settings.Timeout (default 15000 ms) keep
            // their defaults; Settings.RegularFilterExpressions is left empty.

            var master = new CrawlMaster(Settings);
            master.AddUrlEvent += MasterAddUrlEvent;
            master.DataReceivedEvent += MasterDataReceivedEvent;
            master.Crawl();
        }

        #region Methods

        /// <summary>
        /// Raised by the crawler (on worker threads) before a URL is queued.
        /// </summary>
        /// <param name="args">
        /// The candidate URL.
        /// </param>
        /// <returns>
        /// The <see cref="bool"/>: true to enqueue the URL, false when it has
        /// been seen before.
        /// </returns>
        private bool MasterAddUrlEvent(AddUrlEventArgs args)
        {
            // Contains/Add must be atomic: two worker threads could otherwise
            // both pass the Contains check and queue the same URL twice.
            lock (gate)
            {
                if (filter.Contains(args.Url))
                {
                    return false; // returning false keeps the URL off the queue
                }

                filter.Add(args.Url);
            }

            // Marshal the UI update onto the form's thread.
            BeginInvoke((ThreadStart)delegate
            {
                lstUrl.Items.Add(new ListViewItem() { Text = args.Url });
                label1.Text = lstUrl.Items.Count.ToString();
            });

            return true;
        }

        /// <summary>
        /// Appends one scraped blog record to <paramref name="data"/>.
        /// </summary>
        /// <param name="title">Blog post title.</param>
        /// <param name="url">Post URL.</param>
        /// <param name="author">Author display name.</param>
        /// <param name="time">Publication time as scraped text.</param>
        /// <param name="motto">Blog subtitle / author motto.</param>
        /// <param name="depth">Crawl depth the page was found at.</param>
        /// <param name="data">
        /// Target table; falls back to the form's own table when null. (The
        /// original ignored this parameter and always wrote to the field.)
        /// </param>
        public void GetData(string title, string url, string author, string time, string motto, string depth, DataTable data)
        {
            var table = data ?? dt;

            // Serialized: this runs on crawler worker threads and DataTable
            // is not safe for concurrent writes.
            lock (gate)
            {
                var dr = table.NewRow();
                dr["BlogTitle"] = title;
                dr["BlogUrl"] = url;
                dr["BlogAuthor"] = author;
                dr["BlogTime"] = time;
                dr["BlogMotto"] = motto;
                dr["BlogDepth"] = depth;
                table.Rows.Add(dr);
            }
        }

        /// <summary>
        /// Raised by the crawler (on worker threads) with each downloaded
        /// page; scrapes blog fields via XPath and records them.
        /// </summary>
        /// <param name="args">
        /// The downloaded page (URL, HTML, depth).
        /// </param>
        private void MasterDataReceivedEvent(DataReceivedEventArgs args)
        {
            var doc = new HtmlAgilityPack.HtmlDocument();
            doc.LoadHtml(args.Html);

            // Each SelectSingleNode below may return null on pages that are
            // not blog posts (the XPaths are cnblogs-post-specific); the
            // original dereferenced the results unconditionally and crashed
            // with NullReferenceException.
            HtmlNode titleNode = doc.DocumentNode.SelectSingleNode("//title");
            string title = titleNode != null ? titleNode.InnerText : GetTitle(args.Html);

            HtmlNode timeNode = doc.DocumentNode.SelectSingleNode("//*[@id='post-date']");
            string time = timeNode != null ? timeNode.InnerText : string.Empty;

            HtmlNode authorNode = doc.DocumentNode.SelectSingleNode("//*[@id='topics']/div/div[3]/a[1]");
            string author = authorNode != null ? authorNode.InnerText : string.Empty;

            HtmlNode mottoNode = doc.DocumentNode.SelectSingleNode("//*[@id='blogTitle']/h2");
            string motto = mottoNode != null ? mottoNode.InnerText : string.Empty;

            GetData(title, args.Url, author, time, motto, args.Depth.ToString(), dt);
        }

        /// <summary>
        /// Extracts the &lt;title&gt; text from raw HTML with a regex.
        /// Returns "" when no title tag is present (Groups[1] of a failed
        /// match is the empty string).
        /// </summary>
        string GetTitle(string html)
        {
            Match titleMatch = Regex.Match(html, "<title>([^<]*)</title>", RegexOptions.IgnoreCase | RegexOptions.Multiline);
            return titleMatch.Groups[1].Value;
        }

        /// <summary>
        /// Extracts the content of the DESCRIPTION meta tag; returns "" when
        /// the tag is absent or formatted differently (the pattern requires
        /// this exact attribute order and quoting).
        /// </summary>
        string GetDesc(string html)
        {
            Match descMatch = Regex.Match(html, "<meta name=\"DESCRIPTION\" content=\"([^<]*)\">", RegexOptions.IgnoreCase | RegexOptions.Multiline);
            return descMatch.Groups[1].Value;
        }

        /// <summary>
        /// Strips HTML tags from a fragment, keeping paragraph and line
        /// breaks as newlines and doubling double-quotes.
        /// </summary>
        /// <param name="stringToStrip">The HTML content.</param>
        /// <returns>The plain-text result.</returns>
        public static string StripHTML(string stringToStrip)
        {
            if (string.IsNullOrEmpty(stringToStrip))
            {
                return stringToStrip;
            }

            // Paragraph boundaries become blank lines.
            stringToStrip = Regex.Replace(stringToStrip, "</p(?:\\s*)>(?:\\s*)<p(?:\\s*)>", "\n\n", RegexOptions.IgnoreCase | RegexOptions.Compiled);

            // <br> / <br/> tags become newlines. The original line was
            // Regex.Replace(s, "", "\n", ...): an empty pattern matches at
            // every position and inserted a newline between every single
            // character of the input.
            stringToStrip = Regex.Replace(stringToStrip, "<br(?:\\s*/?)>", "\n", RegexOptions.IgnoreCase | RegexOptions.Compiled);

            stringToStrip = Regex.Replace(stringToStrip, "\"", "''", RegexOptions.IgnoreCase | RegexOptions.Compiled);
            return StripHtmlXmlTags(stringToStrip);
        }

        /// <summary>
        /// Removes every remaining HTML/XML tag.
        /// </summary>
        private static string StripHtmlXmlTags(string content)
        {
            return Regex.Replace(content, "<[^>]+>", "", RegexOptions.IgnoreCase | RegexOptions.Compiled);
        }

        #endregion
    }

    /// <summary>
    /// Simple data holder pairing a crawled page's URL with its title.
    /// </summary>
    public class MLstData
    {
        /// <summary>Page title.</summary>
        public string Title { get; set; }

        /// <summary>Absolute URL of the page.</summary>
        public string Url { get; set; }
    }
}
