﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Crawler_v1.spider.entity;
using Crawler_v1.utils;
using CsQuery;
using Crawler_v1.entity;
using System.Net;
using System.Text.RegularExpressions;
namespace Crawler_v1.spider
{
    class ContentProcessor
    {
        /// <summary>
        /// Parses the page downloaded from <c>page.Url</c> into the <see cref="Article"/>
        /// carried in <c>page.Data</c>. Delegates to the NSoup-based implementation.
        /// </summary>
        /// <param name="page">Page whose URL is fetched; its <c>Data</c> must be an <see cref="Article"/>.</param>
        public void parse(Page page)
        {
            parseNsoup(page);
        }

        /// <summary>
        /// Method 1: CsQuery-based parser (currently unused).
        /// Downloads the page HTML, stores it in <c>page.RawContent</c>, then extracts
        /// the title (selector <c>#artitle</c>) and body (selector <c>#zoom</c>) into the
        /// <see cref="Article"/> held by <c>page.Data</c>.
        /// </summary>
        /// <param name="page">Page whose URL is fetched; its <c>Data</c> must be an <see cref="Article"/>.</param>
        /// <exception cref="InvalidOperationException">Thrown when <c>page.Data</c> is not an <see cref="Article"/>.</exception>
        public void parseCsQuery(Page page)
        {
            string html = HttpUtil.GetHtml(page.Url);
            page.RawContent = html;

            CQ dom = html;

            var article = page.Data as Article;
            if (article == null)
            {
                // Fail fast with a clear message rather than a NullReferenceException below.
                throw new InvalidOperationException("page.Data must be an Article before parsing.");
            }

            var title = dom["#artitle"].Text();
            var content = dom["#zoom"].Html();

            // BUG FIX: the original pattern @"[/n]" is a character class matching the
            // literal characters '/' and 'n', which silently corrupted the article body.
            // The intent (see the original commented-out code) was to strip newlines.
            // Guard against a missing #zoom node, where Html() may return null.
            if (content != null)
            {
                content = Regex.Replace(content, @"\n", "");
            }

            article.content = content;
            article.title = title;
            article.url = page.Url;

            page.Data = article;
        }

        /// <summary>
        /// Method 2: NSoup-based parser (the active implementation used by <see cref="parse"/>).
        /// Downloads the page HTML, parses it with NSoup, and copies the title
        /// (selector <c>#artitle</c>) and body HTML (selector <c>#zoom</c>) into the
        /// <see cref="Article"/> held by <c>page.Data</c>.
        /// </summary>
        /// <param name="page">Page whose URL is fetched; its <c>Data</c> must be an <see cref="Article"/>.</param>
        /// <exception cref="InvalidOperationException">Thrown when <c>page.Data</c> is not an <see cref="Article"/>.</exception>
        public void parseNsoup(Page page)
        {
            string htmlString = HttpUtil.GetHtml(page.Url);
            NSoup.Nodes.Document doc = NSoup.NSoupClient.Parse(htmlString);

            var article = page.Data as Article;
            if (article == null)
            {
                // Fail fast with a clear message rather than a NullReferenceException below.
                throw new InvalidOperationException("page.Data must be an Article before parsing.");
            }

            var title = doc.Select("#artitle").Text;
            var content = doc.Select("#zoom").Html();

            article.content = content;
            article.title = title;
            article.url = page.Url;

            page.Data = article;
        }
    }
}
