﻿#region 命名空间

using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Text;
using System.Linq;
using System.Threading;
using System.Runtime.Serialization.Json;
using System.Runtime.Serialization;
using HtmlAgilityPack;
using WinnerCMS.BLL;
using WinnerCMS.Common;
using WinnerCMS.Model;
using WinnerCMS.Data;
using WinnerCMS.GatherEngine;

#endregion

namespace WinnerCMS.BLL.GatherEngine
{
    public class AppMain
    {
        // Shared BLL facade used by all gathering methods in this class
        // (rule lookup via GetRules, next-page caching via ReadNextPages /
        // CacheNextPages).
        private static readonly GatherManage GatherBLL;

        // Static constructor: creates the single shared GatherManage instance.
        // NOTE(review): an explicit static constructor (instead of an inline
        // field initializer) disables the beforefieldinit optimization and
        // forces initialization exactly on first use of AppMain — presumably
        // intentional; confirm before converting to an initializer.
        static AppMain()
        {
            GatherBLL = new GatherManage();
        }

        /// <summary>
        /// 采集栏目页面
        /// </summary>
        /// <param name="cp"></param>
        /// <returns></returns>
        /// <summary>
        /// Gathers one category page: resolves the current start URL, collects
        /// the article URLs on it and determines the next page — or the next
        /// configured start URL — to continue from.
        /// </summary>
        /// <param name="cp">Gathering state; its <c>StartUrlIndex</c> may be advanced by this call.</param>
        /// <returns>
        /// The gather result with the article URLs found and the next URL to
        /// gather. Errors are reported through <c>Err</c>, never thrown.
        /// </returns>
        public CategoryGatherResult GatherCategoryPage(CategoryParameter cp)
        {
            var rulesList = GatherBLL.GetRules(cp.IDs, cp.ModelId);

            // Expand every root rule (PID == 0): its StartUrl field holds one URL per line.
            List<ArrStartUrls> startUrls = new List<ArrStartUrls>();
            foreach (var rule in rulesList.Where(x => x.PID == 0))
            {
                startUrls.AddRange(
                    rule.StartUrl.Split(new[] {'\n', '\r'}, StringSplitOptions.RemoveEmptyEntries)
                        .Select(u => new ArrStartUrls {GatherID = rule.Id, Url = u}));
            }

            // Guard: with no start URL at all the old code clamped the index to -1
            // and then threw IndexOutOfRangeException on startUrls[-1].
            if (startUrls.Count == 0)
            {
                return new CategoryGatherResult
                {
                    Err = "没有可用的采集起始地址。",
                    NextCategoryUrl = string.Empty,
                    StartUrlIndex = cp.StartUrlIndex,
                    GatherNum = 0,
                    Urls = new List<string>()
                };
            }

            // Clamp the index into the valid range (both ends).
            if (cp.StartUrlIndex >= startUrls.Count)
            {
                cp.StartUrlIndex = startUrls.Count - 1;
            }
            if (cp.StartUrlIndex < 0)
            {
                cp.StartUrlIndex = 0;
            }

            int gatherId = startUrls[cp.StartUrlIndex].GatherID;
            // Continue from the pending next-page URL if there is one (null-safe),
            // otherwise begin at the configured start URL.
            string startUrl = !string.IsNullOrEmpty(cp.NextCategoryUrl)
                ? cp.NextCategoryUrl
                : startUrls[cp.StartUrlIndex].Url;
            CategoryGatherResult cgr = new CategoryGatherResult
            {
                GatherId = gatherId,
                GatheringUrl = startUrl
            };

            try
            {
                string[] getPageListRs = new string[0];
                HtmlDocument doc;
                if (cp.IsReverse == false)
                {
                    getPageListRs = GetArticleUrls(gatherId, startUrl, rulesList, out doc);
                    if (doc == null)
                    {
                        // Network failure: report it and return with state unchanged
                        // (replaces the old "goto Exit" flow).
                        cgr.Err = "采集文章列表，出现网络错误。";
                        cgr.StartUrlIndex = cp.StartUrlIndex;
                        cgr.GatherNum = 0;
                        cgr.Urls = new List<string>();
                        return cgr;
                    }
                    HtmlNode htmlnode = doc.DocumentNode;

                    #region 得到下一页链接

                    foreach (var ag in rulesList.Where(
                        m => m.CategoryPageNextUrl.Length > 0 && (m.Id == gatherId || m.PID == gatherId)))
                    {
                        switch (GatherRule.Analysis(ag.CategoryPageNextUrl))
                        {
                            case GatherRuleType.XPath:
                                HtmlNode hn = htmlnode.SelectSingleNode(ag.CategoryPageNextUrl.Substring(2));
                                // Also guard the href attribute: an <a> without href used to throw here.
                                cgr.NextCategoryUrl = (hn == null || hn.Attributes["href"] == null)
                                    ? string.Empty
                                    : hn.Attributes["href"].Value;
                                break;
                            case GatherRuleType.DomQuery:
                                hn = doc.SingleGet(ag.CategoryPageNextUrl.Substring(2));
                                cgr.NextCategoryUrl = (hn == null || hn.Attributes["href"] == null)
                                    ? string.Empty
                                    : hn.Attributes["href"].Value;
                                break;
                            default:
                                // Guard against an empty match list (the old code indexed [0] unconditionally).
                                string[] matches = Utilities.GetInfos(htmlnode.OuterHtml, ag.CategoryPageNextUrl);
                                cgr.NextCategoryUrl = matches.Length > 0 ? matches[0] : string.Empty;
                                break;
                        }
                        if (cgr.NextCategoryUrl.Length > 0) // 格式化采集链接
                        {
                            cgr.NextCategoryUrl = Utilities.FormatUrl(startUrl, cgr.NextCategoryUrl);
                        }

                        // 防止最后一页的“下一页”就是本身
                        if (cgr.NextCategoryUrl.Equals(startUrl, StringComparison.CurrentCultureIgnoreCase))
                        {
                            cgr.NextCategoryUrl = ""; // 当作没采集到
                        }

                        if (cgr.NextCategoryUrl.Length > 0)
                        {
                            break; // 找到下一页，跳出循环。
                        }
                    }

                    #endregion
                }
                else
                {
                    // Reverse mode: walk the cached page list (building it on the
                    // first pass) and merge article URLs from every page.
                    string[] classNextPages = GatherBLL.ReadNextPages(startUrl);
                    if (classNextPages.Length == 0)
                    {
                        classNextPages = GetNPageUrl(rulesList, gatherId, startUrl);
                        GatherBLL.CacheNextPages(startUrl, classNextPages);
                    }

                    foreach (var url in classNextPages)
                    {
                        string[] arr = GetArticleUrls(gatherId, url, rulesList, out doc);
                        getPageListRs = getPageListRs.Union(arr).ToArray();
                    }
                }

                #region 是否换下一个链接采集

                // 采集不到“下一页”地址就跳到下一链接进行采集；
                // when this was the last start URL, finish with an empty NextCategoryUrl.
                if (string.IsNullOrEmpty(cgr.NextCategoryUrl))
                {
                    cgr.GatherId = gatherId;
                    cp.StartUrlIndex++;
                    cgr.NextCategoryUrl = cp.StartUrlIndex < startUrls.Count
                        ? startUrls[cp.StartUrlIndex].Url
                        : string.Empty;
                }

                #endregion

                cgr.StartUrlIndex = cp.StartUrlIndex;
                cgr.GatherNum = 0;
                if (string.IsNullOrWhiteSpace(cgr.Err))
                    cgr.Err = string.Empty;

                getPageListRs = getPageListRs.Distinct().ToArray();

                if (cp.IsReverse)
                {
                    getPageListRs = getPageListRs.Reverse().ToArray();
                }

                cgr.Urls = getPageListRs.Select(x => Utilities.FormatUrl(startUrl, x)).ToList();

                return cgr;
            }
            catch (Exception ex)
            {
                LogHelper.Error(ex);
                cgr.Err = ex.Message;
                if (cgr.Urls == null)
                {
                    cgr.Urls = new List<string>(); // callers can always enumerate Urls
                }
                return cgr;
            }
        }

        /// <summary>
        /// Tests a set of gathering rules against a start URL.
        /// Not implemented yet: always returns an empty result.
        /// </summary>
        public string TestResult(string StartUrl, string CategoryArea, string ShowArticleUrl, string CategoryNextPage)
        {
            // Placeholder implementation — no rule evaluation is performed.
            string result = "";
            return result;
        }

        #region 收集“下一页”

        /// <summary>
        /// 收集“下一页”
        /// </summary>
        /// <param name="rulesList"></param>
        /// <param name="gatherId"></param>
        /// <param name="startUrl"></param>
        /// <returns>返回栏目后5页</returns>
        /// <summary>
        /// Collects the first few pages of a category listing by repeatedly
        /// following the configured "next page" rule, starting at
        /// <paramref name="startUrl"/>. A page that yields no article links is
        /// considered meaningless and stops the walk.
        /// </summary>
        /// <param name="rulesList">All gather rules; those matching <paramref name="gatherId"/> by Id or PID are consulted.</param>
        /// <param name="gatherId">Id of the root gather rule.</param>
        /// <param name="startUrl">Category page to start walking from.</param>
        /// <returns>Up to 5 page URLs; each new page is inserted at the front, so the deepest page comes first.</returns>
        private string[] GetNPageUrl(IList<Gather> rulesList, int gatherId, string startUrl)
        {
            NetHelper engine = new NetHelper
            {
                EncodingString = rulesList.First(x => x.Id == gatherId).Encoding
            };
            IList<string> classUrls = new List<string>();

            string nextCategoryUrl = startUrl;

            int count = 1; // pages fetched so far; hard cap of 5
            do
            {
                try
                {
                    if (count > 5)
                    {
                        break;
                    }
                    RemoteRes info = engine.Get(nextCategoryUrl);
                    if (info.StatusCode != HttpStatusCode.OK)
                    {
                        return classUrls.ToArray();
                    }
                    classUrls.Insert(0, nextCategoryUrl);
                    HtmlDocument doc = new HtmlDocument();
                    // 载入HTML
                    doc.LoadHtml(info.HTML);

                    HtmlNode htmlnode = doc.DocumentNode;

                    #region 通过采集文章页确认该 url 是否有意义。

                    string[] getPageListRs = new string[0];
                    foreach (var ag in rulesList.Where(
                        m => m.ContentPageUrl.Length > 0 && (m.Id == gatherId || m.PID == gatherId)))
                    {
                        switch (GatherRule.Analysis(ag.ContentPageUrl))
                        {
                            case GatherRuleType.XPath:
                                // SelectNodes returns null when nothing matches (HtmlAgilityPack);
                                // guard before extracting hrefs.
                                IList<HtmlNode> xpathNodes = htmlnode.SelectNodes(ag.ContentPageUrl.Substring(2));
                                getPageListRs = xpathNodes == null ? new string[0] : xpathNodes.Attr("href");
                                break;
                            case GatherRuleType.DomQuery:
                                IList<HtmlNode> queryNodes = doc.Get(ag.ContentPageUrl.Substring(2));
                                getPageListRs = queryNodes == null ? new string[0] : queryNodes.Attr("href");
                                break;
                            default:
                                // Regex-style rule: narrow the scan to the configured category area first.
                                string strCategoryArea = "";
                                foreach (var v in rulesList.Where(
                                    m => m.CategoryArea.Length > 0 && (m.Id == gatherId || m.PID == gatherId)))
                                {
                                    strCategoryArea = Utilities.GetInfo(info.HTML, v.CategoryArea);
                                    if (string.IsNullOrWhiteSpace(strCategoryArea) == false)
                                    {
                                        break;
                                    }
                                }

                                getPageListRs = Utilities.GetInfos(strCategoryArea, ag.ContentPageUrl);
                                break;
                        }
                        if (getPageListRs.Length > 0)
                            break;
                    }

                    if (getPageListRs.Length <= 0)
                    {
                        break; // page contains no article links -> stop walking
                    }

                    #endregion

                    // Resolve the "next page" link. Start from empty so that a page
                    // with no matching rule terminates the walk instead of silently
                    // re-fetching the same URL (old behavior kept the previous value).
                    string resolvedNext = string.Empty;
                    foreach (var ag in rulesList.Where(
                        m => m.CategoryPageNextUrl.Length > 0 && (m.Id == gatherId || m.PID == gatherId)))
                    {
                        switch (GatherRule.Analysis(ag.CategoryPageNextUrl))
                        {
                            case GatherRuleType.XPath:
                                HtmlNode hn = htmlnode.SelectSingleNode(ag.CategoryPageNextUrl.Substring(2));
                                // Guard the href attribute too: an <a> without href used to throw here.
                                resolvedNext = (hn == null || hn.Attributes["href"] == null)
                                    ? string.Empty
                                    : hn.Attributes["href"].Value;
                                break;
                            case GatherRuleType.DomQuery:
                                hn = doc.SingleGet(ag.CategoryPageNextUrl.Substring(2));
                                resolvedNext = (hn == null || hn.Attributes["href"] == null)
                                    ? string.Empty
                                    : hn.Attributes["href"].Value;
                                break;
                            default:
                                // Guard against an empty match list (the old code indexed [0] unconditionally).
                                string[] matches = Utilities.GetInfos(info.HTML, ag.CategoryPageNextUrl);
                                resolvedNext = matches.Length > 0 ? matches[0] : string.Empty;
                                break;
                        }
                        if (resolvedNext.Length > 0)
                        {
                            break;
                        }
                    }

                    if (resolvedNext.Length == 0)
                    {
                        break; // no next page found
                    }

                    nextCategoryUrl = Utilities.FormatUrl(startUrl, resolvedNext); // 格式化采集链接
                    count++;

                    if (nextCategoryUrl.EndsWith("#") || startUrl == nextCategoryUrl)
                    {
                        break; // anchor-only link or looped back to the start page
                    }
                }
                catch (Exception ex)
                {
                    // Log instead of silently swallowing; still advance the counter
                    // so a persistently failing page cannot loop forever.
                    LogHelper.Error(ex);
                    count++;
                }
            } while (nextCategoryUrl.Length > 0);

            return classUrls.ToArray();
        }

        #endregion

        #region 获取栏目页的所有文章地址

        /// <summary>
        /// 获取栏目页的所有文章地址
        /// </summary>
        /// <param name="gatherId"></param>
        /// <param name="startUrl"></param>
        /// <param name="rulesList"></param>
        /// <param name="doc"></param>
        /// <returns></returns>
        /// <summary>
        /// Fetches one category page and extracts all article URLs from it,
        /// using the rules that match <paramref name="gatherId"/> by Id or PID.
        /// </summary>
        /// <param name="gatherId">Id of the root gather rule.</param>
        /// <param name="startUrl">Category page URL to fetch.</param>
        /// <param name="rulesList">All gather rules.</param>
        /// <param name="doc">The parsed page, or null when the HTTP request did not return 200 OK.</param>
        /// <returns>Article URLs (possibly relative); null/empty and "javascript:" pseudo-links are filtered out.</returns>
        private string[] GetArticleUrls(int gatherId, string startUrl, IList<Gather> rulesList, out HtmlDocument doc)
        {
            NetHelper engine = new NetHelper
            {
                EncodingString = rulesList.First(x => x.Id == gatherId).Encoding
            };
            // 得到栏目网页代码
            RemoteRes info = engine.Get(startUrl);
            if (info.StatusCode != HttpStatusCode.OK)
            {
                // Signal network failure to the caller via a null document.
                doc = null;
                return new string[] {};
            }
            doc = new HtmlDocument();
            // 载入HTML
            doc.LoadHtml(info.HTML);

            HtmlNode htmlnode = doc.DocumentNode;
            string[] getPageListRs = new string[0];
            foreach (var ag in rulesList.Where(
                m => m.ContentPageUrl.Length > 0 && (m.Id == gatherId || m.PID == gatherId)))
            {
                switch (GatherRule.Analysis(ag.ContentPageUrl))
                {
                    case GatherRuleType.XPath:
                        // SelectNodes returns null when nothing matches (HtmlAgilityPack);
                        // guard before extracting hrefs.
                        IList<HtmlNode> xpathNodes = htmlnode.SelectNodes(ag.ContentPageUrl.Substring(2));
                        getPageListRs = xpathNodes == null ? new string[0] : xpathNodes.Attr("href");
                        break;
                    case GatherRuleType.DomQuery:
                        IList<HtmlNode> queryNodes = doc.Get(ag.ContentPageUrl.Substring(2));
                        getPageListRs = queryNodes == null ? new string[0] : queryNodes.Attr("href");
                        break;
                    default:
                        // Regex-style rule: narrow the scan to the configured category area.
                        // NOTE(review): this uses CategoryArea.Length > 2 while GetNPageUrl
                        // uses > 0 — presumably to skip a 2-char rule-type prefix; confirm
                        // which threshold is intended before unifying.
                        string strCategoryArea = "";
                        foreach (var v in rulesList.Where(
                            m => m.CategoryArea.Length > 2 && (m.Id == gatherId || m.PID == gatherId)))
                        {
                            strCategoryArea = Utilities.GetInfo(info.HTML, v.CategoryArea);
                            if (string.IsNullOrWhiteSpace(strCategoryArea) == false)
                            {
                                break;
                            }
                        }

                        getPageListRs = Utilities.GetInfos(strCategoryArea, ag.ContentPageUrl);
                        break;
                }
                if (getPageListRs.Length > 0)
                    break;
            }

            // Drop null/empty hrefs (would throw on StartsWith) and javascript:
            // pseudo-links. OrdinalIgnoreCase: a URL-scheme prefix check must not
            // be culture-sensitive (e.g. the Turkish dotless-I problem).
            return getPageListRs
                .Where(x => !string.IsNullOrEmpty(x) &&
                            !x.StartsWith("javascript", StringComparison.OrdinalIgnoreCase))
                .ToArray();
        }

        #endregion
    }

    /// <summary>
    /// One start URL together with the id of the gather rule it was expanded from.
    /// </summary>
    public class ArrStartUrls
    {
        /// <summary>Id of the gather rule this URL belongs to.</summary>
        public int GatherID { get; set; }

        /// <summary>A single start URL for category gathering.</summary>
        public string Url { get; set; }
    }
}