﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
using IEx.Common;
using IEx.Model.Partials;
using IEx.Utilities;

namespace IEx.ParserBuilder.Baomoi
{
    public class BaomoiParser : IHtmlParser
    {
        /// <summary>
        /// Parses a Baomoi listing page: trims the HTML down to the main content
        /// area, scans the four known news sections (top, featured, latest, and the
        /// latest block inside featured) and builds an <see cref="Announcement"/>
        /// for every entry that yields a title, a source and a link.
        /// </summary>
        /// <param name="item">Source descriptor (URL, language, source code, fail-request id).</param>
        /// <param name="html">Raw HTML of the listing page.</param>
        /// <param name="attachResultItem">Callback for parsed announcements. NOTE(review): never invoked — detected announcements are collected but not handed to this callback; confirm intended wiring.</param>
        /// <param name="attachNewItemSource">Callback for newly discovered source items. NOTE(review): never invoked in this parser.</param>
        public void Parse(SourceItem item, string html, Action<Model.Partials.Announcement> attachResultItem, Action<SourceItem> attachNewItemSource)
        {
            try
            {
                string startMainContent = "id=\"maincontents\"";
                string endMainContent = "class=\"sidebar\"";
                string remainHtml = Utility.GetHtmlBody(html, startMainContent, endMainContent);
                List<Announcement> detectedAnnouncements = new List<Announcement>();
                if (!string.IsNullOrEmpty(remainHtml))
                {
                    // The four sections differ only in their HTML envelope; the
                    // per-entry parsing is identical and shared in CollectAnnouncements.

                    //Extract top news
                    Regex regTopNews = new Regex("<div class=\"top\">.*?</div>.*?</a></p></div>.*?</a></p></div>");
                    CollectAnnouncements(item, regTopNews.Matches(remainHtml), detectedAnnouncements);

                    //Extract featured news
                    Regex regFeaturedNews = new Regex("<div class=\"featured\">.*?</div></div>");
                    CollectAnnouncements(item, regFeaturedNews.Matches(remainHtml), detectedAnnouncements);

                    //Extract latest news
                    Regex regLatestNews = new Regex("<div class=\"latest\">.*?</div></div></div>");
                    CollectAnnouncements(item, regLatestNews.Matches(remainHtml), detectedAnnouncements);

                    //Extract latest news of the featured block
                    Regex regLatestNewsFeatured = new Regex("<div id=\"bmLatest\" class=\"latest\">.*?</div></div></div>");
                    CollectAnnouncements(item, regLatestNewsFeatured.Matches(remainHtml), detectedAnnouncements);
                }
                // if this request is a fail request from a previous scan,
                // remove it now that the current scan succeeded
                if (item.FailId > 0)
                    Utility.DeleteFailRequests(item.FailId);
            }
            catch (Exception ex)
            {
                // if there was an error, save the fail request so the next scan retries it
                if (item.FailId == 0)
                {
                    item.FailId = DateTime.Now.Ticks;
                    Utility.SaveFailRequests(item);
                }

                // then write log
                // (label changed from "Company code" to "Source code" for consistency
                // with the other two catch handlers in this class)
                string message = string.Format(@"
            Source code:    {0}
            HTML Parsing:   Fail
            Request URL:    {1}
            Search key:     {2}", item.SourceCode, item.Url, item.SearchCode);
                Logger.WriteError(message, ex);
            }
        }

        /// <summary>
        /// Shared per-entry parsing for one section's match collection: extracts the
        /// fields of every matched news fragment and appends an
        /// <see cref="Announcement"/> when title, source and link URL are all present.
        /// </summary>
        /// <param name="item">Source descriptor copied into each announcement.</param>
        /// <param name="matches">Regex matches, one per news fragment.</param>
        /// <param name="detectedAnnouncements">Accumulator the announcements are appended to.</param>
        private static void CollectAnnouncements(SourceItem item, MatchCollection matches, List<Announcement> detectedAnnouncements)
        {
            for (int i = 0; i < matches.Count; i++)
            {
                string newsHtml = matches[i].Value;
                string title = string.Empty;
                string linkUrl = string.Empty;
                string time = string.Empty;
                string content = string.Empty;
                string source = string.Empty;
                string summary = string.Empty;
                string tags = string.Empty;
                ExtractContentNews(item, newsHtml, ref title, ref time, ref source, ref summary, ref content, ref linkUrl, ref tags);
                // An entry is only kept when the three identifying fields were found.
                if (!string.IsNullOrEmpty(title) && !string.IsNullOrEmpty(source) && !string.IsNullOrEmpty(linkUrl))
                {
                    Announcement ann = new Announcement();
                    ann.LangId = item.LangId;
                    ann.Link = linkUrl;
                    ann.SourceCode = item.SourceCode;
                    ann.Title = title;
                    ann.SourceId = item.SourceId;
                    ann.InsertedDate = DateTime.Now;
                    detectedAnnouncements.Add(ann);
                }
            }
        }

        /// <summary>
        /// Extracts the fields of a single news fragment (title, link, time, source,
        /// summary) from its HTML, and triggers a detail-page fetch when a link was found.
        /// </summary>
        /// <param name="item">Source descriptor, used for fail-request bookkeeping and logging.</param>
        /// <param name="html">HTML of one news fragment.</param>
        /// <param name="title">Receives the entry title.</param>
        /// <param name="time">Receives the entry time text.</param>
        /// <param name="source">Receives the entry's source name.</param>
        /// <param name="summary">Receives the entry summary.</param>
        /// <param name="content">NOTE(review): never assigned here — detail-page content is fetched but discarded; confirm intent.</param>
        /// <param name="linkUrl">Receives the entry's detail-page URL.</param>
        /// <param name="tags">NOTE(review): never assigned here — see <paramref name="content"/>.</param>
        private static void ExtractContentNews(SourceItem item, string html, ref string title, ref string time, ref string source, ref string summary, ref string content, ref string linkUrl, ref string tags)
        {
            try
            {
                //Extract title and link url of the news details
                Regex regTitle = new Regex("<p class=\"title\">.*?</p>");
                Match mTitle = regTitle.Match(html);
                if (mTitle.Success)
                {
                    string titleHtml = mTitle.Value;
                    title = RegexUtility.ExtractTextFromHtmlTag(titleHtml);
                    linkUrl = RegexUtility.ExtractHyperlink(titleHtml);
                }
                else
                {
                    //Extract title from the featured-news <h1> variant.
                    // BUGFIX: the original pattern ("<h1 class=\"titlefeaturedNewsHtmlh1>")
                    // was a corrupted paste (variable name fused into the literal) and
                    // could never match. Reconstructed as the obvious h1/title pattern —
                    // verify against a live featured-news page.
                    Regex regTitleFeatured = new Regex("<h1 class=\"title\">.*?</h1>");
                    Match mTitleFeatured = regTitleFeatured.Match(html);
                    if (mTitleFeatured.Success)
                    {
                        string titleNewsFocusHtml = mTitleFeatured.Value;
                        title = RegexUtility.ExtractTextFromHtmlTag(titleNewsFocusHtml);
                        linkUrl = RegexUtility.ExtractHyperlink(titleNewsFocusHtml);
                    }
                }
                if (!string.IsNullOrEmpty(linkUrl))
                {
                    string timeDetail = string.Empty;
                    string summaryDetail = string.Empty;
                    string contentDetail = string.Empty;
                    string tagsDetail = string.Empty;
                    //Extract content, tags and time details from the linked page.
                    // NOTE(review): the *Detail values are never copied back to the
                    // ref parameters, so the detail fetch currently has no effect
                    // on the caller — confirm whether they should be assigned.
                    ExtractContentNews(item, linkUrl, ref timeDetail, ref summaryDetail, ref contentDetail, ref tagsDetail);
                }
                //Extract time
                Regex regTime = new Regex("<span class=\"time\">.*?</span>");
                Match mTime = regTime.Match(html);
                if (mTime.Success)
                {
                    string timeHtml = mTime.Value;
                    // BUGFIX: the original assigned this to `title`, clobbering the
                    // title extracted above; the time text belongs in `time`.
                    time = RegexUtility.ExtractTextFromHtmlTag(timeHtml);
                }
                //Extract source
                Regex regSource = new Regex("<span class=\"source\">.*?</span>");
                Match mSource = regSource.Match(html);
                if (mSource.Success)
                {
                    string sourceHtml = mSource.Value;
                    source = RegexUtility.ExtractTextFromHtmlTag(sourceHtml);
                }
                //Extract summary
                Regex regSummary = new Regex("<p class=\"summary\">.*?</p>");
                Match mSummary = regSummary.Match(html);
                if (mSummary.Success)
                {
                    string summaryHtml = mSummary.Value;
                    summary = RegexUtility.ExtractTextFromHtmlTag(summaryHtml);
                }

                // if this request is a fail request from a previous scan,
                // remove it now that the current scan succeeded
                if (item.FailId > 0)
                    Utility.DeleteFailRequests(item.FailId);
            }
            catch (Exception ex)
            {
                // if there was an error, save the fail request so the next scan retries it
                if (item.FailId == 0)
                {
                    item.FailId = DateTime.Now.Ticks;
                    Utility.SaveFailRequests(item);
                }

                // then write log
                string message = string.Format(@"
            Source code:   {0}
            HTML Parsing:   Fail
            Request URL:    {1}
            Search key:     {2}", item.SourceCode, item.Url, item.SearchCode);
                Logger.WriteError(message, ex);
            }
        }

        /// <summary>
        /// Loads a news detail page and scans it for the publication time using the
        /// three known markup variants.
        /// NOTE(review): the match branches are empty, so none of the ref parameters
        /// (<paramref name="time"/>, <paramref name="summary"/>, <paramref name="content"/>,
        /// <paramref name="tags"/>) are ever assigned — this method is an unfinished
        /// skeleton kept behavior-identical; complete the extraction before relying on it.
        /// </summary>
        /// <param name="item">Source descriptor, used for fail-request bookkeeping and logging.</param>
        /// <param name="url">Detail-page URL to download.</param>
        /// <param name="time">Intended to receive the publication time (currently never set).</param>
        /// <param name="summary">Intended to receive the summary (currently never set).</param>
        /// <param name="content">Intended to receive the article content (currently never set).</param>
        /// <param name="tags">Intended to receive the tags (currently never set).</param>
        private static void ExtractContentNews(SourceItem item, string url, ref string time, ref string summary, ref string content, ref string tags)
        {
            try
            {
                Logger.Debug("***SCANNING CONTENT DETAIL NEWS URL: " + url);
                HttpStatusCode status = HttpStatusCode.Accepted;
                string html = Utility.LoadHtml(url, ref status);
                if (!string.IsNullOrEmpty(html))
                {
                    // Collapse newlines/tabs so the single-line regexes below can span the markup.
                    html = string.Join(string.Empty, Regex.Split(html, @"(?:\r\n|\n|\r|\t)"));
                    //Extract time — try the three known markup variants in order.
                    Regex regTime = new Regex("<time datetime=\".*?\">.*?</time>");
                    Match mTime = regTime.Match(html);
                    if (mTime.Success)
                    {
                        // TODO: extract the time text from mTime.Value into `time`.
                    }
                    else
                    {
                        regTime = new Regex("<p class=\"pubdate\">.*?</p>");
                        mTime = regTime.Match(html);
                        if (mTime.Success)
                        {
                            // TODO: extract the time text from mTime.Value into `time`.
                        }
                        else
                        {
                            regTime = new Regex("<div class=\"date3 fr mgt2\">.*?</div>");
                            mTime = regTime.Match(html);
                            if (mTime.Success)
                            {
                                // TODO: extract the time text from mTime.Value into `time`.
                            }
                            else
                            {
                                // TODO: no known time markup found — decide on a fallback.
                            }
                        }
                    }
                }
                // if this request is a fail request from a previous scan,
                // remove it now that the current scan succeeded
                if (item.FailId > 0)
                    Utility.DeleteFailRequests(item.FailId);

                Logger.WriteLogInfo("***DONE SCANNING CONTENT DETAIL NEWS URL: " + url);
            }
            catch (Exception ex)
            {
                // if there was an error, save the fail request so the next scan retries it
                if (item.FailId == 0)
                {
                    item.FailId = DateTime.Now.Ticks;
                    Utility.SaveFailRequests(item);
                }

                // then write log
                string message = string.Format(@"
            Source code:   {0}
            HTML Parsing:   Fail
            Request URL:    {1}
            Search key:     {2}", item.SourceCode, item.Url, item.SearchCode);
                Logger.WriteError(message, ex);
            }
        }
    }
}
