﻿using System;
using System.Collections.Generic;
using System.Net;
using System.Text.RegularExpressions;
using System.Web;
using IEx.Common;
using IEx.Model.Partials;
using IEx.Utilities;

namespace IEx.ParserBuilder.Rongbay
{
    public class RongbayParser : IHtmlParser
    {
        /// <summary>
        /// Entry point for the Rongbay source. Crawls the classified-ads listing
        /// starting at <paramref name="item"/>.Url and forwards every extracted
        /// announcement to <paramref name="attachResultItem"/>.
        /// </summary>
        /// <param name="item">Source request describing the URL to scan.</param>
        /// <param name="html">Raw HTML of the original request. Not used: listing
        /// pages are re-fetched inside <see cref="ExtractClassifiedAds"/>.</param>
        /// <param name="attachResultItem">Callback invoked once per extracted announcement.</param>
        /// <param name="attachNewItemSource">Callback for queuing follow-up source items (not used by this parser).</param>
        public void Parse(SourceItem item, string html, Action<Model.Partials.Announcement> attachResultItem, Action<SourceItem> attachNewItemSource)
        {
            List<Announcement> announcements = new List<Announcement>();
            // Extract classified advertising news; the helper handles its own
            // fail-request bookkeeping and error logging.
            ExtractClassifiedAds(item.Url, item, ref announcements);
            foreach (Announcement ann in announcements)
            {
                attachResultItem(ann);
            }
        }

        #region Methods to extract ClassifiedAds

        /// <summary>
        /// Loads the listing page at <paramref name="url"/>, extracts one
        /// <see cref="Announcement"/> per classified-ads table row (fetching each
        /// ad's detail page) and appends the results to <paramref name="announcements"/>.
        /// On success any previously recorded fail-request for <paramref name="item"/>
        /// is cleared; on error the request is saved for the next scanning pass.
        /// </summary>
        /// <param name="url">Listing page URL to scan.</param>
        /// <param name="item">Originating source request (metadata + fail-request bookkeeping).</param>
        /// <param name="announcements">Accumulator the extracted announcements are appended to.</param>
        private static void ExtractClassifiedAds(string url, SourceItem item, ref List<Announcement> announcements)
        {
            Logger.Debug("***SCANNING LIST OF NEWS CLASSIFIEDADS BY URL: " + url);
            HttpStatusCode status = HttpStatusCode.Accepted;
            string html = Utility.LoadHtml(url, ref status);
            Uri newUri = Utility.CreateNewUriForNextPage(url, 1);
            string nextPageUrl = url;
            try
            {
                if (!string.IsNullOrEmpty(html))
                {
                    // The listing table is delimited by these two markers.
                    string tableId = "id=\"table3\"";
                    string footerId = "id=\"EB-RB-BOX-2014\"";
                    string remainHtml = Utility.GetHtmlBody(html, tableId, footerId);

                    if (!string.IsNullOrEmpty(remainHtml))
                    {
                        Regex regTr = new Regex("<tr class=\".*?\">.*?</tr>");
                        // Hoisted out of the row loop: these patterns are loop-invariant.
                        Regex regTitle = new Regex("<a +class=\"newsTitle .*?\" .*?>.*?</a>");
                        Regex regPlace = new Regex("<td class=\"City\".*?>.*?</td>");
                        MatchCollection mTr = regTr.Matches(remainHtml);
                        for (int i = 0; i < mTr.Count; i++)
                        {
                            string titleAds = string.Empty;
                            string linkDetails = string.Empty;
                            string place = string.Empty;

                            string trContent = mTr[i].Value;
                            // Extract title and detail link of the classified ad.
                            Match mTitle = regTitle.Match(trContent);
                            if (mTitle.Success)
                            {
                                string titleHtml = mTitle.Value;
                                // Resolve the (possibly relative) href against the listing URL.
                                linkDetails = new Uri(newUri, RegexUtility.ExtractHyperlink(titleHtml)).ToString();
                                titleAds = RegexUtility.ExtractTextFromHtmlTag(titleHtml).Trim();
                            }
                            // Extract place (the "City" column).
                            Match mPlace = regPlace.Match(trContent);
                            if (mPlace.Success)
                            {
                                string placeHtml = mPlace.Value.Trim();
                                place = RegexUtility.ExtractTextFromHtmlTag(placeHtml).Trim();
                            }

                            // Fetch and parse the ad's detail page.
                            ClassifiedAds newsDetail = new ClassifiedAds();
                            if (!string.IsNullOrEmpty(linkDetails))
                                ExtractClassifiedAdsDetails(linkDetails, item, ref newsDetail);
                            if (!string.IsNullOrEmpty(titleAds))
                            {
                                Announcement ann = new Announcement();
                                ann.Title = titleAds;
                                ann.SourceId = item.SourceId;
                                ann.SourceCode = item.SourceCode;
                                ann.DateTime = newsDetail.DateTime;
                                ann.Message = newsDetail.Content;
                                ann.LangId = item.LangId;
                                ann.InsertedDate = DateTime.Now;
                                ann.PressrelaseCate = place;
                                ann.Link = linkDetails;
                                ann.Attachment = false;
                                ann.SocialNetwork.ClassifiedAds = newsDetail;
                                ann.AttachmentLinks = newsDetail.AttachmentLinks;
                                announcements.Add(ann);
                            }
                        }

                        // Extract next page. Guarded by the remainHtml check above:
                        // matching against a null/empty body would throw.
                        Regex regNextPage = new Regex("<span class=\"pagecurrent\">.*?</span>.*?<a.*? class=\"pagelink\">.*?</a>");
                        Match mNextPage = regNextPage.Match(remainHtml);
                        if (mNextPage.Success)
                        {
                            string nextPageHtml = mNextPage.Value;
                            nextPageUrl = RegexUtility.ExtractHyperlink(nextPageHtml);
                            // NOTE(review): the extracted link is immediately discarded, so
                            // pagination is effectively disabled. Preserved as-is — confirm
                            // this is intentional before removing the assignment below.
                            nextPageUrl = string.Empty;
                        }
                        else
                        {
                            nextPageUrl = string.Empty;
                        }
                    }
                }
                // if this request is fail request in previous scanning, 
                // remove it when success in current scanning
                if (item.FailId > 0)
                    Utility.DeleteFailRequests(item.FailId);

                Logger.WriteLogInfo("***DONE SCANNING LIST OF NEWS CLASSIFIEDADS BY URL: " + url);
            }
            catch (Exception ex)
            {
                // if has an error, save fail request to next time scanning
                if (item.FailId == 0)
                {
                    item.FailId = DateTime.Now.Ticks;
                    Utility.SaveFailRequests(item);
                }

                // then write log
                string message = string.Format(@"
            Extract list of classified ads
            Source code:   {0}
            HTML Parsing:   Fail
            Request URL:    {1}
            Page number:     {2}", item.SourceCode, url, nextPageUrl);
                Logger.WriteError(message, ex);
            }
            // check if has next page 
            // next page counts as a new request, so it needed insert into request queue
            // (ordinal comparison: URLs are machine identifiers, not linguistic text)
            if (!string.IsNullOrEmpty(nextPageUrl) && !string.Equals(url, nextPageUrl, StringComparison.Ordinal))
            {
                ExtractClassifiedAds(nextPageUrl, item, ref announcements);
            }
        }

        /// <summary>
        /// Loads the detail page of a single classified ad and fills
        /// <paramref name="classifiedAds"/> with title, content, post date,
        /// contact info, news id and attachment (image) links. The fields are
        /// only assigned when the essential parts (content, title, numeric
        /// news id and mobile number) were all found.
        /// </summary>
        /// <param name="url">Detail page URL of the ad.</param>
        /// <param name="item">Originating source request (date format + fail-request bookkeeping).</param>
        /// <param name="classifiedAds">Receives the extracted detail values.</param>
        private static void ExtractClassifiedAdsDetails(string url, SourceItem item, ref ClassifiedAds classifiedAds)
        {
            Logger.Debug("***SCANNING NEWS CLASSIFIEDADS DETAILS URL: " + url);
            HttpStatusCode status = HttpStatusCode.Accepted;
            string html = Utility.LoadHtml(url, ref status);
            try
            {
                if (!string.IsNullOrEmpty(html))
                {
                    // The detail body is delimited by these two markers.
                    string infoFullId = "id=\"NewsInfoFull\"";
                    string otherNewsId = "id=\"OtherNews\"";
                    string remainHtml = Utility.GetHtmlBody(html, infoFullId, otherNewsId);
                    if (!string.IsNullOrEmpty(remainHtml))
                    {
                        string title = string.Empty;
                        string mobile = string.Empty;
                        string name = string.Empty;
                        string email = string.Empty;
                        string date = string.Empty;
                        string time = string.Empty;
                        string datetime = string.Empty;
                        DateTime postDate = DateTime.Now;
                        string newsId = string.Empty;
                        string content = string.Empty;
                        List<string> atts = new List<string>();

                        // Title.
                        Regex regTitle = new Regex("<h1.*?>.*?</h1>");
                        Match mTitle = regTitle.Match(remainHtml);
                        if (mTitle.Success)
                        {
                            string titleHtml = mTitle.Value;
                            title = RegexUtility.ExtractTextFromHtmlTag(titleHtml);
                        }
                        // Mobile phone (HTML-decoded: the site entity-encodes it).
                        Regex regMobile = new Regex("<span class=\"iconCall\">.*?</span>");
                        Match mMobile = regMobile.Match(remainHtml);
                        if (mMobile.Success)
                        {
                            string mobileHtml = mMobile.Value;
                            mobile = HttpUtility.HtmlDecode(RegexUtility.ExtractTextFromHtmlTag(mobileHtml));
                        }
                        // Contact name.
                        Regex regName = new Regex("<div class=\"detail_personal\" +><a .*?>.*?</a>");
                        Match mName = regName.Match(remainHtml);
                        if (mName.Success)
                        {
                            string nameHtml = mName.Value;
                            name = RegexUtility.ExtractTextFromHtmlTag(nameHtml);
                        }
                        // E-mail, obfuscated as comma-separated character codes between
                        // the last '(' and the first ')' of the div's script.
                        Regex regEmail = new Regex("<div class=\"detail_email\">.*?</div>");
                        Match mEmail = regEmail.Match(remainHtml);
                        if (mEmail.Success)
                        {
                            string emailHtml = mEmail.Value;
                            int firstSign = emailHtml.LastIndexOf('(') + 1;
                            int endSign = emailHtml.IndexOf(')');
                            // Guard against pages missing the parentheses: Substring with a
                            // negative index would throw and fail the whole detail page.
                            if (firstSign > 0 && endSign >= 0)
                            {
                                string startCharCode = emailHtml.Substring(firstSign);
                                string endChartCode = emailHtml.Substring(endSign);
                                string charCode = startCharCode.Replace(endChartCode, string.Empty);
                                if (!string.IsNullOrEmpty(charCode))
                                {
                                    string[] charCodes = charCode.Split(',');
                                    string result = string.Empty;
                                    foreach (var code in charCodes)
                                    {
                                        // TryParse: one malformed code must not abort extraction.
                                        int codeItem;
                                        if (int.TryParse(code, out codeItem))
                                            result += Char.ConvertFromUtf32(codeItem);
                                    }
                                    if (!string.IsNullOrEmpty(result))
                                    {
                                        email = RegexUtility.ExtractTextFromHtmlTag(result);
                                    }
                                }
                            }
                        }
                        // Post date/time: time "HH:mm" and date "dd/MM/yyyy" each live in
                        // their own <b> element inside the detail_time div.
                        Regex regDateTime = new Regex("<div class=\"detail_time\">.*?</div>");
                        Match mDateTime = regDateTime.Match(remainHtml);
                        if (mDateTime.Success)
                        {
                            string dateTimeHtml = mDateTime.Value;
                            Regex regTimeDetail = new Regex("<b>.*?</b>");
                            // Hoisted out of the loop: patterns are loop-invariant.
                            Regex regTime = new Regex(@"\d{2}:\d{2}");
                            Regex regDate = new Regex(@"\d{2}/\d{2}/\d{4}");
                            MatchCollection mTimeDetail = regTimeDetail.Matches(dateTimeHtml);
                            for (int i = 0; i < mTimeDetail.Count; i++)
                            {
                                string timeDetailHtml = mTimeDetail[i].Value;
                                Match mTime = regTime.Match(timeDetailHtml);
                                if (mTime.Success)
                                {
                                    time = RegexUtility.ExtractTextFromHtmlTag(timeDetailHtml);
                                }
                                Match mDate = regDate.Match(timeDetailHtml);
                                if (mDate.Success)
                                {
                                    date = RegexUtility.ExtractTextFromHtmlTag(timeDetailHtml);
                                }
                            }
                            if (!string.IsNullOrEmpty(date) && !string.IsNullOrEmpty(time))
                            {
                                datetime = date + " " + time;
                                postDate = Utility.ConvertToDateTimeObject(datetime, item.DateTimeFormat);
                            }
                        }
                        // Extract newsId.
                        Regex regNewsId = new Regex("<div class=\"detail_idItem\">.*?</div>");
                        Match mNewsId = regNewsId.Match(remainHtml);
                        if (mNewsId.Success)
                        {
                            string newsIdHtml = mNewsId.Value;
                            Regex regItemId = new Regex("<b>.*?</b>");
                            Match mItemId = regItemId.Match(newsIdHtml);
                            if (mItemId.Success)
                            {
                                newsId = RegexUtility.ExtractTextFromHtmlTag(mItemId.Value);
                            }
                        }
                        // Extract content.
                        Regex regContent = new Regex("<div class=\"NewsContent\" id=\"NewsContent\".*?>.*?</div></div>");
                        Match mContent = regContent.Match(remainHtml);
                        if (mContent.Success)
                        {
                            string contentHtml = mContent.Value;
                            content = RegexUtility.ExtractTextFromHtmlTag(contentHtml);
                        }
                        // Collect thumbnail image links as attachments.
                        Regex regImage = new Regex("<a onclick.*?class=\"img_thumb\".*?</a>");
                        MatchCollection mImage = regImage.Matches(remainHtml);
                        for (int j = 0; j < mImage.Count; j++)
                        {
                            string imageHtml = mImage[j].Value.Trim();
                            string linkPhoto = RegexUtility.ExtractHyperlink(imageHtml);
                            atts.Add(linkPhoto);
                        }

                        // Return values which have been detected. TryParse: a missing or
                        // non-numeric news id should skip this ad, not throw and mark the
                        // whole request as failed.
                        int newsIdValue;
                        if (!string.IsNullOrEmpty(content) && !string.IsNullOrEmpty(title)
                            && int.TryParse(newsId, out newsIdValue) && newsIdValue > 0
                            && !string.IsNullOrEmpty(mobile))
                        {
                            classifiedAds.Content = content;
                            classifiedAds.DateTime = postDate;
                            classifiedAds.NewId = newsId;
                            classifiedAds.Title = title;
                            classifiedAds.ContactInfo.Email = email;
                            classifiedAds.ContactInfo.MobilePhone = mobile;
                            classifiedAds.ContactInfo.ScreenName = name;
                            classifiedAds.AttachmentLinks = atts;
                        }
                    }
                }
                // if this request is fail request in previous scanning, 
                // remove it when success in current scanning
                if (item.FailId > 0)
                    Utility.DeleteFailRequests(item.FailId);
                Logger.WriteLogInfo("***DONE SCANNING NEWS CLASSIFIEDADS DETAILS URL: " + url);
            }
            catch (Exception ex)
            {
                // if has an error, save fail request to next time scanning
                if (item.FailId == 0)
                {
                    item.FailId = DateTime.Now.Ticks;
                    Utility.SaveFailRequests(item);
                }

                // then write log
                string message = string.Format(@"
            Extract news classified ads details
            Source code:   {0}
            HTML Parsing:   Fail
            Request URL:    {1}", item.SourceCode, url);
                Logger.WriteError(message, ex);
            }
        }

        #endregion
    }
}
