﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using NewsMine.DomainObjects;
using NewsMine.Storage;
using System.Web;
using NewsMine.Utilities;

namespace NewsDiscoveryEngine
{
    [Serializable]
    public class ProcessWebUrl2FeedWorkFlow : BaseJobWorkFlow
    {
        /// <summary>
        /// Entry point of the workflow: fires the scheduling job. Everything after
        /// that is driven through the OutputProduced callback into ProcessOutput.
        /// </summary>
        public override void Initialize()
        {
            var scheduleJob = new ScheduleHtml2FeedJob { WorkFlow = this };

            // Route every output the job produces into this workflow's dispatcher.
            scheduleJob.OutputProduced += job_OutputProduced;

            scheduleJob.ExecuteJob();
        }

        // Event handler wired up in Initialize: forwards each job's output into the
        // workflow's central ProcessOutput dispatcher.
        void job_OutputProduced(BaseJob job, object outputObject)
        {
            ProcessOutput(job, outputObject);
        }

        /// <summary>
        /// Central dispatcher for every job in the url-to-feed pipeline. Routes each
        /// job's output to the stage-specific handler.
        /// </summary>
        /// <param name="job">The job that produced the output; must not be null.</param>
        /// <param name="outputData">The job's output; its expected type depends on the job type.</param>
        public override void ProcessOutput(BaseJob job, object outputData)
        {
            /*
             * 1. Read website information from WebSiteInfoMasterStore and schedule crawling jobs.
             *  For each job
             * 2. Download Url
             * 3. Store it in HtmlDocumentStore
             * 4. Extract Html links from the above downloaded document and create Dedupe job.
             * 5. In Dedupe job, we need to get html links from previous document and find the new links.
             * 6. After that we need create job to dedupe against main html feed store
             * 7. Uniquely identified links will be processed for the details.
             * For each link that identified
             * 8. Download html document (Save it in htmldocuemntstore) and get meta data like Title, keywords,description, Image.
             *    (In future this job will be enhanced to accept the xpath hints to get the details like image and body content for each domain)
             * 9. Save the feed in main htmlFeedStore
             * 10.Index starts here.....
             */

            // The old "!(job is BaseJob)" test was always false for a non-null
            // BaseJob parameter, so only the null check is kept.
            if (job == null)
            {
                throw new ApplicationException("Invalid job... Null or non BaseJob type will not be accepted.");
            }

            if (job is ScheduleHtml2FeedJob)
            {
                ProcessOutput_Of_ScheduleHtml2FeedJob(outputData);
                return;
            }

            if (job is HtmlDownloadJob)
            {
                ProcessOutput_Of_HtmlDownloadJob(outputData);
                return;
            }

            if (job is HtmlString2LinksJob)
            {
                ProcessOutput_Of_HtmlString2LinksJob((HtmlString2LinksJob)job, outputData);
                return;
            }

            if ((job is HtmlFeedDetailsFromTargetUrlJob) && (outputData != null))
            {
                if (!(outputData is FeedItem))
                {
                    throw new ApplicationException("Invalid outputData rendered from the HtmlFeedDetailsFromTargetUrlJob; This data should be type of FeedItem)");
                }

                //feedItem = SaveHtmlFeedItemToMainStoreJob.SaveFeedItemInHtmlFeedStore(UtilityExtn.CleanFeed(feedItem));
                Defaults.IndexingQueue.Enqueue((FeedItem)outputData);
            }
        }

        /// <summary>
        /// Handles the link list extracted from one crawled html page: self-merge,
        /// dedupe (against the previous visit and against the main store), domain and
        /// title filtering, cleansing, conversion to feed items, and finally either
        /// direct indexing or scheduling of the detailed download pass.
        /// </summary>
        /// <param name="job">The link-extraction job; supplies the source url.</param>
        /// <param name="outputData">Expected to be a List&lt;HtmlLink&gt;; null is ignored.</param>
        private void ProcessOutput_Of_HtmlString2LinksJob(HtmlString2LinksJob job, object outputData)
        {
            if (outputData == null)
                return;

            if (!(outputData is List<HtmlLink>))
                throw new ApplicationException("Invalid outputData rendered from the HtmlString2LinksJob; This data should be type of List<HtmlLink>)");

            var htmlLinks = (List<HtmlLink>)outputData;

            if (htmlLinks.Count == 0)
                return;

            string sourceUrl = job.HtmlContent.Url;
            string sourceDomain = UtilityExtn.GetDomainNameFromUrl(sourceUrl).ToLower();

            //actionItem: all the images and titles needs to be merged properly and make sure that we delete items that are not having any titles.
            //We may need to consider keeping those images separately for getting any help to get logos and others.

            // Merge duplicate entries found within this single page (e.g. an image
            // anchor and a text anchor pointing to the same article).
            var filteredHtmlLinks = SelfMergeAndDedupeForLinksInSingleHtmlPage(htmlLinks);

            // Drop links we already saw on the previous crawl of this url.
            filteredHtmlLinks = HtmlLinksDedupeAgainstPreviousVisitJob.DedupeHtmlLinks(sourceUrl, filteredHtmlLinks);

            // Persist the full (unfiltered) link set so the next crawl of this url can dedupe against it.
            Defaults.HtmlLinkStore.Replace(job.HtmlContent.Url, htmlLinks);
            Defaults.HtmlLinkStore.Close();

            if (filteredHtmlLinks == null || filteredHtmlLinks.Count == 0)
                return;

            // Fall back to the image alt text when a link carries no title of its own.
            foreach (var link in filteredHtmlLinks)
            {
                if (string.IsNullOrEmpty(link.Title) && !string.IsNullOrEmpty(link.ImageAltText))
                    link.Title = link.ImageAltText;
            }

            //todo: filtered html links only contains links that are inside that domain,
            //so we need some code to get the extra hint links to spread over internet.
            bool isSourceDomainFilterEnabled = Convert.ToBoolean(NewsMine.Configuration.ConfigurationManager.Get("isSourceDomainFilterEnabled"));

            if (isSourceDomainFilterEnabled)
            {
                // Keep only links that live inside the source domain.
                //todo: white-list the domains that can be accepted by the system and filter here;
                //otherwise this condition only allows the source domain.
                filteredHtmlLinks = (from link in filteredHtmlLinks
                                     where IsThisLinkFromTheRootDomain(link.Link.ToLower(), sourceDomain, link.IsFeedSourced)
                                     select link).ToList();
            }

            if (filteredHtmlLinks.Count == 0)
                return;

            // Remove entries with missing titles or titles containing invalid/unknown characters.
            filteredHtmlLinks = (from link in filteredHtmlLinks
                                 where link != null && !string.IsNullOrEmpty(link.Title) && IsInvalidCharsFound(link.Title) == false
                                 select link).ToList();

            if (filteredHtmlLinks.Count == 0)
                return;

            // Configured cleansing pass: removes all unnecessary stuff from the list.
            var cleansedLinks = Cleansing.CleansingManager.Cleanse(filteredHtmlLinks);

            //actionItem: there could be items in the store without images under the same link; opportunity to update and index them as well.
            //actionItem: deduping needs to be done in a group manner, especially against the main store.
            //actionItem: consider adding tags, category, menu item title and referral url in the feed so we can query in different dimensions.
            //actionItem: give scoring based on the image, image size, wrapping tags like H1, presence in home page.

            // Dedupe against the main feed store (by title and link).
            cleansedLinks = HtmlLinksDedupeAgainstMainHtmlFeedStore.DedupeHtmlLinksAginstTitleAndLink(sourceUrl, cleansedLinks);

            if (cleansedLinks == null || cleansedLinks.Count == 0)
                return;

            // Ignore links that still have no usable title. (Items with images but no
            // title could be stored for future use, e.g. logo discovery.)
            var qualifiedLinks = FilterLinksWithQualifiedTitles(cleansedLinks);

            if (qualifiedLinks.Count == 0)
                return;

            List<FeedItem> feedItems = HtmlLinksToFeedItems(qualifiedLinks, sourceUrl);

            // Filter feeds for the source domain (same switch as the link-level filter above).
            List<FeedItem> filteredFeedItems;
            if (isSourceDomainFilterEnabled)
            {
                filteredFeedItems = (from fdItem in feedItems
                                     where IsThisDomainsAreFromSameRoot(fdItem.Domain, sourceDomain)
                                     select fdItem).ToList();
            }
            else
            {
                filteredFeedItems = feedItems.ToList();
            }

            // Either store may return null when no configuration exists for this
            // domain/url; all uses below are null-guarded (bug fix: the tags were
            // previously read without these guards, causing a NullReferenceException).
            WebSiteInfo webInfo = (WebSiteInfo)Defaults.WebSiteInfoMasterStore.Get(sourceDomain);
            WebSiteMenu menuItem = (WebSiteMenu)Defaults.CrawlingTrackingStore.Get(sourceUrl);

            string currentDomainLanguage = ResolveConfiguredLanguage(webInfo, menuItem);

            //todo: configured tags are collected but not yet attached to the feed items.
            List<string> configuredTags = CollectConfiguredTags(webInfo, menuItem);

            string category = ResolveConfiguredCategory(menuItem);

            bool isDetailedFeedProcessRequired = Convert.ToBoolean(NewsMine.Configuration.ConfigurationManager.Get("isDetailedFeedProcessRequired"));

            for (int i = 0; i < filteredFeedItems.Count; i++)
            {
                filteredFeedItems[i].category = category;
                filteredFeedItems[i].Language = currentDomainLanguage;

                DetectLanguageByInspectingTitleContent(filteredFeedItems[i]);

                // If the detector could not decide, the website's default language wins.
                if (string.IsNullOrEmpty(filteredFeedItems[i].Language))
                {
                    filteredFeedItems[i].Language = currentDomainLanguage;
                }

                SaveHtmlFeedItemToMainStoreJob.SaveTitleAndLinkForDeduplicationPurpose(filteredFeedItems[i]);

                var feedItem = UtilityExtn.CleanFeed(filteredFeedItems[i]);

                // Normalize the publish date: items without one are stamped "now".
                // (The original re-ran this normalization inside the branch below; that
                // second copy was dead code and has been removed.)
                if (feedItem.PubDate == DateTime.MinValue)
                    feedItem.PubDate = DateTime.UtcNow;
                feedItem.UpdatedDateTime = feedItem.PubDate;

                // Without the detailed pass the cleaned feed goes straight to indexing.
                if (isDetailedFeedProcessRequired == false)
                {
                    Defaults.IndexingQueue.Enqueue(feedItem);
                }
            }

            if (isDetailedFeedProcessRequired == false)
                return;

            //Defaults.TargetDetailsQueueItemStore.Close();
            //Defaults.TargetDetailsQueueItemStore.Open();

            // Detailed pass: download each target url to enrich the feed with meta data.
            for (int i = 0; i < filteredFeedItems.Count; i++)
            {
                //if (Defaults.TargetDetailsQueueItemStore.Get(feedItem.Link) != null)
                //    continue;
                //Defaults.TargetDetailsQueueItemStore.Replace(feedItem.Link, "0");

                var detailsJob = new HtmlFeedDetailsFromTargetUrlJob
                {
                    WorkFlow = this,
                    FeedItem = filteredFeedItems[i]
                };

                Defaults.DownloadingJobQueue.Enqueue(detailsJob);
            }
        }

        /// <summary>
        /// Resolves the language for a crawled page: the website's configured default,
        /// overridden by a menu-level language when one is available. Returns an empty
        /// string when the website has no language configured.
        /// </summary>
        private string ResolveConfiguredLanguage(WebSiteInfo webInfo, WebSiteMenu menuItem)
        {
            string language = "";

            if (webInfo != null && !string.IsNullOrEmpty(webInfo.Language))
            {
                language = webInfo.Language;

                // A menu-level language, when configured, overrides the website default.
                if (menuItem != null && !string.IsNullOrEmpty(menuItem.Language))
                {
                    language = menuItem.Language;
                }
            }

            return language;
        }

        /// <summary>
        /// Collects the distinct tags configured on the website and on its menu item.
        /// Null-safe on both inputs (bug fix: the original dereferenced webInfo.Tags /
        /// menuItem.Tags without checking webInfo/menuItem for null).
        /// </summary>
        private List<string> CollectConfiguredTags(WebSiteInfo webInfo, WebSiteMenu menuItem)
        {
            var configuredTags = new List<string>();

            if (webInfo != null && webInfo.Tags != null && webInfo.Tags.Count > 0)
            {
                configuredTags.AddRange(webInfo.Tags);
            }

            if (menuItem != null && menuItem.Tags != null && menuItem.Tags.Count > 0)
            {
                configuredTags.AddRange(menuItem.Tags);
            }

            return configuredTags.Count > 0 ? configuredTags.Distinct().ToList() : configuredTags;
        }

        /// <summary>
        /// Resolves the category label for a crawled page: the menu item's configured
        /// category, falling back to the menu title when no category is configured.
        /// Returns an empty string when no menu item is known.
        /// </summary>
        private string ResolveConfiguredCategory(WebSiteMenu menuItem)
        {
            string category = string.Empty;

            if (menuItem != null)
            {
                if (string.IsNullOrEmpty(menuItem.Category) && !string.IsNullOrEmpty(menuItem.Title))
                    category = menuItem.Title;
                else
                    category = menuItem.Category;

                //todo: need more tag attachment other than category, referral url, and menu item configured meta tags.
                //actionItem: category is not correct now; this needs to be supplied by the user from the entry screen.
            }

            // Category may be null (menu item with neither category nor title);
            // coalesce, then trim any trailing comma.
            return (category ?? string.Empty).TrimEnd(',');
        }

        /// <summary>
        /// Drops links whose title or url is unusable. Titles made up only of spaces
        /// and "&amp;nbsp;" entities are blanked out first so the empty-title check
        /// removes them. (todo: this cleanup belongs in the cleansing process.)
        /// </summary>
        private List<HtmlLink> FilterLinksWithQualifiedTitles(List<HtmlLink> links)
        {
            var qualified = new List<HtmlLink>();

            for (int i = 0; i < links.Count; i++)
            {
                HtmlLink link = links[i];

                // Titles that consist only of html spaces like &nbsp; reduce to empty here.
                if (link.Title != null && string.IsNullOrEmpty(link.Title.Replace(" ", "").ToLower().Replace("&nbsp;", "")))
                {
                    link.Title = link.Title.Replace(" ", "").Replace("&nbsp;", "");
                    link.Title = link.Title.Replace("\r", "").Replace("\n", "");
                }

                if (link.Title == null || string.IsNullOrEmpty(link.Title.Trim()))
                    continue;

                if (!string.IsNullOrEmpty(link.Link))
                    qualified.Add(link);
            }

            return qualified;
        }

        /// <summary>
        /// Decides whether two domain names share the same root by comparing their
        /// dot-separated segments from the right (e.g. "feeds.labnol.org" vs
        /// "www.labnol.org" -> "org","labnol" match). More than one matching trailing
        /// segment counts as the same root.
        /// </summary>
        /// <param name="DomainInLink">Domain extracted from the candidate link.</param>
        /// <param name="sourceDomain">Domain of the page being crawled.</param>
        /// <returns>True when the domains share at least two trailing segments.</returns>
        private bool IsThisDomainsAreFromSameRoot(string DomainInLink, string sourceDomain)
        {
            // Bug fix: guard nulls and compare case-insensitively — callers do not
            // always lowercase the link's domain before passing it in, and domain
            // names are case-insensitive anyway.
            if (string.IsNullOrEmpty(DomainInLink) || string.IsNullOrEmpty(sourceDomain))
                return false;

            string[] sourceDomainSegments = sourceDomain.Split('.').Reverse().ToArray();
            string[] sourceLinkDomainSegments = DomainInLink.Split('.').Reverse().ToArray();

            int numberOfSegmentsMatched = 0;

            for (int i = 0; i < sourceDomainSegments.Length && i < sourceLinkDomainSegments.Length; i++)
            {
                if (string.Equals(sourceDomainSegments[i], sourceLinkDomainSegments[i], StringComparison.OrdinalIgnoreCase))
                    numberOfSegmentsMatched++;
                else
                    break;
            }

            return numberOfSegmentsMatched > 1;
        }

        /// <summary>
        /// Checks whether a url belongs to the crawled site's domain. Plain html links
        /// use a simple substring containment test; feed-sourced links use the
        /// reversed-segment root-domain comparison (feeds.labnol.org vs www.labnol.org
        /// -> org.labnol matches on more than one part, so it is accepted).
        /// Any failure is logged and treated as "not from this domain".
        /// </summary>
        private bool IsThisLinkFromTheRootDomain(string urlLink, string sourceDomain, bool isFeedSourced)
        {
            try
            {
                if (string.IsNullOrEmpty(urlLink) || string.IsNullOrEmpty(sourceDomain))
                    return false;

                string domainInLink = UtilityExtn.GetDomainNameFromUrl(urlLink).ToLower();

                // Non-feed links: containment of the source domain in the url is enough.
                if (!isFeedSourced)
                    return urlLink.ToLower().Contains(sourceDomain.ToLower());

                // Feed links may live on a sibling subdomain; compare roots instead.
                return IsThisDomainsAreFromSameRoot(domainInLink, sourceDomain);
            }
            catch (Exception ex)
            {
                TempLogger.Log("Error while checking root domain stuff: the inputs are UrlLink is : " + urlLink + Environment.NewLine + " source domain is : " + sourceDomain + Environment.NewLine + ex.Message, ex);

                return false;
            }
        }

        /// <summary>
        /// Returns true when the character-set detector reports unknown or invalid
        /// characters in the given string; the detected language itself is not needed here.
        /// </summary>
        private bool IsInvalidCharsFound(string inputString)
        {
            bool hasUnknownOrInvalidChars;

            LanguageDetector.ChracterSetDetector.Detect(inputString, out hasUnknownOrInvalidChars);

            return hasUnknownOrInvalidChars;
        }

        /// <summary>
        /// Runs character-set based language detection over the feed title (after html
        /// decoding it) and stores the result on the feed item. Special case: when the
        /// detector reports "hindi" but the item was already tagged Marathi/Marati, the
        /// Marathi tag is kept (normalized to "Marathi").
        /// Leaves the item untouched when the title is blank or detection yields null.
        /// </summary>
        private void DetectLanguageByInspectingTitleContent(FeedItem feedItem)
        {
            if (feedItem == null || string.IsNullOrEmpty(feedItem.Title) || string.IsNullOrEmpty(feedItem.Title.Trim()))
                return;

            // Decode entities first so the detector sees real characters, not &#...; codes.
            feedItem.Title = HttpUtility.HtmlDecode(feedItem.Title);

            bool isUnknownOrInvalidCharsFoundInString;
            string detectedLanguage = LanguageDetector.ChracterSetDetector.Detect(
                NewsMine.Utilities.TempAnalyzer.RemoveAllSpecialCharacterForIndexing(feedItem.Title),
                out isUnknownOrInvalidCharsFoundInString);

            if (detectedLanguage == null)
                return;

            // Marathi shares the script the detector classifies as "hindi", so an
            // existing Marathi tag wins over a "hindi" detection.
            bool keepExistingMarathi =
                detectedLanguage.ToLower() == "hindi"
                && !string.IsNullOrEmpty(feedItem.Language)
                && (feedItem.Language.ToLower() == "marathi" || feedItem.Language.ToLower() == "marati");

            feedItem.Language = keepExistingMarathi ? "Marathi" : detectedLanguage;
        }

        /// <summary>
        /// Handles the output of an HtmlDownloadJob: validates the payload and queues
        /// an HtmlString2LinksJob to extract links from the downloaded raw html.
        /// </summary>
        /// <param name="outputData">Expected to be an HtmlContentFromUrl; null is ignored.</param>
        private void ProcessOutput_Of_HtmlDownloadJob(object outputData)
        {
            if (outputData == null)
                return;

            var htmlContent = outputData as HtmlContentFromUrl;

            if (htmlContent == null)
                throw new ApplicationException("Invalid outputData rendered from the HtmlDownloadJob; This data should be type of HtmlContentFromUrl)");

            // Save raw html to the HtmlDocumentStore.
            //todo: uncomment when we need to save htmlcontent to the store.
            //Defaults.HtmlDocumentStore.Replace(htmlContent.Url, htmlContent);
            //Defaults.HtmlDocumentStore.Close();

            // Hand the raw html over to the link-extraction stage.
            var htmlString2LinksJob = new HtmlString2LinksJob
            {
                WorkFlow = this,
                HtmlContent = htmlContent
            };

            Defaults.JobQueue.Enqueue(htmlString2LinksJob);
        }

        /// <summary>
        /// Handles the output of the scheduling job — the list of website menu items
        /// read from the master store. Queues one download job per menu url and records
        /// each menu item in the crawl-tracking store so later stages can look up its
        /// category/language configuration by url.
        /// </summary>
        /// <param name="outputData">Expected to be a List&lt;WebSiteMenu&gt;; null is ignored.</param>
        private void ProcessOutput_Of_ScheduleHtml2FeedJob(object outputData)
        {
            if (outputData == null)
                return;

            var menuItems = outputData as List<WebSiteMenu>;

            if (menuItems == null)
            {
                throw new ApplicationException("Invalid outputData rendered from the ScheduleHtml2FeedJob; This data should be type of List<WebSiteMenu>)");
            }

            foreach (var menuItem in menuItems)
            {
                // One download job per configured menu url (e.g. http://telugu.oneindia.in/).
                var downLoadJob = new HtmlDownloadJob
                {
                    Url = menuItem.Url,
                    WorkFlow = this
                };

                Defaults.DownloadingJobQueue.Enqueue(downLoadJob);

                // Track the menu item against its url for later lookup in ProcessOutput.
                Defaults.CrawlingTrackingStore.Replace(menuItem.Url, menuItem);
                Defaults.CrawlingTrackingStore.Close();
            }
        }


        /// <summary>
        /// Merges html links collected from a single crawled page, grouping them by
        /// target url so duplicates (e.g. an image anchor and a text anchor for the
        /// same article) collapse into one merged entry.
        /// Feed-sourced links are returned untouched — feeds already provide exactly
        /// one entry per item, so no self-merge is needed.
        /// </summary>
        /// <param name="filteredHtmlLinksForSourceDomain">Links extracted from one html page.</param>
        /// <returns>The merged link list, or the input list unchanged for feed-sourced links.</returns>
        private List<HtmlLink> SelfMergeAndDedupeForLinksInSingleHtmlPage(List<HtmlLink> filteredHtmlLinksForSourceDomain)
        {
            // Any() short-circuits on the first feed-sourced link instead of counting them all.
            if (filteredHtmlLinksForSourceDomain.Any(htmlLink => htmlLink.IsFeedSourced))
            {
                return filteredHtmlLinksForSourceDomain;
            }

            // Group the page's links by target url and merge each group into one link.
            return filteredHtmlLinksForSourceDomain
                .GroupBy(htmlLink => htmlLink.Link)
                .Select(group => UtilityExtn.MergeMultipleHtmlLinks(group.ToList()))
                .ToList();
        }

        //todo: this method needs to go to some common place as a utility method.

        /// <summary>
        /// Converts extracted html links into FeedItem instances, carrying over the
        /// title, link, image url and referral url; feed-sourced links additionally
        /// contribute their description, categories and publish date.
        /// </summary>
        /// <param name="htmlLinksAfterDedupe">The deduped links to convert.</param>
        /// <param name="refUrl">The crawled page's url, stored as each item's referral url.</param>
        private List<FeedItem> HtmlLinksToFeedItems(List<HtmlLink> htmlLinksAfterDedupe, string refUrl)
        {
            var feedItemList = new List<FeedItem>();

            foreach (var htmlLink in htmlLinksAfterDedupe)
            {
                var fdItem = new FeedItem
                {
                    RefUrl = refUrl,
                    Title = htmlLink.Title,
                    Link = htmlLink.Link,
                    ImageUrl = htmlLink.ImageUrl,
                    //todo: here we need some logic to handle the feedItem.domain based on the source url / self url / feed url. need some clarity.
                    Domain = UtilityExtn.GetDomainNameFromUrl(htmlLink.Link)
                };

                if (htmlLink.IsFeedSourced)
                {
                    fdItem.IsFeedSourced = true;

                    if (htmlLink.CategoriesFromFeed != null)
                        fdItem.CategoriesFromFeed = htmlLink.CategoriesFromFeed;

                    if (!string.IsNullOrEmpty(htmlLink.Description))
                        fdItem.Description = htmlLink.Description;

                    fdItem.PubDate = htmlLink.PubDate;
                }

                feedItemList.Add(fdItem);
            }

            return feedItemList;
        }


    }
}
