﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Common;
using Indexing;
using Amib.Threading;
using Lucene.Net.Index;

namespace Workers
{
    /// <summary>
    /// Gets and processes all urls in the DB for the input site:
    /// those which have changed are downloaded and indexed,
    /// those not changed are (hopefully) skipped.
    /// </summary>
    public class Updater
    {
        // 0 disables download throttling in the Indexer.
        const int MAX_BYTES_PER_SECOND = 0;

        readonly Site _site;
        readonly DateTime _fromDate;
        readonly LuceneManager.LuceneManager _luceneManager;
        readonly Indexer _indexer;
        readonly IWorkItemsGroup _workGroup;

        /// <summary>
        /// Creates an updater for a single site.
        /// </summary>
        /// <param name="site">Site whose stored crawl urls will be re-checked.</param>
        /// <param name="workGroup">Thread-pool group that urls are queued on.</param>
        /// <param name="maxThreads">Maximum worker threads forwarded to the Indexer.</param>
        /// <param name="prefixFilePath">Prefix file path forwarded to the Indexer.</param>
        /// <param name="fromDate">Cut-off date passed to each indexing request.</param>
        /// <param name="lm">Lucene manager that receives the indexed documents.</param>
        public Updater(Site site, IWorkItemsGroup workGroup, int maxThreads, string prefixFilePath,
            DateTime fromDate, LuceneManager.LuceneManager lm)
        {
            _site = site;
            _workGroup = workGroup;
            _fromDate = fromDate;
            _luceneManager = lm;
            _indexer = new Indexer(_luceneManager, "", prefixFilePath, maxThreads,
                MAX_BYTES_PER_SECOND);
        }

        /// <summary>
        /// Iterates over every crawl url stored for the site and queues each one
        /// on the work group for processing by <see cref="HandleCrawlUrl(object)"/>.
        /// </summary>
        /// <param name="mergeWithIndexPath">Currently unused; kept so existing
        /// callers keep compiling. TODO(review): wire up or remove.</param>
        public void Update(string mergeWithIndexPath)
        {
            Dal.CrawlUrlManager cum = new Dal.CrawlUrlManager(_site.SiteNames.SiteName);
            CrawlUrl url;
            while ((url = cum.GetNextCrawlUrl()) != null)
            {
                try
                {
                    _workGroup.QueueWorkItem(HandleCrawlUrl, url);
                }
                catch (Exception)
                {
                    // Best-effort: page was not modified (or there was a problem
                    // with it) — skip this url and continue with the next.
                    // TODO(review): log the failure instead of swallowing it
                    // silently so queueing errors are at least visible.
                }
            }
            // NOTE(review): nothing here waits for the queued work items to
            // finish; callers must wait on the work group themselves.
        }

        /// <summary>
        /// Work-item entry point: unwraps the boxed <see cref="CrawlUrl"/> and
        /// dispatches to the typed overload. Non-CrawlUrl payloads are ignored.
        /// </summary>
        void HandleCrawlUrl(object crawlUrl)
        {
            // Single pattern match replaces the old is-check + 'as' cast pair.
            if (crawlUrl is CrawlUrl typedUrl)
                HandleCrawlUrl(typedUrl);
        }

        /// <summary>
        /// Indexes one crawl url according to its type (story, blog or forum),
        /// using the site's rule set for that type. Unknown types are ignored.
        /// </summary>
        private void HandleCrawlUrl(CrawlUrl crawlUrl)
        {
            switch (crawlUrl.crawlUrlType)
            {
                case CrawlUrl.CrawlUrlType.Story:
                    _indexer.IndexStory(BuildArguments(crawlUrl, RuleSet.Story), _workGroup);
                    break;

                case CrawlUrl.CrawlUrlType.Blog:
                    // Blogs are indexed through the same story pipeline,
                    // just with the blog rule set.
                    _indexer.IndexStory(BuildArguments(crawlUrl, RuleSet.Blog), _workGroup);
                    break;

                case CrawlUrl.CrawlUrlType.Forum:
                    _indexer.IndexForum(BuildArguments(crawlUrl, RuleSet.Forum), _workGroup);
                    break;
            }
        }

        /// <summary>
        /// Builds the indexing arguments for <paramref name="crawlUrl"/> using
        /// the site's rules for <paramref name="ruleSet"/>. Extracted to remove
        /// the copy-pasted construction that appeared in every switch case.
        /// </summary>
        private StoryIndexArguments BuildArguments(CrawlUrl crawlUrl, RuleSet ruleSet)
        {
            return new StoryIndexArguments(crawlUrl.SiteName, crawlUrl.Uri,
                _site[ruleSet], _fromDate, _site[ruleSet].CrawlDepth);
        }
    }
}
