﻿using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Framework.Extensions;

namespace WebCrawler
{
    /// <summary>
    /// Processes a single page URL into a <see cref="PageDescriptior"/> describing
    /// the load result and any URLs discovered on the page.
    /// </summary>
    /// <remarks>
    /// NOTE(review): "PageDescriptior" appears to be a misspelling of "PageDescriptor";
    /// the type is declared elsewhere, so renaming it is a cross-file change.
    /// </remarks>
    public interface IPageProcessor
    {
        /// <summary>Loads <paramref name="pageUrl"/> and returns its load info plus newly discovered URLs.</summary>
        PageDescriptior GetPageDescriptor(string pageUrl);
    }


    /// <summary>
    /// Default <see cref="IPageProcessor"/>: loads a page via <see cref="IHttpClient"/>,
    /// marks the URL complete in the repository, and extracts URLs not seen before.
    /// </summary>
    public class PageProcessor : IPageProcessor
    {
        private readonly IHttpClient _httpClient;
        private readonly IUrlMatcher _urlMatcher;
        private readonly IUrlRepository _urlRepository;

        /// <summary>Creates a processor over the given HTTP client, URL matcher, and URL repository.</summary>
        /// <exception cref="ArgumentNullException">Any dependency is null.</exception>
        public PageProcessor(IHttpClient httpClient, IUrlMatcher urlMatcher, IUrlRepository urlRepository)
        {
            // Fail fast here rather than with a NullReferenceException on first use.
            _httpClient = httpClient ?? throw new ArgumentNullException(nameof(httpClient));
            _urlMatcher = urlMatcher ?? throw new ArgumentNullException(nameof(urlMatcher));
            _urlRepository = urlRepository ?? throw new ArgumentNullException(nameof(urlRepository));
        }

        /// <summary>
        /// Loads <paramref name="pageUrl"/>, marks it complete in the repository
        /// (whether or not the load succeeded), and returns the load info together
        /// with the URLs found on the page that were not already known.
        /// </summary>
        /// <param name="pageUrl">Absolute URL of the page to process.</param>
        /// <returns>A descriptor with the load result and newly discovered URLs (empty on failure).</returns>
        public PageDescriptior GetPageDescriptor(string pageUrl)
        {
            // The interface is synchronous, so we must block on the async load.
            // GetAwaiter().GetResult() (unlike .Result) rethrows the original
            // exception instead of wrapping it in an AggregateException.
            // NOTE(review): blocking on async work can deadlock on frameworks with
            // a synchronization context; consider adding an async interface method.
            var pageInfo = _httpClient.LoadPageInfoAsync(pageUrl).GetAwaiter().GetResult();

            // Mark the URL processed even on failure so it is not retried forever.
            _urlRepository.Complete(pageUrl);

            var urls = pageInfo.IsSuccess()
                ? GetNewUrlsFromPage(pageInfo.Text).ToArray()
                : Array.Empty<string>();

            return new PageDescriptior { Info = pageInfo, Urls = urls };
        }

        /// <summary>
        /// Yields the URLs matched in <paramref name="text"/> that the repository
        /// accepted as new (i.e. not previously seen).
        /// </summary>
        private IEnumerable<string> GetNewUrlsFromPage(string text)
        {
            foreach (var url in _urlMatcher.Match(text))
            {
                // AddNew returns true only for URLs not already in the repository,
                // so duplicates within and across pages are filtered here.
                if (_urlRepository.AddNew(url))
                {
                    yield return url;
                }
            }
        }
    }
}