﻿using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading;
using System.Threading.Tasks;

namespace Kaspersky.Test.RecursiveLinkLogger
{
    /// <summary>
    /// Recursively and asynchronously crawls pages starting from a given URL,
    /// reporting every newly discovered link (and any exception) through events.
    /// </summary>
    /// <typeparam name="TLinkExtractor">The type used to extract links from downloaded HTML</typeparam>
    public class AsyncLinkLogger<TLinkExtractor> where TLinkExtractor : ILinkExtractor, new()
    {
        /// <summary>
        /// The set of links seen so far (keys compared case-insensitively).
        /// We need to collect them to prevent cross-references from causing infinite recursion.
        /// </summary>
        private ConcurrentDictionary<string, string> _links;

        /// <summary>
        /// A base URL part to be applied to all found links
        /// </summary>
        private readonly string _baseUrl;

        /// <summary>
        /// An object, that should be used for extracting links from a string
        /// </summary>
        private readonly ILinkExtractor _linkExtractor = new TLinkExtractor();

        /// <summary>
        /// A CancellationTokenSource to cancel all the started tasks
        /// </summary>
        private CancellationTokenSource _cancellationTokenSource;

        /// <summary>
        /// The maximum number of concurrently opened WebClients (to limit memory usage)
        /// </summary>
        private const int MaxConcurrentWebClients = 2;

        /// <summary>
        /// A Semaphore for limiting the number of concurrently opened WebClients
        /// </summary>
        private readonly SemaphoreSlim _webClientSemaphore = new SemaphoreSlim(MaxConcurrentWebClients, MaxConcurrentWebClients);

        /// <summary>
        /// This event will fire, when all RecursiveLogLinks() calls are through
        /// </summary>
        private CountdownEvent _finishedEvent;

        /// <summary>
        /// Fires a 'link found' event
        /// </summary>
        /// <param name="link">The link that was found</param>
        private void FireLinkFoundEvent(string link)
        {
            // copying to a local guards against another thread unsubscribing
            // between the null check and the invocation
            var handler = this.LinkFoundEvent;
            if (handler != null)
            {
                handler(link);
            }
        }

        /// <summary>
        /// Fires an ExceptionEvent
        /// </summary>
        /// <param name="ex">The exception to report</param>
        private void FireExceptionEvent(Exception ex)
        {
            // copying to a local guards against another thread unsubscribing
            // between the null check and the invocation
            var handler = this.ExceptioEvent;
            if (handler != null)
            {
                handler(ex);
            }
        }

        /// <summary>
        /// Recursively and asynchronously searches for URLs.
        /// 'async void' is intentional here: completion is tracked via _finishedEvent
        /// rather than a Task, so no exception may ever escape this method.
        /// </summary>
        /// <param name="url">URL to start with</param>
        private async void RecursiveLogLinks(string url)
        {
            // Register this call with the countdown BEFORE the first await, while the
            // caller still holds its own count - otherwise the event could reach zero
            // prematurely. Kept outside the try, so that Signal() in the finally below
            // always balances exactly one successful AddCount().
            this._finishedEvent.AddCount();

            try
            {
                // should not proceed, if it was cancelled
                this._cancellationTokenSource.Token.ThrowIfCancellationRequested();

                // Limiting the number of concurrently opened WebClients with SemaphoreSlim.
                // Passing the token lets Cancel() unblock callers queued on the semaphore.
                await this._webClientSemaphore.WaitAsync(this._cancellationTokenSource.Token);

                string html;
                try
                {
                    // should not proceed, if it was cancelled
                    this._cancellationTokenSource.Token.ThrowIfCancellationRequested();

                    // downloading the page; 'using' guarantees disposal, and the enclosing
                    // try/finally guarantees the semaphore permit is returned even if the
                    // client construction or the download throws
                    using (var client = new WebClient())
                    {
                        // Add a user agent header in case the requested URI contains a query
                        client.Headers.Add("user-agent", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705;)");

                        html = await client.DownloadStringTaskAsync(this._baseUrl + url);
                    }
                }
                finally
                {
                    this._webClientSemaphore.Release();
                }

                foreach
                (
                    var curUrl in this._linkExtractor.ExtractLinksFromString(html)
                        // preventing cross-references: only report and recurse into URLs
                        // that are seen for the first time (TryAdd fails on duplicates;
                        // the dictionary's comparer makes the check case-insensitive)
                        .Where(curUrl => this._links.TryAdd(curUrl, curUrl))
                )
                {
                    this.FireLinkFoundEvent(curUrl);

                    // recursively getting links
                    this.RecursiveLogLinks(curUrl);
                }
            }
            catch (Exception ex)
            {
                // an unhandled exception in an 'async void' method would crash the
                // process, so every failure is reported through the event instead
                this.FireExceptionEvent(ex);
            }
            finally
            {
                // The CountdownEvent should fire, when all tasks are finished;
                // signalled exactly once per AddCount(), even on failure
                this._finishedEvent.Signal();
            }
        }

        /// <summary>
        /// An empty ctor; found links are then requested relative to an empty base URL
        /// </summary>
        public AsyncLinkLogger()
        {
        }

        /// <summary>
        /// A ctor, that allows to specify a base URL part for all URLs to be opened
        /// </summary>
        /// <param name="baseUrl">A base URL part</param>
        /// <exception cref="ArgumentNullException">Thrown when baseUrl is null</exception>
        public AsyncLinkLogger(string baseUrl)
        {
            if (baseUrl == null)
            {
                throw new ArgumentNullException("baseUrl");
            }
            this._baseUrl = baseUrl;
        }

        /// <summary>
        /// Recursively searches for URLs on a page by running a bunch of Tasks.
        /// Shouldn't be called from different threads in parallel.
        /// </summary>
        /// <param name="url">URL to start with</param>
        /// <returns>A task that completes only when the whole recursive crawl is finished</returns>
        /// <exception cref="ArgumentNullException">Thrown when url is null</exception>
        /// <exception cref="InvalidOperationException">Thrown when a crawl is already in progress</exception>
        public Task LogLinks(string url)
        {
            if (url == null)
            {
                throw new ArgumentNullException("url");
            }

            // this method should be called only once per run (a finished run resets
            // _finishedEvent to null, which allows starting over)
            if (this._finishedEvent != null)
            {
                throw new InvalidOperationException("LogLinks() shouldn't be called twice!");
            }

            // the initial count of 1 represents this LogLinks() call itself and is
            // signalled by the waiter task below
            this._finishedEvent = new CountdownEvent(1);

            this._cancellationTokenSource = new CancellationTokenSource();

            // case-insensitive keys replace the previous ToLower()-normalized keys,
            // avoiding culture-sensitive lowercasing of URLs
            this._links = new ConcurrentDictionary<string, string>(StringComparer.OrdinalIgnoreCase);

            // remember the starting URL, so that links pointing back to it are not re-crawled
            this._links.TryAdd(url, url);

            this.RecursiveLogLinks(url);

            // returning the task, that is only finished when all RecursiveLogLinks() calls are through
            return Task.Run(() =>
            {
                this._finishedEvent.Signal();
                this._finishedEvent.Wait();
                this._finishedEvent.Dispose();
                this._finishedEvent = null;
            });
        }

        /// <summary>
        /// Cancels a crawl previously started by LogLinks()
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when no crawl was ever started</exception>
        public void Cancel()
        {
            if (this._cancellationTokenSource == null)
            {
                throw new InvalidOperationException("Link logging wasn't started, so it cannot be cancelled!");
            }

            this._cancellationTokenSource.Cancel();
        }

        /// <summary>
        /// Fired when a link is found by LogLinks()
        /// </summary>
        public event Action<string> LinkFoundEvent;

        /// <summary>
        /// Fired when an exception occurs.
        /// NOTE(review): the name is misspelled ("Exceptio"), but it is part of the
        /// public interface and is kept as-is for backward compatibility.
        /// </summary>
        public event Action<Exception> ExceptioEvent;
    }
}
