﻿using System;
using System.Data;
using MyClever.Lib;
using System.Xml;
using System.Collections.Generic;
using HtmlAgilityPack;

namespace MyClever.DefaultPlugins
{
    /// <summary>
    /// Mission plugin that loads each configured website and extracts data
    /// from it by applying the configured XPath scraping rules.
    /// For every matched node it emits the parent node's line number, a
    /// comma-separated attribute list, and the node's trimmed inner text.
    /// </summary>
    public class ParseHTML : MissionPlugin
    {
        [Package(Direction.Input, "Defines the websites you like to scrape.")]
        enum Websites
        {
            [Column(typeof(string), "Which website you like to scrape?")]
            URL
        }

        [Package(Direction.Input, "Defines the scraping rules.")]
        enum ScrapingRules
        {
            [Column(typeof(string), "Defines the XPath you like to use to scrape the website.")]
            XPathCode
        }

        [Package(Direction.Output, "Defines the extracted data from the given websites.")]
        enum ExtractedData
        {
            [Column(typeof(int), "Defines the XPath parent Node Line.")]
            ParentNodeLine,
            [Column(typeof(string), "Defines a comma separated list of all node attributes.")]
            AttributeList,
            [Column(typeof(string), "Defines extracted data.")]
            ExtractedText
        }

        [Plugin(PluginDevStatus.Production, "This plugin use XPath to extract data from websites.")]
        public ParseHTML()
        {
        }

        /// <summary>
        /// Called first by the host; put any startup work here.
        /// </summary>
        public override void Initialize()
        {
            // No startup work required for this plugin.
        }

        /// <summary>
        /// Called by the host when the program is stopped; put cleanup code here.
        /// </summary>
        public override void Dispose()
        {
            // No resources held between calls; nothing to clean up.
        }

        /// <summary>
        /// Loads every configured website, applies each XPath scraping rule to it,
        /// and writes one <see cref="ExtractedData"/> row per matched node.
        /// </summary>
        protected override void Work()
        {
            // DataTableReader implements IDisposable — dispose deterministically.
            using (DataTableReader websitesPackage = this.GetDataReader(typeof(Websites)))
            {
                while (websitesPackage.Read())
                {
                    string url = websitesPackage[Websites.URL.ToString()].ToString();
                    this.AddNewLoggingMessage(typeof(Websites), String.Format("load website by URL {0}", url));

                    HtmlDocument document = new HtmlWeb().Load(url);

                    using (DataTableReader scrapingRulesPackage = this.GetDataReader(typeof(ScrapingRules)))
                    {
                        while (scrapingRulesPackage.Read())
                        {
                            string xpath = scrapingRulesPackage[ScrapingRules.XPathCode.ToString()].ToString();
                            this.AddNewLoggingMessage(typeof(ScrapingRules), String.Format("start scraping with XPath Code: {0}", xpath));

                            // SelectNodes returns null (not an empty collection) when the
                            // XPath matches nothing — guard to avoid a NullReferenceException.
                            HtmlNodeCollection matches = document.DocumentNode.SelectNodes(xpath);
                            if (matches == null)
                            {
                                this.AddNewLoggingMessage(typeof(ScrapingRules), String.Format("no nodes matched XPath Code: {0}", xpath));
                                continue;
                            }

                            foreach (HtmlNode link in matches)
                            {
                                // Collect all attributes as "name:value" pairs.
                                List<string> attributes = new List<string>();
                                foreach (HtmlAttribute attributeOn in link.Attributes)
                                {
                                    attributes.Add(attributeOn.Name + ":" + attributeOn.Value);
                                }

                                // Save scraped data. ParentNodeLine is declared as an int
                                // column, so assign the numeric value directly.
                                DataRow newRow = this.CreateNewRowFromPackage(typeof(ExtractedData));
                                newRow[ExtractedData.ParentNodeLine.ToString()] = link.ParentNode.Line;
                                newRow[ExtractedData.AttributeList.ToString()] = String.Join(",", attributes);
                                newRow[ExtractedData.ExtractedText.ToString()] = link.InnerText.Trim();
                                this.AddNewRowToPackage(typeof(ExtractedData), newRow);
                            }
                        }
                    }
                }
            }
        }
    }
}
