using System;
using System.Collections.Generic;
using System.Text;
using System.Text.RegularExpressions;

using Jeffrey.XFramework;
using Jeffrey.XFramework.XCrawl;

namespace Jeffrey.XProcess
{
    /*
     * ExtractUri: extracts hyperlinks from crawled page content and records the
     * crawlable ones, deliberately excluding image links.
     */
    public class ExtractUri : Extractor
    {
        /*
         *Stands for the PHP pages session id...
         */
        //private static readonly String PHPSESSID_UPPER = "PHPSESSID=";
        /*"in this pattern: [^'\"\\s<>?]*
         *exclude '?' may not fetch the active pages..."
         *has been modified.
         */

        public ExtractUri(String name, String description, String classname, String path)
            : base(name, description, classname, path) { }
        public ExtractUri(String name, String description, String classname, String path, Filter filter)
            : base(name, description, classname, path, filter) { }

        /// <summary>
        /// Scans the crawled page content for HTTP links and appends the crawlable
        /// ones (directories, or pages with an HTML-like extension) to
        /// <c>curi.NormalLinks</c>. Two passes are made: links inside
        /// &lt;a href=...&gt; anchors, then bare links elsewhere in the content
        /// (the bare-link pattern excludes image links).
        /// </summary>
        /// <param name="curi">
        /// The crawl URI whose <c>Content</c> is scanned; its <c>NormalLinks</c>
        /// collection is mutated in place.
        /// </param>
        protected override void Extract(CrawlUri curi)
        {
            //guard against null as well as empty content
            //(the original empty-string comparison would throw on null)
            if (String.IsNullOrEmpty(curi.Content))
                return;

            //validates that a normalized link is an http uri
            Regex validator = new Regex(HTTP_URI_VALIDATE_PATTERN, RegexOptions.IgnoreCase);
            //matches html-like file extensions, to exclude other MIME pages
            Regex extensionRegex = new Regex(HTML_EXTENSIONS, RegexOptions.IgnoreCase);

            //pass 1: links found inside <a href=...></a> anchors;
            //these must additionally pass the http validator
            Regex anchorRegex = new Regex(HTTP_URI_EX_EXTRACT_PATTERN, RegexOptions.IgnoreCase);
            foreach (Match match in anchorRegex.Matches(curi.Content))
                CollectLink(curi, match.Groups["uri"].Value, validator, extensionRegex);

            //pass 2: bare links that are not in the <a href=...></a> pattern
            //(image links are excluded by the pattern itself); no validator,
            //matching the original behaviour
            Regex bareRegex = new Regex(HTTP_URI_EXTRACT_PATTERN, RegexOptions.IgnoreCase);
            foreach (Match match in bareRegex.Matches(curi.Content))
                CollectLink(curi, match.Value, null, extensionRegex);
        }

        /// <summary>
        /// Normalizes a raw link relative to <paramref name="curi"/> and adds it to
        /// <c>curi.NormalLinks</c> when it is a crawlable page not already recorded.
        /// Duplicate links are skipped in both passes (the original code only
        /// deduplicated in the second pass, so anchor links could be added twice).
        /// </summary>
        /// <param name="curi">Parent crawl URI; receives the accepted link.</param>
        /// <param name="rawLink">The raw link text extracted from the page.</param>
        /// <param name="validator">
        /// Optional http validator; when non-null the normalized link must match it.
        /// </param>
        /// <param name="extensionRegex">
        /// Matches HTML-like extensions for paths that are not directories.
        /// </param>
        private void CollectLink(CrawlUri curi, String rawLink, Regex validator, Regex extensionRegex)
        {
            try
            {
                CrawlUri uri = new CrawlUri(curi, rawLink);

                //normalization makes sure that a directory ends with a slash
                String httpLink = CrawlToolKits.UrlNormalization(uri);

                if (validator != null && !validator.Match(httpLink).Success)
                    return;

                //accept directories, or non-directory paths with an html extension
                bool crawlablePage = httpLink.EndsWith("/") ||
                                     extensionRegex.Match(uri.AbsolutePath).Success;

                if (crawlablePage && !curi.NormalLinks.Contains(httpLink))
                    curi.NormalLinks.Add(httpLink);
            }
            catch (UriFormatException ufe)
            {
                //fixed typo in the original message: "CrawUri" -> "CrawlUri"
                Logger.LogError(ufe, "occurred when create CrawlUri using " + rawLink);
            }
        }
    }
}
