﻿#region Header

#region License
/*
Copyright (c) 2010, G.W. van der Vegt
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided 
that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the 
  following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and 
  the following disclaimer in the documentation and/or other materials provided with the distribution.

* Neither the name of G.W. van der Vegt nor the names of its contributors may be 
  used to endorse or promote products derived from this software without specific prior written 
  permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY 
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 
THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF 
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#endregion License

#region Changelog

//----------   ---   -------------------------------------------------------------------------------
//Purpose:           Latent Semantic Analysis (Lsa) Engine
//By:                G.W. van der Vegt (wim.vandervegt@ou.nl)
//Url:               http://lsa.codeplex.com
//Depends:           Swiss.IniFile
//                   DotNetZip Library
//License:           New BSD License
//----------   ---   -------------------------------------------------------------------------------
//dd-mm-yyyy - who - description
//----------   ---   -------------------------------------------------------------------------------
//02-02-2010 - veg - Created.
//                 * Added Index() (Output format is an IniFile).
//                 - Added Zip().
//                 - Added NonAlpha().
//                 - Added Spaces().
//                 - Added Hyphens().
//                 * Added WordLength().
//                 - Added methods for retrieving the DescriptionAttribute and Method.Name 
//                   for zipfile Comment.
//                 - Added StopList().
//04-02-2010 - veg - Mapped most methods so far onto small routines that call ZipToZip with a 
//                   delegate performing the actual processing and taking optional parameters.
//                 - Added Profiling.
//                 - Added Terms().
//05-02-2010 - veg - Added Occurences().
//                 - Fixed some bugs in Hypens and other workers (did return result without modifying it).
//                 - Removed RegEx from wordlength as it did not work properly on words like 'zuid-azie'.
//                   Replaced this code by an in-place shift of words in the ziparray variable 
//                   together with an array resize.
//07-02-2010 - veg - Restructured and renamed solution. This might cause some nasty problems!
//                 - Changed output format of Index() back to index_documents.ls as we need it down the line.
//                 - Added TermMatrix().
//                 - Added Jobs.ini to output containing saved values for some properties we need frequently.
//                 * Changed output format of Index() so the document index is one based like the rest of the output.
//09-02-2010 - veg - Debugged the Decompose() routine.
//                 - Swapped legacy svd call for zero base svd routine.
//                 - Removed Rows/Columns parameters from SetAs1/2DArray() routines.
//24-03-2010 - veg - Split licences. 
//----------   ---   -------------------------------------------------------------------------------
//TODO         veg + Importers (disk/ftp/nntp/www).
//                 + Integrate Windows Search Engine for Plain text Extraction.
//                 ? Look into \r\n pattern in Hyphens.
//                 + Create a MinMax Class/Struct with InRange member (see Occurences).
//                 * StringBuilder Fails om large files. Write Directly to file!
//----------   ---   -------------------------------------------------------------------------------
//NOTE             - The Changelog Items with a * differ (a bit) from the php_lsa implementation.
//                 - If NonAlpha()'s pattern contains a Hyphen, the Hyphen routine will fail to 
//                   concat hyphenated words.
//                 - Spaces() does not remove leading spaces like php_lsa does.
//                 - WordLength() already removes all linebreaks from the input (as words are copied 
//                   one by one into an array). Works with Regex Limiting Repetition.
//                   See: http://www.regular-expressions.info/repeat.html.
//NOTE             - There seem to be some bugs in the php_lsa version around cleaning up the text.
//                   In the aap corpus we still see some words that are on the stoplist ('aan') 
//                   and the derivates of 'zuidoost-azie' also differ.
//                   For easy comparison we should sort the 'terms_all.ls' on both term (alpha) and 
//                   document (numeric)!
//
//BACKPORT         * !!! Port test suite to Delphi/php !!!
//                 * One based document index
//                 * Port new occurencs to php_lsa (single pass for term_occurence/document_occurence ).
//                 * One pass/inplace Occurences().
//                 * Check Hypens code to be equivalent.
//----------   ---   -------------------------------------------------------------------------------

#endregion Changelog

#endregion Header

namespace Lsa.NET
{
    using System;
    using System.Collections.Generic;
    using System.Collections.Specialized;
    using System.ComponentModel;
    using System.Diagnostics;
    using System.IO;
    using System.Reflection;
    using System.Text;
    using System.Text.RegularExpressions;
    using alglib;
    using Ionic.Utils.Zip;
    using Swiss;
    using TermDocId = System.Collections.Generic.KeyValuePair<string, int>;
    //using TermDocId = System.Collections.Generic.KeyValuePair<string, int>;


    /*
     
       $lsa->disk(GetBaseDir($job), "*.pdf;*.doc;*.ppt", false);
            $lsa->clean("*.zip");
            $lsa->clean("*.ls");
            $lsa->clean("*.hb");
            $lsa->clean("*.mm");
            $lsa->clean("*.ns");
            $lsa->clean("*.out");
            $lsa->clean("*.csv");
            $lsa->clean("*.lsa");
     x $lsa->index("*.txt");
     x $lsa->zip($lsa->basedir, "*.lsa");

     x $lsa->nonalpha(".,!?':()[];=");
     x $lsa->spaces();
     x $lsa->hyphens();
     x $lsa->wordlength($minword,$maxword);
     x $lsa->stoplist($stop);

     x $lsa->terms();
     x $lsa->occurrences(-1,-1, -1,-1);
     //$lsa->countterms();      OBSOLETE
     //$lsa->nonzerocount();    OBSOLETE

     x $lsa->termmatrix();
       $lsa->weight($weighting);
     x $lsa->decompose($engine);
            $lsa->clean("*.lsa");
     x      $tmprank = $lsa->estimaterank(-1,-1,90);
       $lsa->reduce($minrank,$maxrank);
       $lsa->reducedspace();
     
    */

    public class Lsa : IDisposable
    {

        #region Fields

        /// <summary>
        /// The directory where temporary files are stored.
        /// Defaults to the current directory; the constructor points it at the
        /// directory that holds 'job.ini'.
        /// </summary>
        public String BaseDir = ".";

        /// <summary>
        /// Used for numbering zipfiles and other output.
        /// 
        /// NOTE(review): static, so the step counter is shared by every Lsa
        /// instance in the process - confirm this is intended.
        /// </summary>
        public static Int32 StepCount = 0;

        #endregion Fields

        #region Constructors

        /// <summary>
        /// The constructor misuses an IniFile to set the BaseDir to the directory
        /// containing 'job.ini' (typically a subdirectory of %APPDATA%).
        /// </summary>
        public Lsa()
        {
            //TODO: Eliminate Side Effect of setting the BaseDir.
            using (IniFile ini = new IniFile("job.ini"))
            {
                BaseDir = Path.GetDirectoryName(ini.FileName);
            }
        }

        #endregion Constructors

        #region Worker Handling

        /// <summary>
        /// Prototype of a Worker Method, used by ZipToZip to transform the
        /// content of a single zip entry.
        /// </summary>
        /// <param name="zipcontent">The content to process</param>
        /// <param name="vars">Optional parameters (worker specific)</param>
        /// <returns>The processed content</returns>
        internal delegate String Lsa_Worker(String zipcontent, params Object[] vars);

        /// <summary>
        /// Performs an operation between two ZipFiles (Numbered with StepCount and StepCount+1).
        /// 
        /// Each entry of the input zip is read into a string, handed to the worker,
        /// and the result is stored under the same name in the output zip. The output
        /// zip's comment is set to the worker's method name. Increments StepCount.
        /// </summary>
        /// <param name="worker">The Action to perform</param>
        /// <param name="vars">Optional Parameters needed for the Action</param>
        [Description("Wrapper for Zip to Zip File operations.")]
        internal void ZipToZip(Lsa_Worker worker, params Object[] vars)
        {
            using (new Profiling(this))
            {
                String sourceName = ZipStepFileName;

                StepCount++;

                String targetName = ZipStepFileName;

                //Start with a fresh output zip.
                if (File.Exists(targetName))
                {
                    File.Delete(targetName);
                }

                using (ZipFile target = new ZipFile(targetName))
                using (ZipFile source = new ZipFile(sourceName))
                {
                    foreach (ZipEntry entry in source)
                    {
                        //Extract the entry into an in-memory string.
                        String content;

                        using (Stream buffer = new MemoryStream())
                        {
                            entry.Extract(buffer);

                            buffer.Seek(0, SeekOrigin.Begin);

                            using (StreamReader reader = new StreamReader(buffer))
                            {
                                content = reader.ReadToEnd();
                            }
                        }

                        //Perform the operation.
                        content = worker(content, vars);

                        //Store the processed content under the original name.
                        target.AddStringAsFile(content, Path.GetFileName(entry.FileName), Path.GetDirectoryName(entry.FileName));
                    }

                    target.Comment = worker.Method.Name;

                    target.Save();
                }
            }
        }

        #endregion Worker Handling

        #region Properties

        /// <summary>
        /// Creates a ZipFileName based on the StepCount,
        /// for example '&lt;BaseDir&gt;\Step_003.zip'.
        /// </summary>
        public String ZipStepFileName
        {
            get
            {
                //"000" pads the step number to three digits, same as "{0:000}".
                return Path.Combine(BaseDir, "Step_" + StepCount.ToString("000") + ".zip");
            }
        }

        #endregion Properties

        #region Methods

        /// <summary>
        /// Dispose. Part of IDisposable; currently a no-op.
        /// </summary>
        public void Dispose()
        {
            //Nothing yet - the class holds no unmanaged resources or disposable fields.
        }

        /// <summary>
        /// Returns the value of the caller's DescriptionAttribute.
        /// 
        /// NOTE(review): relies on StackFrame(1); the caller's frame may be unreliable
        ///               when the caller is inlined in release builds - confirm.
        /// NOTE(review): GetMethod(name) throws AmbiguousMatchException for overloaded
        ///               callers (e.g. Zip) - confirm callers are never overloaded.
        /// </summary>
        /// <returns>The value of the caller's DescriptionAttribute, or "Error" when the caller has none</returns>
        private String GetMethodDescription()
        {
            StackFrame fr = new StackFrame(1);
            MethodBase mb = fr.GetMethod();

            //GetCustomAttributes returns an empty array (never null) when the attribute
            //is absent, so guard on Length. The original indexed [0] before its null
            //check, which threw IndexOutOfRangeException and made the check dead code.
            Object[] attributes = this.GetType().GetMethod(mb.Name).GetCustomAttributes(typeof(DescriptionAttribute), false);

            if (attributes.Length == 0)
            {
                return "Error";
            }

            return ((DescriptionAttribute)attributes[0]).Description;
        }

        /// <summary>
        /// Returns the name of the calling method.
        /// </summary>
        /// <returns>The caller's method name</returns>
        private String GetMethodName()
        {
            //Frame 1 is the immediate caller of this method.
            return new StackFrame(1).GetMethod().Name;
        }

        #endregion Methods

        #region Lsa Workers

        /// <summary>
        /// Removes the non alpha characters specified in the pattern, then collapses
        /// runs of spaces and drops leading/trailing spaces.
        /// 
        /// NOTE: Do not specify a Hyphen character as part of the Pattern or 
        ///       the Hyphens method will fail to restore hyphenated words.
        /// </summary>
        /// <param name="zipcontent">The content to process</param>
        /// <param name="vars">The characters to remove (typically ".,!?':()[];=")</param>
        /// <returns>The processed content</returns>
        internal String NonAlpha_Worker(String zipcontent, params Object[] vars)
        {
            String result = zipcontent;

            if (vars.Length == 1)
            {
                String pattern = (String)vars[0];

                //1) Splitting on the pattern characters and concatenating the pieces
                //   strips every occurrence of those characters.
                result = String.Concat(result.Split(pattern.ToCharArray()));

                //2) Collapse double spaces and remove leading/trailing spaces.
                result = String.Join(" ", result.Split(new Char[] { ' ' }, StringSplitOptions.RemoveEmptyEntries));
            }

            return result;
        }

        /// <summary>
        /// Removes trailing space characters from the content.
        /// 
        /// NOTE(review): this trims only the tail of the whole content, not the end
        ///               of each individual line - confirm against php_lsa.
        /// </summary>
        /// <param name="zipcontent">The content to process</param>
        /// <param name="vars">Empty</param>
        /// <returns>The processed content</returns>
        internal String Spaces_Worker(String zipcontent, params Object[] vars)
        {
            //No parameters are expected; with any parameter the input is returned untouched.
            return (vars.Length == 0) ? zipcontent.TrimEnd(' ') : zipcontent;
        }

        /// <summary>
        /// Removes Trailing Hyphens and Hyphens within the text.
        /// </summary>
        /// <param name="zipcontent">The content to process</param>
        /// <param name="vars">Empty</param>
        /// <returns>The processed content</returns>
        internal String Hyphens_Worker(String zipcontent, params Object[] vars)
        {
            String Result = zipcontent;

            if (vars.Length == 0)
            {
                {//1
                    //Rejoin words hyphenated across a line break,
                    //for example: 
                    //  'col-'
                    //  'lapse'             -> collapse
                    //NOTE: Works for Windows Delimited Lines (crlf)!
                    //BUGFIX: the original pattern @"\w-\r\n" consumed the word character
                    //before the hyphen ('col-' + 'lapse' became 'colapse'); the zero-width
                    //lookbehind (?<=\w) keeps that character in the output.
                    String[] ziparray = Regex.Split(Result, @"(?<=\w)-\r\n", RegexOptions.Multiline);
                    Result = String.Join("", ziparray);
                }

                {//2
                    //Remove rest of Hyphens
                    //for example: 
                    //  'train- and car'    -> train and car
                    //  'train - car'       -> train car
                    //  'pinche-aapjes'     -> pinche-aapjes
                    String[] ziparray = Regex.Split(Result, @"\W*\s?-\s+\W*");
                    Result = String.Join(" ", ziparray);
                }
            }

            return Result;
        }

        /// <summary>
        /// Removes all words outside the given min/max length boundaries.
        /// 
        /// NOTE: Also removes all linebreaks from the input, because the words are
        ///       split out individually and rejoined with single spaces.
        /// </summary>
        /// <param name="zipcontent">The content to process</param>
        /// <param name="vars">Two Int32 values: minimum and maximum word length
        /// (bounds assumed inclusive via MinMax.InBewteen - TODO confirm)</param>
        /// <returns>The processed content</returns>
        internal String WordLength_Worker(String zipcontent, params Object[] vars)
        {
            String Result = zipcontent;

            if (vars.Length == 2 && vars[0] is Int32 && vars[1] is Int32)
            {
                Char[] delimiter = { ' ', '\r', '\n' };

                MinMax<Int32> bounds = new MinMax<Int32>((Int32)vars[0], (Int32)vars[1]);

                String[] ziparray = Result.Split(delimiter, StringSplitOptions.RemoveEmptyEntries);

                //In-place compaction: keep only the words whose length is within bounds,
                //then shrink the array to the number of survivors.
                //(An earlier Regex based version was removed because it failed on words
                //like 'zuid-azie' - see the changelog.)
                Int32 cnt = 0;
                for (Int32 i = 0; i < ziparray.Length; i++)
                {
                    if (bounds.InBewteen(ziparray[i].Length))
                    {
                        ziparray[cnt] = ziparray[i];
                        cnt++;
                    }
                }
                Array.Resize<String>(ref ziparray, cnt);

                Result = String.Join(" ", ziparray);
            }

            return Result;
        }

        /// <summary>
        /// Removes words from the content that are present in the stoplist.
        /// 
        /// NOTE: The stoplist parameter should be a StringCollection!
        /// </summary>
        /// <param name="zipcontent">The content to process</param>
        /// <param name="vars">The Stoplist</param>
        /// <returns>The processed content</returns>
        internal String StopList_Worker(String zipcontent, params Object[] vars)
        {
            String result = zipcontent;

            if (vars.Length == 1 && vars[0] is StringCollection)
            {
                StringCollection stoplist = (StringCollection)vars[0];

                //1) Split into words and blank out every word that occurs in the
                //   stoplist. The stoplist entries are uppercased by StopList(),
                //   so the lookup uppercases each word as well.
                String[] words = result.Split(new Char[] { '\t', ' ', '\r', '\n' }, StringSplitOptions.RemoveEmptyEntries);

                for (Int32 w = 0; w < words.Length; w++)
                {
                    if (stoplist.Contains(words[w].ToUpper()))
                    {
                        words[w] = "";
                    }
                }

                result = String.Join(" ", words);

                //2) Rejoin, collapsing the gaps the removed words left behind.
                result = String.Join(" ", result.Split(new Char[] { ' ' }, StringSplitOptions.RemoveEmptyEntries));
            }

            return result;
        }

        #endregion Lsa Workers

        #region Lsa Exports

        /// <summary>
        /// Creates an index of files found by the specified path and wildcard filename (for example c:\temp\*.txt).
        /// 
        /// Writes 'index_documents.ls' (one 'docnr=filename' line per document, one based)
        /// and (re)creates 'job.ini' with the build parameters.
        /// </summary>
        /// <param name="FileName">A valid path containing a wildcard filename</param>
        public void Index(String FileName)
        {
            using (new Profiling(this))
            {
                //Doccnt differs from php_lsa version that is 0 based. But as all counting is done
                //one based this is easier for lookup. 
                //
                //NOTE: In the Sparse TxD matrix it's zero indexes based again (for terms and documents).
                //
                Int32 doccnt = 1;

                String directory = Path.GetDirectoryName(FileName);
                String wildcard = Path.GetFileName(FileName);

                //Make sure the directory ends with a separator.
                if (!directory.EndsWith(Path.DirectorySeparatorChar.ToString()))
                {
                    directory += Path.DirectorySeparatorChar;
                }

                //Search recursively.
                StringBuilder sb = new StringBuilder();
                foreach (String file in Directory.GetFiles(directory, wildcard, SearchOption.AllDirectories))
                {
                    sb.AppendLine(String.Format("{0}={1}", doccnt++, file));
                }
                File.WriteAllText(Path.Combine(BaseDir, "index_documents.ls"), sb.ToString());

                using (IniFile ini = new IniFile("job.ini"))
                {
                    ini.Clear();

                    ini.WriteTime("Job", "Start", DateTime.Now);

                    ini.WriteString("build", "path", directory);
                    ini.WriteString("build", "wildcard", wildcard);
                    //NOTE(review): doccnt is post-incremented in the loop above, so the value
                    //written here is the number of documents plus one - confirm downstream
                    //readers (e.g. TermMatrix) expect that.
                    ini.WriteInteger("build", "documents", doccnt);

                    ini.UpdateFile();
                }
            }
        }

        /// <summary>
        /// Zips all files found by the specified path and wildcard filename (for example c:\temp\*.txt).
        /// 
        /// Leaves zipped files on disk.
        /// </summary>
        /// <param name="FileName">A valid path containing a wildcard filename</param>
        [Description("Overloaded Zip() method does not remove imported files.")]
        public void Zip(String FileName)
        {
            using (new Profiling(this))
            {
                //NOTE(review): Zip(String, Boolean) opens its own Profiling scope too,
                //so this call is profiled twice - confirm that is intended.
                Zip(FileName, true);
            }
        }

        /// <summary>
        /// Zips all files found by the specified path and wildcard filename (for example c:\temp\*.txt).
        /// 
        /// NOTE(review): 'leave' is currently not used by this method - the found
        ///               files are never removed from disk; confirm intended.
        /// </summary>
        /// <param name="FileName">A valid path containing a wildcard filename</param>
        /// <param name="leave">If true, zipped files are not removed from disk</param>
        public void Zip(String FileName, Boolean leave)
        {
            using (new Profiling(this))
            {
                String outzipname = ZipStepFileName;

                String root = Path.GetDirectoryName(FileName);
                String wildcard = Path.GetFileName(FileName);

                //Make sure the root ends with a separator so relative paths can be
                //derived by simple string replacement below.
                if (!root.EndsWith(Path.DirectorySeparatorChar.ToString()))
                {
                    root += Path.DirectorySeparatorChar;
                }

                //Start with a fresh output zip.
                if (File.Exists(outzipname))
                {
                    File.Delete(outzipname);
                }

                using (ZipFile outzip = new ZipFile(outzipname))
                {
                    //Search recursively; entries are stored relative to the search root.
                    foreach (String file in Directory.GetFiles(root, wildcard, SearchOption.AllDirectories))
                    {
                        outzip.AddFile(file, Path.GetDirectoryName(file.Replace(root, "")));
                    }

                    outzip.Comment = GetMethodName();

                    outzip.Save();
                }
            }
        }

        /// <summary>
        /// Removes the characters in Pattern from the current zip step.
        /// See NonAlpha_Worker for details.
        /// </summary>
        /// <param name="Pattern">The characters to remove (typically ".,!?':()[];=")</param>
        public void Nonalpha(String Pattern)
        {
            using (new Profiling(this))
            {
                //params expansion wraps Pattern into the Object[] the worker expects.
                ZipToZip(NonAlpha_Worker, Pattern);
            }
        }

        /// <summary>
        /// Removes trailing spaces from the current zip step.
        /// See Spaces_Worker for details.
        /// </summary>
        public void Spaces()
        {
            using (new Profiling(this))
            {
                //No extra parameters; params supplies an empty Object[].
                ZipToZip(Spaces_Worker);
            }
        }

        /// <summary>
        /// Removes hyphenation from the current zip step.
        /// See Hyphens_Worker for details.
        /// </summary>
        public void Hyphens()
        {
            using (new Profiling(this))
            {
                //No extra parameters; params supplies an empty Object[].
                ZipToZip(Hyphens_Worker);
            }
        }

        /// <summary>
        /// Removes words outside the min/max length boundaries from the current zip step.
        /// See WordLength_Worker for details.
        /// </summary>
        /// <param name="min">Minimum word length</param>
        /// <param name="max">Maximum word length</param>
        public void WordLength(Int32 min, Int32 max)
        {
            using (new Profiling(this))
            {
                //params expansion boxes min and max into the Object[] the worker expects.
                ZipToZip(WordLength_Worker, min, max);
            }
        }

        /// <summary>
        /// Removes the words listed in the given stoplist file from the current zip step.
        /// See StopList_Worker for details.
        /// </summary>
        /// <param name="FileName">A file with one stopword per line</param>
        public void StopList(String FileName)
        {
            using (new Profiling(this))
            {
                //Load the stoplist, uppercasing each entry so the worker can do
                //case-insensitive lookups.
                StringCollection stoplist = new StringCollection();

                foreach (String word in File.ReadAllLines(FileName))
                {
                    stoplist.Add(word.ToUpper());
                }

                ZipToZip(StopList_Worker, stoplist);
            }
        }

        /// <summary>
        /// Extracts all terms (uppercased words starting with a letter) from the
        /// current zip step and writes them, sorted on term and then document id,
        /// to 'term_all.ls' as 'term=docnr' lines.
        /// </summary>
        public void Terms()
        {
            using (new Profiling(this))
            {
                Int32 docID = 0;

                String inzipname = ZipStepFileName;

                List<TermDocId> all_terms = new List<TermDocId>();

                using (ZipFile zip = new ZipFile(inzipname))
                {
                    foreach (ZipEntry ze in zip)
                    {
                        //Read the entry content into a string.
                        String zipcontent;

                        using (Stream zipstream = new MemoryStream())
                        {
                            ze.Extract(zipstream);

                            zipstream.Seek(0, SeekOrigin.Begin);

                            using (StreamReader reader = new StreamReader(zipstream))
                            {
                                zipcontent = reader.ReadToEnd();
                            }
                        }

                        //Documents are numbered one based (see Index()).
                        docID++;

                        //@"\w+" would match all words;
                        //this pattern matches only words that start with a Letter.
                        MatchCollection matches = Regex.Matches(zipcontent.ToUpper(), @"\b[A-Z][A-Z,0-9,\-]*\b", RegexOptions.Multiline);

                        //(The original also allocated an unused String[matches.Count]
                        //per entry; that dead allocation has been removed.)
                        for (Int32 i = 0; i < matches.Count; i++)
                        {
                            all_terms.Add(new TermDocId(matches[i].Value, docID));
                        }
                    }
                }

                //Sort on term first, then on document id (Occurences() relies on this order).
                all_terms.Sort(CompareKeys);

                //php_sa Compatible Output
                StringBuilder sb = new StringBuilder();

                foreach (TermDocId tdi in all_terms)
                {
                    sb.AppendLine(String.Format("{0}={1}", tdi.Key, tdi.Value));
                }
                File.WriteAllText(Path.Combine(BaseDir, "term_all.ls"), sb.ToString());
            }
        }

        /// <summary>
        /// Orders two term/document pairs on term first, then on document id.
        /// (TermDocId is an alias for KeyValuePair&lt;String, Int32&gt;.)
        /// </summary>
        private static int CompareKeys(KeyValuePair<String, Int32> x, KeyValuePair<String, Int32> y)
        {
            int byTerm = x.Key.CompareTo(y.Key);

            return (byTerm != 0) ? byTerm : x.Value.CompareTo(y.Value);
        }

        public void Occurences(Int32 minDoc, Int32 maxDoc, Int32 minGlobal, Int32 maxGlobal)
        {
            using (new Profiling(this))
            {
                MinMax<Int32> doc = new MinMax<Int32>(minDoc == -1 ? 0 : minDoc, maxDoc == -1 ? Int32.MaxValue : maxDoc);
                MinMax<Int32> glo = new MinMax<Int32>(minGlobal == -1 ? 0 : minGlobal, maxGlobal == -1 ? Int32.MaxValue : maxGlobal);

                String infilename = Path.Combine(BaseDir, "term_all.ls");

                Dictionary<TermDocId, Int32> count_terms = new Dictionary<TermDocId, Int32>();

                String[] lines = File.ReadAllLines(infilename);

                String lastline = null;
                TermDocId tdi = default(TermDocId);
                Int32 count = 0;
                String[] kvp = null;

                //Walk all lines from term_all.ls!
                foreach (String line in lines)
                {
                    kvp = line.Split('=');

                    if (kvp.Length == 2)
                    {
                        //We assume the term_all.ls list to be sorted on both key and value (See Terms()).
                        if (line != lastline)
                        {
                            if (count != 0)
                            {
                                count_terms.Add(tdi, count);
                            }
                            tdi = new TermDocId(kvp[0], Int32.Parse(kvp[1]));
                            lastline = line;
                            count = 1;
                        }
                        else
                        {
                            count++;
                        }
                    }
                }

                //The count_terms list now contains all counts for each word/document combination.
                //This is in fact almost the sparse matrix we have to produce,
                //But we still have to filter on occurences.
                Console.WriteLine("KeyValuePairs: {0}", count_terms.Count);

                //php_sa Compatible Output
                StringBuilder term_occurrence = new StringBuilder();
                StringBuilder document_occurrence = new StringBuilder();
                StringBuilder collection_occurrence = new StringBuilder();
                StringBuilder index_terms = new StringBuilder();

                String term = "";
                String lastterm = null;
                Int32 nzcnt = 0;
                Int32 colcnt = 0;
                Int32 doccnt = 0;
                String docs = "";
                Int32 termcnt = 0;

                foreach (KeyValuePair<TermDocId, Int32> occurence in count_terms)
                {
                    term = occurence.Key.Key;

                    if (lastterm != term)
                    {
                        //Write all
                        if (!String.IsNullOrEmpty(lastterm))
                        {
                            //Filter occurences before adding them to the output!
                            if (doc.InRange(doccnt) && glo.InRange(colcnt))
                            {
                                term_occurrence.AppendLine(String.Format("{0}={1}", lastterm, docs.TrimEnd(';')));
                                document_occurrence.AppendLine(String.Format("{0}={1}", lastterm, doccnt));
                                collection_occurrence.AppendLine(String.Format("{0}={1}", lastterm, colcnt));
                                index_terms.AppendLine(String.Format("{0}={1}", termcnt++, lastterm));

                                //Sum of all doccnt (will be the NonZeroCount of the TxD Matrix).
                                nzcnt += doccnt;
                            }
                        }

                        //Reset values
                        lastterm = term;
                        docs = "";
                        colcnt = 0;
                        doccnt = 0;
                    }

                    //Add new
                    for (Int32 i = 0; i < occurence.Value; i++)
                    {
                        docs += occurence.Key.Value + ";";
                    }
                    colcnt += occurence.Value;
                    doccnt += 1;
                }

                File.WriteAllText(Path.Combine(BaseDir, "term_occurrence.ls"), term_occurrence.ToString());
                File.WriteAllText(Path.Combine(BaseDir, "document_occurrence.ls"), document_occurrence.ToString());
                File.WriteAllText(Path.Combine(BaseDir, "collection_occurrence.ls"), collection_occurrence.ToString());
                File.WriteAllText(Path.Combine(BaseDir, "index_term.ls"), collection_occurrence.ToString());

                using (IniFile ini = new IniFile("job.ini"))
                {
                    ini.WriteInteger("build", "terms", termcnt);
                    ini.WriteInteger("build", "nonzero", nzcnt);
                    ini.UpdateFile();
                }
            }
        }

        /// <summary>
        /// Builds the unweighted sparse Term x Document matrix from
        /// 'term_occurrence.ls' and saves it in Harwell-Boeing format as
        /// 'Term_NNN.hb' (NNN = StepCount). Matrix dimensions are taken
        /// from the [build] section of job.ini written by the build step.
        /// </summary>
        public void TermMatrix()
        {
            using (new Profiling(this))
            {
                //We need some dimensions for the Sparse TxD matrix.
                Int32 termcnt = 0;
                Int32 doccnt = 0;
                Int32 nzcnt = 0;

                using (IniFile ini = new IniFile("job.ini"))
                {
                    termcnt = ini.ReadInteger("build", "terms", termcnt);
                    doccnt = ini.ReadInteger("build", "documents", doccnt);
                    nzcnt = ini.ReadInteger("build", "nonzero", nzcnt);
                }

                Sparse<Double> TxD = new Sparse<Double>(termcnt, doccnt, nzcnt, "Unweighted Term x Document Matrix.");

                //Each line has the form 'term=doc1;doc1;doc2;...' where a document
                //id is repeated once per occurrence of the term in that document.
                String[] tos = File.ReadAllLines(Path.Combine(BaseDir, "term_occurrence.ls"));

                Int32 row = 0;
                foreach (String to in tos)
                {
                    String[] kvp = to.Split('=');
                    String[] docs = kvp[1].Split(';');

                    //Run-length count consecutive equal document ids to obtain
                    //the term frequency per document.
                    Int32 j = 0;
                    while (j < docs.Length)
                    {
                        String col = docs[j];
                        Int32 value = 0;

                        while (j < docs.Length && col == docs[j])
                        {
                            value++;
                            j++;
                        }

                        //NOTE(review): document ids in the .ls files appear to be
                        //1-based while the sparse matrix is 0-based; the -1 below
                        //compensates — confirm against the exporter that wrote them.
                        Int32 ndx = Int32.Parse(col) - 1;
                        TxD.AddElement(row, ndx, value);
                    }

                    row++;
                }

                TxD.SaveHarwellBoeing(Path.Combine(BaseDir, String.Format("Term_{0:000}.hb", StepCount)));
            }
        }

        //Values for the 'additionalmemory' parameter of ALGLIB's
        //svd.rmatrixsvd (higher = more workspace memory, faster decomposition).
        const Int32 alSmallMem = 0;
        const Int32 alMediumMem = 1;
        const Int32 alLargeMem = 2;

        //Values for the 'uneeded'/'vtneeded' parameters of ALGLIB's
        //svd.rmatrixsvd: do not compute, compute the reduced form,
        //or compute the full U/VT matrices.
        const Int32 alDontCalc = 0;
        const Int32 alMinimumCalc = 1;
        const Int32 alFullCalc = 2;

        /// <summary>
        /// Decomposes the Harwell-Boeing term matrix 'Term_NNN.hb' with
        /// ALGLIB's singular value decomposition and saves the factors as
        /// Harwell-Boeing files: out-S (singular values), out-U/out-Ut
        /// (term vectors) and out-V/out-Vt (document vectors).
        /// </summary>
        /// <returns>The RowCount of the U factor on success, 0 on failure.
        /// NOTE(review): the inline comment calls this the 'number of
        /// eigenvalues', but U's RowCount is the number of matrix rows
        /// (terms) — confirm which value callers actually expect.</returns>
        public Int32 Decompose()
        {
            using (Profiling decomp = new Profiling(this))
            {
                Int32 Result = 0;

                //The matrix produced by TermMatrix() for the current step.
                String infile = Path.Combine(BaseDir, String.Format("Term_{0:000}.hb", StepCount));

                Sparse<Double> M = new Sparse<Double>();
                M.LoadHarwellBoeing(infile);

                Int32 rows = M.RowCount;
                Int32 cols = M.ColCount;

                //ALGLIB works on dense arrays; U, VT and W are outputs and are
                //(re)allocated by rmatrixsvd.
                Double[,] A = M.GetAs2DArray();
                Double[,] U = new Double[0, 0];
                Double[,] VT = new Double[0, 0];
                Double[] W = new Double[0];

                //Request full U and VT; alLargeMem trades memory for speed.
                if (!svd.rmatrixsvd(A, rows, cols, alFullCalc, alFullCalc, alLargeMem, ref W, ref U, ref VT))
                {
                    Console.WriteLine("ALGLIB SVD decomposition failed!");
                }
                else
                {
                    Console.WriteLine("ALGLIB SVD decomposition succeeded!");

                    //Remnants of the original Delphi implementation, kept for reference.
                    //terms := TStringList.Create;
                    //terms.LoadFromFile(ExtractFilePath(infile) + ChangeFileExt('index_terms', SLext));

                    //docs := TStringList.Create;
                    //docs.LoadFromFile(ExtractFilePath(infile) + ChangeFileExt('index_documents', SLext));

                    //Converting Decomposition Output back to Sparse Matrices and save them as Harwell Boeing Files.
                    decomp.Add();

                    //One Sparse instance is reused (and cleared) for each factor.
                    Sparse<Double> svm = new Sparse<Double>();

                    using (new Profiling("Saving Decomposed Matrix - out-S"))
                    {
                        //W holds the singular values; stored as a diagonal matrix.
                        svm.SetAs1DArray(W);
                        svm.Comment = "Singular Values";
                        svm.SaveHarwellBoeing(Path.Combine(BaseDir, "out-S" + SparseExtension.HBext));
                        svm.Clear();
                    }

                    using (new Profiling("Saving Decomposed Matrix - out-U"))
                    {
                        svm.SetAs2DArray(U);
                        svm.Comment = "Term Vector Matrix";

                        //Return number of eigenvalues!
                        Result = svm.RowCount;

                        //Save both U and its transpose Ut.
                        svm.SaveHarwellBoeing(Path.Combine(BaseDir, "out-U" + SparseExtension.HBext));
                        svm.Transpose();
                        svm.SaveHarwellBoeing(Path.Combine(BaseDir, "out-Ut" + SparseExtension.HBext));
                        svm.Clear();
                    }

                    using (new Profiling("Saving Decomposed Matrix - out-Vt"))
                    {
                        //ALGLIB returns V pre-transposed (VT); save it, then its
                        //transpose V. NOTE(review): no svm.Clear() here, unlike
                        //the two blocks above — likely harmless since svm goes
                        //out of scope, but confirm it was intentional.
                        svm.SetAs2DArray(VT);
                        svm.Comment = "Document Vector Matrix";
                        svm.SaveHarwellBoeing(Path.Combine(BaseDir, "out-Vt" + SparseExtension.HBext));

                        svm.Transpose();
                        svm.SaveHarwellBoeing(Path.Combine(BaseDir, "out-V" + SparseExtension.HBext));
                    }
                }

                return Result;
            }
        }

        /// <summary>
        /// Estimate Rank based on Jan van Bruggen's Estimation Method,
        /// scanning the complete range of singular values.
        /// </summary>
        /// <param name="Percentage">The percentage to explain</param>
        /// <returns>The estimated Rank</returns>
        public Int32 EstimateRank(Int32 Percentage)
        {
            //-1 is the sentinel for 'no limit' on both rank bounds.
            const Int32 noLimit = -1;

            return EstimateRank(noLimit, noLimit, Percentage);
        }

        /// <summary>
        /// Estimate Rank based on Jan van Bruggen's Estimation Method:
        /// find the smallest rank whose cumulative squared singular values
        /// explain the requested percentage of the total variance.
        /// 
        /// Note: The upper left corner may hold a very large 'base 
        /// offset' value that can be ignored by specifying a minRank.
        /// </summary>
        /// <param name="minRank">Minimum Rank to start or -1 to start the top left corner</param>
        /// <param name="maxRank">Maximum Rank to start or -1 to end in the lower left corner</param>
        /// <param name="Percentage">The percentage to explain</param>
        /// <returns>The estimated Rank</returns>
        public Int32 EstimateRank(Int32 minRank, Int32 maxRank, Int32 Percentage)
        {
            using (new Profiling(this))
            {
                //Load the (diagonal) singular value matrix written by Decompose().
                Sparse<Double> s = new Sparse<Double>();

                s.LoadHarwellBoeing(Path.Combine(BaseDir, "out-S" + SparseExtension.HBext));

                //Clip the requested range against the available rank.
                Int32 fr = minRank == -1 ? 0 : Math.Min(minRank, s.ColCount);
                Int32 tr = maxRank == -1 ? s.ColCount : Math.Min(maxRank, s.ColCount);

                Console.WriteLine("Requested Rank: {0}..{1}", minRank, maxRank);
                Console.WriteLine("Actual Rank: {0}..{1}", fr, tr);

                //Default to the full rank if the threshold is never reached.
                Int32 Result = s.ColCount;

                //The variance carried by each singular value is its square;
                //sum over the selected range to get the total variance.
                Double sumSqr = 0;
                for (Int32 i = fr; i < tr; i++)
                {
                    sumSqr += s[i, i] * s[i, i];
                }
                Double limit = sumSqr * ((Double)Percentage / 100);

                Console.WriteLine("Limit {0} explains {1}% Variance", limit, Percentage);

                Double explained = 0;

                for (Int32 i = fr; i < tr; i++)
                {
                    explained += s[i, i] * s[i, i];

                    //Fix: report the rank currently being evaluated. The original
                    //printed 'Result', which is not assigned until the break below
                    //and therefore showed s.ColCount on every iteration.
                    Console.WriteLine("Rank {0} explains {1}", i, explained);

                    if (explained >= limit)
                    {
                        Result = i;
                        break;
                    }
                }

                Console.WriteLine("Estimated Rank: {0}", Result);

                return Result;
            }
        }

        /// <summary>
        /// Reduce the SVD Matrices to the rank range [minRank, maxRank].
        /// 
        /// Reads out-S, out-U and out-Vt and writes the rank reduced
        /// counterparts R-out-S, R-out-U and R-out-V/R-out-Vt.
        /// </summary>
        /// <param name="minRank">Minimum Rank to start or -1 to start the top left corner</param>
        /// <param name="maxRank">Maximum Rank to start or -1 to end in the lower left corner</param>
        public void Reduce(Int32 minRank, Int32 maxRank)
        {
            using (new Profiling(this))
            {
                using (new Profiling("Reducing S"))
                {
                    Sparse<Double> S = new Sparse<Double>();

                    S.LoadHarwellBoeing(Path.Combine(BaseDir, "out-S" + SparseExtension.HBext));

                    //Clip the requested range against the available rank.
                    Int32 fr = minRank == -1 ? 0 : Math.Min(minRank, S.ColCount);
                    Int32 tr = maxRank == -1 ? S.ColCount : Math.Min(maxRank, S.ColCount);

                    Console.WriteLine("Requested Rank: {0}..{1}", minRank, maxRank);
                    Console.WriteLine("Actual Rank: {0}..{1}", fr, tr);

                    //Reduce Rank of S: S is diagonal, so crop rows and columns alike.
                    S.Reduce(fr, fr, tr, tr);

                    S.Comment = "Reduced Singular Values";
                    S.SaveHarwellBoeing(Path.Combine(BaseDir, "R-out-S" + SparseExtension.HBext));
                }

                using (new Profiling("Reducing Ut"))
                {
                    Sparse<Double> U = new Sparse<Double>();

                    U.LoadHarwellBoeing(Path.Combine(BaseDir, "out-U" + SparseExtension.HBext));
                    U.Transpose();

                    //Clip the requested range against the available rank.
                    Int32 fr = minRank == -1 ? 0 : Math.Min(minRank, U.ColCount);
                    Int32 tr = maxRank == -1 ? U.ColCount : Math.Min(maxRank, U.ColCount);

                    Console.WriteLine("Requested Rank: {0}..{1}", minRank, maxRank);
                    Console.WriteLine("Actual Rank: {0}..{1}", fr, tr);

                    //Reduce Rank of Ut: keep rows fr..tr. NOTE(review): the last
                    //argument passes RowCount where the S case passed a column
                    //bound — verify against Sparse.Reduce's parameter contract.
                    U.Reduce(fr, 0, tr, U.RowCount);

                    U.Comment = "Reduced Term Vector Matrix";
                    U.SaveHarwellBoeing(Path.Combine(BaseDir, "R-out-U" + SparseExtension.HBext));

                    U.Clear();
                }

                using (new Profiling("Reducing Vt"))
                {
                    Sparse<Double> V = new Sparse<Double>();

                    V.LoadHarwellBoeing(Path.Combine(BaseDir, "out-Vt" + SparseExtension.HBext));

                    //Clip the requested range against the available rank.
                    Int32 fr = minRank == -1 ? 0 : Math.Min(minRank, V.ColCount);
                    Int32 tr = maxRank == -1 ? V.ColCount : Math.Min(maxRank, V.ColCount);

                    Console.WriteLine("Requested Rank: {0}..{1}", minRank, maxRank);
                    Console.WriteLine("Actual Rank: {0}..{1}", fr, tr);

                    //Reduce Rank of Vt: keep columns fr..tr. NOTE(review): the
                    //third argument passes ColCount where a row bound seems
                    //expected — verify against Sparse.Reduce's parameter contract.
                    V.Reduce(0, fr, V.ColCount, tr);

                    V.Comment = "Reduced Document Vector Matrix";
                    V.SaveHarwellBoeing(Path.Combine(BaseDir, "R-out-Vt" + SparseExtension.HBext));
                    V.Transpose();
                    V.SaveHarwellBoeing(Path.Combine(BaseDir, "R-out-V" + SparseExtension.HBext));

                    V.Clear();
                }
            }

        }

        /// <summary>
        /// Calculate the Reduced Term x Document Space (Ur.Sr.Vtr) from the
        /// rank reduced factors written by Reduce() and save it as
        /// 'reducedspace' in Harwell-Boeing format.
        /// </summary>
        public void ReducedSpace()
        {
            using (new Profiling(this))
            {
                //Load the rank reduced term vector matrix (Ur).
                Sparse<Double> termVectors = new Sparse<Double>();
                termVectors.LoadHarwellBoeing(Path.Combine(BaseDir, "R-out-U" + SparseExtension.HBext));

                //Load the rank reduced singular values (Sr).
                Sparse<Double> singularValues = new Sparse<Double>();
                singularValues.LoadHarwellBoeing(Path.Combine(BaseDir, "R-out-S" + SparseExtension.HBext));

                //Intermediate product: Ur.Sr
                Sparse<Double> scaledTerms = new Sparse<Double>();
                scaledTerms.Multiply(termVectors, singularValues, "");

                //Release the factors as soon as they are no longer needed.
                singularValues.Clear();
                termVectors.Clear();

                //Load the rank reduced document vector matrix (Vtr).
                Sparse<Double> docVectors = new Sparse<Double>();
                docVectors.LoadHarwellBoeing(Path.Combine(BaseDir, "R-out-Vt" + SparseExtension.HBext));

                //Final product Ur.Sr.Vtr = the Rank Reduced Term x Document Matrix.
                Sparse<Double> reducedSpace = new Sparse<Double>();
                reducedSpace.Multiply(scaledTerms, docVectors, "Rank Reduced Term x Document matrix");

                reducedSpace.SaveHarwellBoeing(Path.Combine(BaseDir, "reducedspace" + SparseExtension.HBext));
            }
        }

        #endregion Lsa Exports

        #region IDisposable Members

        /// <summary>
        /// Used to implement IDisposable.
        /// Explicit interface implementation that forwards to the class's
        /// public Dispose() method (declared elsewhere in this class).
        /// </summary>
        void IDisposable.Dispose()
        {
            this.Dispose();
        }

        #endregion

        #region Other



        #endregion Other
    }
}


