using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Diagnostics;
using System.IO;
using System.Text;
using System.Text.RegularExpressions;
using Lucene.Net.Analysis;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Search.Spans;
using Lucene.Net.Store;
using Palaso;
using Palaso.Reporting;

namespace SpellCenter.Core
{
    sealed public class CorpusIndex
    {
      /// <summary>
      /// The files in corpus is a more abstract concept of what should be
      /// indexed
      /// </summary>
      private readonly FilesInCorpusList _filesInCorpus;

      // receives progress and cancellation callbacks for long-running operations
      private readonly ITaskMonitor _taskMonitor;
      // records what is currently in the index; compared against the corpus
      // file list to detect files to add, remove or update (see constructor)
      private PackingList _packingList;

      /// <summary>
      /// Loads the packing list for the given corpus, makes sure a valid index
      /// exists (creating or repairing it if necessary), then runs Update()
      /// to reconcile the index with the corpus files.
      /// </summary>
      /// <param name="filesInCorpus">describes what should be indexed</param>
      /// <param name="taskMonitor">receives progress/cancellation callbacks</param>
      public CorpusIndex(FilesInCorpusList filesInCorpus, ITaskMonitor taskMonitor)
        {
            _filesInCorpus = filesInCorpus;
            _taskMonitor = taskMonitor;

            // Location also creates the index directory on disk if it is missing
            PackingListFileName = Path.Combine(Location, PackingList.DEFAULT_FILENAME);
            _packingList = PackingList.CreateFromFile(PackingListFileName);
            // will need to compare our packing list (what is currently in the 
            // index) to what we have been requested to put into the index.

            // Any files that are in the packing list but not in the 
            // corpus files list should be dropped from the index. 
            // Any files that are in the packing list but have a timestamp 
            // that differs from the one in the
            // packing list will need to be updated in the index.
            CreateNewIndexIfCorruptOrDoesNotExist();
            Update();
          }

          /// <summary>
          /// return true if no files have been added, deleted
          /// or updated since they were indexed.
          /// </summary>
          /// <returns>true when the index already reflects the corpus file list</returns>
          public bool IsUpToDate()
          {
            IEnumerable<string> toAdd;
            IEnumerable<string> toRemove;
            IEnumerable<string> toUpdate;
            // diff what we have been asked to index against what the index holds
            PackingList.Compare(new PackingList(_filesInCorpus.Files),
                                _packingList,
                                out toAdd,
                                out toRemove,
                                out toUpdate);
            // up to date only when all three change sets are empty
            return !(HasContent(toRemove) || HasContent(toAdd) || HasContent(toUpdate));
          }

        /// <summary>
        /// Returns true when the enumerable contains at least one element.
        /// </summary>
        private static bool HasContent<T>(IEnumerable<T> enumerable)
        {
          // MoveNext returns false when there is no content, so testing whether
          // we can move to the first element answers the question.
          // FIX: IEnumerator<T> is IDisposable; the original never disposed it,
          // which leaks any resources the enumerator holds.
          using (IEnumerator<T> enumerator = enumerable.GetEnumerator())
          {
            return enumerator.MoveNext();
          }
        }

        // root folder (under CommonApplicationData\SpellCenter) that holds one
        // index sub-directory per corpus; see GetIndexDirectory
        private static readonly string IndicesRootPath = Path.Combine(Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData), "SpellCenter"),"indices");

        /// <summary>
        /// Path of this corpus's index directory; the directory is created on
        /// disk the first time the property is read.
        /// </summary>
        private string Location
        {
            get
            {
                // the System.IO prefix is required: Lucene.Net.Store also
                // declares a Directory type and both namespaces are imported
                string indexPath = GetIndexDirectory(_filesInCorpus);
                if (!System.IO.Directory.Exists(indexPath))
                {
                    System.IO.Directory.CreateDirectory(indexPath);
                }
                return indexPath;
            }
        }

      /// <summary>
      /// Path of this corpus's index directory (not created if missing;
      /// see Location for the creating variant).
      /// </summary>
      public string IndexDirectory
      {
        get
        {
          string path = GetIndexDirectory(_filesInCorpus);
          return path;
        }
      }

      /// <summary>
      /// Maps a corpus to its index directory: a sub-directory of the shared
      /// indices root named after the corpus id.
      /// </summary>
      static public string GetIndexDirectory(FilesInCorpusList filesInCorpus)
      {
        string corpusFolder = filesInCorpus.Id.ToString();
        return Path.Combine(IndicesRootPath, corpusFolder);
      }

        /// <summary>
        /// Ensures a usable index exists: tries to open/create it, and if that
        /// fails with an IOException (our signal for corruption), wipes the
        /// index files and builds a fresh one.
        /// </summary>
        private void CreateNewIndexIfCorruptOrDoesNotExist()
        {
            try
            {
                CreateIndex();
            }
            catch (IOException error)
            {
                Logger.WriteEvent("Caught corrupted index: {0}, delete index and retry.", error.Message);
                DeleteIndexFiles();
                CreateIndex();
            }
        }

      /// <summary>
      /// Deletes every file in the index directory (used to recover from a
      /// corrupted index before rebuilding it).
      /// </summary>
      private void DeleteIndexFiles()
      {
        FSDirectory directory = FSDirectory.GetDirectory(Location);
        try
        {
          foreach (string file in directory.List())
          {
            ForceDeleteOfLuceneFile(directory, file);
          }
        }
        finally
        {
          // FIX: the original leaked the FSDirectory handle if listing or
          // deleting threw; always close it
          directory.Close();
        }
      }

        /// <summary>
        /// occasionally a file called something like '_1.cfs' will hold a write lock for several
        /// seconds after it should have released it. This is a hack that should only be needed 
        /// for extreme circumstances such as unit testing.
        /// </summary>
      private static void ForceDeleteOfLuceneFile(Lucene.Net.Store.Directory directory, string file)
        {
            while(true)
            {
                try
                {
                    directory.DeleteFile(file);
                    break;
                }
                catch(IOException e)
                {
                    Logger.WriteEvent("Attempted to delete file '{0}', but: {1}", file, e.Message);
                    // FIX: back off briefly instead of busy-spinning at 100% CPU
                    // (and flooding the log) while the lock is still held
                    System.Threading.Thread.Sleep(50);
                }
            }
        }

        /// <summary>
        /// Opens (creating if necessary) the index at Location.
        /// Throws IOException when the existing index is corrupted; the caller
        /// uses that as the signal to wipe and rebuild.
        /// </summary>
        private void CreateIndex()
        {
          NotifyTaskStarted(1);
          // This creates a new index with a 'segments' file if it doesn't exist.
          //  It also throws an IOException if it is corrupted. 
          //  Choice of SfmAnalyzer is arbitrary.
          FSDirectory directory = FSDirectory.GetDirectory(Location);
          try
          {
            CreateIndex(directory);
          }
          finally
          {
            // FIX: the original leaked the directory handle when CreateIndex
            // threw (exactly the corrupted-index path the caller relies on)
            directory.Close();
          }
          NotifyTaskCompleted();
        }

      /// <summary>
      /// Creates/opens an index in the given directory and ensures it contains
      /// the single MetaDocumentModel document every index must have.
      /// </summary>
      internal static void CreateIndex(Lucene.Net.Store.Directory directory)
      {
        IndexWriter writer = new IndexWriter(directory, new SfmAnalyzer());
        try
        {
          // there is always one and only one MetaDocumentModel document
          // in the index
          MetaDocumentModel model = new MetaDocumentModel();
          model.CreateDocument(writer);
        }
        finally
        {
          // FIX: close the writer even when document creation throws, so the
          // index write lock is always released
          writer.Close();
        }
      }

      /// <summary>Tells the monitor a task with the given number of steps began.</summary>
      private void NotifyTaskStarted(int subTaskCount)
      {
        _taskMonitor.OnTaskStarted(subTaskCount);
      }

      /// <summary>Tells the monitor the current task finished.</summary>
      private void NotifyTaskCompleted()
      {
        _taskMonitor.OnTaskFinished();
      }

      /// <summary>Tells the monitor a step with the given description began.</summary>
      private void NotifySubTaskStarted(string description)
      {
        _taskMonitor.OnStepStarted(description);
      }

      /// <summary>Tells the monitor the current step finished.</summary>
      private void NotifySubTaskCompleted()
      {
        _taskMonitor.OnStepFinished();
      }

      /// <summary>True when the user asked the monitor to cancel the task.</summary>
      private bool CancelTaskRequested()
      {
        return _taskMonitor.IsCancelled;
      }

      /// <summary>
      /// Enumerates every distinct word in the TEXT field of the index,
      /// together with how many times it occurs.
      /// </summary>
      public IEnumerable<SpellWordInfo> GetWords()
      {
        Collection<SpellWordInfo> words = new Collection<SpellWordInfo>();
        // NOTE(review): locking on a field visible to callers; a dedicated
        // private lock object would be safer, but changing it here could break
        // code elsewhere that locks the same object
        lock (_filesInCorpus)
        {
          FSDirectory directory = FSDirectory.GetDirectory(Location);
          try
          {
            IndexReader indexReader = IndexReader.Open(directory);
            try
            {
              TermEnum terms = indexReader.Terms();
              while (terms.Next())
              {
                Term t = terms.Term();
                // terms are sorted by field, so once we run out of TEXT terms
                // there is nothing left to collect
                if (!SwallowNonText(terms, ref t))
                  break;
                string word = t.Text();
                int occurrences = GetOccurrenceCountOfWord(indexReader, word);
                words.Add(new SpellWordInfo(this, word, occurrences));
              }
            }
            finally
            {
              // FIX: the original leaked the reader and directory when any of
              // the Lucene calls threw
              indexReader.Close();
            }
          }
          finally
          {
            directory.Close();
          }
        }
        return words;
      }


        /// <summary>
        /// Advances the enumeration past any terms that do not belong to the
        /// TEXT field, leaving <paramref name="t"/> on the first TEXT term.
        /// </summary>
        /// <returns>false when the enumeration is exhausted first</returns>
        private static bool SwallowNonText(TermEnum terms, ref Term t)
        {
            for (;;)
            {
                if (t.Field() == DocumentHelper.TEXT)
                    return true;
                if (!terms.Next())
                    return false;
                t = terms.Term();
            }
        }

        //private bool IsTermContainedInFilterDocumentList(Term t)
        //{
        //    //if (Filter == null)
        //        return true;

        //    //SpanTermQuery query = new SpanTermQuery(t);
        //    //Spans spans = query.GetSpans(_index);

        //    //// if iterator is not empty, there were results for that term
        //    ////  in this per
        //    //while (spans.Next())
        //    //{
        //    //    DocumentWrapper document = new DocumentWrapper(_index.Document(spans.Doc()));
        //    //    if (document.FileName == file)
        //    //    {
        //    //        return true;
        //    //    }
        //    //}
        //    //return false;
        //}

        /// <summary>
        /// Finds every occurrence of <paramref name="word"/> recorded in the index
        /// and returns each with the surrounding text read from its source file.
        /// </summary>
        /// <param name="word">Word to find occurrences of</param>
        /// <param name="resolution">The number of characters before and after the word to include in context</param>
        /// <returns>one WordContext per occurrence</returns>
        public IEnumerable<WordContext> GetOccurrencesOfWord(string word, int resolution)
        {
          Collection<WordContext> wordContexts = new Collection<WordContext>();
          FSDirectory directory = FSDirectory.GetDirectory(Location);
          IndexReader indexReader = IndexReader.Open(directory);

          // each span is one occurrence of the word in some indexed document
          Spans spans = GetSpans(indexReader, word);
          Debug.Assert(spans != null);
          while (spans.Next())
          {
            Document doc = indexReader.Document(spans.Doc());
            string tag = DocumentHelper.GetTag(doc);
            // 'c', 'v' and 'id' documents have one swallowed identifier token, so
            // the span's token position is off by one (see ParseFile remarks)
            int ignoredTokens = IgnoredTokens(tag);
            WordContext wordContext;

            string filename = DocumentHelper.GetFileName(doc);
            string reference = DocumentHelper.GetReference(doc);
            long offset = DocumentHelper.GetOffset(doc, indexReader);
            long wordOffsetWithinFile;
            int start = spans.Start() + ignoredTokens;
            long textOffsetInDoc;
            // first pass: re-tokenize the document from the source file to turn
            // the span's token position into a character offset within the file
            using (StreamReader file = File.OpenText(filename))
            {
              long charsBeforeDocument = SeekByCharacterThroughFile(offset, file);

              SfmAnalyzer analyzer = new SfmAnalyzer();
              TokenStream tokens = analyzer.TokenStream(DocumentHelper.TEXT, file);

              Token t = null;
              // walk through 'start' tokens so t ends on the word itself
              for (int i = 0; i <= start; i++)
              {
                t = tokens.Next();
              }
              textOffsetInDoc = charsBeforeDocument + (t == null ? 0 : t.StartOffset());
            }

            string context;
            // second pass: read 'resolution' characters either side of the word
            // to build the display context
            using (StreamReader reader = new StreamReader(File.OpenRead(filename)))
            {
              long startReadMarker = textOffsetInDoc - resolution;
              SeekByCharacterThroughFile(startReadMarker, reader);

              // when the word is closer than 'resolution' to the start of the
              // file, the left side of the window is truncated (correction <= 0)
              int correction = (int) (startReadMarker < 0 ? startReadMarker : 0);
              wordOffsetWithinFile = resolution + correction;
              char[] buffer = new char[(resolution * 2) + word.Length + correction];

              long correction2 = 0;
              int read = reader.Read(buffer, 0, buffer.Length);
              string ret = new string(buffer, 0, read);
              // flatten each CRLF to a single space, reading one extra character
              // per pair removed so the context keeps its requested width;
              // NOTE(review): 'indexOf < textOffsetInDoc' compares an offset
              // within the context buffer to an offset within the file — looks
              // suspicious, confirm before relying on wordOffsetWithinFile
              while (ret.Contains("\r\n"))
              {
                int c = reader.Read();
                int indexOf = ret.IndexOf("\r\n");
                ret = ret.Remove(indexOf, 2).Insert(indexOf, " ") + (c == -1 ? "" : "" + (char) c);
                if (indexOf < textOffsetInDoc)
                {
                  correction2--;
                }
              }
              context = ret.Replace('\n', ' ');
              wordOffsetWithinFile += correction2;
            }

            wordContext = new WordContext(word,
                                          context,
                                          (int) wordOffsetWithinFile,
                                          reference);
            // we can't just yield this as it will keep the file lock
            wordContexts.Add(wordContext);
          }
          indexReader.Close();
          directory.Close();
          return wordContexts;
        }

      /// <summary>
      /// Counts how many times the word occurs in the TEXT field by exhausting
      /// its span enumeration.
      /// </summary>
      private static int GetOccurrenceCountOfWord(IndexReader index, string word)
      {
        Spans spans = GetSpans(index, word);
        Debug.Assert(spans != null);

        int count = 0;
        for (; spans.Next(); count++)
        {
        }
        return count;
      }

        /// <summary>
        /// Builds the span enumeration of every occurrence of the word in the
        /// TEXT field of the index.
        /// </summary>
        static private Spans GetSpans(IndexReader index, string word)
        {
            Term term = new Term(DocumentHelper.TEXT, word);
            return new SpanTermQuery(term).GetSpans(index);
        }


      //todo use seek
      /// <summary>
      /// Advances the reader by up to <paramref name="n"/> characters and
      /// returns the number actually consumed (less than n only when the
      /// stream ends first; 0 when n is zero or negative).
      /// </summary>
      private static long SeekByCharacterThroughFile(long n, StreamReader file)
      {
        // This is the alternative to using TextReader.Read(char[],int,int) 
        // It should take less memory, especially on large corpuses, but it might be slower
        long i;
        for (i = 0; i < n; i++)
        {
          // FIX: Read() returns -1 at end of stream WITHOUT consuming a
          // character; the old post-read EndOfStream check broke out after a
          // successful read, under-counting the consumed characters by one
          // whenever the stream ended inside the requested range.
          if (file.Read() == -1)
          {
            break;
          }
        }
        return i;
      }

        #region ReplaceAllWords

        /// <summary>
        /// Replaces every occurrence of <paramref name="word"/> in the corpus
        /// with <paramref name="newWord"/>: rewrites each affected source file
        /// (staged in memory, flushed to a '~' backup, then swapped in by
        /// ReindexAndCleanup) and reindexes the changed documents.
        /// </summary>
        public void ReplaceAllWords(string word, string newWord)
        {
          FSDirectory directory = FSDirectory.GetDirectory(Location);
          IndexReader indexReader = IndexReader.Open(directory);
          try
          {
            SpanTermQuery query = new SpanTermQuery(new Term(DocumentHelper.TEXT, word));
            Spans spans = query.GetSpans(indexReader);
            // one progress step per occurrence to be replaced
            NotifyTaskStarted(CountSpans(indexReader, query));
            // per file: accumulated character shift caused by replacements
            // already applied to that file
            Dictionary<string, int> offsetOfLuceneDocumentsInFiles = new Dictionary<string, int>();
            int wordLengthDiff = newWord.Length - word.Length; // shorter word means negative diff

            StreamReader reader = null;
            StreamWriter writer = null;
            // state carried over from the previous span, so that consecutive
            // occurrences in the same document/file can be stitched together
            string newDoc = string.Empty;
            Guid lastId = Guid.Empty;
            string lastFilename = string.Empty;
            string lastBook = string.Empty;
            string lastChapter = string.Empty;
            string lastVerse = string.Empty;
            long lastRawStart = 0;
            long lastRawEnd = 0;

            Guid id = Guid.Empty;
            int diffWithinDoc = 0;
            if (_taskMonitor.IsCancelled)
              return;

            // From Lucene.NET documentation:
            // Each span represents a range of term positions within a document.
            // Matches are enumerated in order, by increasing document number, 
            // within that by increasing start position and finally by 
            // increasing end position.
            while (spans.Next())
            {
              Document document = indexReader.Document(spans.Doc());
              string fileName = DocumentHelper.GetFileName(document);
              NotifySubTaskStarted(string.Format("Replacing words in {0}", fileName));

              long startOfDocInFile = DocumentHelper.GetStartOfDocInFile(document, indexReader);
              long end = DocumentHelper.GetEnd(document, indexReader);
              string tag = DocumentHelper.GetTag(document);
              id = DocumentHelper.GetId(document);

              if (!offsetOfLuceneDocumentsInFiles.ContainsKey(fileName))
              {   // This file hasn't been processed yet, open new streams
                offsetOfLuceneDocumentsInFiles.Add(fileName, 0);
                if (reader != null)
                {
                  // finish the previous file: persist its rewritten content to
                  // its '~' backup before switching to the new file
                  FlushWriterToFile(writer, lastFilename + '~');
                  reader.Dispose();
                  writer.Dispose();
                }
                reader = File.OpenText(fileName);
                writer = new StreamWriter(new MemoryStream());
              }
              else
              {
                // same file as a previous span: what has been written so far
                // becomes the new input, and writing restarts into a fresh
                // memory buffer
                reader.Dispose();
                writer.Flush();
                MemoryStream mem = (MemoryStream) writer.BaseStream;
                reader = new StreamReader(new MemoryStream(mem.GetBuffer(), 0, (int) mem.Length));
                writer.Dispose();
                writer = new StreamWriter(new MemoryStream((int) reader.BaseStream.Length));
              }
              Debug.Assert(reader != null);
              Debug.Assert(writer != null);
              Debug.Assert(offsetOfLuceneDocumentsInFiles.ContainsKey(fileName));

              // reindex the PREVIOUS document (ReIndexDocument is a no-op while
              // the enumeration is still within the same document id)
              ReIndexDocument(id,
                              newDoc,
                              wordLengthDiff,
                              false,
                              lastId,
                              lastFilename,
                              lastBook,
                              lastChapter,
                              lastVerse,
                              lastRawStart,
                              lastRawEnd);

              newDoc = ReplaceDocumentWithNewDocument(word,
                                                      newWord,
                                                      spans,
                                                      id,
                                                      startOfDocInFile,
                                                      end,
                                                      tag,
                                                      offsetOfLuceneDocumentsInFiles,
                                                      fileName,
                                                      reader,
                                                      writer,
                                                      lastId,
                                                      ref diffWithinDoc,
                                                      newDoc,
                                                      wordLengthDiff);
              writer.Write(newDoc);
              // copy the untouched remainder of the file after the document
              WriteAllCharactersFromStream(writer, reader);
              offsetOfLuceneDocumentsInFiles[fileName] += wordLengthDiff;
              lastFilename = fileName;
              lastId = id;
              diffWithinDoc += wordLengthDiff;

              lastBook = DocumentHelper.GetBook(document);
              lastChapter = DocumentHelper.GetChapter(document);
              lastVerse = DocumentHelper.GetVerse(document);
              lastRawStart = DocumentHelper.GetRawStart(document);
              lastRawEnd = DocumentHelper.GetRawEnd(document);

              NotifySubTaskCompleted();
            }
            if (reader != null)
            {
              reader.Dispose();
            }
            if (writer != null)
            {
              // persist the final file's rewritten content to its backup
              FlushWriterToFile(writer, lastFilename + '~');
              writer.Dispose();
            }

            // 'always' flag forces reindexing of the final document
            ReIndexDocument(id,
                            newDoc,
                            wordLengthDiff,
                            true,
                            lastId,
                            lastFilename,
                            lastBook,
                            lastChapter,
                            lastVerse,
                            lastRawStart,
                            lastRawEnd);

            // commit: move backups over the originals, update the packing list
            ReindexAndCleanup(offsetOfLuceneDocumentsInFiles);

            NotifyTaskCompleted();
          }
          finally
          {
            indexReader.Close();
            directory.Close();
          }
        }

      /// <summary>
      /// Counts the total number of span matches for the query by exhausting
      /// a fresh enumeration.
      /// </summary>
      static private int CountSpans(IndexReader reader, SpanQuery query)
      {
        int total = 0;
        Spans spans = query.GetSpans(reader);
        while (spans.Next())
        {
          total++;
        }
        return total;
      }

      /// <summary>
      /// Re-parses <paramref name="newDoc"/> (the replaced text of the PREVIOUS
      /// document, identified by the last* parameters) back into the index, but
      /// only once the enumeration has moved on to a different document id —
      /// or unconditionally when <paramref name="always"/> is true (final flush).
      /// </summary>
      /// <param name="docId">id of the document currently being processed</param>
      /// <param name="newDoc">replacement text of the previous document</param>
      /// <param name="diff">character shift introduced by the replacement</param>
      /// <param name="always">when true, reindex even if docId == lastId</param>
      private void ReIndexDocument(Guid docId, string newDoc, int diff, bool always, Guid lastId, string lastFileName, string lastBook, string lastChapter, string lastVerse, long lastRawStart, long lastRawEnd)
        {
          // nothing has been processed yet, so there is nothing to reindex
          if (!always && (lastId == Guid.Empty))
          {
            return;
          }

          if (always || docId != lastId)
          {
            // load the current offset-tracking metadata document from the index
            FSDirectory directory = FSDirectory.GetDirectory(Location);
            IndexReader reader = IndexReader.Open(directory);
            MetaDocumentModel metaModel = MetaDocumentModel.GetFromMeta(FragmentMovementTracker.GetDocument(reader));
            // the reader must be closed before an IndexWriter can take the lock
            reader.Close();

            IndexWriter indexWriter = new IndexWriter(directory, new SfmAnalyzer());
                ParseDocument(newDoc, indexWriter, metaModel,
                  lastId,
                                    lastFileName,
                                    lastBook,
                                    lastChapter,
                                    lastVerse,
                                    lastRawStart,
                                    lastRawEnd,
                                    diff);
            indexWriter.Optimize();
            indexWriter.Close();
            directory.Close();
          }
        }

        /// <summary>
        /// Parse an SFM file, adding one Lucene document per SFM field to the
        /// index and recording each field's start/end offsets in the meta model.
        /// </summary>
        /// <remarks>
        /// Documents with a tag of 'v', 'c' or 'id' will have spans offsets that are -1 what they should be.
        /// This is because here we swallow those identifiers because they aren't words part of the writing 
        /// system, just metadata.
        /// </remarks>
        internal static void ParseFile(Stream input, string filename, IndexWriter indexWriter,
          MetaDocumentModel metaModel)
        {
          SFMReader sfmReader = new SFMReader(input);
          sfmReader.ReadInitialText();

          string book = "";
          string chapter = "";
          string verse = "";
          for (;;)
          {
            // capture the offset before the tag is consumed
            string start = "" + sfmReader.Offset;
            string tag = sfmReader.ReadNextTag();
            if (tag == null)
            {
              break;
            }

            string text;
            UpdateReference(sfmReader, tag, ref book, ref chapter, ref verse, out text);
            string end = "" + sfmReader.Offset;

            Document document = DocumentHelper.CreateDocument(tag, text, filename, start, end, book, chapter, verse, Guid.NewGuid());
            indexWriter.AddDocument(document);
            metaModel.SetStart(filename, long.Parse(start), 0);
            metaModel.SetEnd(filename, long.Parse(end), 0);
          }
          input.Close();
          // persist the updated offset metadata as the index's single meta document
          indexWriter.UpdateDocument(new Term(FragmentMovementTracker.ID, FragmentMovementTracker.ID), metaModel.GetDocument());
        }

        /// <summary>
        /// Parses a single replaced document's text and writes it back into the
        /// index under its original id, keeping the original raw start/end
        /// offsets and recording the length change in the meta model.
        /// </summary>
        /// <param name="diff">newDocument.Length - oldDocument.Length</param>
        private static void ParseDocument(string inputtext, IndexWriter writer, MetaDocumentModel model,
        Guid id, string filename, string book, string chapter, string verse, long rawStart, long rawEnd, int diff)
        {
            if (filename == null)
            {
                throw new ArgumentNullException("filename");
            }
            string start = rawStart.ToString();
            string end = rawEnd.ToString();

            using (Stream input = new MemoryStream(Encoding.UTF8.GetBytes(inputtext)))
            {
                SFMReader sfmReader = new SFMReader(input);
                sfmReader.ReadInitialText();

                for (;;)
                {
                    string tag = sfmReader.ReadNextTag();
                    if (tag == null)
                    {
                        break;
                    }

                    string text;
                    UpdateReference(sfmReader, tag, ref book, ref chapter, ref verse, out text);

                    // re-use the existing document id so the old version is replaced
                    Document document = DocumentHelper.CreateDocument(tag, text, filename, start, end, book, chapter, verse, id);
                    writer.UpdateDocument(new Term(DocumentHelper.ID, id.ToString()), document);

                    model.AddOffset(filename, long.Parse(start), diff);
                }
                input.Close();
            }
            // persist the updated offset metadata as the index's single meta document
            writer.UpdateDocument(new Term(FragmentMovementTracker.ID, FragmentMovementTracker.ID), model.GetDocument());
        }

      /// <summary>
      /// Reads the text following an SFM tag, updating the running
      /// book/chapter/verse reference when the tag is 'id', 'c' or 'v'
      /// (whose leading identifiers are split off from the text).
      /// </summary>
      private static void UpdateReference(SFMReader reader, string tag, ref string book, ref string chapter, ref string verse, out string text)
      {
        switch (tag)
        {
          case "id":
            book = ReadIdentifier(reader, out text, false);
            break;
          case "c":
            chapter = ReadIdentifier(reader, out text, true);
            break;
          case "v":
            verse = ReadIdentifier(reader, out text, true);
            break;
          default:
            text = reader.ReadNextText();
            break;
        }
      }

      /// <summary>
      /// Reads the text after an identifying tag ('id', 'c', 'v') and splits it
      /// into the identifier (returned) and the remaining text (out param).
      /// </summary>
      /// <param name="includeFirstLine">when true, the rest of the identifier's
      /// line is part of the text; when false, only the following lines are</param>
      private static string ReadIdentifier(SFMReader reader, out string text, bool includeFirstLine)
      {
        string tmp = reader.ReadNextText();
        Match match = s_idSplitter.Match(tmp);
        if (!match.Success)
        {
          throw new InvalidDataException(string.Format("Data is not normal: '{0}'", tmp));
        }
        text = includeFirstLine
                 ? match.Groups["text"].Value
                 : match.Groups["otherlines"].Value;
        return match.Groups["id"].Value;
      }

      // splits the text following an identifying tag into the identifier token
      // ('id' group), the remainder of its first line ('text') and any
      // following lines ('otherlines'); consumed by ReadIdentifier
      private static readonly Regex s_idSplitter = new Regex(@"^ ?(?<id>[\w,-]+) ?(?<text>.*?[\r\n]{0,2}(?<otherlines>.*))",
                                            RegexOptions.Compiled | RegexOptions.Multiline);
      


        /// <summary>
        /// Produces the text of the document containing the current span with
        /// <paramref name="word"/> replaced by <paramref name="newWord"/>,
        /// copying the file content before the document to
        /// <paramref name="writer"/> on the way.
        /// </summary>
        /// <returns>the new document text, including its '\tag ' header</returns>
        private static string ReplaceDocumentWithNewDocument(string word, string newWord, Spans spans, 
          Guid docId,
          long startOfDocInFile,
          long endOfDocInFile,
          string tag,
          IDictionary<string, int> offsetOfLuceneDocumentInFile, string fileName, StreamReader reader, 
            TextWriter writer, Guid lastId, ref int diffWithinDoc, string newDoc, 
            int diff)
        {
            if (lastId == docId)
            {
                // another occurrence in the same document: newDoc already holds
                // the partially-replaced text, so replace within it (stripping
                // its '\tag ' header, which is tag.Length + 2 characters)
                // rather than re-reading the document from the stream
                WriteCharactersFromStream(writer, reader,
                    startOfDocInFile //as far as lucene knows
                    + offsetOfLuceneDocumentInFile[fileName] //from our offset meta data thing
                    - diffWithinDoc); // compensate with uncomitted movement of this word in this doc 
                string document = ReplaceWordInDocument(spans, newDoc.Substring(2 + tag.Length), newWord, word, tag);
                newDoc = '\\' + tag + ' ' + document;
                // skip over the original document text in the source stream; its
                // content is superseded by newDoc (length adjusted by diff)
                SkipCharactersInReader(reader, newDoc.Length - diff);
            }
            else
            {
                // first occurrence in this document
                diffWithinDoc = 0;
                //first copy from the old file to the new file, up to the start of the document (e.g. verse) we want to change
                WriteCharactersFromStream(writer, reader, startOfDocInFile + offsetOfLuceneDocumentInFile[fileName]);
                newDoc = GetDocumentWithReplacedWord(word, 
                                                    newWord, 
                                                    reader, 
                                                    tag,
                                                    (int)(endOfDocInFile - startOfDocInFile),
                                                    spans);
            }
            return newDoc;
        }

        /// <summary>
        /// Consumes and discards exactly <paramref name="size"/> characters
        /// from the reader; throws if the stream ends first.
        /// </summary>
        private static void SkipCharactersInReader(StreamReader reader, long size)
        {
            long charsRead = 0;
            char[] buffer = new char[1024]; // hoisted: was re-allocated each pass
            while(charsRead < size)
            {
                if(reader.EndOfStream)
                    throw new InvalidOperationException("reached end of stream");
                // FIX: was Math.Min(size, buffer.Length), which ignored how much
                // had already been read and overshot by up to a full buffer
                // whenever size > buffer.Length
                charsRead += reader.Read(buffer, 0, (int) Math.Min(size - charsRead, buffer.Length));
            }
        }

        /// <summary>
        /// Writes the writer's in-memory buffer to <paramref name="filename"/>,
        /// replacing any previous content of that file.
        /// </summary>
        private static void FlushWriterToFile(StreamWriter stream, string filename)
        {
            Debug.Assert(stream.BaseStream.CanRead && stream.BaseStream.CanWrite);
            stream.Flush();
            MemoryStream mem = (MemoryStream) stream.BaseStream;
            mem.Seek(0, SeekOrigin.Begin);
            // FIX: File.OpenWrite does not truncate an existing file, so writing
            // a buffer shorter than the previous content left stale bytes at the
            // end of the file; FileMode.Create truncates first
            using(FileStream o = new FileStream(filename, FileMode.Create, FileAccess.Write))
            {
                o.Write(mem.GetBuffer(), 0, (int) mem.Length);
            }
        }

        /// <summary>
        /// Commits all changes made during ReplaceAllWords
        /// </summary>
        /// <param name="changedFiles">maps each rewritten file name to its
        /// accumulated character shift</param>
        private void ReindexAndCleanup(IDictionary<string, int> changedFiles)
        {
            // changed files + opening & saving packinglist
            NotifyTaskStarted(changedFiles.Count);
            foreach (string file in _packingList)
            {
                if (changedFiles.ContainsKey(file))
                {
                    // refresh the stored timestamp for files we rewrote
                    _packingList.UpdateTimestamp(file);
                    changedFiles[file] = int.MinValue; // mark as handled
                }
                // NOTE(review): an early return here skips the backup swap and
                // packing-list save below — presumably intentional (abort), but
                // NotifyTaskCompleted is also skipped; confirm
                if (CancelTaskRequested())
                    return;
            }
            // swap each rewritten '~' backup over its original file
            RemoveBackupsAndGetFileNames(changedFiles.Keys);
           // GetFilesThatWereReindexed(changedFiles, _packingList);
            _packingList.SaveAs(PackingListFileName);
            NotifyTaskCompleted();
        }

        /// <summary>
        /// For each file, copies its '~' backup over the original and then
        /// deletes the backup.
        /// </summary>
        private static void RemoveBackupsAndGetFileNames(IEnumerable<string> names)
        {
            foreach (string name in names)
            {
                string backup = name + '~';
                File.Copy(backup, name, true);
                File.Delete(backup);
            }
        }

        /// <summary>
        /// Reads one document ('\tag ...') from the reader and returns it with
        /// the occurrence of <paramref name="word"/> located by the spans
        /// replaced by <paramref name="newWord"/>.
        /// </summary>
        private static string GetDocumentWithReplacedWord(string word, 
          string newWord, 
          TextReader reader, 
          string tag,
          int originalLength, //doc.End - doc.StartOfDocInFile
          Spans spans)
        {
            string expectedHeader = '\\' + tag + ' ';
            // skip over '\tag ', verifying we are where we think we are.
            // FIX: TextReader.Read may return fewer characters than requested,
            // so loop until the full header has been read instead of assuming
            // one call fills the buffer (a short read used to trigger a
            // spurious corruption failure).
            char[] buffer = new char[expectedHeader.Length];
            int headerCharsRead = 0;
            while (headerCharsRead < expectedHeader.Length)
            {
                int read = reader.Read(buffer, headerCharsRead, expectedHeader.Length - headerCharsRead);
                if (read <= 0)
                    break;
                headerCharsRead += read;
            }
            string foundString = new string(buffer, 0, headerCharsRead);
            if (expectedHeader != foundString)
            {
                // (message also fixed: it was missing the closing quote)
                Debug.Fail(string.Format("Expected '{0}' but got '{1}'", expectedHeader, foundString));
                throw new ApplicationException("SpellCenter either has a serious bug or its data is corrupt.");
            }
            int charCount = originalLength - expectedHeader.Length;
            string docText = GetCharsFromStream(reader, charCount);

            return expectedHeader + ReplaceWordInDocument(spans, docText, newWord, word, tag);
        }

        /// <summary>
        /// Reads up to <paramref name="charCount"/> characters from the reader
        /// and returns them as a string (shorter if the stream ends first).
        /// </summary>
        private static string GetCharsFromStream(TextReader reader, int charCount)
        {
            int charsRead = 0;
            char[] buffer = new char[1024];
            // StringBuilder instead of repeated string concatenation (was O(n^2))
            StringBuilder docText = new StringBuilder(charCount > 0 ? charCount : 0);
            while(charsRead < charCount)
            {
                int read = reader.Read(buffer, 0, Math.Min(charCount - charsRead, buffer.Length));
                // FIX: the original looped forever when the stream ended before
                // charCount characters were available (Read keeps returning 0)
                if (read <= 0)
                    break;
                docText.Append(buffer, 0, read);
                charsRead += read;
            }
            return docText.ToString();
        }

        /// <summary>
        /// Splices <paramref name="newWord"/> into <paramref name="docText"/> at
        /// the character offset of the occurrence located by the spans.
        /// </summary>
        private static string ReplaceWordInDocument(Spans spans, string docText, string newWord, string word, string tag)
        {
            int offset = (int) GetOffsetOfWordWhithinDocumentText(spans, docText, tag);
            string before = docText.Substring(0, offset);
            string after = docText.Substring(offset + word.Length);
            Debug.Assert(docText.Substring(offset, word.Length) == word,
                         string.Format("{0} != {1}, before='{2}' after='{3}'", docText.Substring(offset, word.Length), word, before, after));
            return before + newWord + after;
        }

        /// <summary>
        /// Re-tokenizes <paramref name="docText"/> with the same analyzer used
        /// at index time and advances to the token position reported by
        /// <paramref name="spansContainingWord"/>, returning the character
        /// offset at which that token starts within <paramref name="docText"/>.
        /// </summary>
        private static long GetOffsetOfWordWhithinDocumentText(Spans spansContainingWord, string docText, string tag)
        {
            SfmAnalyzer analyzer = new SfmAnalyzer();
            TokenStream tokens = analyzer.TokenStream("", new StringReader(docText));
            // Verse/chapter/book documents carry an extra token the index ignores,
            // so the span's token position is shifted by that amount.
            int tokensToSkip = spansContainingWord.Start() + IgnoredTokens(tag);
            while (tokensToSkip-- > 0)
            {
                tokens.Next();
            }
            // Step onto the word itself so the analyzer records its start offset.
            tokens.Next();
            return analyzer.StartOfCurrentToken;
        }
        /// <summary>
        /// If a document is a verse, chapter, or book it will have an ignored token.
        /// </summary>
        /// <param name="tag">the SFM tag of the document ("c", "v", "id", ...)</param>
        /// <returns>the number of ignored tokens</returns>
        private static int IgnoredTokens(string tag)
        {
            switch (tag)
            {
                case "c":
                case "v":
                case "id":
                    return 1;
                default:
                    return 0;
            }
        }

        /// <summary>
        /// Copies exactly <paramref name="count"/> characters from
        /// <paramref name="reader"/> to <paramref name="writer"/>.
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// The reader reached end-of-stream before <paramref name="count"/>
        /// characters were copied.
        /// </exception>
        private static void WriteCharactersFromStream(TextWriter writer, StreamReader reader, long count)
        {
            // long, not int: 'count' is a long, and an int accumulator would
            // overflow (and loop forever) for counts beyond int.MaxValue.
            long charsRead = 0;
            char[] buffer = new char[1024]; //sounds like a good number for a buffer size
            while (charsRead < count)
            {
                if (reader.EndOfStream)
                    throw new InvalidOperationException("Tried reading past the end of the file");
                int i = reader.Read(buffer, 0, (int) Math.Min(buffer.Length, count - charsRead));
                writer.Write(buffer, 0, i);
                charsRead += i;
            }
        }
        /// <summary>
        /// Copies every remaining character from <paramref name="reader"/>
        /// to <paramref name="writer"/>.
        /// </summary>
        private static void WriteAllCharactersFromStream(TextWriter writer, StreamReader reader)
        {
            char[] buffer = new char[1024]; //sounds like a good number for a buffer size
            // NOTE: the old past-end guard inside this loop was unreachable —
            // the loop condition already guarantees !reader.EndOfStream.
            while (!reader.EndOfStream)
            {
                int i = reader.Read(buffer, 0, buffer.Length);
                writer.Write(buffer, 0, i);
            }
        }

        #endregion

        /// <summary>
        /// A faster way of deleting multiple documents as this way we 
        /// only Optimize after deleting all documents. 
        /// Optimizing can take ~500ms
        /// </summary>
        /// <param name="files">names of the files whose documents are deleted</param>
        private void RemoveFromIndex(IEnumerable<string> files)
        {
            // Every index access in this class is serialized on _filesInCorpus,
            // the one object common to all accesses of this index location.
            lock (_filesInCorpus)
            {
                IndexWriter writer = new IndexWriter(Location, new SfmAnalyzer(), false);
                try
                {
                    foreach (string file in files)
                    {
                        writer.DeleteDocuments(new Term(DocumentHelper.FILENAME, file));
                    }
                }
                finally
                {
                    // Flush + Optimize + Close makes sure the deletions have
                    // actually been committed to the index.
                    writer.Flush();
                    writer.Optimize();
                    writer.Close();
                }
            }
        }

        /// <summary>
        /// The files currently recorded in the packing list (i.e. currently in
        /// the index), returned as a defensive copy.
        /// </summary>
        public IList<string> Files
        {
            get
            {
                List<string> copy = new List<string>(_packingList);
                return copy;
            }
        }

        private static string PackingListFileName;


        #region Update


        /// <summary>
        /// Brings the index in line with the corpus file list: removes
        /// documents for files no longer in the corpus, re-indexes files whose
        /// packing-list entries differ, adds new files, then persists the new
        /// packing list. Returns early (changing nothing) if cancel was
        /// requested before any index mutation began.
        /// </summary>
        public void Update()
        {
          IEnumerable<string> filesToRemoveFromIndex;
          IEnumerable<string> filesToUpdateInIndex;
          IEnumerable<string> filesToAddToIndex;
          PackingList newPackingList = new PackingList(this._filesInCorpus.Files);
          PackingList.Compare(newPackingList, 
                              this._packingList, 
                              out filesToAddToIndex, 
                              out filesToRemoveFromIndex, 
                              out filesToUpdateInIndex);

          if (CancelTaskRequested())
          {
            return;
          }

          // don't allow cancel between these lines 
          // since it would leave the index in an invalid state
          RemoveFromIndex(filesToRemoveFromIndex);
          {//todo use updater
            // Materialize once: this sequence is enumerated twice (remove,
            // then re-add), and a deferred enumerable could be re-evaluated
            // to a different result between the two passes.
            List<string> filesToReindex = new List<string>(filesToUpdateInIndex);
            RemoveFromIndex(filesToReindex);
            AddFiles(filesToReindex);
          }
          AddFiles(filesToAddToIndex);
          _packingList = newPackingList;
          _packingList.SaveAs(PackingListFileName);
          // don't allow cancel until here
          NotifyTaskCompleted();
        }

      
      #endregion

        #region IndexFile

     /// <summary>
     /// The monitor this index reports task progress and cancellation through.
     /// </summary>
     public ITaskMonitor TaskMonitor
      {
        get { return _taskMonitor; }
      }

      /// <summary>
      /// The writing-system identifier of the corpus being indexed.
      /// </summary>
      public string WritingSystemId
      {
        get { return _filesInCorpus.WritingSystemId; }
      }

      /// <summary>
      /// Parses each of the given files and adds its documents to the index.
      /// </summary>
      /// <param name="files">paths of the corpus files to index</param>
      private void AddFiles(IEnumerable<string> files)
        {
            lock(_filesInCorpus)
            {
              FSDirectory directory = FSDirectory.GetDirectory(Location);
              // This creates a new index with a 'segments' file if it doesn't exist.
              //  It also throws an IOException if it is corrupted.
              IndexReader reader = IndexReader.Open(directory);
              MetaDocumentModel metaModel;
              try
              {
                metaModel = MetaDocumentModel.GetFromMeta(FragmentMovementTracker.GetDocument(reader));
              }
              finally
              {
                // Release the reader even if reading the meta document throws.
                reader.Close();
              }
              IndexWriter indexWriter = new IndexWriter(directory, new SfmAnalyzer());
              try
              {
                foreach (string file in files)
                {
                  // 'using' disposes each FileStream; the original leaked the
                  // stream returned by File.OpenRead.
                  using (FileStream stream = File.OpenRead(file))
                  {
                    ParseFile(stream, file, indexWriter, metaModel);
                  }
                }
              }
              finally
              {
                // Mirror RemoveFromIndex: always commit and release the index,
                // even when parsing a file throws.
                indexWriter.Optimize();
                indexWriter.Close();
                directory.Close();
              }
            }
        }

        #endregion

    }

}