﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Lucene.Net.Documents;

namespace ObjectSearch
{
    /// <summary>
    /// Controls whether (and in what form) a field's original value is kept
    /// in the full text index alongside its indexed terms.
    /// </summary>
    public enum StorageOptions
    {
        /// <summary>
        /// The field value is not kept in the index.
        /// </summary>
        NotStored = 0,

        /// <summary>
        /// The original field value is kept verbatim — no analyzer is applied
        /// before it is stored. Suited to short texts, such as a document's
        /// title, that should be displayed alongside search results.
        /// </summary>
        Stored = 1,

        /// <summary>
        /// The original field value is kept in compressed form. Suited to
        /// long documents and to binary-valued fields.
        /// </summary>
        Compressed = 2
    }

    /// <summary>
    /// Controls whether a field's value is indexed, and whether an Analyzer
    /// is applied to it before its terms are written to the index.
    /// </summary>
    public enum IndexOptions
    {
        /// <summary>
        /// The value is not indexed and therefore cannot be searched; its
        /// contents remain accessible if the field was stored.
        /// </summary>
        NotIndexed = 0,

        /// <summary>
        /// The value is passed through an Analyzer — tokenized and possibly
        /// further normalized — before its terms are indexed, making it
        /// searchable. The usual choice for common text.
        /// </summary>
        Tokenized = 1,

        /// <summary>
        /// The value is indexed as a single term, with no Analyzer applied,
        /// making it searchable as-is. The usual choice for unique ids such
        /// as product numbers.
        /// </summary>
        Untokenized = 2,

        /// <summary>
        /// Like <see cref="Untokenized"/>, but the storing of norms is
        /// disabled, which turns off index-time boosting and field length
        /// normalization. The benefit is lower memory usage: norms take up
        /// one byte per indexed field for every document in the index.
        /// </summary>
        NotOptimized = 3
    }


    /// <summary>
    /// Controls whether — and with what level of detail — term vectors are
    /// recorded for a field.
    /// </summary>
    public enum TermVectorOptions
    {
        /// <summary>
        /// No term vectors are recorded.
        /// </summary>
        NotStored = 0,

        /// <summary>
        /// Each document's term vector — the list of its terms together with
        /// each term's number of occurrences in that document — is recorded.
        /// </summary>
        Stored = 1,

        /// <summary>
        /// The term vector plus token position information is recorded.
        /// </summary>
        StoreWithPositions = 2,

        /// <summary>
        /// The term vector plus token offset information is recorded.
        /// </summary>
        StoreWithOffsets = 3,

        /// <summary>
        /// The term vector plus both token position and offset information
        /// is recorded.
        /// </summary>
        StoreWithPositionsAndOffsets = 4
    }

    /// <summary>
    /// Specifies whether and how a field is used to sort search results.
    /// </summary>
    public enum SortOptions
    {
        /// <summary>
        /// The search results are returned to you by ranking.
        /// </summary>
        None = 0,

        /// <summary>
        /// This column is sorted when returning search results.
        /// </summary>
        Sort = 1,

        /// <summary>
        /// This column is sorted by descending when returning search results.
        /// </summary>
        SortDescending = 2
    }
}
