﻿using System;
using System.Collections.Generic;
using System.Text;
using System.ComponentModel;
using System.Xml.Serialization;

using ShootSearch.Core;
using ShootSearch.Util;

using Lucene.Net.Documents;


namespace ShootSearch.Indexers
{
    [TypeConverter(typeof(Util.ExpandableConverter<LuceneIndexerConfig>))]
    public class LuceneIndexerConfig : SpiderManConfig
    {
        /// <summary>
        /// Directory in which the Lucene index is stored.
        /// </summary>
        [CategoryAttribute("Lucene Configuration"),
            DescriptionAttribute("The index storage directory")]
        public string IndexPath { get; set; }

        /// <summary>
        /// true, always create a new index; false, append to the existing index.
        /// </summary>
        [CategoryAttribute("Lucene Configuration"),
                DescriptionAttribute("true, always create a new index; false, append to the existing index")]
        public bool CreateIndex { get; set; }

        /// <summary>
        /// Analyzer used by Lucene; defaults to StandardAnalyzer (see the
        /// constructor). The concrete type is resolved from this assembly
        /// configuration.
        /// </summary>
        [CategoryAttribute("Lucene Configuration"),
                DescriptionAttribute("Analyzer configuration")]
        public AssemblyConfig Analyzer { get; set; }

        private int mergeFactor = 2;

        /// <summary>
        /// Lucene.Net.Index.IndexWriter.MergeFactor, default 2.
        /// Lucene requires a merge factor of at least 2, so values outside
        /// [2, 10] are silently ignored and the previous value is kept;
        /// bad PropertyGrid/XML input therefore cannot produce a
        /// configuration Lucene would reject.
        /// </summary>
        [CategoryAttribute("Lucene Configuration"),
                DescriptionAttribute("MergeFactor configuration, default 2, 2<=MergeFactor<=10")]
        public int MergeFactor
        {
            get { return mergeFactor; }
            set
            {
                if (value >= 2 && value <= 10)
                {
                    mergeFactor = value;
                }
            }
        }

        /// <summary>
        /// Lucene.Net.Index.IndexWriter.MaxMergeDocs, 0 to ignore the setting.
        /// </summary>
        [CategoryAttribute("Lucene Configuration"),
            DescriptionAttribute("MaxMergeDocs configuration, 0 to ignored")]
        public int MaxMergeDocs { get; set; }

        /// <summary>
        /// Lucene.Net.Index.IndexWriter.MaxBufferedDocs, 0 to ignore the setting.
        /// </summary>
        [CategoryAttribute("Lucene Configuration"),
                DescriptionAttribute("MaxBufferedDocs configuration, 0 to ignored")]
        public int MaxBufferedDocs { get; set; }

        /// <summary>
        /// The attributes (columns) that should be indexed.
        /// </summary>
        public List<LuceneIndexerAttribute> InterestedAttributes { get; set; }

        public LuceneIndexerConfig()
        {
            InterestedAttributes = new List<LuceneIndexerAttribute>();
            CreateIndex = false;
            Analyzer = new AssemblyConfig(typeof(Lucene.Net.Analysis.Standard.StandardAnalyzer));
            MergeFactor = 2;
        }

        #region LuceneIndexerAttribute
        /// <summary>
        /// One attribute (column) to index, together with the Lucene
        /// storage, indexing and term-vector options to use for its field.
        /// </summary>
        [TypeConverter(typeof(Util.ExpandableConverter<LuceneIndexerAttribute>))]
        public class LuceneIndexerAttribute : IndexerAttribute//, IXmlSerializable
        {
            private FieldStore store;
            private FieldIndex index;
            private FieldTermVector termVector;

            /// <summary>
            /// How the field value is stored; setting this also refreshes
            /// <see cref="LuceneFieldStore"/>.
            /// </summary>
            public FieldStore Store
            {
                get { return store; }
                set
                {
                    this.store = value;
                    LuceneFieldStore = storeDict[value];
                }
            }

            /// <summary>
            /// How the field value is indexed; setting this also refreshes
            /// <see cref="LuceneFieldIndex"/>.
            /// </summary>
            public FieldIndex Index
            {
                get { return index; }
                set
                {
                    this.index = value;
                    LuceneFieldIndex = indexDict[value];
                }
            }

            /// <summary>
            /// Term-vector option for the field; setting this also refreshes
            /// <see cref="LuceneFieldTermVector"/>.
            /// </summary>
            public FieldTermVector TermVector
            {
                get { return termVector; }
                set
                {
                    this.termVector = value;
                    LuceneFieldTermVector = termVectorDict[value];
                }
            }

            // Lucene-native counterparts of the serializable enum properties
            // above. They are derived state, so they are hidden from both
            // XML serialization and the PropertyGrid.
            [XmlIgnore]
            [BrowsableAttribute(false)]
            public Field.Store LuceneFieldStore { get; protected set; }
            [XmlIgnore]
            [BrowsableAttribute(false)]
            public Field.Index LuceneFieldIndex { get; protected set; }
            [XmlIgnore]
            [BrowsableAttribute(false)]
            public Field.TermVector LuceneFieldTermVector { get; protected set; }

            #region Static Serializable Helper
            // Maps from the XML-serializable enums to Lucene's constants.
            // Initialized inline; the previous static constructor's Clear()
            // calls on freshly created dictionaries were redundant.
            protected static Dictionary<FieldStore, Field.Store> storeDict =
                new Dictionary<FieldStore, Field.Store>
                {
                    { FieldStore.YES, Field.Store.YES },
                    { FieldStore.NO, Field.Store.NO },
                    { FieldStore.COMPRESS, Field.Store.COMPRESS },
                };

            protected static Dictionary<FieldIndex, Field.Index> indexDict =
                new Dictionary<FieldIndex, Field.Index>
                {
                    { FieldIndex.ANALYZED, Field.Index.ANALYZED },
                    { FieldIndex.ANALYZED_NO_NORMS, Field.Index.ANALYZED_NO_NORMS },
                    { FieldIndex.NO, Field.Index.NO },
                    { FieldIndex.NOT_ANALYZED, Field.Index.NOT_ANALYZED },
                    { FieldIndex.NOT_ANALYZED_NO_NORMS, Field.Index.NOT_ANALYZED_NO_NORMS },

                    // Deprecated aliases, kept so configs written with the
                    // old Lucene names still deserialize correctly.
                    { FieldIndex.NO_NORMS, Field.Index.NOT_ANALYZED_NO_NORMS },
                    { FieldIndex.TOKENIZED, Field.Index.ANALYZED },
                    { FieldIndex.UN_TOKENIZED, Field.Index.NOT_ANALYZED },
                };

            protected static Dictionary<FieldTermVector, Field.TermVector> termVectorDict =
                new Dictionary<FieldTermVector, Field.TermVector>
                {
                    { FieldTermVector.YES, Field.TermVector.YES },
                    { FieldTermVector.NO, Field.TermVector.NO },
                    { FieldTermVector.WITH_OFFSETS, Field.TermVector.WITH_OFFSETS },
                    { FieldTermVector.WITH_POSITIONS, Field.TermVector.WITH_POSITIONS },
                    { FieldTermVector.WITH_POSITIONS_OFFSETS, Field.TermVector.WITH_POSITIONS_OFFSETS },
                };
            #endregion

            public LuceneIndexerAttribute()
                : base()
            {
                SetDefaults();
            }

            public LuceneIndexerAttribute(string name)
                : base(name)
            {
                SetDefaults();
            }

            public LuceneIndexerAttribute(string name, FieldStore store, FieldIndex index, FieldTermVector termVector)
                : base(name)
            {
                Store = store;
                Index = index;
                TermVector = termVector;
            }

            // Assign the defaults through the properties so the derived
            // LuceneField* values are populated too. The original code set
            // the backing fields directly in their initializers, which left
            // LuceneFieldStore/Index/TermVector at their CLR defaults unless
            // a setter happened to run (e.g. never, when the XML being
            // deserialized omits the corresponding elements).
            private void SetDefaults()
            {
                Store = FieldStore.YES;
                Index = FieldIndex.ANALYZED;
                TermVector = FieldTermVector.YES;
            }

            /// <summary>
            /// Creates the Lucene field for this attribute carrying the
            /// given value, using the configured store/index/term-vector
            /// options. The field name is this attribute's ColumnName.
            /// </summary>
            public Field GetField(string value)
            {
                return new Field(ColumnName, value, LuceneFieldStore, LuceneFieldIndex, LuceneFieldTermVector);
            }
        }
        #endregion

        #region FieldStore
        /// <summary>
        /// Specifies whether and how a field should be stored.
        /// XML-serializable counterpart of Lucene's Field.Store options;
        /// LuceneIndexerAttribute maps between the two. Do not reorder the
        /// members: the enum is [Serializable] and the underlying values
        /// would change.
        /// </summary>
        [Serializable]
        public enum FieldStore
        {
            /// <summary>
            /// Store the original field value in the index in a compressed form. This is
            /// useful for long documents and for binary valued fields.
            /// </summary>
            COMPRESS,

            /// <summary>
            /// Do not store the field value in the index.
            /// </summary>
            NO,

            /// <summary>
            /// Store the original field value in the index. This is useful for short texts
            /// like a document's title which should be displayed with the results. The value
            /// is stored in its original form, i.e. no analyzer is used before it is stored.
            /// </summary>
            YES
        } 
        #endregion
        
        #region FieldIndex
        /// <summary>
        /// Specifies whether and how a field should be indexed.
        /// XML-serializable counterpart of Lucene's Field.Index options;
        /// LuceneIndexerAttribute maps between the two. Do not reorder the
        /// members: the enum is [Serializable] and the underlying values
        /// would change.
        /// </summary>
        [Serializable]
        public enum FieldIndex
        {
            /// <summary>
            /// Index the tokens produced by running the field's value through an Analyzer.
            /// This is useful for common text.
            /// </summary>
            ANALYZED,

            /// <summary>
            /// Expert: Index the tokens produced by running the field's value through an
            /// Analyzer, and also separately disable the storing of norms. See
            /// NOT_ANALYZED_NO_NORMS for what norms are and why you may want to
            /// disable them.
            /// </summary>
            ANALYZED_NO_NORMS,

            /// <summary>
            /// Do not index the field value. This field can thus not be searched, but one
            /// can still access its contents provided it is stored (see FieldStore).
            /// </summary>    
            NO,

            /// <summary>
            /// Index the field's value without using an Analyzer, so it can be searched.
            /// As no analyzer is used the value will be stored as a single term. This is
            /// useful for unique Ids like product numbers.
            /// </summary>            
            NOT_ANALYZED,

            /// <summary>
            /// Expert: Index the field's value without an Analyzer, and also disable the
            /// storing of norms. Note that you can also separately enable/disable norms
            /// by calling Field.SetOmitNorms. No norms means that index-time field
            /// and document boosting and field length normalization are disabled. The benefit
            /// is less memory usage as norms take up one byte of RAM per indexed field for
            /// every document in the index, during searching. Note that once you index a
            /// given field with norms enabled, disabling norms will have no effect. In other
            /// words, for this to have the above described effect on a field, all instances
            /// of that field must be indexed with NOT_ANALYZED_NO_NORMS from the beginning.
            /// </summary>
            NOT_ANALYZED_NO_NORMS,

            /// <summary>
            /// Deprecated Lucene name for NOT_ANALYZED_NO_NORMS; kept so older
            /// configurations still deserialize.
            /// </summary>
            NO_NORMS,  
          
            /// <summary>
            /// Deprecated Lucene name for ANALYZED; kept so older
            /// configurations still deserialize.
            /// </summary>
            TOKENIZED,

            /// <summary>
            /// Deprecated Lucene name for NOT_ANALYZED; kept so older
            /// configurations still deserialize.
            /// </summary>
            UN_TOKENIZED,
        }

       
        #endregion

        #region FieldTermVector
        /// <summary>
        /// Specifies whether and how a field should have term vectors.
        /// XML-serializable counterpart of Lucene's Field.TermVector options;
        /// LuceneIndexerAttribute maps between the two. Do not reorder the
        /// members: the enum is [Serializable] and the underlying values
        /// would change.
        /// </summary>
        [Serializable]
        public enum FieldTermVector
        {
            /// <summary>
            /// Do not store term vectors.
            /// </summary>  
            NO,

            /// <summary>
            /// Store the term vector + token offset information.
            /// </summary>
            WITH_OFFSETS,

            /// <summary>
            /// Store the term vector + token position information.
            /// </summary>
            WITH_POSITIONS,

            /// <summary>
            /// Store the term vector + token position and offset information.
            /// </summary>
            WITH_POSITIONS_OFFSETS,

            /// <summary>
            /// Store the term vectors of each document. A term vector is a list of the document's
            /// terms and their number of occurrences in that document.
            /// </summary>
            YES
        } 
        #endregion

    }
}
