/**
 * Copyright 2006 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.ocean.main.fields;

import java.io.IOException;
import java.io.Reader;
import java.text.ParseException;
import java.util.Map;
import java.util.logging.Logger;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.builder.ReflectionToStringBuilder;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.ocean.ClusterException;
import org.apache.ocean.ClusterUtil;
import org.apache.ocean.analysis.SolrAnalyzer;
import org.apache.ocean.main.Index.IndexSnapshot;
import org.apache.ocean.main.fields.SchemaField.Attribute;
import org.apache.ocean.main.search.cache.FieldCacheKey.FieldIndex;

/**
 * Base class for all field types used by an index schema.
 *
 * <p>A field type defines the conversions between a field value's external
 * (user-facing), internal (stored), and indexed representations, and supplies
 * the analyzers used at index and query time.</p>
 */
public abstract class FieldType {
	public static final Logger log = Logger.getLogger(FieldType.class.getName());

	// Schema-declared name of this field type; set via setTypeName().
	protected String typeName;

	// Whether values are tokenized when indexed; null is treated as false
	// (see isTokenized(), which funnels through ClusterUtil.getBoolean).
	public Boolean tokenized;

	// Index-time analyzer; defaults to a single-verbatim-token analyzer
	// capped at 256 chars. Query-time analyzer defaults to the same instance.
	protected Analyzer analyzer = new DefaultAnalyzer(256);
	protected Analyzer queryAnalyzer = analyzer;

	/** @return the schema-declared name of this field type */
	public String getName() {
		return typeName;
	}

	/** @return the Java class of the object form of values of this type */
	public abstract Class getType();

	/**
	 * Creates the field-cache structure used to access this field's values.
	 *
	 * @param field         name of the field to build the cache for
	 * @param indexSnapshot snapshot of the index to read values from
	 * @throws Exception if the cache cannot be built
	 */
	public abstract FieldIndex createFieldIndex(String field, IndexSnapshot indexSnapshot) throws Exception;

	/**
	 * Resolves the locale from the given type arguments.
	 *
	 * <p>NOTE(review): {@code ULocale} (presumably {@code com.ibm.icu.util.ULocale})
	 * has no import visible in this file — confirm it is resolved elsewhere.</p>
	 *
	 * @param args schema type arguments; the "locale" entry is consulted
	 * @return the configured locale, or en_US when the entry is absent or blank
	 */
	public static ULocale getLocale(Map<String, String> args) {
		String localeString = args.get("locale");
		if (StringUtils.isNotBlank(localeString)) {
			return new ULocale(localeString);
		}
		return new ULocale("en_US");
	}

	public void setTypeName(String typeName) {
		this.typeName = typeName;
	}

	@Override
	public String toString() {
		return ReflectionToStringBuilder.toString(this);
	}

	/** @return true if values of this type are tokenized; a null {@link #tokenized} counts as false */
	public boolean isTokenized() {
		return ClusterUtil.getBoolean(tokenized);
	}

	/**
	 * Used for adding a document when a field needs to be created from a type and
	 * a string.
	 *
	 * <p>
	 * By default, the indexed value is the same as the stored value (taken from
	 * toInternal()). Having a different representation for external, internal,
	 * and indexed would present quite a few problems given the current Lucene
	 * architecture. An analyzer for adding docs would need to translate
	 * internal-&gt;indexed while an analyzer for querying would need to translate
	 * external-&gt;indexed.
	 * </p>
	 * <p>
	 * The only other alternative to having internal==indexed would be to have
	 * internal==external. In this case, toInternal should convert to the indexed
	 * representation, toExternal() should do nothing, and createField() should
	 * *not* call toInternal, but use the external value and set tokenized=true to
	 * get Lucene to convert to the internal(indexed) form.
	 * </p>
	 *
	 * @param schemaField schema definition controlling storage/indexing/term-vector flags
	 * @param externalVal external string value to convert and index
	 * @param boost       index-time boost applied to the created field
	 * @return the created field, or null when toInternal() maps the value to null
	 * @throws ClusterException if the value cannot be converted to internal form
	 * @see #toInternal
	 */
	public Fieldable createField(SchemaField schemaField, String externalVal, float boost) throws ClusterException {
		String val;
		try {
			val = toInternal(externalVal);
		} catch (NumberFormatException numberFormatException) {
			throw new ClusterException("Error while creating field '" + schemaField + "' from value '" + externalVal + "'", numberFormatException);
		}
		if (val == null) {
			return null;
		}
		Field field = new Field(schemaField.getName(), val,
				schemaField.has(Attribute.STORED) ? Field.Store.YES : Field.Store.NO,
				schemaField.has(Attribute.INDEXED) ? (isTokenized() ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED) : Field.Index.NO,
				termVectorFor(schemaField));
		field.setOmitNorms(schemaField.has(Attribute.OMITNORMS));
		field.setBoost(boost);
		return field;
	}

	// Maps the field's term-vector attributes to the matching Lucene option.
	// Positions+offsets win over either alone; the bare TERMVECTOR flag is the
	// weakest request; no flags means no term vectors.
	private static Field.TermVector termVectorFor(SchemaField schemaField) {
		boolean positions = schemaField.has(Attribute.TERMPOSITIONS);
		boolean offsets = schemaField.has(Attribute.TERMOFFSETS);
		if (positions && offsets)
			return Field.TermVector.WITH_POSITIONS_OFFSETS;
		if (positions)
			return Field.TermVector.WITH_POSITIONS;
		if (offsets)
			return Field.TermVector.WITH_OFFSETS;
		if (schemaField.has(Attribute.TERMVECTOR))
			return Field.TermVector.YES;
		return Field.TermVector.NO;
	}

	/**
	 * Parses an external string value into this type's object form.
	 *
	 * @throws ParseException if the string cannot be parsed
	 */
	public abstract Object stringToObject(String string) throws ParseException;

	/** Optional operation: creates a payload buffer for this type. */
	public byte[] createPayloadBuffer() {
		throw new UnsupportedOperationException("createPayloadBuffer is not supported by field type " + typeName);
	}

	/** Optional operation: serializes this type's object form to bytes. */
	public byte[] objectToBytes(Object object) {
		throw new UnsupportedOperationException("objectToBytes is not supported by field type " + typeName);
	}

	/** Optional operation: deserializes bytes back into this type's object form. */
	public Object bytesToObject(byte[] bytes) {
		throw new UnsupportedOperationException("bytesToObject is not supported by field type " + typeName);
	}

	/**
	 * Converts an external value to the internal (stored/indexed) form.
	 * Identity by default; subclasses override for types whose internal
	 * representation differs.
	 */
	public String toInternal(String val) {
		return val;
	}

	/**
	 * Converts an indexed term back to this type's object form by going
	 * through the readable representation.
	 *
	 * @throws ParseException if the readable form cannot be parsed
	 */
	public Object indexedToObject(String string) throws ParseException {
		String readable = indexedToReadable(string);
		return stringToObject(readable);
	}

	/** Converts this type's object form to its external string representation. */
	public abstract String objectToExternal(Object object);

	/** Converts this type's object form to its indexed term representation. */
	public abstract String objectToIndexed(Object object);

	/** Converts a stored field to its external representation; the raw stored string by default. */
	public String toExternal(Fieldable fieldable) {
		return fieldable.stringValue();
	}

	/** Converts an indexed term to a human-readable form; identity by default. */
	public String indexedToReadable(String indexedForm) {
		return indexedForm;
	}

	/** Converts a stored field to a human-readable form; same as toExternal() by default. */
	public String storedToReadable(Fieldable fieldable) {
		return toExternal(fieldable);
	}

	/** Converts a stored field to its indexed term form; the raw stored string by default. */
	public String storedToIndexed(Fieldable fieldable) {
		return fieldable.stringValue();
	}

	/**
	 * Default analyzer for types that only produce 1 verbatim token... A maximum
	 * size of chars to be read must be specified.
	 *
	 * <p>NOTE(review): a single Reader.read() call may return fewer characters
	 * than are available, which would split the value across multiple tokens —
	 * presumably callers only feed short in-memory values; confirm.</p>
	 */
	public class DefaultAnalyzer extends SolrAnalyzer {
		final int maxChars;

		DefaultAnalyzer(int maxChars) {
			this.maxChars = maxChars;
		}

		@Override
		public TokenStream tokenStream(String fieldName, Reader reader) {
			return new Tokenizer(reader) {
				final char[] chars = new char[maxChars];

				// Emits one token per read() chunk, run through toInternal().
				public Token next() throws IOException {
					int n = input.read(chars, 0, maxChars);
					if (n <= 0) {
						return null;
					}
					String string = toInternal(new String(chars, 0, n));
					return new Token(string, 0, n);
				}
			};
		}
	}

	/** @return the index-time analyzer */
	public Analyzer getAnalyzer() {
		return analyzer;
	}

	/** @return the query-time analyzer */
	public Analyzer getQueryAnalyzer() {
		return queryAnalyzer;
	}

	public void setAnalyzer(Analyzer analyzer) {
		this.analyzer = analyzer;
		log.finest("FieldType: " + typeName + ".setAnalyzer(" + analyzer.getClass().getName() + ")");
	}

	public void setQueryAnalyzer(Analyzer analyzer) {
		this.queryAnalyzer = analyzer;
		log.finest("FieldType: " + typeName + ".setQueryAnalyzer(" + analyzer.getClass().getName() + ")");
	}
}
