package org.apache.cassandra.cql;

import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognitionException;
import org.antlr.runtime.TokenStream;
import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.cli.CliUtils;
import org.apache.cassandra.concurrent.Stage;
import org.apache.cassandra.concurrent.StageManager;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.config.ConfigurationException;
import org.apache.cassandra.config.KSMetaData;
import org.apache.cassandra.config.Schema;
import org.apache.cassandra.db.CounterColumn;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.IMutation;
import org.apache.cassandra.db.RangeSliceCommand;
import org.apache.cassandra.db.ReadCommand;
import org.apache.cassandra.db.Row;
import org.apache.cassandra.db.SliceByNamesReadCommand;
import org.apache.cassandra.db.SliceFromReadCommand;
import org.apache.cassandra.db.Table;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.AsciiType;
import org.apache.cassandra.db.marshal.MarshalException;
import org.apache.cassandra.db.marshal.TypeParser;
import org.apache.cassandra.db.migration.AddColumnFamily;
import org.apache.cassandra.db.migration.AddKeyspace;
import org.apache.cassandra.db.migration.DropColumnFamily;
import org.apache.cassandra.db.migration.DropKeyspace;
import org.apache.cassandra.db.migration.Migration;
import org.apache.cassandra.db.migration.UpdateColumnFamily;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.dht.Bounds;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.RandomPartitioner;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnDef;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.CqlMetadata;
import org.apache.cassandra.thrift.CqlResult;
import org.apache.cassandra.thrift.CqlResultType;
import org.apache.cassandra.thrift.CqlRow;
import org.apache.cassandra.thrift.IndexClause;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexOperator;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.thrift.KsDef;
import org.apache.cassandra.thrift.SchemaDisagreementException;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.ThriftValidation;
import org.apache.cassandra.thrift.TimedOutException;
import org.apache.cassandra.thrift.UnavailableException;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import cn.edu.thu.laud.db.marshal.LaUDFileType;
import cn.edu.thu.laud.db.marshal.LaUDHblobType;
import cn.edu.thu.laud.db.marshal.LaUDLblobType;
import cn.edu.thu.laud.db.marshal.LaUDSblobType;
import cn.edu.thu.laud.jdbc.util.LaUDJdbcUtils;
import cn.edu.thu.laud.lasql.parser.LaSQLLexer;
import cn.edu.thu.laud.lasql.parser.LaSQLParser;
import cn.edu.thu.laud.lasql.processor.LaUDCreateColumnFamilyStatement;
import cn.edu.thu.laud.lasql.processor.LaUDCreateIndexStatement;
import cn.edu.thu.laud.lasql.processor.LaUDUpdateStatement;
import cn.edu.thu.laud.server.thrift.FileInfo;
import cn.edu.thu.laud.server.thrift.LaUDResult;
import cn.edu.thu.laud.thrift.LaUDDataServer;
import cn.edu.thu.laud.thrift.LaUDThriftValidation;
import cn.edu.thu.laud.thrift.service.LaUDClientState;
import cn.edu.thu.laud.thrift.service.ServiceFileInfo;
import cn.edu.thu.laud.thrift.service.ServiceFileInfo.FileStoreType;

import com.google.common.base.Predicates;
import com.google.common.collect.Maps;

public class LaUDQueryProcessor
{
	/** CQL language version reported to clients. */
	public static final String CQL_VERSION = "2.0.0";

	// Fixed: the logger was registered under QueryProcessor.class, which made
	// log output from this class appear to come from QueryProcessor instead.
	private static final Logger logger = LoggerFactory.getLogger(LaUDQueryProcessor.class);

	/** Maximum time (ms) to wait for cluster-wide schema agreement. */
	private static final long timeLimitForSchemaAgreement = 10 * 1000;

	/** Default row-key column name, decoded from CFMetaData's raw key-name buffer. */
	public static final String DEFAULT_KEY_NAME = bufferToString(CFMetaData.DEFAULT_KEY_NAME);

	/**
	 * Reads columns for each explicitly listed key of a by-key SELECT: either a
	 * fixed set of column names or a [start, finish] column slice per key.
	 *
	 * @throws InvalidRequestException on invalid keys, names or slice bounds
	 * @throws TimedOutException if the storage read times out
	 * @throws UnavailableException if not enough replicas are live
	 */
	private static List<org.apache.cassandra.db.Row> getSlice(CFMetaData metadata, SelectStatement select)
			throws InvalidRequestException, TimedOutException, UnavailableException
	{
		QueryPath queryPath = new QueryPath(select.getColumnFamily());
		List<ReadCommand> commands = new ArrayList<ReadCommand>();

		if (!select.isColumnRange())
		{
			// Explicit list of column names: one SliceByNames command per key.
			Collection<ByteBuffer> columnNames = getColumnNames(select, metadata);
			validateColumnNames(columnNames);

			for (Term rawKey : select.getKeys())
			{
				ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator());
				validateKey(key);
				commands.add(new SliceByNamesReadCommand(metadata.ksName, key, queryPath, columnNames));
			}
		}
		else
		{
			// Column slice: the same [start, finish] bounds apply to every key.
			AbstractType<?> comparator = select.getComparator(metadata.ksName);
			ByteBuffer start = select.getColumnStart().getByteBuffer(comparator);
			ByteBuffer finish = select.getColumnFinish().getByteBuffer(comparator);
			boolean reversed = select.isColumnsReversed();

			for (Term rawKey : select.getKeys())
			{
				ByteBuffer key = rawKey.getByteBuffer(metadata.getKeyValidator());
				validateKey(key);
				validateSliceRange(metadata, start, finish, reversed);
				commands.add(new SliceFromReadCommand(metadata.ksName,
						key,
						queryPath,
						start,
						finish,
						reversed,
						select.getColumnsLimit()));
			}
		}

		try
		{
			return StorageProxy.read(commands, select.getConsistencyLevel());
		}
		catch (TimeoutException e)
		{
			throw new TimedOutException();
		}
		catch (IOException e)
		{
			throw new RuntimeException(e);
		}
	}

	/**
	 * Collects the encoded column names requested by a SELECT, excluding the
	 * key alias; the key is re-attached to the resultset later, in
	 * extractThriftColumns.
	 */
	private static List<ByteBuffer> getColumnNames(SelectStatement select, CFMetaData metadata) throws InvalidRequestException
	{
		List<ByteBuffer> names = new ArrayList<ByteBuffer>();
		String keyAlias = getKeyString(metadata);
		for (Term column : select.getColumnNames())
		{
			if (column.getText().equalsIgnoreCase(keyAlias))
				continue; // skip the key for the slice op
			names.add(column.getByteBuffer(metadata.comparator));
		}
		return names;
	}

	/**
	 * Executes a key-range SELECT: resolves the token bounds, performs the
	 * range slice, trims rows excluded by strict (non-inclusive) bounds, and
	 * caps the result at the requested record count.
	 *
	 * @throws InvalidRequestException if the start token sorts after the finish token
	 * @throws TimedOutException if the range slice times out
	 * @throws UnavailableException if not enough replicas are live
	 */
	private static List<org.apache.cassandra.db.Row> multiRangeSlice(CFMetaData metadata, SelectStatement select)
			throws TimedOutException, UnavailableException, InvalidRequestException
	{
		IPartitioner<?> p = StorageService.getPartitioner();
		AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();

		// Missing bounds default to the empty buffer (the minimum token).
		ByteBuffer startKey = (select.getKeyStart() != null)
				? select.getKeyStart().getByteBuffer(keyType)
				: (new Term()).getByteBuffer();
		ByteBuffer finishKey = (select.getKeyFinish() != null)
				? select.getKeyFinish().getByteBuffer(keyType)
				: (new Term()).getByteBuffer();

		Token startToken = p.getToken(startKey);
		Token finishToken = p.getToken(finishKey);
		if (startToken.compareTo(finishToken) > 0 && !finishToken.equals(p.getMinimumToken()))
		{
			if (p instanceof RandomPartitioner)
				throw new InvalidRequestException("Start key's md5 sorts after end key's md5. This is not allowed; you probably should not specify end key at all, under RandomPartitioner");
			throw new InvalidRequestException("Start key must sort before (or equal to) finish key in your partitioner!");
		}
		AbstractBounds bounds = new Bounds(startToken, finishToken);

		// XXX: Our use of Thrift structs internally makes me Sad. :(
		SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata);
		validateSlicePredicate(metadata, thriftSlicePredicate);

		// Over-fetch one row when the start key may be trimmed below.
		int limit = (select.isKeyRange() && select.getKeyStart() != null)
				? select.getNumRecords() + 1
				: select.getNumRecords();

		List<org.apache.cassandra.db.Row> rows;
		try
		{
			rows = StorageProxy.getRangeSlice(new RangeSliceCommand(metadata.ksName,
					select.getColumnFamily(),
					null,
					thriftSlicePredicate,
					bounds,
					limit),
					select.getConsistencyLevel());
		}
		catch (IOException e)
		{
			throw new RuntimeException(e);
		}
		catch (org.apache.cassandra.thrift.UnavailableException e)
		{
			throw new UnavailableException();
		}
		catch (TimeoutException e)
		{
			throw new TimedOutException();
		}

		// Strict lower bound ("greater than"): drop the start row if present.
		if (select.getKeyStart() != null && !select.includeStartKey() && !rows.isEmpty())
		{
			if (rows.get(0).key.key.equals(startKey))
				rows.remove(0);
		}

		// Strict upper bound ("less than"): drop the finish row if present.
		if (select.getKeyFinish() != null && !select.includeFinishKey() && !rows.isEmpty())
		{
			int last = rows.size() - 1;
			if (rows.get(last).key.key.equals(finishKey))
				rows.remove(last);
		}

		// Trim any over-fetched row back to the requested record count.
		return rows.subList(0, Math.min(select.getNumRecords(), rows.size()));
	}

	/**
	 * Executes an index-scan SELECT: converts the WHERE-clause relations into
	 * thrift IndexExpressions and scans the secondary index.
	 *
	 * @throws TimedOutException if the scan times out
	 * @throws UnavailableException if not enough replicas are live
	 * @throws InvalidRequestException on invalid terms or predicate
	 */
	private static List<org.apache.cassandra.db.Row> getIndexedSlices(CFMetaData metadata, SelectStatement select)
			throws TimedOutException, UnavailableException, InvalidRequestException
	{
		// XXX: Our use of Thrift structs internally (still) makes me Sad. :~(
		SlicePredicate thriftSlicePredicate = slicePredicateFromSelect(select, metadata);
		validateSlicePredicate(metadata, thriftSlicePredicate);

		// Left side of each relation is encoded with the comparator, the right
		// side with the targeted column's value validator.
		List<IndexExpression> expressions = new ArrayList<IndexExpression>();
		for (Relation rel : select.getColumnRelations())
		{
			ByteBuffer entity = rel.getEntity().getByteBuffer(metadata.comparator);
			ByteBuffer value = rel.getValue().getByteBuffer(select.getValueValidator(metadata.ksName, entity));
			IndexOperator op = IndexOperator.valueOf(rel.operator().toString());
			expressions.add(new IndexExpression(entity, op, value));
		}

		// Scan from the given start key, or from the beginning when no key
		// range was specified.
		AbstractType<?> keyType = Schema.instance.getCFMetaData(metadata.ksName, select.getColumnFamily()).getKeyValidator();
		ByteBuffer startKey = select.isKeyRange()
				? select.getKeyStart().getByteBuffer(keyType)
				: (new Term()).getByteBuffer();
		IndexClause thriftIndexClause = new IndexClause(expressions, startKey, select.getNumRecords());

		try
		{
			return StorageProxy.scan(metadata.ksName,
					select.getColumnFamily(),
					thriftIndexClause,
					thriftSlicePredicate,
					select.getConsistencyLevel());
		}
		catch (IOException e)
		{
			throw new RuntimeException(e);
		}
		catch (TimeoutException e)
		{
			throw new TimedOutException();
		}
	}

	/**
	 * Applies a batch of UPDATE/INSERT statements as one mutation call and
	 * returns, per statement, the file metadata gathered from its
	 * LaUDUpdateStatement (one map per statement, in order).
	 *
	 * @throws InvalidRequestException on authorization or preparation failure
	 * @throws UnavailableException if not enough replicas are live
	 * @throws TimedOutException if the mutation times out
	 */
	private static List<Map<String,FileInfo>> batchUpdate(ClientState clientState, List<UpdateStatement> updateStatements, ConsistencyLevel consistency)
			throws InvalidRequestException, UnavailableException, TimedOutException
	{
		String globalKeyspace = clientState.getKeyspace();
		List<IMutation> mutations = new ArrayList<IMutation>();
		List<String> authorizedCfs = new ArrayList<String>();
		List<Map<String,FileInfo>> fileInfos = new ArrayList<Map<String,FileInfo>>();

		for (UpdateStatement update : updateStatements)
		{
			String keyspace = (update.keyspace == null) ? globalKeyspace : update.keyspace;

			// Authorize each column family at most once per batch.
			String cf = update.getColumnFamily();
			if (!authorizedCfs.contains(cf))
			{
				clientState.hasColumnFamilyAccess(keyspace, cf, Permission.WRITE);
				authorizedCfs.add(cf);
			}

			fileInfos.add(((LaUDUpdateStatement) update).getFileInfos(keyspace, clientState));
			mutations.addAll(update.prepareRowMutations(keyspace, clientState));
		}

		try
		{
			StorageProxy.mutate(mutations, consistency);
		}
		catch (org.apache.cassandra.thrift.UnavailableException e)
		{
			throw new UnavailableException();
		}
		catch (TimeoutException e)
		{
			throw new TimedOutException();
		}
		return fileInfos;
	}

	/**
	 * Translates a SELECT's column selection into a thrift SlicePredicate: an
	 * explicit column-name list when concrete names were given, otherwise a
	 * slice range built from the SELECT's start/finish/reversed/limit.
	 */
	private static SlicePredicate slicePredicateFromSelect(SelectStatement select, CFMetaData metadata)
			throws InvalidRequestException
	{
		SlicePredicate predicate = new SlicePredicate();

		if (!select.isColumnRange() && select.getColumnNames().size() > 0)
		{
			predicate.column_names = getColumnNames(select, metadata);
		}
		else
		{
			SliceRange range = new SliceRange();
			range.start = select.getColumnStart().getByteBuffer(metadata.comparator);
			range.finish = select.getColumnFinish().getByteBuffer(metadata.comparator);
			range.reversed = select.isColumnsReversed();
			range.count = select.getColumnsLimit();
			predicate.slice_range = range;
		}

		return predicate;
	}

	/**
	 * Rejects SELECT-specific taboos: finish key without a start key, a key
	 * range combined with by-key or (with a finish key) by-column clauses,
	 * multiple KEY= terms outside multi-key mode, and by-column clauses with
	 * no indexed EQ relation.
	 */
	private static void validateSelect(String keyspace, SelectStatement select) throws InvalidRequestException
	{
		// Finish key w/o start key (KEY < foo)
		if (select.getKeyFinish() != null && !select.isKeyRange())
			throw new InvalidRequestException("Key range clauses must include a start key (i.e. KEY > term)");

		// Key range and by-key(s) combined (KEY > foo AND KEY = bar)
		if (select.isKeyRange() && !select.getKeys().isEmpty())
			throw new InvalidRequestException("You cannot combine key range and by-key clauses in a SELECT");

		// Start and finish keys, *and* column relations (KEY > foo AND KEY < bar and name1 = value1).
		if (select.isKeyRange() && select.getKeyFinish() != null && !select.getColumnRelations().isEmpty())
			throw new InvalidRequestException("You cannot combine key range and by-column clauses in a SELECT");

		// Can't use more than one KEY =
		if (!select.isMultiKey() && select.getKeys().size() > 1)
			throw new InvalidRequestException("You cannot use more than one KEY = in a SELECT");

		if (select.getColumnRelations().isEmpty())
			return;

		// At least one EQ relation must target an indexed column.
		AbstractType<?> comparator = select.getComparator(keyspace);
		Set<ByteBuffer> indexed = Table.open(keyspace).getColumnFamilyStore(select.getColumnFamily()).indexManager.getIndexedColumns();
		for (Relation relation : select.getColumnRelations())
		{
			if (relation.operator() == RelationType.EQ
					&& indexed.contains(relation.getEntity().getByteBuffer(comparator)))
				return;
		}
		throw new InvalidRequestException("No indexed columns present in by-columns clause with \"equals\" operator");
	}

	// Copypasta from o.a.c.thrift.CassandraDaemon
	/**
	 * Applies and announces a schema migration on the MIGRATION stage, blocks
	 * until it completes, then verifies the cluster's schema has settled.
	 *
	 * @throws InvalidRequestException if the migration itself failed
	 * @throws SchemaDisagreementException if schema disagreement persists
	 */
	private static void applyMigrationOnStage(final Migration m) throws SchemaDisagreementException, InvalidRequestException
	{
		Future<?> f = StageManager.getStage(Stage.MIGRATION).submit(new Callable<Object>()
				{
			public Object call() throws Exception
			{
				m.apply();
				m.announce();
				return null;
			}
				});
		try
		{
			f.get();
		}
		catch (InterruptedException e)
		{
			// Fixed: restore the interrupt flag before rethrowing so callers
			// further up the stack can still observe the interruption.
			Thread.currentThread().interrupt();
			throw new RuntimeException(e);
		}
		catch (ExecutionException e)
		{
			// call() threw an exception: unwrap it and rethrow as an
			// InvalidRequestException carrying the original failure as cause.
			Throwable cause = (e.getCause() != null) ? e.getCause() : e;
			InvalidRequestException ex = new InvalidRequestException(cause.getMessage());
			ex.initCause(cause);
			throw ex;
		}

		validateSchemaIsSettled();
	}

	/**
	 * Ensures a row key is non-empty and short enough to serialize with a
	 * length-prefixed unsigned short (FBUtilities.writeShortByteArray).
	 *
	 * @throws InvalidRequestException if the key is null, empty, or too long
	 */
	public static void validateKey(ByteBuffer key) throws InvalidRequestException
	{
		if (key == null || key.remaining() == 0)
			throw new InvalidRequestException("Key may not be empty");

		int length = key.remaining();
		if (length > FBUtilities.MAX_UNSIGNED_SHORT)
			throw new InvalidRequestException("Key length of " + length +
					" is longer than maximum of " + FBUtilities.MAX_UNSIGNED_SHORT);
	}

	/**
	 * Verifies that the (already uppercased) key name used in a WHERE clause
	 * matches the column family's configured key alias.
	 *
	 * @throws InvalidRequestException when the alias does not match
	 */
	public static void validateKeyAlias(CFMetaData cfm, String key) throws InvalidRequestException
	{
		assert key.toUpperCase().equals(key); // should always be uppercased by caller
		String actualAlias = bufferToString(cfm.getKeyName()).toUpperCase();
		if (!actualAlias.equals(key))
			throw new InvalidRequestException(String.format("Expected key '%s' to be present in WHERE clause for '%s'", actualAlias, cfm.cfName));
	}

	/**
	 * Rejects zero-length column names and names longer than
	 * IColumn.MAX_NAME_LENGTH.
	 *
	 * @throws InvalidRequestException on the first offending name
	 */
	private static void validateColumnNames(Iterable<ByteBuffer> columns)
			throws InvalidRequestException
	{
		for (ByteBuffer name : columns)
		{
			int length = name.remaining();
			if (length > IColumn.MAX_NAME_LENGTH)
				throw new InvalidRequestException(String.format("column name is too long (%s > %s)",
						length,
						IColumn.MAX_NAME_LENGTH));
			if (length == 0)
				throw new InvalidRequestException("zero-length column name");
		}
	}

	/** Validates a single column name via {@link #validateColumnNames}. */
	public static void validateColumnName(ByteBuffer column)
			throws InvalidRequestException
	{
		validateColumnNames(Collections.singletonList(column));
	}

	/**
	 * Checks a column name/value pair: the name must pass validateColumnName,
	 * and the value must satisfy the column's configured validator, if any.
	 *
	 * @throws InvalidRequestException if the name or value is invalid
	 */
	public static void validateColumn(CFMetaData metadata, ByteBuffer name, ByteBuffer value)
			throws InvalidRequestException
	{
		validateColumnName(name);

		AbstractType<?> validator = metadata.getValueValidator(name);
		if (validator == null)
			return; // no validator configured for this column

		try
		{
			validator.validate(value);
		}
		catch (MarshalException me)
		{
			throw new InvalidRequestException(String.format("Invalid column value for column (name=%s); %s",
					ByteBufferUtil.bytesToHex(name),
					me.getMessage()));
		}
	}

	/**
	 * Validates whichever half of a SlicePredicate is populated: the slice
	 * range when present, otherwise the explicit column-name list.
	 */
	private static void validateSlicePredicate(CFMetaData metadata, SlicePredicate predicate)
			throws InvalidRequestException
	{
		if (predicate.slice_range == null)
			validateColumnNames(predicate.column_names);
		else
			validateSliceRange(metadata, predicate.slice_range);
	}

	/** Convenience overload: unpacks a thrift SliceRange and validates it. */
	private static void validateSliceRange(CFMetaData metadata, SliceRange range)
			throws InvalidRequestException
	{
		validateSliceRange(metadata, range.start, range.finish, range.reversed);
	}

	/**
	 * Ensures slice bounds are well-ordered: when both bounds are non-empty,
	 * start must not sort after finish in traversal order (which is the
	 * comparator's reverse order for reversed slices). Empty bounds mean
	 * "unbounded" and are always accepted.
	 */
	private static void validateSliceRange(CFMetaData metadata, ByteBuffer start, ByteBuffer finish, boolean reversed)
			throws InvalidRequestException
	{
		AbstractType<?> comparator = metadata.getComparatorFor(null);
		Comparator<ByteBuffer> order = reversed ? comparator.reverseComparator : comparator;
		boolean bothBounded = start.remaining() > 0 && finish.remaining() > 0;
		if (bothBounded && order.compare(start, finish) > 0)
			throw new InvalidRequestException("range finish must come after start in traversal order");
	}

	// Copypasta from CassandraServer (where it is private).
	/** Fails when live nodes report more than one schema version. */
	private static void validateSchemaAgreement() throws SchemaDisagreementException
	{
		int liveVersions = describeSchemaVersions().size();
		if (liveVersions > 1)
			throw new SchemaDisagreementException();
	}

	/**
	 * Returns the cluster's schema versions keyed by version, with
	 * unreachable hosts filtered out so they don't count towards
	 * disagreement.
	 */
	private static Map<String, List<String>> describeSchemaVersions()
	{
		Map<String, List<String>> allVersions = StorageProxy.describeSchemaVersions();
		return Maps.filterKeys(allVersions,
				Predicates.not(Predicates.equalTo(StorageProxy.UNREACHABLE)));
	}

	//XXX add by hxd
	public static LaUDResult processStatement(CQLStatement statement,ClientState clientState) throws RecognitionException, UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException{
		String keyspace = null;
		//LaUDClientState laUDClientState=(LaUDClientState) clientState;
		//TODO clear the fileDataSchedule.
		// Some statements won't have (or don't need) a keyspace (think USE, or CREATE).
		if (statement.type != StatementType.SELECT && StatementType.requiresKeyspace.contains(statement.type))
			keyspace = clientState.getKeyspace();

		CqlResult result = new CqlResult();
		LaUDResult laUDResult=new LaUDResult().setType(LaUDJdbcUtils.TYPE_CASSANDRA);
		laUDResult.setCqlResult(result);
		logger.debug("CQL statement type: {}", statement.type.toString());
		CFMetaData metadata;
		switch (statement.type)
		{
		case SELECT:
			SelectStatement select = (SelectStatement)statement.statement;

			final String oldKeyspace = clientState.getRawKeyspace();

			if (select.isSetKeyspace())
			{
				keyspace = CliUtils.unescapeSQLString(select.getKeyspace());
				ThriftValidation.validateTable(keyspace);
			}
			else if (oldKeyspace == null)
				throw new InvalidRequestException("no keyspace has been specified");
			else
				keyspace = oldKeyspace;

			clientState.hasColumnFamilyAccess(keyspace, select.getColumnFamily(), Permission.READ);
			metadata = validateColumnFamily(keyspace, select.getColumnFamily());

			// need to do this in here because we need a CFMD.getKeyName()
			select.extractKeyAliasFromColumns(metadata);

			if (select.getKeys().size() > 0)
				validateKeyAlias(metadata, select.getKeyAlias());

			validateSelect(keyspace, select);

			List<org.apache.cassandra.db.Row> rows;

			// By-key
			if (!select.isKeyRange() && (select.getKeys().size() > 0))
			{
				rows = getSlice(metadata, select);
			}
			else
			{
				// Range query
				if ((select.getKeyFinish() != null) || (select.getColumnRelations().size() == 0))
				{
					rows = multiRangeSlice(metadata, select);
				}
				// Index scan
				else
				{
					rows = getIndexedSlices(metadata, select);
				}
			}

			// count resultset is a single column named "count"
			result.type = CqlResultType.ROWS;
			if (select.isCountOperation())
			{
				validateCountOperation(select);

				ByteBuffer countBytes = ByteBufferUtil.bytes("count");
				result.schema = new CqlMetadata(Collections.<ByteBuffer, String>emptyMap(),
						Collections.<ByteBuffer, String>emptyMap(),
						"AsciiType",
						"LongType");
				List<Column> columns = Collections.singletonList(new Column(countBytes).setValue(ByteBufferUtil.bytes((long) rows.size())));
				result.rows = Collections.singletonList(new CqlRow(countBytes, columns));
				return laUDResult;
			}

			// otherwise create resultset from query results
			result.schema = new CqlMetadata(new HashMap<ByteBuffer, String>(),
					new HashMap<ByteBuffer, String>(),
					TypeParser.getShortName(metadata.comparator),
					TypeParser.getShortName(metadata.getDefaultValidator()));
			List<CqlRow> cqlRows = new ArrayList<CqlRow>();
			List<Map<String,FileInfo>> fileInfos=new ArrayList<Map<String,FileInfo>>();

			LaUDClientState laUDClientState=(LaUDClientState)clientState;

			Map<String,FileInfo> fileMap=new HashMap<String,FileInfo>();
			boolean haveFile=false;
			for (org.apache.cassandra.db.Row row : rows)
			{

				List<Column> thriftColumns = new ArrayList<Column>();
				if (select.isColumnRange())
				{
					if (select.isWildcard())
					{
						// prepend key
						thriftColumns.add(new Column(metadata.getKeyName()).setValue(row.key.key).setTimestamp(-1));
						result.schema.name_types.put(metadata.getKeyName(), TypeParser.getShortName(AsciiType.instance));
						result.schema.value_types.put(metadata.getKeyName(), TypeParser.getShortName(metadata.getKeyValidator()));
					}

					// preserve comparator order
					if (row.cf != null)
					{
						for (IColumn c : row.cf.getSortedColumns())
						{
							if (c.isMarkedForDelete())
								continue;

							ColumnDefinition cd = metadata.getColumnDefinition(c.name());
							if (cd != null){
								result.schema.value_types.put(c.name(), TypeParser.getShortName(cd.getValidator()));
								if(cd.getValidator() instanceof LaUDFileType){
									//getFileInfos(c, cd, term, laUDClientState, fileInfos);

								}
							}

							thriftColumns.add(thriftify(c));
						}
					}
				}
				else
				{
					String keyString = getKeyString(metadata);

					// order columns in the order they were asked for
					for (Term term : select.getColumnNames())
					{
						if (term.getText().equalsIgnoreCase(keyString))
						{
							// preserve case of key as it was requested
							ByteBuffer requestedKey = ByteBufferUtil.bytes(term.getText());
							thriftColumns.add(new Column(requestedKey).setValue(row.key.key).setTimestamp(-1));
							result.schema.name_types.put(requestedKey, TypeParser.getShortName(AsciiType.instance));
							result.schema.value_types.put(requestedKey, TypeParser.getShortName(metadata.getKeyValidator()));
							continue;
						}

						if (row.cf == null)
							continue;

						ByteBuffer name;
						try
						{
							name = term.getByteBuffer(metadata.comparator);
						}
						catch (InvalidRequestException e)
						{
							throw new AssertionError(e);
						}

						ColumnDefinition cd = metadata.getColumnDefinition(name);
						//XXX modify by hxd
						//                            if (cd != null)
						//                                result.schema.value_types.put(name, TypeParser.getShortName(cd.getValidator()));
						if (cd != null){
							result.schema.value_types.put(name, TypeParser.getShortName(cd.getValidator()));

						}
						//XXX
						IColumn c = row.cf.getColumn(name);
						if (c == null || c.isMarkedForDelete())
							thriftColumns.add(new Column().setName(name));
						else{
							if(cd!=null&&cd.getValidator() instanceof LaUDFileType){
								//if(!c.value().equals(ByteBufferUtil.EMPTY_BYTE_BUFFER));
								if(!(cd.getValidator() instanceof LaUDSblobType) )
									getFileInfos(c, cd, term, laUDClientState, fileMap,row);
							}
							//else{
							thriftColumns.add(thriftify(c));
							//}

						}
					}
				}

				// Create a new row, add the columns to it, and then add it to the list of rows
				CqlRow cqlRow = new CqlRow();
				cqlRow.key = row.key.key;
				cqlRow.columns = thriftColumns;
				if (select.isColumnsReversed())
					Collections.reverse(cqlRow.columns);
				cqlRows.add(cqlRow);
				fileInfos.add(fileMap);
				if(fileMap.size()>0){
					haveFile=true;
				}
				fileMap=new HashMap<String, FileInfo>();

			}
			if(haveFile){
				LaUDDataServer.authorizedUsers.put(laUDClientState.getSessionId(), laUDClientState);	
				laUDResult.setFileInfos(fileInfos);
				laUDResult.setType(LaUDJdbcUtils.TYPE_MIX);
			}
			result.rows = cqlRows;
			return laUDResult;

		case INSERT: // insert uses UpdateStatement
		case UPDATE:
			UpdateStatement update = (LaUDUpdateStatement)statement.statement;

			List<Map<String,FileInfo>> files=  LaUDQueryProcessor.batchUpdate(clientState, Collections.singletonList(update), update.getConsistencyLevel());
			if(files.size()>0){
				laUDResult.setType(LaUDJdbcUtils.TYPE_FILE);
				laUDResult.setFileInfos(files);
			}
			result.type = CqlResultType.VOID;
			return laUDResult;

		case BATCH:
			BatchStatement batch = (BatchStatement) statement.statement;

			if (batch.getTimeToLive() != 0)
				throw new InvalidRequestException("Global TTL on the BATCH statement is not supported.");

			for (AbstractModification up : batch.getStatements())
			{
				if (up.isSetConsistencyLevel())
					throw new InvalidRequestException(
							"Consistency level must be set on the BATCH, not individual statements");

				if (batch.isSetTimestamp() && up.isSetTimestamp())
					throw new InvalidRequestException(
							"Timestamp must be set either on BATCH or individual statements");
			}

			try
			{
				StorageProxy.mutate(batch.getMutations(keyspace, clientState), batch.getConsistencyLevel());
			}
			catch (org.apache.cassandra.thrift.UnavailableException e)
			{
				throw new UnavailableException();
			}
			catch (TimeoutException e)
			{
				throw new TimedOutException();
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case USE:
			clientState.setKeyspace(CliUtils.unescapeSQLString((String) statement.statement));
			result.type = CqlResultType.VOID;

			return laUDResult;

		case TRUNCATE:
			Pair<String, String> columnFamily = (Pair<String, String>)statement.statement;
			keyspace = columnFamily.left == null ? clientState.getKeyspace() : columnFamily.left;

			validateColumnFamily(keyspace, columnFamily.right);
			clientState.hasColumnFamilyAccess(keyspace, columnFamily.right, Permission.WRITE);

			try
			{
				StorageProxy.truncateBlocking(keyspace, columnFamily.right);
			}
			catch (TimeoutException e)
			{
				throw (UnavailableException) new UnavailableException().initCause(e);
			}
			catch (IOException e)
			{
				throw (UnavailableException) new UnavailableException().initCause(e);
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case DELETE:
			DeleteStatement delete = (DeleteStatement)statement.statement;

			keyspace = delete.keyspace == null ? clientState.getKeyspace() : delete.keyspace;

			try
			{
				StorageProxy.mutate(delete.prepareRowMutations(keyspace, clientState), delete.getConsistencyLevel());
			}
			catch (TimeoutException e)
			{
				throw new TimedOutException();
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case CREATE_KEYSPACE:
			CreateKeyspaceStatement create = (CreateKeyspaceStatement)statement.statement;
			create.validate();
			clientState.hasKeyspaceSchemaAccess(Permission.WRITE);
			validateSchemaAgreement();

			try
			{
				KsDef ksd = new KsDef(create.getName(),
						create.getStrategyClass(),
						Collections.<CfDef>emptyList())
				.setStrategy_options(create.getStrategyOptions());
				ThriftValidation.validateKsDef(ksd);
				ThriftValidation.validateKeyspaceNotYetExisting(create.getName());
				applyMigrationOnStage(new AddKeyspace(KSMetaData.fromThrift(ksd)));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case CREATE_COLUMNFAMILY:
			LaUDCreateColumnFamilyStatement createCf = (LaUDCreateColumnFamilyStatement)statement.statement;
			clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
			validateSchemaAgreement();
			CFMetaData cfmd = createCf.getCFMetaData(keyspace);
			ThriftValidation.validateCfDef(cfmd.toThrift(), null);

			try
			{
				applyMigrationOnStage(new AddColumnFamily(cfmd));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.toString());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.toString());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case CREATE_INDEX:
			LaUDCreateIndexStatement createIdx = (LaUDCreateIndexStatement)statement.statement;
			clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
			validateSchemaAgreement();
			CFMetaData oldCfm = Schema.instance.getCFMetaData(keyspace, createIdx.getColumnFamily());
			if (oldCfm == null)
				throw new InvalidRequestException("No such column family: " + createIdx.getColumnFamily());

			boolean columnExists = false;
			ByteBuffer columnName = createIdx.getColumnName().getByteBuffer();
			// mutating oldCfm directly would be bad, but mutating a Thrift copy is fine.  This also
			// sets us up to use validateCfDef to check for index name collisions.
			CfDef cf_def = oldCfm.toThrift();
			for (ColumnDef cd : cf_def.column_metadata)
			{
				if (cd.name.equals(columnName))
				{
					if (cd.index_type != null)
						throw new InvalidRequestException("Index already exists");
					logger.debug("Updating column {} definition for index {}", oldCfm.comparator.getString(columnName), createIdx.getIndexName());
					//XXX  modify by hxd.
					//cd.setIndex_type(IndexType.KEYS);
					cd.setIndex_type(createIdx.getIndexType());
					cd.setIndex_options(createIdx.getIndex_options());
					//XXX
					cd.setIndex_name(createIdx.getIndexName());
					columnExists = true;
					break;
				}
			}
			if (!columnExists)
				throw new InvalidRequestException("No column definition found for column " + oldCfm.comparator.getString(columnName));

			CFMetaData.addDefaultIndexNames(cf_def);
			LaUDThriftValidation.validateCfDef(cf_def, oldCfm);
			try
			{
				org.apache.cassandra.db.migration.avro.CfDef result1;
				try
				{
					result1 = CFMetaData.fromThrift(cf_def).toAvro();
				}
				catch (Exception e)
				{
					throw new RuntimeException(e);
				}
				applyMigrationOnStage(new UpdateColumnFamily(result1));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.toString());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.toString());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case DROP_INDEX:
			DropIndexStatement dropIdx = (DropIndexStatement)statement.statement;
			clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
			validateSchemaAgreement();

			try
			{
				applyMigrationOnStage(dropIdx.generateMutation(clientState.getKeyspace()));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.toString());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.toString());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case DROP_KEYSPACE:
			String deleteKeyspace = (String)statement.statement;
			clientState.hasKeyspaceSchemaAccess(Permission.WRITE);
			validateSchemaAgreement();

			try
			{
				applyMigrationOnStage(new DropKeyspace(deleteKeyspace));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case DROP_COLUMNFAMILY:
			String deleteColumnFamily = (String)statement.statement;
			clientState.hasColumnFamilySchemaAccess(Permission.WRITE);
			validateSchemaAgreement();

			try
			{
				applyMigrationOnStage(new DropColumnFamily(keyspace, deleteColumnFamily));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;

		case ALTER_TABLE:
			AlterTableStatement alterTable = (AlterTableStatement) statement.statement;

			validateColumnFamily(keyspace, alterTable.columnFamily);
			clientState.hasColumnFamilyAccess(alterTable.columnFamily, Permission.WRITE);
			validateSchemaAgreement();

			try
			{
				applyMigrationOnStage(new UpdateColumnFamily(alterTable.getCfDef(keyspace)));
			}
			catch (ConfigurationException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}
			catch (IOException e)
			{
				InvalidRequestException ex = new InvalidRequestException(e.getMessage());
				ex.initCause(e);
				throw ex;
			}

			result.type = CqlResultType.VOID;
			return laUDResult;
		}

		return null;    // We should never get here.
	}
	/**
	 * Parses a query string and executes the resulting statement.
	 *
	 * @param queryString the raw query text
	 * @param clientState per-connection state (current keyspace, permissions)
	 * @return the execution result produced by {@link #processStatement}
	 */
	public static LaUDResult process(String queryString, ClientState clientState)
			throws RecognitionException, UnavailableException, InvalidRequestException, TimedOutException, SchemaDisagreementException
	{
		logger.trace("CQL QUERY: {}", queryString);
		return processStatement(getStatement(queryString), clientState);
	}

	/**
	 * Converts an internal column to its Thrift {@code Column} form.
	 * Counter columns are first resolved to their aggregated total value.
	 */
	private static Column thriftify(IColumn c)
	{
		final ByteBuffer value;
		if (c instanceof CounterColumn)
			value = ByteBufferUtil.bytes(CounterContext.instance().total(c.value()));
		else
			value = c.value();
		return new Column(c.name()).setValue(value).setTimestamp(c.timestamp());
	}

	/**
	 * Returns the key alias of the given column family metadata as a String.
	 * A decoding failure is treated as a programming error and surfaces as
	 * an {@link AssertionError}.
	 */
	private static String getKeyString(CFMetaData metadata)
	{
		try
		{
			return ByteBufferUtil.string(metadata.getKeyName());
		}
		catch (CharacterCodingException e)
		{
			throw new AssertionError(e);
		}
	}

	/**
	 * Lexes and parses a query string into a {@link CQLStatement} using the
	 * LaSQL grammar (which replaces the stock Cql grammar in this fork).
	 *
	 * @param queryStr the raw query text
	 * @return the parsed statement
	 * @throws InvalidRequestException if the query is semantically invalid
	 * @throws RecognitionException if the query cannot be parsed
	 */
	private static CQLStatement getStatement(String queryStr) throws InvalidRequestException, RecognitionException
	{
		// Lexer and parser
		CharStream stream = new ANTLRStringStream(queryStr);
		LaSQLLexer lexer = new LaSQLLexer(stream);
		TokenStream tokenStream = new CommonTokenStream(lexer);
		LaSQLParser parser = new LaSQLParser(tokenStream);

		// Parse the query string to a statement instance
		CQLStatement statement = parser.query();

		// The lexer and parser queue up any errors they may have encountered
		// along the way; if necessary, we turn them into exceptions here.
		lexer.throwLastRecognitionError();
		parser.throwLastRecognitionError();

		return statement;
	}

	/**
	 * Waits until every reported schema version matches the local version, or
	 * the configured time limit elapses.
	 *
	 * @throws SchemaDisagreementException if agreement is not reached in time
	 */
	private static void validateSchemaIsSettled() throws SchemaDisagreementException
	{
		long deadline = System.currentTimeMillis() + timeLimitForSchemaAgreement;

		while (System.currentTimeMillis() <= deadline)
		{
			String localVersion = Schema.instance.getVersion().toString();

			boolean allAgree = true;
			for (String version : describeSchemaVersions().keySet())
			{
				if (!version.equals(localVersion))
				{
					allAgree = false;
					break;
				}
			}

			if (allAgree)
				return; // schemas agree
		}

		throw new SchemaDisagreementException();
	}

	/**
	 * Ensures a SELECT used for counting is one of the supported forms:
	 * COUNT(*) or COUNT(1); anything else is rejected.
	 *
	 * @throws InvalidRequestException for any other column selection
	 */
	private static void validateCountOperation(SelectStatement select) throws InvalidRequestException
	{
		if (select.isWildcard())
			return; // valid count(*)

		if (!select.isColumnRange())
		{
			List<Term> selected = select.getColumnNames();
			String first = selected.get(0).getText();
			if (selected.size() == 1)
			{
				if (first.equals("*") || first.equals("1"))
					return; // valid count(*) || count(1)
			}
		}

		throw new InvalidRequestException("Only COUNT(*) and COUNT(1) operations are currently supported.");
	}

	/**
	 * Decodes a ByteBuffer into a String, rethrowing any character-decoding
	 * failure as an unchecked exception with the original as cause.
	 */
	private static String bufferToString(ByteBuffer string)
	{
		final String decoded;
		try
		{
			decoded = ByteBufferUtil.string(string);
		}
		catch (CharacterCodingException e)
		{
			throw new RuntimeException(e.getMessage(), e);
		}
		return decoded;
	}
	/**
	 * Builds the client-side and service-side file descriptors for a
	 * blob-typed column and registers the service descriptor with the client
	 * state's file-data schedule.
	 *
	 * @param c               the column whose value holds the file location
	 * @param cd              the column's definition; its validator type selects the file store
	 * @param term            the column name term, used as the key into {@code fileInfos}
	 * @param laUDClientState per-connection state tracking scheduled file transfers
	 * @param fileInfos       output map of column name to client-side {@link FileInfo}
	 * @param row             the row the column belongs to (used for C_KV addressing)
	 */
	private static void getFileInfos(IColumn c, ColumnDefinition cd, Term term, LaUDClientState laUDClientState, Map<String, FileInfo> fileInfos, Row row)
	{
		// BUGFIX: fileInfo was allocated twice; the first instance was dead.
		FileInfo fileInfo = new FileInfo();
		ServiceFileInfo sFileInfo = new ServiceFileInfo();

		String fileLocation = cd.getValidator().getString(c.value());

		// Default direction: client reads the file from an external store.
		fileInfo.setDirection((byte) LaUDJdbcUtils.READ_FROM);
		sFileInfo.setDirection(LaUDJdbcUtils.READ_FROM);

		sFileInfo.setLocation(fileLocation);
		sFileInfo.setOverwrite(false);

		// The validator type encodes which backing store holds the blob.
		if (cd.getValidator() instanceof LaUDHblobType)
		{
			sFileInfo.setType(FileStoreType.HDFS);
		}
		else if (cd.getValidator() instanceof LaUDLblobType)
		{
			sFileInfo.setType(FileStoreType.C_HDFS);
		}
		else if (cd.getValidator() instanceof LaUDSblobType)
		{
			// Blob stored in Cassandra itself (C_KV) — presumably addressed by
			// keyspace/column family/row key/column name; TODO confirm against
			// the ServiceFileInfo consumer.
			fileInfo.setDirection((byte) LaUDJdbcUtils.READ_FROM_CASSANDRA);
			sFileInfo.setDirection(LaUDJdbcUtils.READ_FROM_CASSANDRA);
			sFileInfo.setType(FileStoreType.C_KV);
			sFileInfo.columnFamily = row.cf.metadata().cfName;
			sFileInfo.keyspace = row.cf.metadata().ksName;
			sFileInfo.keyBuffer = row.key.key;
			sFileInfo.columnNameBuffer = cd.name;
		}

		// An empty column value overrides the direction with OTHERS.
		if (c.value().equals(ByteBufferUtil.EMPTY_BYTE_BUFFER))
		{
			fileInfo.setDirection((byte) LaUDJdbcUtils.OTHERS);
			sFileInfo.setDirection(LaUDJdbcUtils.OTHERS);
		}

		fileInfo.setCheckcode(laUDClientState.getCheckCode());
		laUDClientState.addFileDataSchedule(fileInfo.getCheckcode(), sFileInfo);
		fileInfos.put(term.getText(), fileInfo);
	}
}
