package cn.edu.thu.laud.lasql.processor;

import static org.apache.cassandra.cql.QueryProcessor.validateColumn;
import static org.apache.cassandra.thrift.ThriftValidation.validateColumnFamily;
import static org.apache.cassandra.thrift.ThriftValidation.validateCommutativeForWrite;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.auth.Permission;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.ColumnDefinition;
import org.apache.cassandra.cql.Attributes;
import org.apache.cassandra.cql.LaUDTerm;
import org.apache.cassandra.cql.Operation;
import org.apache.cassandra.cql.QueryProcessor;
import org.apache.cassandra.cql.Term;
import org.apache.cassandra.cql.UpdateStatement;
import org.apache.cassandra.db.CounterMutation;
import org.apache.cassandra.db.IMutation;
import org.apache.cassandra.db.RowMutation;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.service.ClientState;
import org.apache.cassandra.thrift.InvalidRequestException;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import cn.edu.thu.laud.db.marshal.LaUDFileType;
import cn.edu.thu.laud.db.marshal.LaUDHblobType;
import cn.edu.thu.laud.db.marshal.LaUDLblobType;
import cn.edu.thu.laud.db.marshal.LaUDSblobType;
import cn.edu.thu.laud.jdbc.util.LaUDJdbcUtils;
import cn.edu.thu.laud.server.thrift.FileInfo;
import cn.edu.thu.laud.thrift.LaUDDataServer;
import cn.edu.thu.laud.thrift.service.LaUDClientState;
import cn.edu.thu.laud.thrift.service.ServiceFileInfo;
import cn.edu.thu.laud.thrift.service.ServiceFileInfo.FileStoreType;

/**
 * CQL UPDATE statement extended with LaUD file-column support.
 *
 * <p>Columns whose validator is a {@link LaUDFileType} subtype are not written
 * through the normal mutation path. Instead, {@link #getFileInfos} removes them
 * from the column map and registers a file-transfer schedule on the client
 * state; the column value is written once the transfer completes.
 */
public class LaUDUpdateStatement extends UpdateStatement {
	// Kept public (non-final) for backward compatibility with existing callers.
	public static Logger logger = LoggerFactory.getLogger(LaUDUpdateStatement.class);

	/**
	 * Creates an UPDATE-style statement from a column-to-operation map
	 * (the {@code SET col = value} form).
	 */
	public LaUDUpdateStatement(String keyspace, String columnFamily,
			String keyName, Map<Term, Operation> columns, List<Term> keys,
			Attributes attrs) {
		super(keyspace, columnFamily, keyName, columns, keys, attrs);
	}

	/**
	 * Creates an INSERT-style statement from parallel column-name and
	 * column-value lists.
	 */
	public LaUDUpdateStatement(String keyspace, String columnFamily,
			String keyName, List<Term> columnNames, List<Term> columnValues,
			List<Term> keys, Attributes attrs) {
		super(keyspace, columnFamily, keyName, columnNames, columnValues, keys, attrs);
	}

	/**
	 * Extracts file-typed columns from this update, registers a transfer
	 * schedule for each on the client state, and removes them from the regular
	 * column map so the normal mutation path does not write them.
	 *
	 * @param keyspace    working keyspace
	 * @param clientState must be a {@link LaUDClientState}; receives the
	 *                    per-file transfer schedules
	 * @return map of column-name text to {@link FileInfo} for each file column found
	 * @throws InvalidRequestException if no key is given, or more than one key
	 *         is given while a file column is present
	 */
	public Map<String, FileInfo> getFileInfos(String keyspace, ClientState clientState) throws InvalidRequestException {
		CFMetaData metadata = validateColumnFamily(keyspace, columnFamily, false);
		if (this.getKeys().isEmpty()) {
			throw new InvalidRequestException("must designate one row");
		}
		String keyString = this.getKeys().get(0).getText();
		LaUDClientState laUDClientState = (LaUDClientState) clientState;
		Map<String, FileInfo> result = new HashMap<String, FileInfo>();
		List<Term> fileTerms = new ArrayList<Term>();

		for (Term term : this.getColumns().keySet()) {
			ColumnDefinition cDefinition = metadata.getColumnDefinition(term.getByteBuffer(metadata.comparator));

			// FIX: the original skipped a null definition only when the default
			// validator was not a file type, then dereferenced it unconditionally
			// below -> NPE. File transfer needs a declared column definition
			// (its name buffer and validator), so skip every undeclared column.
			if (cDefinition == null) {
				continue;
			}
			if (cDefinition.getValidator() instanceof LaUDFileType) {
				if (this.getKeys().size() > 1) {
					throw new InvalidRequestException("in file transfer ,only one row can be signed.");
				}

				String fileLocation = "/usr/" + keyspace + "/" + metadata.cfName + "/" + term.getText() + "/" + keyString + "/";
				// The operation's term text carries the client-side file path.
				FileInfo fileInfo = new FileInfo().setInfo(this.getColumns().get(term).a.getText());
				// Defer the actual column write until the transfer succeeds.
				fileTerms.add(term);
				result.put(term.getText(), fileInfo);

				fileInfo.setDirection((byte) LaUDJdbcUtils.WRITE_INTO);

				ServiceFileInfo sFileInfo = new ServiceFileInfo();
				sFileInfo.setLocation(fileLocation);
				sFileInfo.setOverwrite(true);
				sFileInfo.keyName = this.keyName;
				sFileInfo.keyBuffer = this.getKeys().get(0).getByteBuffer(metadata.getKeyValidator());
				sFileInfo.columnFamily = this.columnFamily;
				sFileInfo.columnNameBuffer = cDefinition.name;
				sFileInfo.setDirection(LaUDJdbcUtils.WRITE_INTO);

				// Carry this statement's write attributes along with the schedule
				// so the deferred write uses the same consistency/timestamp/TTL.
				Attributes attributes = new Attributes();
				attributes.setConsistencyLevel(this.cLevel);
				attributes.setTimestamp(this.timestamp);
				attributes.setTimeToLive(this.timeToLive);
				sFileInfo.attrs = attributes;
				sFileInfo.columnValueBuffer = ByteBufferUtil.bytes(fileLocation);

				// Map the column's declared blob type to a storage backend.
				if (cDefinition.getValidator() instanceof LaUDHblobType) {
					sFileInfo.setType(FileStoreType.HDFS);
				} else if (cDefinition.getValidator() instanceof LaUDLblobType) {
					sFileInfo.setType(FileStoreType.C_HDFS);
				} else if (cDefinition.getValidator() instanceof LaUDSblobType) {
					// Small blobs are stored directly in Cassandra.
					sFileInfo.setType(FileStoreType.C_KV);
					sFileInfo.setDirection(LaUDJdbcUtils.WRITE_INTO_CASSANDRA);
					fileInfo.setDirection((byte) LaUDJdbcUtils.WRITE_INTO_CASSANDRA);
				}
				fileInfo.setCheckcode(laUDClientState.getCheckCode());
				laUDClientState.addFileDataSchedule(fileInfo.getCheckcode(), sFileInfo);
			}
		}

		// Remove file columns from the normal update command; they are written
		// after the transfer finishes.
		for (Term term : fileTerms) {
			this.getColumns().remove(term);
		}
		if (!result.isEmpty()) {
			LaUDDataServer.authorizedUsers.put(laUDClientState.getSessionId(), laUDClientState);
		}
		return result;
	}

	/**
	 * Computes the row mutation for a single key, resolving {@link LaUDTerm}
	 * function values (and validating them against the column's declared type)
	 * before the standard column validation.
	 *
	 * @param keyspace    working keyspace
	 * @param key         row key to mutate
	 * @param metadata    column family metadata
	 * @param timestamp   global timestamp to use for every column; when
	 *                    {@code null} the client-state timestamp is used
	 * @param clientState source of the fallback timestamp
	 * @return a {@link RowMutation}, wrapped in a {@link CounterMutation} when
	 *         any commutative (counter) operation is present
	 * @throws InvalidRequestException if operations mix commutative and
	 *         non-commutative forms, a counter expression is malformed, or a
	 *         value fails validation
	 */
	protected IMutation mutationForKey(String keyspace, ByteBuffer key, CFMetaData metadata, Long timestamp, ClientState clientState) throws InvalidRequestException
	{
		AbstractType<?> comparator = getComparator(keyspace);

		// If true we must wrap the RowMutation into a CounterMutation.
		boolean hasCounterColumn = false;
		RowMutation rm = new RowMutation(keyspace, key);

		for (Map.Entry<Term, Operation> column : getColumns().entrySet())
		{
			ByteBuffer colName = column.getKey().getByteBuffer(comparator);
			Operation op = column.getValue();

			if (op.isUnary())
			{
				if (hasCounterColumn)
					throw new InvalidRequestException("Mix of commutative and non-commutative operations is not allowed.");

				ByteBuffer colValue;
				Term term = op.a;
				if (term instanceof LaUDTerm)
				{
					// Function terms carry a pre-computed value; validate it
					// against the column's declared type before use.
					colValue = ((LaUDTerm) term).getFunctionValue();
					try
					{
						getValueValidator(keyspace, colName).validate(colValue);
					}
					catch (Exception e)
					{
						throw new InvalidRequestException().setWhy(e.getMessage());
					}
				}
				else
				{
					colValue = op.a.getByteBuffer(getValueValidator(keyspace, colName));
				}
				validateColumn(metadata, colName, colValue);
				rm.add(new QueryPath(columnFamily, null, colName),
						colValue,
						(timestamp == null) ? getTimestamp(clientState) : timestamp,
						getTimeToLive());
			}
			else
			{
				hasCounterColumn = true;

				if (!column.getKey().getText().equals(op.a.getText()))
					throw new InvalidRequestException("Only expressions like X = X + <long> are supported.");

				long value;
				try
				{
					value = Long.parseLong(op.b.getText());
				}
				catch (NumberFormatException e)
				{
					throw new InvalidRequestException(String.format("'%s' is an invalid value, should be a long.",
							op.b.getText()));
				}

				rm.addCounter(new QueryPath(columnFamily, null, colName), value);
			}
		}

		return hasCounterColumn ? new CounterMutation(rm, getConsistencyLevel()) : rm;
	}

	/** {@inheritDoc} */
	public List<IMutation> prepareRowMutations(String keyspace, ClientState clientState) throws InvalidRequestException
	{
		return prepareRowMutations(keyspace, clientState, null);
	}

	/** {@inheritDoc} */
	public List<IMutation> prepareRowMutations(String keyspace, ClientState clientState, Long timestamp) throws InvalidRequestException
	{
		boolean hasCommutativeOperation = false;
		boolean hasNonCommutativeOperation = false;
		for (Operation op : getColumns().values())
		{
			if (op.isUnary())
				hasNonCommutativeOperation = true;
			else
				hasCommutativeOperation = true;
		}
		// FIX: the original only rejected a mix when a commutative operation
		// happened to precede a non-commutative one in iteration order; now the
		// check is order-independent, as the error message always promised.
		if (hasCommutativeOperation && hasNonCommutativeOperation)
			throw new InvalidRequestException("Mix of commutative and non-commutative operations is not allowed.");

		CFMetaData metadata = validateColumnFamily(keyspace, columnFamily, hasCommutativeOperation);
		if (hasCommutativeOperation)
			validateCommutativeForWrite(metadata, cLevel);

		QueryProcessor.validateKeyAlias(metadata, keyName);

		// A single statement targets a single column family, so one WRITE
		// authorization check suffices (the original tracked "seen" CFs in a
		// list that was always empty at the check).
		clientState.hasColumnFamilyAccess(columnFamily, Permission.WRITE);

		List<IMutation> rowMutations = new ArrayList<IMutation>(getKeys().size());
		for (Term key : getKeys())
		{
			rowMutations.add(mutationForKey(keyspace, key.getByteBuffer(getKeyType(keyspace)), metadata, timestamp, clientState));
		}

		return rowMutations;
	}
}
