/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cn.edu.thu.laud.objectstore;

import java.lang.reflect.Array;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.datanucleus.ClassLoaderResolver;
import org.datanucleus.ObjectManager;
import org.datanucleus.api.ApiAdapter;
import org.datanucleus.exceptions.NucleusDataStoreException;
import org.datanucleus.exceptions.NucleusException;
import org.datanucleus.exceptions.NucleusObjectNotFoundException;
import org.datanucleus.metadata.AbstractClassMetaData;
import org.datanucleus.metadata.AbstractMemberMetaData;
import org.datanucleus.metadata.DiscriminatorMetaData;
import org.datanucleus.store.AbstractPersistenceHandler2;
import org.datanucleus.store.ExecutionContext;
import org.datanucleus.store.ObjectProvider;
import cn.edu.thu.laud.utils.serde.ConverterContext;
import cn.edu.thu.laud.objectstore.fieldmanager.FetchFieldManager;
import cn.edu.thu.laud.objectstore.fieldmanager.ObjectManagerDelete;
import cn.edu.thu.laud.objectstore.fieldmanager.ObjectMutationManager;
import cn.edu.thu.laud.objectstore.fieldmanager.StoreFieldManager;
import cn.edu.thu.laud.objectstore.index.BatchMutation;
import cn.edu.thu.laud.objectstore.index.IndexContainer;
import cn.edu.thu.laud.objectstore.index.IndexHandler;
import cn.edu.thu.laud.objectstore.index.IndexMeta;
import cn.edu.thu.laud.objectstore.index.KVColumn;
import cn.edu.thu.laud.utils.MetaDataUtils;
import org.datanucleus.util.Localiser;

/**
 * 
 * @author zhuoan
 *
 */
public class CassandraPersistenceHandler extends AbstractPersistenceHandler2 {

    /**
     * Localiser for messages.
     */
    private static final Localiser LOCALISER = Localiser.getInstance(
            "cn.edu.thu.laud.objectstore.Localisation",
            CassandraPersistenceHandler.class.getClassLoader());

    /** Store manager providing connections, connection info and the converter context. */
    protected final CassandraStoreManager storeManager;

    /** Batches mutations per ObjectManager so related writes/deletes share one mutator. */
    private final ObjectMutationManager batchManager;

    /** Converts between JVM values and the ByteBuffers used by the Thrift API. */
    private final ConverterContext converterContext;

    /**
     * Creates a persistence handler bound to the given store manager.
     *
     * @param stm the store manager this handler services
     */
    public CassandraPersistenceHandler(CassandraStoreManager stm) {
        this.storeManager = stm;
        this.batchManager = new ObjectMutationManager();
        this.converterContext = storeManager.getConverterContext();
    }

    @Override
    public void close() {
        // No resources are held directly by this handler; connections are owned
        // and released by the store manager.
    }

    /**
     * Inserts a new object. Cassandra writes are upserts, so insert and update
     * perform the same logic; delegate to updateObject with all member positions.
     *
     * @param op provider of the object being inserted
     */
    @Override
    public void insertObject(ObjectProvider op) {
        updateObject(op, op.getClassMetaData().getAllMemberPositions());
    }

    /**
     * Writes the object's identity column, optional discriminator column and all
     * managed fields to its column family, batching the mutations and letting
     * {@link StoreFieldManager} maintain any secondary-index entries.
     *
     * @param op              provider of the object being persisted
     * @param paramArrayOfInt absolute positions of the fields requested for update
     *                        (note: all member positions are provided to the field
     *                        manager below, preserving the original behavior)
     * @throws NucleusDataStoreException wrapping any Thrift/connection failure
     */
    @Override
    public void updateObject(ObjectProvider op, int[] paramArrayOfInt) {
        try {
            storeManager.assertReadOnlyForUpdateOfObject(op);
            ObjectManager om = storeManager.getObjectManager();
            CassandraManagedConnection managedConnection =
                    (CassandraManagedConnection) storeManager.getConnection(om);
            Cassandra.Client client = (Client) managedConnection.getConnection();
            String keySpace = storeManager.getConnectionInfo().getKeyspace();
            client.set_keyspace(keySpace);

            AbstractClassMetaData metaData = op.getClassMetaData();

            // Signal a write is about to start; nested writes on the same
            // ObjectManager reuse this mutator until endWrite is reached.
            BatchMutation mutator = this.batchManager.beginWrite(om).getMutator();
            ByteBuffer key = converterContext.getRowKey(op);
            KVColumn kvColumn = new KVColumn(key);
            kvColumn.addResult(new Column(MetaDataUtils.getIdentityColumn(metaData))
                    .setValue(converterContext.getPKValue(om, op))
                    .setTimestamp(System.currentTimeMillis()));

            String cfName = MetaDataUtils.getColumnFamily(metaData);
            IndexMeta idxMeta = new IndexMeta(cfName, null,
                    keySpace + "_index", keySpace + "_index_entries");

            // If we have a discriminator, write its value alongside the row so
            // the concrete subclass can be resolved on fetch.
            if (metaData.hasDiscriminatorStrategy()) {
                DiscriminatorMetaData discriminator = metaData.getDiscriminatorMetaData();
                ByteBuffer colName = MetaDataUtils.getDiscriminatorColumnName(discriminator);
                String value = discriminator.getValue();
                ByteBuffer byteValue = converterContext.getBytes(value);
                Column col = new Column(colName)
                        .setValue(byteValue)
                        .setTimestamp(System.currentTimeMillis());
                mutator.addInsertion(key, Arrays.asList(cfName), col);
                kvColumn.addResult(col);
            }

            // Write out all of our primary object data.
            StoreFieldManager manager = new StoreFieldManager(om, op, converterContext,
                    client, metaData, kvColumn, cfName, mutator, idxMeta);
            op.provideFields(metaData.getAllMemberPositions(), manager);

            this.batchManager.endWrite(om);

        } catch (NucleusException ne) {
            // DataNucleus exceptions carry semantics callers rely on; rethrow unchanged.
            throw ne;
        } catch (Exception e) {
            throw new NucleusDataStoreException(e.getMessage(), e);
        }
    }

    /**
     * Deletes the object's row, removes its secondary-index entries, and
     * recursively deletes dependent related objects (single-valued relations,
     * collection/map/array elements marked dependent).
     *
     * @param op provider of the object being deleted
     * @throws NucleusDataStoreException wrapping any Thrift/connection failure
     */
    @Override
    public void deleteObject(ObjectProvider op) {
        try {
            storeManager.assertReadOnlyForUpdateOfObject(op);
            ObjectManager om = storeManager.getObjectManager();

            ByteBuffer key = converterContext.getRowKey(op);
            AbstractClassMetaData metaData = op.getClassMetaData();

            String cfName = MetaDataUtils.getColumnFamily(metaData);
            ObjectManagerDelete deletor = this.batchManager.beginDelete(om);

            // We've already visited this object (cyclic dependency graph), do nothing.
            if (!deletor.addDeletion(op, key, cfName)) {
                return;
            }

            // Walk every managed field: clean up index entries and delete our
            // dependent objects as well.
            int[] fields = metaData.getAllMemberPositions();

            for (int current : fields) {

                AbstractMemberMetaData fieldMetaData =
                        metaData.getMetaDataForManagedMemberAtAbsolutePosition(current);
                String indexName = MetaDataUtils.getIndexName(fieldMetaData);

                if (indexName != null) {
                    // NOTE(review): the connection is (re)acquired for each indexed
                    // field; presumably the store manager hands back the same pooled
                    // connection — confirm before hoisting this out of the loop.
                    CassandraManagedConnection managedConnection =
                            (CassandraManagedConnection) storeManager.getConnection(om);
                    Cassandra.Client client = (Client) managedConnection.getConnection();
                    String keySpace = storeManager.getConnectionInfo().getKeyspace();
                    client.set_keyspace(keySpace);
                    IndexMeta idxMeta = new IndexMeta(cfName, null,
                            keySpace + "_index", keySpace + "_index_entries");
                    ByteBuffer colName = MetaDataUtils.getColumnName(metaData, current);
                    KVColumn kvColumn = new KVColumn(key);
                    kvColumn.addResult(new Column(MetaDataUtils.getIdentityColumn(metaData))
                            .setValue(converterContext.getPKValue(om, op))
                            .setTimestamp(System.currentTimeMillis()));

                    // If we have a discriminator, include its value in the
                    // KVColumn used to locate the index entry being removed.
                    if (metaData.hasDiscriminatorStrategy()) {
                        DiscriminatorMetaData discriminator = metaData.getDiscriminatorMetaData();
                        ByteBuffer discColName =
                                MetaDataUtils.getDiscriminatorColumnName(discriminator);
                        String value = discriminator.getValue();
                        ByteBuffer byteValue = converterContext.getBytes(value);
                        Column col = new Column(discColName)
                                .setValue(byteValue)
                                .setTimestamp(System.currentTimeMillis());
                        deletor.getMutator().addInsertion(key, Arrays.asList(cfName), col);
                        kvColumn.addResult(col);
                    }
                    IndexContainer container = new IndexContainer(idxMeta.getItem(), indexName);
                    // Passing null as the new value removes this row's entry from
                    // the composite index.
                    IndexHandler.updateCompositeIndex(client, deletor.getMutator(), kvColumn,
                            ByteBufferUtil.string(colName), null, container, idxMeta,
                            CassandraConsistency.get());
                }

                // If we're a collection, delete each element; recurse to delete
                // this object if it's marked as dependent.
                if (fieldMetaData.isDependent()
                        || (fieldMetaData.getCollection() != null
                                && fieldMetaData.getCollection().isDependentElement())) {

                    // Here we have the field value.
                    Object value = op.provideField(current);

                    if (value == null) {
                        continue;
                    }

                    ClassLoaderResolver clr = om.getClassLoaderResolver();

                    int relationType = fieldMetaData.getRelationType(clr);

                    // Check if this is a relationship.
                    if (MetaDataUtils.isRelationSingleValued(relationType)) {
                        // Persistable object - delete the related object too.
                        om.deleteObjectInternal(value);

                    } else if (MetaDataUtils.isRelationMultiValued(relationType)) {
                        // Collection/Map/Array.
                        if (fieldMetaData.hasCollection()) {

                            for (Object element : (Collection<?>) value) {
                                // Delete the element.
                                om.deleteObjectInternal(element);
                            }

                        } else if (fieldMetaData.hasMap()) {
                            ApiAdapter adapter = om.getApiAdapter();

                            Map<?, ?> map = ((Map<?, ?>) value);
                            Object mapValue;

                            // Visit each entry; keys and values may each be
                            // persistable classes themselves.
                            for (Object mapKey : map.keySet()) {

                                mapValue = map.get(mapKey);

                                // Handle the case where our key is a persistent class.
                                if (adapter.isPersistable(mapKey)) {
                                    om.deleteObjectInternal(mapKey);
                                }
                                // Delete the value if it is persistable.
                                if (adapter.isPersistable(mapValue)) {
                                    om.deleteObjectInternal(mapValue);
                                }
                            }

                        } else if (fieldMetaData.hasArray()
                                && fieldMetaData.isDependent()) {
                            Object persisted = null;

                            for (int i = 0; i < Array.getLength(value); i++) {
                                // Delete each array element.
                                persisted = Array.get(value, i);
                                om.deleteObjectInternal(persisted);
                            }
                        }
                    }
                }
            }

            this.batchManager.endDelete(om);

        } catch (NucleusException ne) {
            // DataNucleus exceptions carry semantics callers rely on; rethrow unchanged.
            throw ne;
        } catch (Exception e) {
            throw new NucleusDataStoreException(e.getMessage(), e);
        }
    }

    /**
     * Checks if a pk field was requested to be loaded. If so, a
     * NucleusObjectNotFoundException is thrown because we only call this with 0
     * column results — a row that exists always has its identity column.
     *
     * @param metaData        class metadata supplying the pk member positions
     * @param requestedFields absolute positions of the fields that were requested
     * @throws NucleusObjectNotFoundException if any pk field was among the
     *         requested fields
     */
    private void pksearched(AbstractClassMetaData metaData, int[] requestedFields) {

        int[] pkPositions = metaData.getPKMemberPositions();

        for (int pkPosition : pkPositions) {
            for (int requestedField : requestedFields) {
                // Our pk was a requested field; throw an exception b/c we
                // didn't find anything.
                if (requestedField == pkPosition) {
                    throw new NucleusObjectNotFoundException();
                }
            }
        }
    }

    /**
     * Loads the requested fields of the object from its column family via a
     * Thrift {@code get_slice} and replaces them on the provider.
     *
     * @param op           provider of the object being fetched
     * @param fieldNumbers absolute positions of the fields to load
     * @throws NucleusObjectNotFoundException if no columns exist and a pk field
     *         was requested (the row does not exist)
     * @throws NucleusDataStoreException wrapping any Thrift/connection failure
     */
    @Override
    public void fetchObject(ObjectProvider op, int[] fieldNumbers) {
        try {
            CassandraManagedConnection managedConnection =
                    (CassandraManagedConnection) storeManager
                            .getConnection(storeManager.getObjectManager());
            Cassandra.Client client = (Client) managedConnection.getConnection();
            client.set_keyspace(storeManager.getConnectionInfo().getKeyspace());

            AbstractClassMetaData metaData = op.getClassMetaData();
            ByteBuffer key = converterContext.getRowKey(op);
            String cfName = MetaDataUtils.getColumnFamily(metaData);

            List<ColumnOrSuperColumn> columns = client.get_slice(key, new ColumnParent(cfName),
                    MetaDataUtils.getFetchColumnList(metaData, fieldNumbers),
                    CassandraConsistency.get());

            // No columns came back. If a pk field was among the requested fields,
            // the row does not exist and pksearched throws; otherwise fall through
            // and let the field manager handle the (empty) result.
            if (columns == null || columns.size() == 0) {
                pksearched(metaData, fieldNumbers);
            }

            FetchFieldManager manager = new FetchFieldManager(storeManager.getObjectManager(),
                    op, converterContext, client, columns, key, cfName);

            op.replaceFields(fieldNumbers, manager);

        } catch (NucleusException ne) {
            // FIX: previously the NucleusObjectNotFoundException thrown by
            // pksearched was swallowed by the generic catch below and re-wrapped
            // as a NucleusDataStoreException, hiding the "not found" condition
            // from callers. Rethrow DataNucleus exceptions unchanged, matching
            // updateObject/deleteObject.
            throw ne;
        } catch (Exception e) {
            throw new NucleusDataStoreException(e.getMessage(), e);
        }
    }

    /**
     * Locates the object by fetching all of its member positions; fetchObject
     * throws NucleusObjectNotFoundException when the row is absent.
     *
     * @param op provider of the object to locate
     */
    @Override
    public void locateObject(ObjectProvider op) {
        fetchObject(op, op.getClassMetaData().getAllMemberPositions());
    }

    /**
     * Returns null — this store has no plugin-specific way of finding an object
     * by id, so DataNucleus falls back to its default lookup behavior.
     *
     * @param ec execution context
     * @param id datastore identity of the object
     * @return always null
     */
    @Override
    public Object findObject(ExecutionContext ec, Object id) {
        return null;
    }
}
