/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cn.edu.thu.laud.lasql.index;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.Row;
import org.apache.cassandra.db.columniterator.IColumnIterator;
import org.apache.cassandra.db.filter.IFilter;
import org.apache.cassandra.db.filter.NamesQueryFilter;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.db.filter.SliceQueryFilter;
import org.apache.cassandra.db.index.SecondaryIndex;
import org.apache.cassandra.db.index.SecondaryIndexManager;
import org.apache.cassandra.db.index.SecondaryIndexSearcher;
import org.apache.cassandra.dht.AbstractBounds;
import org.apache.cassandra.thrift.IndexClause;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.HeapAllocator;

/**
 * Secondary-index searcher backed by a {@code LaUDIndex}.
 *
 * <p>Adapted from Cassandra's {@code KeysSearcher}: row keys matching the primary
 * index expression are fetched from the LaUD index, the requested slice is read for
 * each key, and rows are filtered against the remaining expressions until the
 * requested count is reached or the key range is exhausted.
 */
public class LaUDSearcher extends SecondaryIndexSearcher
{
	public LaUDSearcher(SecondaryIndexManager indexManager, Set<ByteBuffer> columns)
	{
		super(indexManager, columns);
	}

	/**
	 * Returns true if at least one expression in the clause targets a column that
	 * belongs to this searcher and has an index registered with the index manager.
	 *
	 * @param clause the index expressions of the query
	 * @return whether this searcher can serve any expression in the clause
	 */
	public boolean isIndexing(List<IndexExpression> clause)
	{
		for (IndexExpression expression : clause)
		{
			// Skip columns belonging to a different index type.
			if (columns.contains(expression.column_name)
					&& indexManager.getIndexForColumn(expression.column_name) != null)
				return true;
		}
		return false;
	}

	@Override
	public List<Row> search(IndexClause clause, AbstractBounds range, IFilter dataFilter)
	{
		// Delegate directly; allocating a list here just to overwrite it is wasteful.
		return iner_search(clause, range, dataFilter);
	}

	/**
	 * Core search loop (name kept for compatibility with existing callers).
	 *
	 * <p>Uses the first expression of the clause as the primary lookup into the
	 * LaUD index, then reads and filters each candidate row. Mirrors the structure
	 * of Cassandra's {@code KeysSearcher.getIndexedIterator} paging loop.
	 *
	 * @param clause     index clause; its first expression drives the index lookup
	 * @param range      token range assigned to this node for the query
	 * @param dataFilter slice or names filter describing the columns to return
	 * @return matching rows, at most {@code clause.count}
	 */
	public List<Row> iner_search(IndexClause clause, AbstractBounds range, IFilter dataFilter)
	{
		IndexExpression primary = clause.expressions.get(0);
		SecondaryIndex index = indexManager.getIndexForColumn(primary.column_name);
		assert index != null;

		// If the slice predicate doesn't contain all the columns for which we have
		// expressions to evaluate, it needs to be expanded to include those too.
		IFilter firstFilter = dataFilter;
		if (dataFilter instanceof SliceQueryFilter)
		{
			// If we have a high chance of getting all the columns in a single index
			// slice, do that. Otherwise we'll create an extraFilter (lazily) to fetch
			// by name the columns referenced by the additional expressions.
			if (baseCfs.getMaxRowSize() < DatabaseDescriptor.getColumnIndexSize())
			{
				firstFilter = new SliceQueryFilter(ByteBufferUtil.EMPTY_BYTE_BUFFER,
						ByteBufferUtil.EMPTY_BYTE_BUFFER,
						((SliceQueryFilter) dataFilter).reversed,
						Integer.MAX_VALUE);
			}
		}
		else
		{
			// Just add in columns that are not part of the resultset.
			assert dataFilter instanceof NamesQueryFilter;
			SortedSet<ByteBuffer> columns = new TreeSet<ByteBuffer>(baseCfs.getComparator());
			for (IndexExpression expr : clause.expressions)
				columns.add(expr.column_name);
			if (columns.size() > 0)
			{
				columns.addAll(((NamesQueryFilter) dataFilter).columns);
				firstFilter = new NamesQueryFilter(columns);
			}
		}

		List<Row> rows = new ArrayList<Row>();
		ByteBuffer startKey = clause.start_key;
		QueryPath path = new QueryPath(baseCfs.columnFamily);

		// Last data key accessed, stored to avoid duplicate results: a new iteration
		// of the while loop can revisit the same key if start_key was not advanced.
		ByteBuffer lastDataKey = null;

		// Fetch row keys matching the primary expression, fetch the slice predicate
		// for each, and filter by the remaining expressions. Repeat until finished
		// with the assigned range or the index row is exhausted.
		outer:
		while (true)
		{
			// We shouldn't fetch only 1 row as this provides buggy paging in case
			// the first row doesn't satisfy all clauses.
			int count = Math.max(clause.count, 2);

			List<ByteBuffer> keys = ((LaUDIndex) index).search(primary.value, false, count);
			if (keys == null)
				break;

			ByteBuffer dataKey = null;
			int n = 0;
			for (ByteBuffer candidate : keys)
			{
				dataKey = candidate;
				n++;
				DecoratedKey dk = baseCfs.partitioner.decorateKey(dataKey);
				// Past the right edge of our assigned range: nothing further can match.
				if (!range.right.equals(baseCfs.partitioner.getMinimumToken()) && range.right.compareTo(dk.token) < 0)
					break outer;
				if (!range.contains(dk.token) || dataKey.equals(lastDataKey))
					continue;

				// Get the row columns requested, and additional columns for the
				// expressions if necessary.
				ColumnFamily data = baseCfs.getColumnFamily(new QueryFilter(dk, path, firstFilter));
				// Although the column family we return should contain the primary
				// clause column, firstFilter may not have found it.
				if (data == null)
					data = ColumnFamily.create(baseCfs.metadata);
				NamesQueryFilter extraFilter = null;
				if (dataFilter instanceof SliceQueryFilter)
				{
					// We might have gotten the expression columns in with the main data
					// slice, but we can't know for sure until that slice is done. So we
					// do the extra query if any expression columns are not present.
					boolean needExtraFilter = false;
					for (IndexExpression expr : clause.expressions)
					{
						if (data.getColumn(expr.column_name) == null)
						{
							// Lazily create the extra filter below.
							needExtraFilter = true;
							break;
						}
					}
					if (needExtraFilter)
					{
						// Note: for counters we must be careful not to add a column that
						// was already there (to avoid overcount). That is why we avoid
						// querying any column we already have (also more efficient).
						extraFilter = getExtraFilter(clause);
						for (IndexExpression expr : clause.expressions)
						{
							if (data.getColumn(expr.column_name) != null)
								extraFilter.columns.remove(expr.column_name);
						}
						assert !extraFilter.columns.isEmpty();
						ColumnFamily cf = baseCfs.getColumnFamily(new QueryFilter(dk, path, extraFilter));
						if (cf != null)
							data.addAll(cf, HeapAllocator.instance);
					}
				}

				if (satisfies(data, clause, primary))
				{
					// Cut the resultset back to what was requested, if necessary.
					if (firstFilter != dataFilter || extraFilter != null)
					{
						ColumnFamily expandedData = data;
						data = expandedData.cloneMeShallow();
						IColumnIterator iter = dataFilter.getMemtableColumnIterator(expandedData, dk, baseCfs.getComparator());
						new QueryFilter(dk, path, dataFilter).collateColumns(data, Collections.singletonList(iter), baseCfs.getComparator(), baseCfs.gcBefore());
					}

					rows.add(new Row(dk, data));
				}

				if (rows.size() == clause.count)
					break outer;
			}
			// Index exhausted, or we made no forward progress this round: stop paging.
			if (n < clause.count || startKey.equals(dataKey))
				break;

			lastDataKey = startKey = dataKey;
		}
		return rows;
	}

	/**
	 * Same as {@code SecondaryIndexSearcher.satisfies}, except the primary (indexed)
	 * expression is skipped: its value was already matched by the index lookup, so we
	 * don't re-check it here.
	 *
	 * @param data   the row's column data
	 * @param clause all index expressions of the query
	 * @param first  the primary expression to skip
	 * @return true if the row satisfies every non-primary expression
	 */
	public static boolean satisfies(ColumnFamily data, IndexClause clause, IndexExpression first)
	{
		for (IndexExpression expression : clause.expressions)
		{
			// Skip the primary expression — already enforced by the index lookup.
			if (expression.column_name.equals(first.column_name))
				continue;
			// Check column data vs expression.
			IColumn column = data.getColumn(expression.column_name);
			if (column == null)
				return false;
			int v = data.metadata().getValueValidator(expression.column_name).compare(column.value(), expression.value);
			if (!satisfies(v, expression.op))
				return false;
		}
		return true;
	}
}
