package drds.server.execute_engine.data_handling;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

import drds.server.Server;
import drds.server.handler.MultiNodeQueryHandler;
import drds.server.net.packet.RowDataPacket;
import drds.server.route.Route;
import drds.server.server.Session;

public abstract class AbstractDataNodeMerge implements Runnable {

	/** Number of columns in each row of the result set. */
	protected int fieldCount;

	/** Routing result (route cache) for the current SELECT. */
	protected final Route route;

	/** Handler coordinating the cross-shard (multi-node) query. */
	protected final MultiNodeQueryHandler multiQueryHandler;

	/**
	 * Sentinel pack marking the end of a shard's result stream. It is compared
	 * by identity, so it is {@code final} to guarantee it can never be
	 * reassigned (reassignment would silently break the end-of-stream check).
	 */
	public final PackWraper END_FLAG_PACK = new PackWraper();

	/** Whether the merged result set is streamed to the client. */
	protected boolean isStreamOutputResult = false;

	/** Queue buffering incoming row packs until a business thread consumes them. */
	protected final BlockingQueue<PackWraper> packWraperQueue = new LinkedBlockingQueue<>();

	/** Flag: has a business (worker) thread been started? */
	protected final AtomicBoolean running = new AtomicBoolean(false);

	/**
	 * @param multiQueryHandler handler coordinating the multi-node query
	 * @param route             routing result for the current SELECT
	 */
	public AbstractDataNodeMerge(MultiNodeQueryHandler multiQueryHandler, Route route) {
		this.route = route;
		this.multiQueryHandler = multiQueryHandler;
	}

	public boolean isStreamOutputResult() {
		return isStreamOutputResult;
	}

	public void setStreamOutputResult(boolean streamOutputResult) {
		isStreamOutputResult = streamOutputResult;
	}

	/**
	 * Adds a row pack to the queue and, if no business thread appears to be
	 * running, submits this task to the business executor.
	 *
	 * @param packWraper row pack to enqueue
	 * @return {@code true} if a business thread was (re)submitted, otherwise
	 *         {@code false}
	 */
	protected final boolean addPackWraper(final PackWraper packWraper) {
		packWraperQueue.add(packWraper);
		// NOTE(review): this check-then-act on 'running' is not atomic — two
		// concurrent callers can both observe false and submit this task twice.
		// Presumably the subclass run() guards with running.compareAndSet(...)
		// so a duplicate submission is harmless — confirm before relying on it.
		if (running.get()) {
			return false;
		}
		final Server server = Server.getInstance();
		server.getBusinessExecutor().execute(this);
		return true;
	}

	/**
	 * Wraps one raw row (MySQL binary row data) from a data node into a
	 * {@link PackWraper} and enqueues it for asynchronous merging.
	 *
	 * @param dataNode name of the data node this row came from
	 * @param rowBytes raw row bytes
	 * @return always {@code false} — NOTE(review): the original comment says
	 *         "return true if data can output to client", but no code path
	 *         ever returns true; verify callers do not depend on a true result
	 */
	public boolean onNewRecord(String dataNode, byte[] rowBytes) {
		final PackWraper packWraper = new PackWraper();
		packWraper.dataNode = dataNode;
		packWraper.rowData = rowBytes;
		addPackWraper(packWraper);

		return false;
	}

	/**
	 * Maps the given column names to their positional indices within a row.
	 *
	 * @param columnNames           column names to resolve, e.g. from a GROUP BY clause
	 * @param columnName$ColumnMap  column name → column metadata of the select list
	 * @return index array parallel to {@code columnNames}
	 * @throws IllegalArgumentException if any name is absent from the map
	 */
	protected static int[] toColumnIndex(String[] columnNames, Map<String, Column> columnName$ColumnMap) {
		int[] result = new int[columnNames.length];
		for (int i = 0; i < columnNames.length; i++) {
			Column column = columnName$ColumnMap.get(columnNames[i]);
			if (column == null) {
				// Fixed malformed message: was "...list.!" with the column name
				// concatenated directly after the punctuation.
				throw new IllegalArgumentException(
						"all columns in group by clause should be in the selected column list: " + columnNames[i]);
			}
			result[i] = column.columIndex;
		}
		return result;
	}

	@Override
	public abstract void run();

	/**
	 * Callback invoked once row metadata (column map and field count) is known.
	 *
	 * @param columMap   column name → column metadata
	 * @param fieldCount number of columns per row
	 * @throws IOException if writing metadata to the client fails
	 */
	public abstract void onRowMetaData(Map<String, Column> columMap, int fieldCount) throws IOException;

	/**
	 * Signals that all shards have finished by enqueuing the end-of-stream
	 * sentinel. The {@code session} and {@code eofBytes} arguments are unused
	 * here; subclasses perform the actual output.
	 */
	public void outputMergeResult(Session session, byte[] eofBytes) {
		addPackWraper(END_FLAG_PACK);
	}

	public Route getRoute() {
		return this.route;
	}

	/**
	 * Produces the final merged result set.
	 *
	 * @param eofBytes EOF packet bytes of the result set
	 * @return merged rows (at most i * (offset + size) rows)
	 */
	public abstract List<RowDataPacket> getRowDataPacketList(byte[] eofBytes);

	/** Releases any buffered state held by this merge. */
	public abstract void clear();

}
