package drds.server.handler;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import drds.server.Server;
import drds.server.api_warpper.Lock;
import drds.server.cache.LayeredCache;
import drds.server.config.Config;
import drds.server.datasource.GroupDataSourceNode;
import drds.server.datasource.IConnectionHolder;
import drds.server.execute_engine.data_handling.AbstractDataNodeMerge;
import drds.server.execute_engine.data_handling.AggregateFunctionType;
import drds.server.execute_engine.data_handling.Column;
import drds.server.execute_engine.data_handling.DataMergeService;
import drds.server.execute_engine.data_handling.DataNodeMergeManager;
import drds.server.memory.unsafe.row.UnsafeRow;
import drds.server.net.packet.BinaryRowDataPacket;
import drds.server.net.packet.FieldPacket;
import drds.server.net.packet.OkPacket;
import drds.server.net.packet.ResultSetHeaderPacket;
import drds.server.net.packet.RowDataPacket;
import drds.server.route.Route;
import drds.server.route.RouteNode;
import drds.server.server.Session;
import drds.server.server.SessionContext;
import drds.server.server.parser.ServerParse;

public class MultiNodeQueryHandler extends MultiNodeHandler {

	private static final Logger LOGGER = LoggerFactory.getLogger(MultiNodeQueryHandler.class);

	/** Routing result describing every data node this statement fans out to. */
	private final Route route;
	private final Session session;
	/** Merge service for cross-node SELECTs that need sort/aggregate merging; null otherwise. */
	private final AbstractDataNodeMerge dataMergeService;
	/** Snapshot of the session's autocommit flag taken at construction time. */
	private final boolean autocommit;
	/** Table whose primaryKey -> dataNode mapping gets cached (see rowResponse). */
	private String primaryKeyTable = null;
	/** Column index of the cached primary key in the result set, -1 until found. */
	private int primaryKeyIndex = -1;
	private int fieldCount = 0;
	private final Lock lock;
	private long affectedRows;
	private long selectRows;
	private long insertId;
	/** Guards against emitting the field-metadata packets more than once (one node wins). */
	private volatile boolean fieldsReturned;
	private int okCount;
	private long startTime;
	private long netInBytes;
	private long netOutBytes;
	/** Number of nodes that have delivered their row EOF so far. */
	private int execCount = 0;

	/** True when the client statement is a prepared statement (binary row protocol). */
	private boolean prepared;
	private List<FieldPacket> fieldPacketList = new ArrayList<FieldPacket>();
	/** 1 = merge intermediate rows off-heap (DataNodeMergeManager), otherwise on-heap. */
	private int useOffHeapForMerge = 1;
	/**
	 * LIMIT N, M — {@code limitStart} is the first row offset, {@code end} the
	 * exclusive end index of the window.
	 */
	private int limitStart;
	@SuppressWarnings("unused")
	private int end = 0;

	/**
	 * @param sqlType    parsed statement type (ServerParse constants); merging only
	 *                   applies to SELECT
	 * @param route      routing result; must carry a non-null route-node array
	 * @param autoCommit NOTE(review): ignored — the session context's autocommit
	 *                   flag is used instead; confirm callers expect that
	 * @param session    owning server session
	 * @throws IllegalArgumentException when the route has no route nodes
	 */
	public MultiNodeQueryHandler(int sqlType, Route route, boolean autoCommit, Session session) {

		super(session);

		if (route.getRouteNodes() == null) {
			throw new IllegalArgumentException("routeNode is null!");
		}

		this.route = route;
		useOffHeapForMerge = Server.getInstance().getConfig().getDRDS().getUseOffHeapForMerge();
		if (ServerParse.SELECT == sqlType && route.needMerge()) {
			// Off-heap keeps the merge buffers outside the Java heap.
			if (useOffHeapForMerge == 1) {
				dataMergeService = new DataNodeMergeManager(this, route);
			} else {
				dataMergeService = new DataMergeService(this, route);
			}
		} else {
			dataMergeService = null;
		}

		this.autocommit = session.getSessionContext().isAutoCommit();
		this.session = session;
		this.lock = new Lock();

		this.limitStart = route.getLimitStart();
		this.end = limitStart + route.getLimitSize();

		if (this.limitStart < 0)
			this.limitStart = 0;

		// A negative limit size means "no LIMIT": open the window to the end.
		if (route.getLimitSize() < 0)
			end = Integer.MAX_VALUE;
		if ((dataMergeService != null) && LOGGER.isDebugEnabled()) {
			LOGGER.debug("has data merge logic ");
		}

		// Account the inbound SQL text towards network-in statistics.
		if (route.getSql() != null) {
			netInBytes += route.getSql().getBytes().length;
		}
	}

	/**
	 * Resets per-execution counters before a new fan-out of {@code initCount} nodes.
	 */
	protected void reset(int initCount) {
		super.reset(initCount);
		this.okCount = initCount;
		this.execCount = 0;
		this.netInBytes = 0;
		this.netOutBytes = 0;
	}

	public Session getSession() {
		return session;
	}

	/**
	 * Fans the routed statement out to every target node, reusing bound
	 * connections where possible and asynchronously acquiring new ones
	 * otherwise (completion arrives via {@link #connectionAcquired}).
	 */
	public void execute() throws Exception {
		final Lock lock = this.lock;
		lock.lock();
		try {
			this.reset(route.getRouteNodes().length);
			this.fieldsReturned = false;
			this.affectedRows = 0L;
			this.insertId = 0L;
		} finally {
			lock.unlock();
		}
		Config config = Server.getInstance().getConfig();
		startTime = System.currentTimeMillis();
		for (final RouteNode node : route.getRouteNodes()) {
			IConnectionHolder connectionHolder = session.getConnectionHolder(node);
			if (session.tryExistsCon(connectionHolder, node)) {
				// Propagate the master/slave hint onto the node before executing.
				node.setMasterTrue$SlaveFalse(route.getMasterTrue$SlaveFalse());
				execute(connectionHolder, node);
			} else {
				// No bound connection yet: acquire one asynchronously.
				node.setMasterTrue$SlaveFalse(route.getMasterTrue$SlaveFalse());
				GroupDataSourceNode groupDataSourceNode = config.getGroupDataSourceNodeMap().get(node.getName());
				groupDataSourceNode.getConnection(groupDataSourceNode.getSchemaName(), autocommit, node, this, node);
				// getConnection() does more than acquire a connection: on success it
				// calls back (via the `this` handler passed above) into
				// connectionAcquired(), which binds the connection to the session
				// and then runs execute(conn, node).
			}

		}
	}

	/**
	 * Sends the routed statement on an already-bound connection; connection
	 * errors are routed to {@link #connectionError}.
	 */
	private void execute(IConnectionHolder connectionHolder, RouteNode routeNode) {
		if (clearIfSessionClosed(session)) {
			return;
		}
		connectionHolder.setHandler(this);
		try {
			connectionHolder.execute(routeNode, session.getSessionContext(), autocommit);
		} catch (IOException e) {
			connectionError(connectionHolder, e);
		}
	}

	/**
	 * Callback for asynchronous connection acquisition started in
	 * {@link #execute()}: binds the fresh connection and executes on it.
	 */
	@Override
	public void connectionAcquired(final IConnectionHolder connectionHolder) {
		final RouteNode node = (RouteNode) connectionHolder.getAttachment();
		session.bind(node, connectionHolder);
		execute(connectionHolder, node);
	}

	/** Decrements the pending-OK counter; true when the last OK has arrived. */
	@SuppressWarnings("unused")
	private boolean decrementOkCountBy(int finished) {
		lock.lock();
		try {
			return --okCount == 0;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Handles an OK packet from one node: accumulates affected rows / insert id
	 * and, once every node has answered, writes a single merged OK packet to
	 * the client.
	 */
	@Override
	public void okResponse(IConnectionHolder connectionHolder, byte[] okBytes) {

		this.netOutBytes += okBytes.length;

		boolean executeResponse = connectionHolder.syncAndExcute();
		if (executeResponse) {

			SessionContext sessionContext = session.getSessionContext();
			OkPacket okPacket = new OkPacket();
			okPacket.read(okBytes);
			// Placeholder for stored-procedure handling, where the final OK must
			// not be forwarded until the trailing OK packet arrives.
			boolean isCanClose2Client = true;
			if (clearIfSessionClosed(session)) {
				return;
			} else if (canClose(connectionHolder, false)) {
				return;
			}
			lock.lock();
			try {
				// Global tables are written identically on every node, so keep
				// only the last node's count instead of accumulating.
				if (!route.isGlobalTable()) {
					affectedRows += okPacket.affectedRowCount;
				} else {
					affectedRows = okPacket.affectedRowCount;
				}
				if (okPacket.insertId > 0) {
					// Report the smallest generated id across all nodes.
					insertId = (insertId == 0) ? okPacket.insertId : Math.min(insertId, okPacket.insertId);
				}
			} finally {
				lock.unlock();
			}
			// For stored procedures an extra OK packet follows the row EOF; only
			// the final decrement completes the statement.
			boolean isEndPacket = decrementCountBy(1);
			if (isEndPacket && isCanClose2Client) {

				if (this.autocommit) {// clear all connections
					session.releaseConnections(false);
				}

				if (this.isFail() || session.closed()) {
					tryErrorFinished(true);
					return;
				}

				lock.lock();
				try {
					okPacket.packetId = ++packetId;// OK_PACKET

					okPacket.affectedRowCount = affectedRows;
					// 2 = SERVER_STATUS_AUTOCOMMIT, 1 = SERVER_STATUS_IN_TRANS.
					okPacket.serverStatus = sessionContext.isAutoCommit() ? 2 : 1;
					if (insertId > 0) {
						okPacket.insertId = insertId;
						sessionContext.setLastInsertId(insertId);
					}
					okPacket.write(sessionContext);
				} catch (Exception e) {
					handleDataProcessException(e);
				} finally {
					lock.unlock();
				}
			}
		}
	}

	/**
	 * Handles the row-EOF packet from one node. When the last node finishes,
	 * either hands the merged rows to the merge service or forwards the EOF
	 * straight to the client.
	 */
	@Override
	public void rowEofResponse(IConnectionHolder connectionHolder, final byte[] eofBytes) {

		this.netOutBytes += eofBytes.length;

		if (errorRepsponsed.get()) {
			return;
		}

		final SessionContext sessionContext = session.getSessionContext();
		if (clearIfSessionClosed(session)) {
			return;
		} else if (canClose(connectionHolder, false)) {
			return;
		}
		if (decrementCountBy(1)) {

			if (this.autocommit) {// clear all connections
				session.releaseConnections(false);
			}

			if (this.isFail() || session.closed()) {
				tryErrorFinished(true);
				return;
			}

			if (dataMergeService != null) {
				try {
					dataMergeService.outputMergeResult(session, eofBytes);
				} catch (Exception e) {
					handleDataProcessException(e);
				}

			} else {
				lock.lock();
				try {
					eofBytes[3] = ++packetId;
					sessionContext.write(eofBytes);
				} finally {
					lock.unlock();
				}
			}
		}
		execCount++;
		if (execCount == route.getRouteNodes().length) {
			// All nodes have answered.
			// TODO (zhuam): record result-size / slow-query statistics here.
		}

	}

	/**
	 * Writes the merged (off-heap) result set to the client, applying the
	 * LIMIT window, then releases the merge resources.
	 */
	public void outputMergeResult(final SessionContext sessionContext, final byte[] eofBytes, Iterator<UnsafeRow> iterator) {
		lock.lock();
		try {
			ByteBuffer byteBuffer = session.getSessionContext().allocate();
			final Route route = this.dataMergeService.getRoute();

			// Compute the [start, end) window of the LIMIT clause so only the
			// requested slice is sent to the client.
			int start = route.getLimitStart();
			int end = start + route.getLimitSize();
			int index = 0;

			if (start < 0)
				start = 0;

			// A negative limit size means "no LIMIT".
			if (route.getLimitSize() < 0)
				end = Integer.MAX_VALUE;

			if (prepared) {
				// Prepared statements use the binary row protocol.
				while (iterator.hasNext()) {
					UnsafeRow unsafeRow = iterator.next();
					if (index >= start) {
						unsafeRow.packetId = ++packetId;
						BinaryRowDataPacket binRowPacket = new BinaryRowDataPacket();
						binRowPacket.read(fieldPacketList, unsafeRow);
						byteBuffer = binRowPacket.write(byteBuffer, sessionContext, true);
					}
					index++;
					if (index == end) {
						break;
					}
				}
			} else {
				while (iterator.hasNext()) {
					UnsafeRow unsafeRow = iterator.next();
					if (index >= start) {
						unsafeRow.packetId = ++packetId;
						byteBuffer = unsafeRow.write(byteBuffer, sessionContext, true);
					}
					index++;
					if (index == end) {
						break;
					}
				}
			}

			eofBytes[3] = ++packetId;

			// Flush the buffered rows plus the trailing EOF to the channel.
			sessionContext.write(sessionContext.writeToByteBuffer(eofBytes, byteBuffer));
		} catch (Exception e) {
			handleDataProcessException(e);
		} finally {
			lock.unlock();
			dataMergeService.clear();
		}
	}

	/**
	 * Writes the merged (on-heap) result set to the client, applying the
	 * LIMIT window, then releases the merge resources.
	 */
	public void outputMergeResult(final SessionContext sessionContext, final byte[] eofBytes, List<RowDataPacket> rowDataPacketList) {
		lock.lock();
		try {
			ByteBuffer byteBuffer = session.getSessionContext().allocate();
			final Route route = this.dataMergeService.getRoute();

			// Compute the [start, end) window of the LIMIT clause.
			int start = route.getLimitStart();
			int end = start + route.getLimitSize();

			if (start < 0) {
				start = 0;
			}

			// A negative limit size means "no LIMIT": send everything.
			if (route.getLimitSize() < 0) {
				end = rowDataPacketList.size();
			}

			if (end > rowDataPacketList.size()) {
				end = rowDataPacketList.size();
			}

			if (prepared) {
				// Prepared statements use the binary row protocol.
				for (int i = start; i < end; i++) {
					RowDataPacket rowDataPacket = rowDataPacketList.get(i);
					BinaryRowDataPacket binaryRowDataPacket = new BinaryRowDataPacket();
					binaryRowDataPacket.read(fieldPacketList, rowDataPacket);
					binaryRowDataPacket.packetId = ++packetId;
					byteBuffer = binaryRowDataPacket.write(byteBuffer, session.getSessionContext(), true);
				}
			} else {
				for (int i = start; i < end; i++) {
					RowDataPacket rowDataPacket = rowDataPacketList.get(i);
					rowDataPacket.packetId = ++packetId;
					byteBuffer = rowDataPacket.write(byteBuffer, sessionContext, true);
				}
			}

			eofBytes[3] = ++packetId;
			if (LOGGER.isDebugEnabled()) {
				LOGGER.debug("last packet id:" + packetId);
			}
			sessionContext.write(sessionContext.writeToByteBuffer(eofBytes, byteBuffer));

		} catch (Exception e) {
			handleDataProcessException(e);
		} finally {
			lock.unlock();
			dataMergeService.clear();
		}
	}

	/**
	 * Handles the field-metadata packets from the FIRST node to answer (other
	 * nodes are ignored via {@code fieldsReturned}). Rewrites AVG columns —
	 * AVG(x) is computed from SUM(x)/COUNT(x), so the COUNT column is dropped
	 * and the SUM column is renamed back to the AVG alias — locates the cached
	 * primary-key column, and forwards the header/field/EOF packets to the
	 * client.
	 */
	@Override
	public void fieldEofResponse(IConnectionHolder connectionHolder, byte[] headerBytes, List<byte[]> fieldBytesList, byte[] eofBytes) {

		this.netOutBytes += headerBytes.length;
		this.netOutBytes += eofBytes.length;
		for (int i = 0, size = fieldBytesList.size(); i < size; ++i) {
			byte[] fieldBytes = fieldBytesList.get(i);
			this.netOutBytes += fieldBytes.length;
		}

		SessionContext source = null;
		// Cheap pre-check: only the first responding node emits field metadata.
		if (fieldsReturned) {
			return;
		}
		lock.lock();
		try {
			// Re-check under the lock in case another node raced past the pre-check.
			if (fieldsReturned) {
				return;
			}
			fieldsReturned = true;

			boolean needMerg = (dataMergeService != null) && dataMergeService.getRoute().needMerge();
			Set<String> shouldRemoveAvgField = new HashSet<String>();
			Set<String> shouldRenameAvgField = new HashSet<String>();
			if (needMerg) {
				Map<String, Integer> mergeColumnAndIndexMap = dataMergeService.getRoute().getMergeColumnAndIndexMap();
				if (mergeColumnAndIndexMap != null) {
					for (Map.Entry<String, Integer> entry : mergeColumnAndIndexMap.entrySet()) {
						String key = entry.getKey();
						int mergeType = entry.getValue();
						// BUGFIX: the lookup key used lowercase "sum" while the
						// helper columns are registered with the uppercase "SUM"
						// suffix, so AVG rewriting never triggered.
						if (AggregateFunctionType.AVG == mergeType && mergeColumnAndIndexMap.containsKey(key + "SUM")) {
							shouldRemoveAvgField.add((key + "COUNT").toUpperCase());
							shouldRenameAvgField.add((key + "SUM").toUpperCase());
						}
					}
				}

			}

			source = session.getSessionContext();
			ByteBuffer byteBuffer = source.allocate();
			fieldCount = fieldBytesList.size();
			if (shouldRemoveAvgField.size() > 0) {
				// The COUNT helper columns are dropped, so the header must
				// announce the reduced column count.
				ResultSetHeaderPacket resultSetHeaderPacket = new ResultSetHeaderPacket();
				resultSetHeaderPacket.packetId = ++packetId;
				resultSetHeaderPacket.fieldCount = fieldCount - shouldRemoveAvgField.size();
				byteBuffer = resultSetHeaderPacket.write(byteBuffer, source, true);
			} else {
				headerBytes[3] = ++packetId;
				byteBuffer = source.writeToByteBuffer(headerBytes, byteBuffer);
			}

			String primaryKey = null;
			if (route.hasCachePrimaryKey()) {
				// items[0] = table name, items[1] = primary-key column name.
				String[] items = route.getPrimaryKeyItems();
				primaryKeyTable = items[0];
				primaryKey = items[1];
			}

			Map<String, Column> columMap = new HashMap<String, Column>(fieldCount);

			for (int i = 0, length = fieldCount; i < length; ++i) {
				boolean shouldSkip = false;
				byte[] fieldBytes = fieldBytesList.get(i);
				if (needMerg) {
					FieldPacket fieldPacket = new FieldPacket();
					fieldPacket.read(fieldBytes);
					fieldPacketList.add(fieldPacket);
					String fieldName = new String(fieldPacket.name).toUpperCase();
					if (!columMap.containsKey(fieldName)) {
						if (shouldRemoveAvgField.contains(fieldName)) {
							// Helper COUNT column: keep it for merging but hide
							// it from the client.
							shouldSkip = true;
						}
						if (shouldRenameAvgField.contains(fieldName)) {
							// Strip the "SUM" suffix so the client sees the
							// original AVG alias.
							String newFieldName = fieldName.substring(0, fieldName.length() - 3);
							fieldPacket.name = newFieldName.getBytes();
							fieldPacket.packetId = ++packetId;
							shouldSkip = true;
							byteBuffer = fieldPacket.write(byteBuffer, source, false);
						}

						columMap.put(fieldName, new Column(i, fieldPacket.type));
					}
				} else {
					FieldPacket fieldPacket = new FieldPacket();
					fieldPacket.read(fieldBytes);
					fieldPacketList.add(fieldPacket);
					if (primaryKey != null && primaryKeyIndex == -1) {
						// find primary key index
						String fieldName = new String(fieldPacket.name);
						if (primaryKey.equalsIgnoreCase(fieldName)) {
							primaryKeyIndex = i;
						}
					}
				}
				if (!shouldSkip) {
					fieldBytes[3] = ++packetId;
					byteBuffer = source.writeToByteBuffer(fieldBytes, byteBuffer);
				}
			}
			eofBytes[3] = ++packetId;
			byteBuffer = source.writeToByteBuffer(eofBytes, byteBuffer);
			source.write(byteBuffer);
			if (dataMergeService != null) {
				dataMergeService.onRowMetaData(columMap, fieldCount);
			}
		} catch (Exception e) {
			handleDataProcessException(e);
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Records the first data-processing failure and reports it to the client;
	 * subsequent failures are ignored once an error response is in flight.
	 */
	public void handleDataProcessException(Exception e) {
		if (!errorRepsponsed.get()) {
			this.errorMessage = e.toString();
			LOGGER.error("caught exception ", e);
			setFail(e.toString());
			this.tryErrorFinished(true);
		}
	}

	/**
	 * Handles one row packet from a node: either feeds it to the merge
	 * service or forwards it to the client directly, caching the
	 * primaryKey -> dataNode mapping along the way when configured.
	 */
	@Override
	public void rowResponse(final IConnectionHolder connectionHolder, final byte[] rowBytes) {

		if (errorRepsponsed.get()) {
			return;
		}

		lock.lock();
		try {

			this.selectRows++;

			RouteNode node = (RouteNode) connectionHolder.getAttachment();
			String dataNode = node.getName();
			if (dataMergeService != null) {
				dataMergeService.onNewRecord(dataNode, rowBytes);
			} else {
				RowDataPacket rowDataPkg = null;
				// cache primaryKey -> dataNode so later point queries can be
				// routed to a single node
				if (primaryKeyIndex != -1) {
					rowDataPkg = new RowDataPacket(fieldCount);
					rowDataPkg.read(rowBytes);
					String primaryKey = new String(rowDataPkg.fieldBytesList.get(primaryKeyIndex));
					LayeredCache pool = Server.getInstance().getRouterservice().getTableId2DataNodeCache();
					pool.putIfAbsent(primaryKeyTable, primaryKey, dataNode);
				}
				rowBytes[3] = ++packetId;
				if (prepared) {
					// Binary protocol: re-encode the textual row packet.
					if (rowDataPkg == null) {
						rowDataPkg = new RowDataPacket(fieldCount);
						rowDataPkg.read(rowBytes);
					}
					BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket();
					binRowDataPk.read(fieldPacketList, rowDataPkg);
					binRowDataPk.write(session.getSessionContext());
				} else {
					session.getSessionContext().write(rowBytes);
				}
			}

		} catch (Exception e) {
			handleDataProcessException(e);
		} finally {
			lock.unlock();
		}
	}

	/** Releases any merge-service resources held by this handler. */
	public void clearResources() {
		if (dataMergeService != null) {
			dataMergeService.clear();
		}
	}

	public boolean isPrepared() {
		return prepared;
	}

	public void setPrepared(boolean prepared) {
		this.prepared = prepared;
	}

	/** No-op: this handler has nothing to tear down on termination. */
	@Override
	public void terminate(Runnable runnable) {

	}

}
