/**
 * Copyright (C) 2010-2013 Alibaba Group Holding Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.rocketmq.store;

import static com.alibaba.rocketmq.store.config.BrokerRole.SLAVE;

import java.io.File;
import java.io.IOException;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.rocketmq.common.ServiceThread;
import com.alibaba.rocketmq.common.SystemClock;
import com.alibaba.rocketmq.common.ThreadFactoryImpl;
import com.alibaba.rocketmq.common.UtilAll;
import com.alibaba.rocketmq.common.constant.LoggerName;
import com.alibaba.rocketmq.common.message.MessageConst;
import com.alibaba.rocketmq.common.message.MessageDecoder;
import com.alibaba.rocketmq.common.message.MessageExt;
import com.alibaba.rocketmq.common.protocol.heartbeat.SubscriptionData;
import com.alibaba.rocketmq.common.running.RunningStats;
import com.alibaba.rocketmq.common.sysflag.MessageSysFlag;
import com.alibaba.rocketmq.store.config.BrokerRole;
import com.alibaba.rocketmq.store.config.MessageStoreConfig;
import com.alibaba.rocketmq.store.config.StorePathConfigHelper;
import com.alibaba.rocketmq.store.ha.HAService;
import com.alibaba.rocketmq.store.index.IndexService;
import com.alibaba.rocketmq.store.index.QueryOffsetResult;
import com.alibaba.rocketmq.store.schedule.ScheduleMessageService;
import com.alibaba.rocketmq.store.stats.BrokerStatsManager;

/**
 * Default implementation of the storage layer: owns the commit log, the
 * per-topic consume queues, the message index, and every background service
 * (flush, cleanup, dispatch, HA replication, delayed-message scheduling).
 *
 * @author shijia.wxr<vintage.wang@gmail.com>
 * @since 2013-7-21
 */
public class DefaultMessageStore implements MessageStore {
	private static final Logger log = LoggerFactory.getLogger(LoggerName.StoreLoggerName);
	// Message filter that matches consumer subscription data against tag codes.
	private final MessageFilter messageFilter = new DefaultMessageFilter();
	// Store configuration.
	private final MessageStoreConfig messageStoreConfig;
	// CommitLog: the physical log every message body is appended to.
	private final CommitLog commitLog;
	// Consume queue table: topic -> (queueId -> ConsumeQueue).
	private final ConcurrentHashMap<String/* topic */, ConcurrentHashMap<Integer/* queueId */, ConsumeQueue>> consumeQueueTable;
	// Service that flushes consume queue (logic queue) data to disk.
	private final FlushConsumeQueueService flushConsumeQueueService;
	// Service that periodically deletes expired commit log (physical) files.
	private final CleanCommitLogService cleanCommitLogService;
	// Service that periodically deletes expired consume queue (logic) files.
	private final CleanConsumeQueueService cleanConsumeQueueService;
	// Service that dispatches stored messages to build consume queue and index entries.
	private final DispatchMessageService dispatchMessageService;
	// Message index service.
	private final IndexService indexService;
	// Service that pre-allocates MapedFile objects (and their backing files) in the background.
	private final AllocateMapedFileService allocateMapedFileService;
	// Service that re-reads commit log data and re-dispatches it to logic queues
	// (only instantiated for the SLAVE role — see the constructor).
	private final ReputMessageService reputMessageService;
	// HA (master/slave commit log replication) service.
	private final HAService haService;
	// Delayed (scheduled) message service.
	private final ScheduleMessageService scheduleMessageService;
	// Runtime statistics service.
	private final StoreStatsService storeStatsService;
	// Runtime permission flags (readable/writeable bits).
	private final RunningFlags runningFlags = new RunningFlags();
	// Cached clock with 1ms precision; cheaper than repeated system-time calls.
	private final SystemClock systemClock = new SystemClock(1);
	// True while the store is NOT running; cleared at the end of start().
	private volatile boolean shutdown = true;
	// Store checkpoint (flush timestamps of commit log / logic queue / index).
	private StoreCheckpoint storeCheckpoint;
	// Counter used to throttle "forbidden" warnings (one log per 50000 rejections).
	private AtomicLong printTimes = new AtomicLong(0);
	// Scheduled thread for periodic store maintenance tasks.
	private final ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryImpl("StoreScheduledThread"));
	private final BrokerStatsManager brokerStatsManager;

	/**
	 * Builds the message store and wires up all of its background services.
	 * Only the services the load/recover phase depends on (allocate, dispatch,
	 * index) are started here; the rest are started in {@code start()}.
	 *
	 * @param messageStoreConfig store configuration
	 * @param brokerStatsManager broker-level statistics collector
	 * @throws IOException declared for store initialization failures
	 */
	public DefaultMessageStore(final MessageStoreConfig messageStoreConfig, final BrokerStatsManager brokerStatsManager) throws IOException {
		this.messageStoreConfig = messageStoreConfig;
		this.brokerStatsManager = brokerStatsManager;
		// ========== service thread ====================
		// Listens on a request queue and pre-creates MapedFile objects (and the
		// physical files backing them) in the background; MapedFileQueue.getLastMapedFile
		// enqueues AllocateRequest objects here when a new mapped file is needed.
		this.allocateMapedFileService = new AllocateMapedFileService();
		// CommitLog construction does two things:
		// 1) picks the flush service by flush type — GroupCommitService for
		//    SYNC_FLUSH, FlushRealTimeService for ASYNC_FLUSH;
		// 2) initializes the DefaultAppendMessageCallback.
		this.commitLog = new CommitLog(this);
		this.consumeQueueTable = new ConcurrentHashMap<String/* topic */, ConcurrentHashMap<Integer/* queueId */, ConsumeQueue>>(32);
		// ========== service thread ====================
		// Flushes consume queue data (and the transaction redo log) to disk about once a second.
		this.flushConsumeQueueService = new FlushConsumeQueueService();
		// ========== service thread ====================
		// Periodically deletes expired commit log (physical) files (default retention 72h).
		this.cleanCommitLogService = new CleanCommitLogService();
		// ========== service thread ====================
		// Periodically deletes consume queue and index data whose physical offsets
		// fall below the commit log's minimum offset.
		this.cleanConsumeQueueService = new CleanConsumeQueueService();
		// ========== service thread ====================
		// Builds consume queue entries and index entries from commit log data.
		this.dispatchMessageService = new DispatchMessageService(this.messageStoreConfig.getPutMsgIndexHightWater());
		// ========== service thread ====================
		// Internal store statistics.
		this.storeStatsService = new StoreStatsService();
		// ========== service thread ====================
		// Builds the message index.
		this.indexService = new IndexService(this);
		// ========== service thread ====================
		// HA service for master/slave commit log replication:
		// - AcceptSocketService accepts new socket connections;
		// - GroupTransferService tracks replication progress;
		// - HAClient (slave side) reports the local commit log max offset to the
		//   master every 5 seconds.
		this.haService = new HAService(this);

		switch (this.messageStoreConfig.getBrokerRole()) {
			case SLAVE :
				/** ================================ service thread ==============================
				 * Continuously watches the commit log past reputFromOffset; when new
				 * data appears it builds consume queue and index entries for it and
				 * advances reputFromOffset.
				 */
				this.reputMessageService = new ReputMessageService();
				// reputMessageService relies on scheduleMessageService to recover
				// delayed messages, keeping the replicated data consistent.
				this.scheduleMessageService = new ScheduleMessageService(this);
				break;
			case ASYNC_MASTER :
			case SYNC_MASTER :
				this.reputMessageService = null;
				// ========== service thread ====================
				// Watches delayed messages and delivers them when due.
				this.scheduleMessageService = new ScheduleMessageService(this);
				break;
			default :
				this.reputMessageService = null;
				this.scheduleMessageService = null;
		}

		// load() depends on these services, so start them ahead of time.
		// Allocate service: pre-creates MapedFile objects / physical files in the
		// background for MapedFileQueue.getLastMapedFile.
		this.allocateMapedFileService.start();
		// Builds consume queue and index entries from commit log data.
		this.dispatchMessageService.start();
		// recover() dispatches requests to the index service; if it were not
		// already running, dispatching would be throttled.
		this.indexService.start();
	}

	/**
	 * Truncates every consume queue so that no entry points at or beyond the
	 * given commit log physical offset (discards dirty logic-queue data, e.g.
	 * after abnormal-exit recovery).
	 *
	 * @param phyOffset commit log physical offset to truncate against
	 */
	public void truncateDirtyLogicFiles(long phyOffset) {
		for (ConcurrentHashMap<Integer, ConsumeQueue> queueMap : this.consumeQueueTable.values()) {
			for (ConsumeQueue consumeQueue : queueMap.values()) {
				consumeQueue.truncateDirtyLogicFiles(phyOffset);
			}
		}
	}

	/**
	 * Loads persisted data (delay-schedule progress, commit log, consume
	 * queues, index) and recovers in-memory state.
	 *
	 * @return true if every component loaded successfully, false otherwise
	 */
	public boolean load() {
		boolean result = true;

		try {
			// The abort file exists only after an abnormal exit; its absence
			// means the last shutdown was clean.
			boolean lastExitOK = !this.isTempFileExist();
			log.info("last shutdown {}", (lastExitOK ? "normally" : "abnormally"));

			// Load the delay-schedule progress FIRST: recovering delayed messages
			// from the commit log depends on the loaded delay-level parameters.
			// A slave relies on scheduleMessageService to recover delayed messages.
			if (null != scheduleMessageService) {
				// Parses the delay levels ("1s 5s 10s 30s 1m 2m ... 2h") into
				// delayLevelTable: ConcurrentHashMap<Integer /* level */, Long /* delay millis */>,
				// e.g. 1s -> 1:1000, 5s -> 2:5000, and so on; each level is one queue.
				result = result && this.scheduleMessageService.load();
			}

			// Load the commit log: MapedFileQueue.load maps every file under
			// $HOME/store/commitlog into its List<MapedFile>.
			result = result && this.commitLog.load();

			// Load consume queue files under $HOME/store/consumequeue into
			// consumeQueueTable.
			result = result && this.loadConsumeQueue();

			// TODO: load transaction state via TransactionStateService.load()
			// (tranStateTable and tranRedoLog files).

			if (result) {
				// The checkpoint file records three flush timestamps: commit log,
				// logic queue, and index.
				this.storeCheckpoint = new StoreCheckpoint(StorePathConfigHelper.getStoreCheckpoint(this.messageStoreConfig.getStorePathRootDir()));
				// Load index files under $HOME/store/index: one IndexFile per file,
				// each loading its IndexHeader. After an abnormal exit, an index
				// file whose endTimestamp is newer than the checkpoint's
				// indexMsgTimestamp (the last flush time) contains entries created
				// after the last flush and is deleted; otherwise the IndexFile is
				// kept in the indexFileList collection.
				this.indexService.load(lastExitOK/* whether the last exit was clean */);

				// Recover in-memory state (normal vs abnormal recovery path).
				this.recover(lastExitOK);

				log.info("load over, and the max phy offset = {}", this.getMaxPhyOffset());
			}
		} catch (Exception e) {
			log.error("load exception", e);
			result = false;
		}

		if (!result) {
			this.allocateMapedFileService.shutdown();
		}

		return result;
	}

	/**
	 * Registers the periodic store-maintenance task: expired-file cleanup runs
	 * after a one-minute initial delay, then at the configured clean interval.
	 */
	private void addScheduleTask() {
		final long initialDelayMillis = 1000 * 60;
		this.scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
			@Override
			public void run() {
				DefaultMessageStore.this.cleanFilesPeriodically();
			}
		}, initialDelayMillis, this.messageStoreConfig.getCleanResourceInterval(), TimeUnit.MILLISECONDS);

		// An hourly task that would clean completely unused queues
		// (cleanExpiredConsumerQueue) is intentionally disabled.
	}

	/**
	 * Runs one cleanup pass: expired commit log (physical) files first, then
	 * the corresponding consume queue (logic) files.
	 */
	private void cleanFilesPeriodically() {
		this.cleanCommitLogService.run();
		this.cleanConsumeQueueService.run();
	}

	/**
	 * Destroys consume queues whose newest recorded commit log offset lies
	 * entirely below the commit log's minimum offset (i.e. the backing physical
	 * data is gone), then removes topics whose queues have all been destroyed.
	 * The internal schedule topic is never touched.
	 */
	public void cleanExpiredConsumerQueue() {
		// Minimum physical offset still present in the commit log.
		long minCommitLogOffset = this.commitLog.getMinOffset();

		Iterator<Entry<String, ConcurrentHashMap<Integer, ConsumeQueue>>> topicIt = this.consumeQueueTable.entrySet().iterator();
		while (topicIt.hasNext()) {
			Entry<String, ConcurrentHashMap<Integer, ConsumeQueue>> topicEntry = topicIt.next();
			String topic = topicEntry.getKey();
			if (topic.equals(ScheduleMessageService.SCHEDULE_TOPIC)) {
				continue;
			}

			ConcurrentHashMap<Integer, ConsumeQueue> queueTable = topicEntry.getValue();
			Iterator<Entry<Integer, ConsumeQueue>> queueIt = queueTable.entrySet().iterator();
			while (queueIt.hasNext()) {
				Entry<Integer, ConsumeQueue> queueEntry = queueIt.next();
				ConsumeQueue cq = queueEntry.getValue();
				long maxCLOffsetInConsumeQueue = cq.getLastOffset();

				if (maxCLOffsetInConsumeQueue == -1) {
					// -1 can mean the queue file was created this very instant; skip it.
					log.warn("maybe ConsumeQueue was created just now. topic={} queueId={} maxPhysicOffset={} minLogicOffset={}.", //
							cq.getTopic(), //
							cq.getQueueId(), //
							cq.getMaxPhysicOffset(), //
							cq.getMinLogicOffset());
				} else if (maxCLOffsetInConsumeQueue < minCommitLogOffset) {
					log.info("cleanExpiredConsumerQueue: {} {} consumer queue destroyed, minCommitLogOffset: {} maxCLOffsetInConsumeQueue: {}", //
							topic, //
							queueEntry.getKey(), //
							minCommitLogOffset, //
							maxCLOffsetInConsumeQueue);

					DefaultMessageStore.this.commitLog.removeQueurFromTopicQueueTable(cq.getTopic(), cq.getQueueId());

					cq.destroy();
					queueIt.remove();
				}
			}

			if (queueTable.isEmpty()) {
				log.info("cleanExpiredConsumerQueue: {},topic destroyed", topic);
				topicIt.remove();
			}
		}
	}

	/**
	 * Starts the remaining store services and marks the store as running.
	 * (indexService and dispatchMessageService were already started in the
	 * constructor, because load() depends on them.)
	 *
	 * @throws Exception if any underlying service fails to start
	 */
	public void start() throws Exception {
		// Already started in the constructor:
		// this.indexService.start();
		// Already started in the constructor:
		// this.dispatchMessageService.start();
		this.flushConsumeQueueService.start();
		// Starts the CommitLog flush service:
		// GroupCommitService for SYNC_FLUSH;
		// FlushRealTimeService for ASYNC_FLUSH.
		this.commitLog.start();
		this.storeStatsService.start();

		// ================== master role only ======================
		// A slave does not start scheduleMessageService, avoiding concurrent
		// modification of the consume queues.
		/**
		 * On a master, ScheduleMessageService adds a timer per delay level that
		 * periodically checks its queue for due delayed messages; when one is
		 * due, the real message is written into the commit log and its consume
		 * queue and index entries are generated.
		 */
		if (this.scheduleMessageService != null && SLAVE != messageStoreConfig.getBrokerRole()) {
			this.scheduleMessageService.start();
		}

		/**
		 * On a slave, ReputMessageService is started: its reputFromOffset is
		 * first set to the local commit log's max physical offset; its run loop
		 * then keeps checking for data appended past that offset and builds
		 * consume queue and index entries for it. Used during master/slave
		 * replication.
		 */
		if (this.reputMessageService != null) {
			this.reputMessageService.setReputFromOffset(this.commitLog.getMaxOffset());
			this.reputMessageService.start();
		}

		// Start the HA service for master/slave commit log replication.
		this.haService.start();

		/**
		 * Creates the empty abort marker file under $HOME/store. It is deleted
		 * on clean shutdown; if it survives, the next startup knows the broker
		 * exited abnormally and chooses the recovery path accordingly.
		 */
		this.createTempFile();
		/**
		 * Schedules the periodic cleanup task (CleanCommitLogService.run and
		 * CleanConsumeQueueService.run) for physical and logic files.
		 */
		this.addScheduleTask();
		this.shutdown = false;

		// TODO
		/**
		 * TransactionStateService.start should register, per tranStateTable
		 * file, a timer that every minute re-checks PREPARED transaction
		 * messages with the producer.
		 */
	}

	/**
	 * Shuts down every store service, flushes the checkpoint, and removes the
	 * abort marker file (signalling a clean exit). Idempotent: does nothing if
	 * already shut down.
	 */
	public void shutdown() {
		if (!this.shutdown) {
			this.shutdown = true;

			this.scheduledExecutorService.shutdown();

			try {
				// Give in-flight callers time to finish.
				Thread.sleep(1000 * 3);
			} catch (InterruptedException e) {
				log.error("shutdown Exception, ", e);
				// FIX: restore the interrupt status instead of swallowing it, so
				// callers higher up the stack can observe the interruption.
				Thread.currentThread().interrupt();
			}

			if (this.scheduleMessageService != null) {
				this.scheduleMessageService.shutdown();
			}

			this.haService.shutdown();

			this.storeStatsService.shutdown();
			this.dispatchMessageService.shutdown();
			this.indexService.shutdown();
			this.flushConsumeQueueService.shutdown();
			this.commitLog.shutdown();
			this.allocateMapedFileService.shutdown();
			if (this.reputMessageService != null) {
				this.reputMessageService.shutdown();
			}
			this.storeCheckpoint.flush();
			this.storeCheckpoint.shutdown();

			// Deleting the abort file marks this shutdown as clean.
			this.deleteFile(StorePathConfigHelper.getAbortFile(this.messageStoreConfig.getStorePathRootDir()));
		}
	}

	/**
	 * Permanently deletes all store data: consume queues, commit log, index,
	 * plus the abort marker and checkpoint files.
	 */
	public void destroy() {
		this.destroyLogics();
		this.commitLog.destroy();
		this.indexService.destroy();
		String rootDir = this.messageStoreConfig.getStorePathRootDir();
		this.deleteFile(StorePathConfigHelper.getAbortFile(rootDir));
		this.deleteFile(StorePathConfigHelper.getStoreCheckpoint(rootDir));
	}

	/**
	 * Destroys every consume queue of every topic (removes their backing files).
	 */
	public void destroyLogics() {
		for (ConcurrentHashMap<Integer, ConsumeQueue> queueMap : this.consumeQueueTable.values()) {
			for (ConsumeQueue consumeQueue : queueMap.values()) {
				consumeQueue.destroy();
			}
		}
	}
	/**
	 * Writes a message into the commit log.
	 *
	 * Rejects the request with SERVICE_NOT_AVAILABLE when the store is shut
	 * down, the broker is a slave, or the store is not writeable; rejects it
	 * with MESSAGE_ILLEGAL when the topic or properties exceed their length
	 * limits. On success, records timing and per-topic statistics.
	 *
	 * @param msg the message to store
	 * @return the result of the append, or a failure result as described above
	 */
	public PutMessageResult putMessage(MessageExtBrokerInner msg) {
		if (this.shutdown) {
			log.warn("message store has shutdown, so putMessage is forbidden");
			return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
		}

		if (BrokerRole.SLAVE == this.messageStoreConfig.getBrokerRole()) {
			// Throttle the warning: log only once every 50000 rejected calls.
			if ((this.printTimes.getAndIncrement() % 50000) == 0) {
				log.warn("message store is slave mode, so putMessage is forbidden ");
			}
			return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
		}

		if (!this.runningFlags.isWriteable()) {
			// Same 50000-call throttling as above.
			if ((this.printTimes.getAndIncrement() % 50000) == 0) {
				log.warn("message store is not writeable, so putMessage is forbidden " + this.runningFlags.getFlagBits());
			}
			return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
		} else {
			this.printTimes.set(0);
		}

		// The topic length must fit in a single byte (max 127).
		if (msg.getTopic().length() > Byte.MAX_VALUE) {
			log.warn("putMessage message topic length too long " + msg.getTopic().length());
			return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, null);
		}

		// The serialized properties length must fit in a short (max 32767).
		if (msg.getPropertiesString() != null && msg.getPropertiesString().length() > Short.MAX_VALUE) {
			log.warn("putMessage message properties length too long " + msg.getPropertiesString().length());
			return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, null);
		}

		long startTimestamp = this.getSystemClock().now();
		// All checks passed: append the message to the commit log.
		PutMessageResult result = this.commitLog.putMessage(msg);

		// Timing and per-topic statistics.
		long eclipseTime = this.getSystemClock().now() - startTimestamp;
		if (eclipseTime > 1000) {
			log.warn("putMessage not in lock eclipse time(ms) " + eclipseTime);
		}
		this.storeStatsService.setPutMessageEntireTimeMax(eclipseTime);
		this.storeStatsService.getSinglePutMessageTopicTimesTotal(msg.getTopic()).incrementAndGet();

		if (null == result || !result.isOk()) {
			this.storeStatsService.getPutMessageFailedTimes().incrementAndGet();
		}

		return result;
	}

	/** @return the store's cached millisecond-precision clock */
	public SystemClock getSystemClock() {
		return this.systemClock;
	}

	/**
	 * Reads up to maxMsgNums messages for (topic, queueId) starting at the
	 * given consume queue offset, applying tag-based subscription filtering.
	 *
	 * @return null when the store is shut down or not readable; otherwise a
	 *         GetMessageResult whose status describes the outcome and whose
	 *         nextBeginOffset tells the consumer where to continue pulling
	 */
	public GetMessageResult getMessage(final String group, final String topic, final int queueId, final long offset, final int maxMsgNums, final SubscriptionData subscriptionData) {
		if (this.shutdown) {
			log.warn("message store has shutdown, so getMessage is forbidden");
			return null;
		}

		if (!this.runningFlags.isReadable()) {
			log.warn("message store is not readable, so getMessage is forbidden " + this.runningFlags.getFlagBits());
			return null;
		}

		long beginTime = this.getSystemClock().now();

		// Overall result status of this pull.
		GetMessageStatus status = GetMessageStatus.NO_MESSAGE_IN_QUEUE;
		// Offset the consumer should use for its next pull.
		long nextBeginOffset = offset;
		// Min and max logic offsets of the consume queue.
		long minOffset = 0;
		long maxOffset = 0;

		GetMessageResult getResult = new GetMessageResult();

		// The commit log max offset is guarded by a read/write lock; read it
		// once up front to avoid repeated lock overhead.
		final long maxOffsetPy = this.commitLog.getMaxOffset();
		// Look up the ConsumeQueue in consumeQueueTable.
		ConsumeQueue consumeQueue = findConsumeQueue(topic, queueId);
		if (consumeQueue != null) {
			// Min logic offset (minLogicOffset / 20).
			minOffset = consumeQueue.getMinOffsetInQuque();
			// Max logic offset (MapedFileQueue.getMaxOffset() / 20).
			maxOffset = consumeQueue.getMaxOffsetInQuque();

			if (maxOffset == 0) {
				status = GetMessageStatus.NO_MESSAGE_IN_QUEUE;
				nextBeginOffset = 0;
			} else if (offset < minOffset) {
				status = GetMessageStatus.OFFSET_TOO_SMALL;
				nextBeginOffset = minOffset;
			} else if (offset == maxOffset) {
				status = GetMessageStatus.OFFSET_OVERFLOW_ONE;
				nextBeginOffset = offset;
			} else if (offset > maxOffset) {
				status = GetMessageStatus.OFFSET_OVERFLOW_BADLY;
				if (0 == minOffset) {
					nextBeginOffset = minOffset;
				} else {
					nextBeginOffset = maxOffset;
				}
			} else
			// The requested offset lies within [minOffset, maxOffset): read messages.
			{
				SelectMapedBufferResult bufferConsumeQueue = consumeQueue.getIndexBuffer(offset);
				if (bufferConsumeQueue != null) {
					try {
						status = GetMessageStatus.NO_MATCHED_MESSAGE;
						// Start offset of the commit log file AFTER one whose data was
						// found missing; units pointing before it are skipped.
						long nextPhyFileStartOffset = Long.MIN_VALUE;
						long maxPhyOffsetPulling = 0;

						int i = 0;
						final int MaxFilterMessageCount = 16000;
						boolean diskFallRecorded = false;
						// Each consume queue unit is 20 bytes: phy offset (8), size (4), tagsCode (8).
						for (; i < bufferConsumeQueue.getSize() && i < MaxFilterMessageCount; i += ConsumeQueue.CQStoreUnitSize) {
							// Bytes 1-8: physical offset in the commit log.
							long offsetPy = bufferConsumeQueue.getByteBuffer().getLong();
							// Bytes 9-12: message size.
							int sizePy = bufferConsumeQueue.getByteBuffer().getInt();
							// Bytes 13-20: tags code.
							long tagsCode = bufferConsumeQueue.getByteBuffer().getLong();

							maxPhyOffsetPulling = offsetPy;

							// The commit log file for an earlier unit was deleted
							// (nextPhyFileStartOffset was set below); skip units that still
							// point into the deleted range.
							if (nextPhyFileStartOffset != Long.MIN_VALUE) {
								if (offsetPy < nextPhyFileStartOffset)
									continue;
							}

							// Is the message still in page cache, or only on disk?
							boolean isInDisk = checkInDiskByCommitOffset(offsetPy, maxOffsetPy);
							// Stop once this batch has reached its size/count limits.
							if (this.isTheBatchFull(sizePy, maxMsgNums, getResult.getBufferTotalSize(), getResult.getMessageCount(), isInDisk)) {
								break;
							}

							// Subscription (tag) filtering.
							if (this.messageFilter.isMessageMatched(subscriptionData, tagsCode)) {
								// Fetch the message bytes at (offsetPy, sizePy) from the commit log.
								SelectMapedBufferResult selectResult = this.commitLog.getMessage(offsetPy, sizePy);
								if (selectResult != null) {
									this.storeStatsService.getGetMessageTransferedMsgCount().incrementAndGet();
									getResult.addMessage(selectResult);
									status = GetMessageStatus.FOUND;
									nextPhyFileStartOffset = Long.MIN_VALUE;

									// Record how far this consumer lags behind the queue's max
									// physical offset, once per pull.
									// BUG FIX: the original condition was "if (diskFallRecorded)",
									// which could never be true since the flag starts false and is
									// only set inside this branch — the stat was never recorded.
									if (!diskFallRecorded) {
										diskFallRecorded = true;
										long fallBehind = consumeQueue.getMaxPhysicOffset() - offsetPy;
										brokerStatsManager.recordDiskFallBehind(group, topic, queueId, fallBehind);
									}
								} else {
									// Nothing fetched yet at all: the data is being removed.
									if (getResult.getBufferTotalSize() == 0) {
										status = GetMessageStatus.MESSAGE_WAS_REMOVING;
									}

									// The commit log file holding offsetPy was deleted; resume
									// reading from the start offset of the next file.
									nextPhyFileStartOffset = this.commitLog.rollNextFile(offsetPy);
								}
							} else {
								// No match and nothing fetched yet: report NO_MATCHED_MESSAGE.
								if (getResult.getBufferTotalSize() == 0) {
									status = GetMessageStatus.NO_MATCHED_MESSAGE;
								}

								if (log.isDebugEnabled()) {
									log.debug("message type not matched, client: " + subscriptionData + " server: " + tagsCode);
								}
							}
						}
						// Next pull starts right after the last unit examined:
						// requested offset + (bytes consumed / 20).
						nextBeginOffset = offset + (i / ConsumeQueue.CQStoreUnitSize);

						// Suggest pulling from a slave when the un-pulled backlog
						// (commit log max offset minus the last pulled physical offset)
						// exceeds the configured share of physical memory.
						long diff = this.getMaxPhyOffset() - maxPhyOffsetPulling;
						long memory = (long) (StoreUtil.TotalPhysicalMemorySize * (this.messageStoreConfig.getAccessMessageInMemoryMaxRatio() / 100.0));
						getResult.setSuggestPullingFromSlave(diff > memory);
					} finally {
						// Always release the mapped-buffer reference.
						bufferConsumeQueue.release();
					}
				} else {
					status = GetMessageStatus.OFFSET_FOUND_NULL;
					// Continue from the first offset of the next consume queue file.
					nextBeginOffset = consumeQueue.rollNextFile(offset);
					log.warn("consumer request topic: " + topic + "offset: " + offset + " minOffset: " + minOffset + " maxOffset: " + maxOffset + ", but access logic queue failed.");
				}
			}
		}
		// No consume queue exists for this topic/queueId (rare).
		else {
			status = GetMessageStatus.NO_MATCHED_LOGIC_QUEUE;
			nextBeginOffset = 0;
		}

		if (GetMessageStatus.FOUND == status) {
			this.storeStatsService.getGetMessageTimesTotalFound().incrementAndGet();
		} else {
			this.storeStatsService.getGetMessageTimesTotalMiss().incrementAndGet();
		}
		long eclipseTime = this.getSystemClock().now() - beginTime;
		this.storeStatsService.setGetMessageEntireTimeMax(eclipseTime);

		getResult.setStatus(status);
		getResult.setNextBeginOffset(nextBeginOffset);
		getResult.setMaxOffset(maxOffset);
		getResult.setMinOffset(minOffset);
		return getResult;
	}

	/**
	 * Returns the max logic offset of the given queue (the MapedFileQueue max
	 * offset divided by the 20-byte unit size). No message exists at this
	 * offset yet. Returns 0 when the queue does not exist.
	 */
	public long getMaxOffsetInQuque(String topic, int queueId) {
		ConsumeQueue logic = this.findConsumeQueue(topic, queueId);
		return logic == null ? 0 : logic.getMaxOffsetInQuque();
	}

	/**
	 * Returns the min logic offset of the given queue (minLogicOffset divided
	 * by the 20-byte unit size). Returns -1 when no consume queue exists for
	 * this topic/queueId; 0 may simply mean minLogicOffset has not been
	 * updated yet shortly after startup.
	 */
	public long getMinOffsetInQuque(String topic, int queueId) {
		ConsumeQueue logic = this.findConsumeQueue(topic, queueId);
		return logic == null ? -1 : logic.getMinOffsetInQuque();
	}

	/**
	 * Returns the logic offset in the given queue matching the given store
	 * timestamp, or 0 when the queue does not exist.
	 */
	public long getOffsetInQueueByTime(String topic, int queueId, long timestamp) {
		ConsumeQueue logic = this.findConsumeQueue(topic, queueId);
		return logic == null ? 0 : logic.getOffsetInQueueByTime(timestamp);
	}

	/**
	 * Reads the full message stored at the given commit log offset, or null
	 * when nothing is stored there. First reads the 4-byte TOTALSIZE field,
	 * then fetches the whole message of that size.
	 */
	public MessageExt lookMessageByOffset(long commitLogOffset) {
		SelectMapedBufferResult sbr = this.commitLog.getMessage(commitLogOffset, 4);
		if (null == sbr) {
			return null;
		}
		try {
			// 1 TOTALSIZE
			int totalSize = sbr.getByteBuffer().getInt();
			return lookMessageByOffset(commitLogOffset, totalSize);
		} finally {
			sbr.release();
		}
	}

	/**
	 * Returns a mapped-buffer view of the single message stored at the given
	 * commit log offset, or null when nothing is stored there.
	 */
	@Override
	public SelectMapedBufferResult selectOneMessageByOffset(long commitLogOffset) {
		SelectMapedBufferResult sbr = this.commitLog.getMessage(commitLogOffset, 4);
		if (null == sbr) {
			return null;
		}
		try {
			// 1 TOTALSIZE
			int totalSize = sbr.getByteBuffer().getInt();
			return this.commitLog.getMessage(commitLogOffset, totalSize);
		} finally {
			sbr.release();
		}
	}

	/**
	 * Returns a mapped-buffer view of msgSize bytes starting at the given
	 * commit log offset.
	 */
	@Override
	public SelectMapedBufferResult selectOneMessageByOffset(long commitLogOffset, int msgSize) {
		return this.commitLog.getMessage(commitLogOffset, msgSize);
	}

	/** @return a human-readable dump of the store's runtime statistics */
	public String getRunningDataInfo() {
		return this.storeStatsService.toString();
	}

	/**
	 * Collects runtime metrics: store statistics plus commit log and consume
	 * queue disk-usage ratios, delay-message progress, and the commit log
	 * min/max physical offsets.
	 */
	@Override
	public HashMap<String, String> getRuntimeInfo() {
		HashMap<String, String> result = this.storeStatsService.getRuntimeInfo();

		// Commit log partition disk usage.
		String storePathPhysic = this.getMessageStoreConfig().getStorePathCommitLog();
		double physicRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathPhysic);
		result.put(RunningStats.commitLogDiskRatio.name(), String.valueOf(physicRatio));

		// Consume queue partition disk usage.
		String storePathLogics = StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir());
		double logicsRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathLogics);
		result.put(RunningStats.consumeQueueDiskRatio.name(), String.valueOf(logicsRatio));

		// Delay message progress.
		if (this.scheduleMessageService != null) {
			this.scheduleMessageService.buildRunningStats(result);
		}

		result.put(RunningStats.commitLogMinOffset.name(), String.valueOf(this.getMinPhyOffset()));
		result.put(RunningStats.commitLogMaxOffset.name(), String.valueOf(this.getMaxPhyOffset()));

		return result;
	}

	/**
	 * Returns the max physical offset, i.e. the current write position of the
	 * commit log.
	 */
	@Override
	public long getMaxPhyOffset() {
		return this.commitLog.getMaxOffset();
	}

	/**
	 * Returns the store timestamp of the earliest message in the given queue,
	 * or -1 when the queue does not exist or the timestamp cannot be read.
	 */
	@Override
	public long getEarliestMessageTime(String topic, int queueId) {
		ConsumeQueue logicQueue = this.findConsumeQueue(topic, queueId);
		if (logicQueue != null) {
			long minLogicOffset = logicQueue.getMinLogicOffset();

			// One consume queue unit is 20 bytes: phy offset (8) + size (4) + tagsCode (8).
			SelectMapedBufferResult result = logicQueue.getIndexBuffer(minLogicOffset / ConsumeQueue.CQStoreUnitSize);
			if (result != null) {
				try {
					final long phyOffset = result.getByteBuffer().getLong();
					final int size = result.getByteBuffer().getInt();
					long storeTime = this.getCommitLog().pickupStoretimestamp(phyOffset, size);
					return storeTime;
				} catch (Exception e) {
					// FIX: the exception was previously swallowed silently; log it
					// so read failures are visible while still returning -1.
					log.warn("getEarliestMessageTime exception, topic={} queueId={}", topic, queueId, e);
				} finally {
					result.release();
				}
			}
		}

		return -1;
	}

	/**
	 * Returns the store timestamp of the message at the given logic offset in
	 * the given queue, or -1 when the queue/entry does not exist or the
	 * timestamp cannot be read.
	 */
	@Override
	public long getMessageStoreTimeStamp(String topic, int queueId, long offset) {
		ConsumeQueue logicQueue = this.findConsumeQueue(topic, queueId);
		if (logicQueue != null) {
			SelectMapedBufferResult result = logicQueue.getIndexBuffer(offset);
			if (result != null) {
				try {
					final long phyOffset = result.getByteBuffer().getLong();
					final int size = result.getByteBuffer().getInt();
					long storeTime = this.getCommitLog().pickupStoretimestamp(phyOffset, size);
					return storeTime;
				} catch (Exception e) {
					// FIX: the exception was previously swallowed silently; log it
					// so read failures are visible while still returning -1.
					log.warn("getMessageStoreTimeStamp exception, topic={} queueId={} offset={}", topic, queueId, offset, e);
				} finally {
					result.release();
				}
			}
		}

		return -1;
	}

	/**
	 * Returns the number of messages in the given queue, or -1 when the queue
	 * does not exist.
	 */
	@Override
	public long getMessageTotalInQueue(String topic, int queueId) {
		ConsumeQueue logicQueue = this.findConsumeQueue(topic, queueId);
		return logicQueue == null ? -1 : logicQueue.getMessageTotalInQueue();
	}

	/**
	 * Returns all commit log data starting at the given physical offset (used
	 * by HA replication), or null when the store is shut down.
	 */
	@Override
	public SelectMapedBufferResult getCommitLogData(final long offset) {
		if (this.shutdown) {
			log.warn("message store has shutdown, so getPhyQueueData is forbidden");
			return null;
		}

		return this.commitLog.getData(offset);
	}

	/**
	 * Appends raw commit log data at the given physical offset (used on a
	 * slave during HA replication) and wakes the reput service so consume
	 * queue and index entries get built for the appended data. The slave
	 * initializes the reput service's reputFromOffset to its local commit log
	 * max offset at startup (see start()).
	 *
	 * @return true when the data was appended successfully
	 */
	@Override
	public boolean appendToCommitLog(long startOffset, byte[] data) {
		if (this.shutdown) {
			log.warn("message store has shutdown, so appendToPhyQueue is forbidden");
			return false;
		}

		boolean result = this.commitLog.appendData(startOffset, data);
		if (result) {
			// ROBUSTNESS FIX: reputMessageService is null for master roles (see
			// the constructor's role switch), so guard against an NPE if this
			// path is ever reached on a master.
			if (this.reputMessageService != null) {
				this.reputMessageService.wakeup();
			}
		} else {
			log.error("appendToPhyQueue failed " + startOffset + " " + data.length);
		}

		return result;
	}

	/** Triggers a manual round of expired commit log file deletion. */
	@Override
	public void excuteDeleteFilesManualy() {
		this.cleanCommitLogService.excuteDeleteFilesManualy();
	}

	/**
	 * Queries messages by index key within the time range [begin, end].
	 * Performs up to 3 index lookups, moving the upper time bound backwards
	 * after each round, and returns as soon as any round yields message data.
	 *
	 * @param topic  topic to query
	 * @param key    index key
	 * @param maxNum maximum number of physical offsets per index lookup
	 * @param begin  earliest store timestamp to consider
	 * @param end    latest store timestamp to consider
	 */
	@Override
	public QueryMessageResult queryMessage(String topic, String key, int maxNum, long begin, long end) {
		QueryMessageResult queryMessageResult = new QueryMessageResult();

		long lastQueryMsgTime = end;

		for (int i = 0; i < 3; i++) {
			QueryOffsetResult queryOffsetResult = this.indexService.queryOffset(topic, key, maxNum, begin, lastQueryMsgTime);
			if (queryOffsetResult.getPhyOffsets().isEmpty()) {
				break;
			}

			// Sort the physical offsets in ascending order.
			Collections.sort(queryOffsetResult.getPhyOffsets());

			queryMessageResult.setIndexLastUpdatePhyoffset(queryOffsetResult.getIndexLastUpdatePhyoffset());
			queryMessageResult.setIndexLastUpdateTimestamp(queryOffsetResult.getIndexLastUpdateTimestamp());

			for (int m = 0; m < queryOffsetResult.getPhyOffsets().size(); m++) {
				long offset = queryOffsetResult.getPhyOffsets().get(m);

				try {
					// Re-check the key on the server side to weed out index hash collisions.
					// NOTE(review): match starts true and the loop below only ever sets
					// it back to true, so a hash collision is never actually rejected
					// here — confirm whether this is intentional.
					boolean match = true;
					MessageExt msg = this.lookMessageByOffset(offset);
					if (0 == m) {
						// NOTE(review): if msg is null here, the resulting NPE is caught
						// by the catch block below and the offset is skipped.
						lastQueryMsgTime = msg.getStoreTimestamp();
					}

					String[] keyArray = msg.getKeys().split(MessageConst.KEY_SEPARATOR);
					if (topic.equals(msg.getTopic())) {
						for (String k : keyArray) {
							if (k.equals(key)) {
								match = true;
								break;
							}
						}
					}

					if (match) {
						SelectMapedBufferResult result = this.commitLog.getData(offset, false);
						if (result != null) {
							// The first 4 bytes of a stored message hold its total size.
							int size = result.getByteBuffer().getInt(0);
							result.getByteBuffer().limit(size);
							result.setSize(size);
							queryMessageResult.addMessage(result);
						}
					} else {
						log.warn("queryMessage hash duplicate, {} {}", topic, key);
					}
				} catch (Exception e) {
					log.error("queryMessage exception", e);
				}
			}

			// Return as soon as any round found data.
			if (queryMessageResult.getBufferTotalSize() > 0) {
				break;
			}

			// Walked past the lower time bound without finding anything.
			if (lastQueryMsgTime < begin) {
				break;
			}
		}

		return queryMessageResult;
	}

	/**
	 * Updates the master broker address used by the HA (replication) service.
	 */
	@Override
	public void updateHaMasterAddress(String newAddr) {
		this.haService.updateMasterAddress(newAddr);
	}

	/**
	 * Current time in milliseconds, read through the store's SystemClock.
	 */
	@Override
	public long now() {
		return this.systemClock.now();
	}

	// Accessor for the underlying commit log (the physical message store).
	public CommitLog getCommitLog() {
		return commitLog;
	}

	/**
	 * Reads {@code size} bytes of commit log data at {@code commitLogOffset}
	 * and decodes them into a message. The mapped buffer is always released.
	 *
	 * @param commitLogOffset physical offset of the message in the commit log
	 * @param size total stored size of the message in bytes
	 * @return the decoded message, or null when the region cannot be mapped
	 */
	public MessageExt lookMessageByOffset(long commitLogOffset, int size) {
		final SelectMapedBufferResult sbr = this.commitLog.getMessage(commitLogOffset, size);
		if (null == sbr) {
			return null;
		}

		try {
			// Decode the raw commit log bytes into a MessageExt.
			return MessageDecoder.decode(sbr.getByteBuffer(), true, false);
		} finally {
			sbr.release();
		}
	}

	/**
	 * Looks up the ConsumeQueue for the given topic/queueId, lazily creating
	 * both the per-topic map and the queue itself when absent. Creation races
	 * are resolved via putIfAbsent, so concurrent callers agree on a single
	 * instance.
	 *
	 * @param topic topic name
	 * @param queueId queue id within the topic
	 * @return the (possibly freshly created) consume queue
	 */
	public ConsumeQueue findConsumeQueue(String topic, int queueId) {
		ConcurrentHashMap<Integer, ConsumeQueue> queueMap = consumeQueueTable.get(topic);
		if (queueMap == null) {
			ConcurrentHashMap<Integer, ConsumeQueue> candidate = new ConcurrentHashMap<Integer, ConsumeQueue>(128);
			ConcurrentHashMap<Integer, ConsumeQueue> existing = consumeQueueTable.putIfAbsent(topic, candidate);
			queueMap = (existing != null) ? existing : candidate;
		}

		ConsumeQueue queue = queueMap.get(queueId);
		if (queue == null) {
			ConsumeQueue created = new ConsumeQueue(//
					topic, //
					queueId, //
					StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()), //
					this.getMessageStoreConfig().getMapedFileSizeConsumeQueue(), //
					this);
			ConsumeQueue racedIn = queueMap.putIfAbsent(queueId, created);
			queue = (racedIn != null) ? racedIn : created;
		}

		return queue;
	}

	/**
	 * Decides whether the current pull batch is complete. The very first
	 * message is never limited; afterwards the batch is capped by the
	 * requested message count and by byte/count transfer limits that differ
	 * depending on whether the data is served from disk or from memory.
	 */
	private boolean isTheBatchFull(int sizePy, int maxMsgNums, int bufferTotal, int messageTotal, boolean isInDisk) {
		// Always admit the first message of a batch.
		if (0 == bufferTotal || 0 == messageTotal) {
			return false;
		}

		// Requested batch size reached.
		if ((messageTotal + 1) >= maxMsgNums) {
			return true;
		}

		final int byteLimit;
		final int countLimit;
		if (isInDisk) {
			// Disk-resident data uses the (tighter) on-disk transfer limits.
			byteLimit = this.messageStoreConfig.getMaxTransferBytesOnMessageInDisk();
			countLimit = this.messageStoreConfig.getMaxTransferCountOnMessageInDisk();
		} else {
			// Memory-resident data may transfer more per pull.
			byteLimit = this.messageStoreConfig.getMaxTransferBytesOnMessageInMemory();
			countLimit = this.messageStoreConfig.getMaxTransferCountOnMessageInMemory();
		}

		return (bufferTotal + sizePy) > byteLimit || (messageTotal + 1) > countLimit;
	}

	/**
	 * Deletes a single file and logs whether the deletion succeeded.
	 */
	private void deleteFile(final String fileName) {
		final boolean deleted = new File(fileName).delete();
		log.info(fileName + (deleted ? " delete OK" : " delete Failed"));
	}

	/**
	 * Creates the "abort" marker file under the store root after startup
	 * (similar to a UNIX vi swap file). Its presence on the next boot means
	 * the broker did not shut down cleanly, selecting abnormal recovery.
	 *
	 * @throws IOException if the file cannot be created
	 */
	private void createTempFile() throws IOException {
		final String fileName = StorePathConfigHelper.getAbortFile(this.messageStoreConfig.getStorePathRootDir());
		final File abortFile = new File(fileName);
		MapedFile.ensureDirOK(abortFile.getParent());
		final boolean created = abortFile.createNewFile();
		log.info(fileName + (created ? " create OK" : " already exists"));
	}

	/**
	 * Checks whether the "abort" marker file exists, i.e. whether the last
	 * shutdown was abnormal.
	 */
	private boolean isTempFileExist() {
		final String abortFilePath = StorePathConfigHelper.getAbortFile(this.messageStoreConfig.getStorePathRootDir());
		return new File(abortFilePath).exists();
	}

	/**
	 * Loads all consume queue data from $storeRoot/consumequeue at startup.
	 * The on-disk layout is topic/queueId/files: one first-level directory
	 * per topic and one second-level directory per queue id. For each queue
	 * directory a ConsumeQueue is created, registered in consumeQueueTable
	 * and loaded (ConsumeQueue.load maps its files via MapedFileQueue, with
	 * wrotePostion/CommittedPosition initialized to the file size).
	 *
	 * @return true when every queue loaded successfully
	 */
	private boolean loadConsumeQueue() {
		File dirLogic = new File(StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()));
		File[] fileTopicList = dirLogic.listFiles();
		if (fileTopicList != null) {
			// Iterate over topic directories.
			for (File fileTopic : fileTopicList) {
				String topic = fileTopic.getName();
				// Iterate over the queue directories of this topic.
				File[] fileQueueIdList = fileTopic.listFiles();
				if (fileQueueIdList != null) {
					for (File fileQueueId : fileQueueIdList) {
						int queueId;
						try {
							queueId = Integer.parseInt(fileQueueId.getName());
						} catch (NumberFormatException e) {
							// Queue directory names must be numeric ids; skip
							// stray entries (e.g. OS metadata files) instead
							// of crashing the whole load with an unchecked
							// NumberFormatException.
							log.warn("loadConsumeQueue skip invalid queueId dir " + fileQueueId);
							continue;
						}
						ConsumeQueue logic = new ConsumeQueue(//
								topic, //
								queueId, //
								StorePathConfigHelper.getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()), //
								this.getMessageStoreConfig().getMapedFileSizeConsumeQueue(), //
								this);
						this.putConsumeQueue(topic, queueId, logic);
						if (!logic.load()) {
							return false;
						}
					}
				}
			}
		}

		log.info("load logics queue all over, OK");

		return true;
	}

	// Accessor for the store configuration.
	public MessageStoreConfig getMessageStoreConfig() {
		return messageStoreConfig;
	}

	/**
	 * Registers a ConsumeQueue under topic/queueId in consumeQueueTable.
	 * Uses putIfAbsent for the per-topic map so a concurrent caller cannot
	 * overwrite another's freshly inserted map (the previous get/put
	 * sequence was a check-then-act race on the ConcurrentHashMap).
	 * Consistent with the creation pattern used by findConsumeQueue.
	 */
	private void putConsumeQueue(final String topic, final int queueId, final ConsumeQueue consumeQueue) {
		ConcurrentHashMap<Integer/* queueId */, ConsumeQueue> map = this.consumeQueueTable.get(topic);
		if (null == map) {
			ConcurrentHashMap<Integer/* queueId */, ConsumeQueue> newMap = new ConcurrentHashMap<Integer/* queueId */, ConsumeQueue>();
			ConcurrentHashMap<Integer/* queueId */, ConsumeQueue> oldMap = this.consumeQueueTable.putIfAbsent(topic, newMap);
			map = (oldMap != null) ? oldMap : newMap;
		}
		map.put(queueId, consumeQueue);
	}

	/**
	 * Recovers store state at startup. The order matters: consume queues
	 * first, then the commit log (strategy chosen by whether the last exit
	 * was clean), then the commit log's topic/queue offset table.
	 */
	private void recover(final boolean lastExitOK) {
		// First recover the consume queues along the normal path: restore
		// each ConsumeQueue's maxPhysicOffset (physical offset of its last
		// message) by iterating every topic/queueId entry in
		// consumeQueueTable and calling ConsumeQueue.recover().
		this.recoverConsumeQueue();

		// Choose the commit log recovery strategy based on the abort file.
		// This mainly restores MapedFileQueue's committedWhere (the flush
		// position) and discards commit log files / MapedFile objects beyond
		// that position.
		if (lastExitOK) {
			// No abort file: clean shutdown, use normal recovery.
			this.commitLog.recoverNormally();
		}
		// Abnormal exit: OS crash, JVM crash, or power loss.
		else {
			// Abort file present: use the more defensive recovery.
			this.commitLog.recoverAbnormally();
		}

		// Wait until the dispatch service has drained its buffered requests,
		// so every recovered message actually reaches the logic queues.
		while (this.dispatchMessageService.hasRemainMessage()) {
			try {
				Thread.sleep(500);
				log.info("waiting dispatching message over");
			} catch (InterruptedException e) {
			}
		}

		this.recoverTopicQueueTable();

		// Todo...
		// Recover the transaction redo log via
		// TransactionStateService.tranRedoLog (a ConsumeQueue) recover(), and
		// the transaction state table via recoverStateTable(lastExitOK).
	}

	/**
	 * Rebuilds the commit log's "topic-queueId" -> next logical offset table
	 * from the recovered consume queues, and corrects each queue's minimum
	 * logical offset against the commit log's minimum physical offset.
	 */
	private void recoverTopicQueueTable() {
		final HashMap<String/* topic-queueid */, Long/* offset */> offsetTable = new HashMap<String, Long>(1024);
		final long minPhyOffset = this.commitLog.getMinOffset();

		for (ConcurrentHashMap<Integer/* queueId */, ConsumeQueue> queueMap : this.consumeQueueTable.values()) {
			for (ConsumeQueue queue : queueMap.values()) {
				// Next write position for this queue: its maximum logical
				// offset (MapedFileQueue.getMaxOffset / entry size).
				final String key = queue.getTopic() + "-" + queue.getQueueId();
				offsetTable.put(key, queue.getMaxOffsetInQuque());
				// Align the queue's minLogicOffset with the oldest data still
				// present in the commit log.
				queue.correctMinOffset(minPhyOffset);
			}
		}

		this.commitLog.setTopicQueueTable(offsetTable);
	}

	/**
	 * Runs recovery on every ConsumeQueue registered in consumeQueueTable.
	 */
	private void recoverConsumeQueue() {
		for (ConcurrentHashMap<Integer, ConsumeQueue> queueMap : this.consumeQueueTable.values()) {
			for (ConsumeQueue queue : queueMap.values()) {
				queue.recover();
			}
		}
	}

	/**
	 * Appends one position entry for a stored message to the consume queue
	 * of the given topic/queueId (creating the queue on demand).
	 */
	public void putMessagePostionInfo(String topic, int queueId, long offset, int size, long tagsCode, long storeTimestamp, long logicOffset) {
		ConsumeQueue cq = this.findConsumeQueue(topic, queueId);
		// Write the consume queue entry into the queue's mapped buffer.
		cq.putMessagePostionInfoWrapper(offset, size, tagsCode, storeTimestamp, logicOffset);
	}

	// Hands a request to the DispatchMessageService worker thread, which
	// builds consume queue (and, when enabled, index) entries for it.
	public void putDispatchRequest(final DispatchRequest dispatchRequest) {
		this.dispatchMessageService.putRequest(dispatchRequest);
	}

	public DispatchMessageService getDispatchMessageService() {
		return dispatchMessageService;
	}

	public AllocateMapedFileService getAllocateMapedFileService() {
		return allocateMapedFileService;
	}

	public StoreStatsService getStoreStatsService() {
		return storeStatsService;
	}

	// Returns the same flags object as getRunningFlags(); kept for API
	// compatibility.
	public RunningFlags getAccessRights() {
		return runningFlags;
	}

	public ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> getConsumeQueueTable() {
		return consumeQueueTable;
	}

	public StoreCheckpoint getStoreCheckpoint() {
		return storeCheckpoint;
	}

	public HAService getHaService() {
		return haService;
	}

	public ScheduleMessageService getScheduleMessageService() {
		return scheduleMessageService;
	}

	public RunningFlags getRunningFlags() {
		return runningFlags;
	}

	/**
	 * Periodic service that deletes expired physical commit log files.
	 * Deletion is triggered by schedule (deleteWhen), by disk usage
	 * watermarks, or by a manual request; it also retries deleting a first
	 * file that got stuck.
	 */
	class CleanCommitLogService {
		// Maximum number of runs a single manual trigger keeps deletion forced.
		private final static int MaxManualDeleteFileTimes = 20;
		// Disk usage warning watermark; above it the store is marked disk-full
		// (stops accepting new messages, self-protection) and cleans forcibly.
		private final double DiskSpaceWarningLevelRatio = Double.parseDouble(System.getProperty("rocketmq.broker.diskSpaceWarningLevelRatio", "0.90"));
		// Disk usage watermark above which files are deleted forcibly.
		private final double DiskSpaceCleanForciblyRatio = Double.parseDouble(System.getProperty("rocketmq.broker.diskSpaceCleanForciblyRatio", "0.85"));
		private long lastRedeleteTimestamp = 0;
		// Remaining forced-deletion runs from a manual trigger.
		private volatile int manualDeleteFileSeveralTimes = 0;
		// When true, force-delete files on the next run.
		private volatile boolean cleanImmediately = false;

		// Arms the manual trigger for the next MaxManualDeleteFileTimes runs.
		public void excuteDeleteFilesManualy() {
			this.manualDeleteFileSeveralTimes = MaxManualDeleteFileTimes;
			DefaultMessageStore.log.info("excuteDeleteFilesManualy was invoked");
		}

		public void run() {
			try {
				this.deleteExpiredFiles();

				this.redeleteHangedFile();
			} catch (Exception e) {
				DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
			}
		}

		public String getServiceName() {
			return CleanCommitLogService.class.getSimpleName();
		}

		/**
		 * The oldest file may hang (still referenced) during deletion;
		 * periodically retry destroying it.
		 */
		private void redeleteHangedFile() {
			int interval = DefaultMessageStore.this.getMessageStoreConfig().getRedeleteHangedFileInterval();
			long currentTimestamp = System.currentTimeMillis();
			if ((currentTimestamp - this.lastRedeleteTimestamp) > interval) {
				this.lastRedeleteTimestamp = currentTimestamp;
				int destroyMapedFileIntervalForcibly = DefaultMessageStore.this.getMessageStoreConfig().getDestroyMapedFileIntervalForcibly();
				if (DefaultMessageStore.this.commitLog.retryDeleteFirstFile(destroyMapedFileIntervalForcibly)) {
					// TODO
				}
			}
		}

		// Deletes commit log files older than the retention time when the
		// schedule, the disk usage, or a manual trigger says so.
		private void deleteExpiredFiles() {
			int deleteCount = 0;
			long fileReservedTime = DefaultMessageStore.this.getMessageStoreConfig().getFileReservedTime();
			int deletePhysicFilesInterval = DefaultMessageStore.this.getMessageStoreConfig().getDeleteCommitLogFilesInterval();
			int destroyMapedFileIntervalForcibly = DefaultMessageStore.this.getMessageStoreConfig().getDestroyMapedFileIntervalForcibly();

			boolean timeup = this.isTimeToDelete();
			boolean spacefull = this.isSpaceToDelete();
			boolean manualDelete = this.manualDeleteFileSeveralTimes > 0;

			// Delete physical queue files.
			if (timeup || spacefull || manualDelete) {

				if (manualDelete)
					this.manualDeleteFileSeveralTimes--;

				// Whether to force-delete files right away.
				boolean cleanAtOnce = DefaultMessageStore.this.getMessageStoreConfig().isCleanFileForciblyEnable() && this.cleanImmediately;

				log.info("begin to delete before {} hours file. timeup: {} spacefull: {} manualDeleteFileSeveralTimes: {} cleanAtOnce: {}", //
						fileReservedTime, //
						timeup, //
						spacefull, //
						manualDeleteFileSeveralTimes, //
						cleanAtOnce);

				// Convert the retention time from hours to milliseconds.
				fileReservedTime *= 60 * 60 * 1000;

				deleteCount = DefaultMessageStore.this.commitLog.deleteExpiredFile(fileReservedTime, deletePhysicFilesInterval, destroyMapedFileIntervalForcibly, cleanAtOnce);
				if (deleteCount > 0) {
					// TODO
				}
				// Danger: the disk is (nearly) full, yet no file could be deleted.
				else if (spacefull) {
					// XXX: warn and notify me
					log.warn("disk space will be full soon, but delete file failed.");
				}
			}
		}

		/**
		 * Space-based trigger: true when the commit log or consume queue
		 * partition usage exceeds diskMaxUsedSpaceRatio. Also updates the
		 * disk-full running flag and the cleanImmediately hint as a side
		 * effect.
		 */
		private boolean isSpaceToDelete() {
			double ratio = DefaultMessageStore.this.getMessageStoreConfig().getDiskMaxUsedSpaceRatio() / 100.0;

			cleanImmediately = false;

			// Check the commit log (physical files) partition.
			{
				String storePathPhysic = DefaultMessageStore.this.getMessageStoreConfig().getStorePathCommitLog();
				double physicRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathPhysic);
				if (physicRatio > DiskSpaceWarningLevelRatio) {
					boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskFull();
					if (diskok) {
						DefaultMessageStore.log.error("physic disk maybe full soon " + physicRatio + ", so mark disk full");
						System.gc();
					}

					cleanImmediately = true;
				} else if (physicRatio > DiskSpaceCleanForciblyRatio) {
					cleanImmediately = true;
				} else {
					boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskOK();
					if (!diskok) {
						DefaultMessageStore.log.info("physic disk space OK " + physicRatio + ", so mark disk ok");
					}
				}

				if (physicRatio < 0 || physicRatio > ratio) {
					DefaultMessageStore.log.info("physic disk maybe full soon, so reclaim space, " + physicRatio);
					return true;
				}
			}

			// Check the consume queue (logic files) partition.
			{
				String storePathLogics = StorePathConfigHelper.getStorePathConsumeQueue(DefaultMessageStore.this.getMessageStoreConfig().getStorePathRootDir());
				double logicsRatio = UtilAll.getDiskPartitionSpaceUsedPercent(storePathLogics);
				if (logicsRatio > DiskSpaceWarningLevelRatio) {
					boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskFull();
					if (diskok) {
						DefaultMessageStore.log.error("logics disk maybe full soon " + logicsRatio + ", so mark disk full");
						System.gc();
					}

					cleanImmediately = true;
				} else if (logicsRatio > DiskSpaceCleanForciblyRatio) {
					cleanImmediately = true;
				} else {
					boolean diskok = DefaultMessageStore.this.runningFlags.getAndMakeDiskOK();
					if (!diskok) {
						DefaultMessageStore.log.info("logics disk space OK " + logicsRatio + ", so mark disk ok");
					}
				}

				if (logicsRatio < 0 || logicsRatio > ratio) {
					DefaultMessageStore.log.info("logics disk maybe full soon, so reclaim space, " + logicsRatio);
					return true;
				}
			}

			return false;
		}

		/**
		 * Time-based trigger: true when the current time matches the
		 * configured deleteWhen schedule.
		 */
		private boolean isTimeToDelete() {
			String when = DefaultMessageStore.this.getMessageStoreConfig().getDeleteWhen();
			if (UtilAll.isItTimeToDo(when)) {
				DefaultMessageStore.log.info("it's time to reclaim disk space, " + when);
				return true;
			}

			return false;
		}

		public int getManualDeleteFileSeveralTimes() {
			return manualDeleteFileSeveralTimes;
		}

		public void setManualDeleteFileSeveralTimes(int manualDeleteFileSeveralTimes) {
			this.manualDeleteFileSeveralTimes = manualDeleteFileSeveralTimes;
		}
	}

	/**
	 * Periodic service that deletes consume queue (logic) files and index
	 * files whose referenced data no longer exists in the commit log.
	 */
	class CleanConsumeQueueService {
		// Commit log minimum offset seen on the previous run; deletion work is
		// only performed when it has advanced since then.
		private long lastPhysicalMinOffset = 0;

		private void deleteExpiredFiles() {
			int deleteLogicsFilesInterval = DefaultMessageStore.this.getMessageStoreConfig().getDeleteConsumeQueueFilesInterval();

			long minOffset = DefaultMessageStore.this.commitLog.getMinOffset();
			if (minOffset > this.lastPhysicalMinOffset) {
				this.lastPhysicalMinOffset = minOffset;

				// Delete logic queue files.
				ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;

				for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
					for (ConsumeQueue logic : maps.values()) {
						// Drop queue files that only reference data below the
						// commit log's minimum physical offset.
						int deleteCount = logic.deleteExpiredFile(minOffset);

						// Pace the deletions to limit I/O pressure.
						if (deleteCount > 0 && deleteLogicsFilesInterval > 0) {
							try {
								Thread.sleep(deleteLogicsFilesInterval);
							} catch (InterruptedException e) {
							}
						}
					}
				}

				// Delete expired index files as well.
				DefaultMessageStore.this.indexService.deleteExpiredFile(minOffset);
			}
		}

		public void run() {
			try {
				this.deleteExpiredFiles();
			} catch (Exception e) {
				DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
			}
		}

		public String getServiceName() {
			return CleanConsumeQueueService.class.getSimpleName();
		}
	}

	/**
	 * Service thread that periodically flushes consume queue data to disk
	 * and persists the store checkpoint after a full flush.
	 */
	class FlushConsumeQueueService extends ServiceThread {
		private static final int RetryTimesOver = 3;
		private long lastFlushTimestamp = 0;

		private void doFlush(int retryTimes) {
			/**
			 * If > 0, a queue must have at least this many dirty pages before
			 * it is flushed; if 0, flush whatever is there.
			 */
			int flushConsumeQueueLeastPages = DefaultMessageStore.this.getMessageStoreConfig().getFlushConsumeQueueLeastPages();

			// The final (shutdown) call flushes everything unconditionally.
			if (retryTimes == RetryTimesOver) {
				flushConsumeQueueLeastPages = 0;
			}

			long logicsMsgTimestamp = 0;

			// Thorough flush: every flushConsumeQueueThoroughInterval ms,
			// force a full flush and snapshot the checkpoint's logics message
			// timestamp so it can be persisted afterwards.
			int flushConsumeQueueThoroughInterval = DefaultMessageStore.this.getMessageStoreConfig().getFlushConsumeQueueThoroughInterval();
			long currentTimeMillis = System.currentTimeMillis();
			if (currentTimeMillis >= (this.lastFlushTimestamp + flushConsumeQueueThoroughInterval)) {
				this.lastFlushTimestamp = currentTimeMillis;
				flushConsumeQueueLeastPages = 0;
				logicsMsgTimestamp = DefaultMessageStore.this.getStoreCheckpoint().getLogicsMsgTimestamp();
			}

			ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;

			// Commit every queue, retrying each up to retryTimes until it
			// reports success.
			for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
				for (ConsumeQueue cq : maps.values()) {
					boolean result = false;
					for (int i = 0; i < retryTimes && !result; i++) {
						result = cq.commit(flushConsumeQueueLeastPages);
					}
				}
			}

			// After a full flush, persist the checkpoint (restoring the
			// timestamp captured before the flush, if any).
			if (0 == flushConsumeQueueLeastPages) {
				if (logicsMsgTimestamp > 0) {
					DefaultMessageStore.this.getStoreCheckpoint().setLogicsMsgTimestamp(logicsMsgTimestamp);
				}
				DefaultMessageStore.this.getStoreCheckpoint().flush();
			}
		}

		public void run() {
			DefaultMessageStore.log.info(this.getServiceName() + " service started");

			while (!this.isStoped()) {
				try {
					int interval = DefaultMessageStore.this.getMessageStoreConfig().getFlushIntervalConsumeQueue();
					this.waitForRunning(interval);
					this.doFlush(1);
				} catch (Exception e) {
					DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
				}
			}

			// On a normal shutdown, make sure everything is flushed before exit.
			this.doFlush(RetryTimesOver);

			DefaultMessageStore.log.info(this.getServiceName() + " service end");
		}

		@Override
		public String getServiceName() {
			return FlushConsumeQueueService.class.getSimpleName();
		}

		@Override
		public long getJointime() {
			return 1000 * 60;
		}
	}

	/**
	 * Message index dispatch service.
	 * Consumes DispatchRequests and creates the consume queue entries (and,
	 * when enabled, index entries) for each stored message. Producers append
	 * to requestsWrite via putRequest, which wakes this thread; on wake-up
	 * the thread swaps requestsWrite with the (normally empty) requestsRead
	 * list and processes the read list, so producers are only blocked for
	 * the duration of the swap.
	 */
	class DispatchMessageService extends ServiceThread {
		private volatile List<DispatchRequest> requestsWrite;
		private volatile List<DispatchRequest> requestsRead;

		public DispatchMessageService(int putMsgIndexHightWater) {
			// Size the buffers somewhat above the flow-control high-water mark.
			putMsgIndexHightWater *= 1.5;
			this.requestsWrite = new ArrayList<DispatchRequest>(putMsgIndexHightWater);
			this.requestsRead = new ArrayList<DispatchRequest>(putMsgIndexHightWater);
		}

		// True while either buffer still holds undispatched requests; used by
		// recovery to wait until all messages reached the logic queues.
		public boolean hasRemainMessage() {
			List<DispatchRequest> reqs = this.requestsWrite;
			if (reqs != null && !reqs.isEmpty()) {
				return true;
			}

			reqs = this.requestsRead;
			if (reqs != null && !reqs.isEmpty()) {
				return true;
			}

			return false;
		}

		public void putRequest(final DispatchRequest dispatchRequest) {
			int requestsWriteSize = 0;
			int putMsgIndexHightWater = DefaultMessageStore.this.getMessageStoreConfig().getPutMsgIndexHightWater();
			synchronized (this) {
				// Enqueue the request into the write buffer and wake the
				// dispatch thread.
				this.requestsWrite.add(dispatchRequest);
				requestsWriteSize = this.requestsWrite.size();
				if (!this.hasNotified) {
					this.hasNotified = true;
					this.notify();
				}
			}

			DefaultMessageStore.this.getStoreStatsService().setDispatchMaxBuffer(requestsWriteSize);

			// Active flow control: briefly stall producers so a fast commit
			// log writer cannot overwhelm consume queue dispatching.
			if (requestsWriteSize > putMsgIndexHightWater) {
				try {
					if (log.isDebugEnabled()) {
						log.debug("Message index buffer size " + requestsWriteSize + " > high water " + putMsgIndexHightWater);
					}

					Thread.sleep(1);
				} catch (InterruptedException e) {
				}
			}
		}

		// Exchanges the write and read buffers (called from onWaitEnd with
		// the monitor held, or explicitly during shutdown).
		private void swapRequests() {
			List<DispatchRequest> tmp = this.requestsWrite;
			this.requestsWrite = this.requestsRead;
			this.requestsRead = tmp;
		}

		private void doDispatch() {
			if (!this.requestsRead.isEmpty()) {
				for (DispatchRequest req : this.requestsRead) {
					// The transaction type is encoded in the sysFlag bits.
					final int tranType = MessageSysFlag.getTransactionValue(req.getSysFlag());
					// 1. Dispatch the message position into its ConsumeQueue.
					switch (tranType) {
						// Non-transactional message
						case MessageSysFlag.TransactionNotType :
							// or committed transactional message:
						case MessageSysFlag.TransactionCommitType :
							// write the position entry into the consume queue
							// file of the target queue.
							DefaultMessageStore.this.putMessagePostionInfo(req.getTopic(), req.getQueueId(), req.getCommitLogOffset(), req.getMsgSize(), req.getTagsCode(), req.getStoreTimestamp(), req.getConsumeQueueOffset());
							break;
						// Prepared transaction
						case MessageSysFlag.TransactionPreparedType :
							// or rolled-back transaction:
						case MessageSysFlag.TransactionRollbackType :
							// not placed into the consume queue, so consumers
							// cannot see the message before a commit.
							break;
					}
				}

				// 2. Build index entries when the message index is enabled.
				if (DefaultMessageStore.this.getMessageStoreConfig().isMessageIndexEnable()) {
					DefaultMessageStore.this.indexService.putRequest(this.requestsRead.toArray());
				}

				this.requestsRead.clear();
			}
		}

		public void run() {
			DefaultMessageStore.log.info(this.getServiceName() + " service started");

			while (!this.isStoped()) {
				try {
					this.waitForRunning(0);
					this.doDispatch();
				} catch (Exception e) {
					DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
				}
			}

			// On a normal shutdown, give producers time to finish, then
			// dispatch everything that is still buffered.
			try {
				Thread.sleep(5 * 1000);
			} catch (InterruptedException e) {
				DefaultMessageStore.log.warn("DispatchMessageService Exception, ", e);
			}

			synchronized (this) {
				this.swapRequests();
			}

			this.doDispatch();

			DefaultMessageStore.log.info(this.getServiceName() + " service end");
		}

		@Override
		protected void onWaitEnd() {
			this.swapRequests();
		}

		@Override
		public String getServiceName() {
			return DispatchMessageService.class.getSimpleName();
		}
	}

	/**
	 * SLAVE: loads messages from the physical queue (commit log) and
	 * dispatches them to the logic queues. The thread keeps watching the
	 * commit log from reputFromOffset onward; whenever new data appears it
	 * hands each message to the dispatch service (which builds consume
	 * queue and index entries) and advances reputFromOffset accordingly.
	 */
	class ReputMessageService extends ServiceThread {
		// Parse the physical queue from this offset onward and dispatch the
		// messages to the logic queues.
		private volatile long reputFromOffset = 0;

		public long getReputFromOffset() {
			return reputFromOffset;
		}

		public void setReputFromOffset(long reputFromOffset) {
			this.reputFromOffset = reputFromOffset;
		}

		private void doReput() {
			for (boolean doNext = true; doNext;) {
				// Fetch all commit log data starting at reputFromOffset.
				SelectMapedBufferResult result = DefaultMessageStore.this.commitLog.getData(reputFromOffset);
				if (result != null) {
					try {
						// When a freshly started slave pulls from the end of a
						// master that already has data, the initial
						// reputFromOffset may not match the commit log; align
						// it with the actual start offset of the mapped data.
						this.reputFromOffset = result.getStartOffset();

						for (int readSize = 0; readSize < result.getSize() && doNext;) {
							// Decode one message unit (topic, queueId,
							// physicOffset, totalSize, tagsCode,
							// storeTimestamp, queueOffset, keys, sysFlag,
							// preparedTransactionOffset) into a
							// DispatchRequest.
							DispatchRequest dispatchRequest = DefaultMessageStore.this.commitLog.checkMessageAndReturnSize(result.getByteBuffer(), false, false);
							int size = dispatchRequest.getMsgSize();
							// A valid message.
							if (size > 0) {
								// Queue it for DispatchMessageService, which
								// builds the consume queue/index entries.
								DefaultMessageStore.this.putDispatchRequest(dispatchRequest);

								// FIXED BUG By shijia
								// Advance reputFromOffset by the message's
								// total size.
								this.reputFromOffset += size;
								readSize += size;
								DefaultMessageStore.this.storeStatsService.getSinglePutMessageTopicTimesTotal(dispatchRequest.getTopic()).incrementAndGet();
								DefaultMessageStore.this.storeStatsService.getSinglePutMessageTopicSizeTotal(dispatchRequest.getTopic()).addAndGet(dispatchRequest.getMsgSize());
							}
							// Corrupt data in the middle of the file: stop.
							else if (size == -1) {
								doNext = false;
							}
							// End of file reached: roll to the next file.
							else if (size == 0) {
								this.reputFromOffset = DefaultMessageStore.this.commitLog.rollNextFile(this.reputFromOffset);
								readSize = result.getSize();
							}
						}
					} finally {
						result.release();
					}
				} else {
					// No data available: leave the loop; the run loop retries
					// from reputFromOffset after roughly one second.
					doNext = false;
				}
			}
		}

		@Override
		public void run() {
			DefaultMessageStore.log.info(this.getServiceName() + " service started");

			while (!this.isStoped()) {
				try {
					this.waitForRunning(1000);
					this.doReput();
				} catch (Exception e) {
					DefaultMessageStore.log.warn(this.getServiceName() + " service has exception. ", e);
				}
			}

			DefaultMessageStore.log.info(this.getServiceName() + " service end");
		}

		@Override
		public String getServiceName() {
			return ReputMessageService.class.getSimpleName();
		}

	}

	/**
	 * Resolves a consume queue offset to the corresponding commit log
	 * (physical) offset, or 0 when the entry cannot be read.
	 */
	@Override
	public long getCommitLogOffsetInQueue(String topic, int queueId, long cqOffset) {
		final ConsumeQueue consumeQueue = findConsumeQueue(topic, queueId);
		if (null == consumeQueue) {
			return 0;
		}

		final SelectMapedBufferResult indexBuffer = consumeQueue.getIndexBuffer(cqOffset);
		if (null == indexBuffer) {
			return 0;
		}

		try {
			// The first long of a consume queue entry is the physical offset.
			return indexBuffer.getByteBuffer().getLong();
		} finally {
			indexBuffer.release();
		}
	}

	// Minimum physical offset still available in the commit log.
	@Override
	public long getMinPhyOffset() {
		return this.commitLog.getMinOffset();
	}

	/**
	 * How many bytes the slave lags behind. The master's write position is
	 * the commit log max offset (last mapped file's fileFromOffset +
	 * wrotePostion); the replicated position comes from
	 * HAService.push2SlaveMaxOffset, the largest offset synced to the slave.
	 */
	@Override
	public long slaveFallBehindMuch() {
		return this.commitLog.getMaxOffset() - this.haService.getPush2SlaveMaxOffset().get();
	}

	/**
	 * Destroys the consume queues of every registered topic that is absent
	 * from the given topic set (except the internal schedule topic), removes
	 * their entries from the commit log's topic/queue offset table, and
	 * drops the topic from consumeQueueTable. Always returns 0.
	 */
	@Override
	public int cleanUnusedTopic(Set<String> topics) {
		// Walk every topic registered in consumeQueueTable.
		Iterator<Entry<String/* topic */, ConcurrentHashMap<Integer, ConsumeQueue>>> it = this.consumeQueueTable.entrySet().iterator();
		while (it.hasNext()) {
			Entry<String, ConcurrentHashMap<Integer, ConsumeQueue>> next = it.next();
			String topic = next.getKey();
			// A topic is removable when it is no longer in the supplied set
			// (its config was deleted) and it is not the schedule topic.
			if (!topics.contains(topic) && !topic.equals(ScheduleMessageService.SCHEDULE_TOPIC)) {
				ConcurrentHashMap<Integer, ConsumeQueue> queueTable = next.getValue();
				for (ConsumeQueue cq : queueTable.values()) {
					// Delete all physical files under this topic/queue.
					cq.destroy();
					log.info("cleanUnusedTopic: {} {} ConsumeQueue cleaned", //
							cq.getTopic(), //
							cq.getQueueId() //
					);
					// Remove the "topic-queueId" record from
					// CommitLog.topicQueueTable.
					this.commitLog.removeQueurFromTopicQueueTable(cq.getTopic(), cq.getQueueId());
				}
				// Drop the topic entry itself; Iterator.remove keeps the
				// iteration safe.
				it.remove();

				log.info("cleanUnusedTopic: {},topic destroyed", topic);
			}
		}

		return 0;
	}

	/**
	 * Builds a map of messageId -> logical queue offset for the entries of a
	 * consume queue starting at minOffset, clamped to the queue's actual
	 * min/max offsets. Message ids are derived from the store host address
	 * and each entry's commit log offset. Returns an empty map when the
	 * store is shut down or nothing is in range.
	 */
	public Map<String, Long> getMessageIds(final String topic, final int queueId, long minOffset, long maxOffset, SocketAddress storeHost) {
		Map<String, Long> messageIds = new HashMap<String, Long>();
		if (this.shutdown) {
			return messageIds;
		}

		ConsumeQueue consumeQueue = findConsumeQueue(topic, queueId);
		if (consumeQueue != null) {
			// Clamp the requested range to what the queue actually holds.
			minOffset = Math.max(minOffset, consumeQueue.getMinOffsetInQuque());
			maxOffset = Math.min(maxOffset, consumeQueue.getMaxOffsetInQuque());

			if (maxOffset == 0) {
				return messageIds;
			}

			long nextOffset = minOffset;
			while (nextOffset < maxOffset) {
				SelectMapedBufferResult bufferConsumeQueue = consumeQueue.getIndexBuffer(nextOffset);
				if (bufferConsumeQueue != null) {
					try {
						int i = 0;
						for (; i < bufferConsumeQueue.getSize(); i += ConsumeQueue.CQStoreUnitSize) {
							// First long of each entry: the commit log offset.
							long offsetPy = bufferConsumeQueue.getByteBuffer().getLong();
							final ByteBuffer msgIdMemory = ByteBuffer.allocate(MessageDecoder.MSG_ID_LENGTH);
							String msgId = MessageDecoder.createMessageId(msgIdMemory, MessageExt.SocketAddress2ByteBuffer(storeHost), offsetPy);
							messageIds.put(msgId, nextOffset++);
							// NOTE(review): this tests > rather than >=, so one
							// entry at offset maxOffset can still be included
							// — confirm whether the upper bound is meant to be
							// exclusive.
							if (nextOffset > maxOffset) {
								return messageIds;
							}
						}
					} finally {
						// Always release the mapped buffer.
						bufferConsumeQueue.release();
					}
				} else {
					return messageIds;
				}
			}
		}
		return messageIds;
	}

	/**
	 * Returns true when the message at offsetPy is assumed to live on disk:
	 * data further behind the newest offset (maxOffsetPy) than the in-memory
	 * quota — a configured fraction of total physical RAM — is treated as no
	 * longer page-cached.
	 */
	private boolean checkInDiskByCommitOffset(long offsetPy, long maxOffsetPy) {
		final double inMemoryRatio = this.messageStoreConfig.getAccessMessageInMemoryMaxRatio() / 100.0;
		final long memoryQuota = (long) (StoreUtil.TotalPhysicalMemorySize * inMemoryRatio);
		return (maxOffsetPy - offsetPy) > memoryQuota;
	}

	/**
	 * Checks whether the message referenced by the given consume queue
	 * offset is assumed to live on disk rather than in page cache.
	 */
	@Override
	public boolean checkInDiskByConsumeOffset(final String topic, final int queueId, long consumeOffset) {
		// The max offset is guarded by a read/write lock; read it once up
		// front to keep the locking overhead low.
		final long maxOffsetPy = this.commitLog.getMaxOffset();

		ConsumeQueue consumeQueue = findConsumeQueue(topic, queueId);
		if (consumeQueue != null) {
			SelectMapedBufferResult bufferConsumeQueue = consumeQueue.getIndexBuffer(consumeOffset);
			if (bufferConsumeQueue != null) {
				try {
					// Only the first entry is examined: the loop returns on
					// its first iteration (the physical offset is the first
					// long of a consume queue entry).
					for (int i = 0; i < bufferConsumeQueue.getSize();) {
						i += ConsumeQueue.CQStoreUnitSize;
						long offsetPy = bufferConsumeQueue.getByteBuffer().getLong();
						return checkInDiskByCommitOffset(offsetPy, maxOffsetPy);
					}
				} finally {
					// Always release the mapped buffer.
					bufferConsumeQueue.release();
				}
			} else {
				return false;
			}
		}
		return false;
	}

	// Accessor for the broker-level statistics manager.
	public BrokerStatsManager getBrokerStatsManager() {
		return brokerStatsManager;
	}
}
