/**
 * Copyright (C) 2010-2013 Alibaba Group Holding Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.rocketmq.store;

import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.rocketmq.common.ServiceThread;
import com.alibaba.rocketmq.common.UtilAll;
import com.alibaba.rocketmq.common.constant.LoggerName;
import com.alibaba.rocketmq.common.message.MessageAccessor;
import com.alibaba.rocketmq.common.message.MessageConst;
import com.alibaba.rocketmq.common.message.MessageDecoder;
import com.alibaba.rocketmq.common.message.MessageExt;
import com.alibaba.rocketmq.common.sysflag.MessageSysFlag;
import com.alibaba.rocketmq.store.config.BrokerRole;
import com.alibaba.rocketmq.store.config.FlushDiskType;
import com.alibaba.rocketmq.store.ha.HAService;
import com.alibaba.rocketmq.store.schedule.ScheduleMessageService;

/**
 * Store all metadata downtime for recovery, data protection reliability
 * 
 * @author shijia.wxr<vintage.wang@gmail.com>
 * @since 2013-7-21
 */
public class CommitLog {
	private static final Logger log = LoggerFactory.getLogger(LoggerName.StoreLoggerName);
	// Message's MAGIC CODE daa320a7
	public final static int MessageMagicCode = 0xAABBCCDD ^ 1880681586 + 8;
	// End of file empty MAGIC CODE cbd43194
	private final static int BlankMagicCode = 0xBBCCDDEE ^ 1880681586 + 8;
	private final MapedFileQueue mapedFileQueue;
	private final DefaultMessageStore defaultMessageStore;
	private final FlushCommitLogService flushCommitLogService;
	private final AppendMessageCallback appendMessageCallback;
	private HashMap<String/* topic-queueid */, Long/* offset */> topicQueueTable = new HashMap<String, Long>(1024);

	/**
	 * Builds the commit log over the configured store path and selects the
	 * flush strategy according to the broker's flush-disk type.
	 *
	 * @param defaultMessageStore owning store; source of all configuration
	 */
	public CommitLog(final DefaultMessageStore defaultMessageStore) {
		this.mapedFileQueue = new MapedFileQueue(defaultMessageStore.getMessageStoreConfig().getStorePathCommitLog(), defaultMessageStore.getMessageStoreConfig().getMapedFileSizeCommitLog(), defaultMessageStore.getAllocateMapedFileService());
		this.defaultMessageStore = defaultMessageStore;

		if (FlushDiskType.SYNC_FLUSH == defaultMessageStore.getMessageStoreConfig().getFlushDiskType()) {
			// Synchronous flush: qualifying writes wait for the group commit
			this.flushCommitLogService = new GroupCommitService();
		} else {
			// Asynchronous flush: a background thread flushes periodically
			this.flushCommitLogService = new FlushRealTimeService();
		}

		this.appendMessageCallback = new DefaultAppendMessageCallback(defaultMessageStore.getMessageStoreConfig().getMaxMessageSize());
	}

	/**
	 * Loads every file under $HOME/store/commitlog into the backing
	 * MapedFileQueue's file list.
	 *
	 * @return true when all commit log files were mapped successfully
	 */
	public boolean load() {
		final boolean ok = this.mapedFileQueue.load();
		log.info("load commit log " + (ok ? "OK" : "Failed"));
		return ok;
	}

	/**
	 * Starts the background flush service thread.
	 */
	public void start() {
		this.flushCommitLogService.start();
	}

	/**
	 * Stops the background flush service thread.
	 */
	public void shutdown() {
		this.flushCommitLogService.shutdown();
	}

	/**
	 * Returns the minimum physical offset still stored in the commit log,
	 * or -1 when no commit log file exists.
	 */
	public long getMinOffset() {
		// The first mapped file holds the smallest offsets
		final MapedFile firstFile = this.mapedFileQueue.getFirstMapedFileOnLock();
		if (firstFile == null) {
			return -1;
		}

		// If the first file is already being destroyed, report the starting
		// offset of the next file instead
		return firstFile.isAvailable()
				? firstFile.getFileFromOffset()
				: this.rollNextFile(firstFile.getFileFromOffset());
	}

	/*
	 * Computes the starting offset of the file that follows the one
	 * containing the given global offset (files have a fixed size,
	 * typically 1 GB).
	 */
	public long rollNextFile(final long offset) {
		final int fileSize = this.defaultMessageStore.getMessageStoreConfig().getMapedFileSizeCommitLog();
		// Round down to the current file boundary, then advance one file
		return offset - (offset % fileSize) + fileSize;
	}

	/*
	 * Returns the maximum physical offset written so far.
	 */
	public long getMaxOffset() {
		return this.mapedFileQueue.getMaxOffset();
	}

	/**
	 * Deletes commit log files older than the expiration time; delegates to
	 * the mapped file queue.
	 *
	 * @param expiredTime retention threshold
	 * @param deleteFilesInterval pause between individual file deletions
	 * @param intervalForcibly grace period before a referenced file is forced out
	 * @param cleanImmediately skip the retention check and clean at once
	 * @return the number of files removed
	 */
	public int deleteExpiredFile(final long expiredTime, final int deleteFilesInterval, final long intervalForcibly, final boolean cleanImmediately) {
		return this.mapedFileQueue.deleteExpiredFileByTime(expiredTime, deleteFilesInterval, intervalForcibly, cleanImmediately);
	}

	/**
	 * Reads all remaining bytes of the file that contains the given global
	 * offset; used for data replication.
	 *
	 * @param offset global commit log offset to start reading from
	 * @return the remaining mapped bytes, or null when the offset maps to no file
	 */
	public SelectMapedBufferResult getData(final long offset) {
		// Fall back to the first file only when reading from the very
		// beginning (the redundant "? true : false" ternary was removed)
		return this.getData(offset, 0 == offset);
	}

	/**
	 * Returns all bytes of the file containing the given offset, starting at
	 * that offset.
	 *
	 * @param offset global commit log offset to start reading from
	 * @param returnFirstOnNotFound fall back to the first file when the offset is not found
	 * @return the selected buffer, or null when no matching file exists
	 */
	public SelectMapedBufferResult getData(final long offset, final boolean returnFirstOnNotFound) {
		final int fileSize = this.defaultMessageStore.getMessageStoreConfig().getMapedFileSizeCommitLog();
		// Locate the mapped file that contains the global offset
		final MapedFile file = this.mapedFileQueue.findMapedFileByOffset(offset, returnFirstOnNotFound);
		if (file == null) {
			return null;
		}

		// Translate the global offset into a position inside that file and
		// return everything from there to the end of the written region
		return file.selectMapedBuffer((int) (offset % fileSize));
	}

	/**
	 * Recovers the in-memory commit log state after a NORMAL broker exit,
	 * when all data had already been flushed. Invoked during broker startup.
	 */
	public void recoverNormally() {
		boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
		final List<MapedFile> mapedFiles = this.mapedFileQueue.getMapedFiles();
		if (!mapedFiles.isEmpty()) {
			// Began to recover from the last third file; when fewer than three
			// files exist, scan from the first one
			int index = mapedFiles.size() - 3;
			if (index < 0)
				index = 0;

			MapedFile mapedFile = mapedFiles.get(index);
			ByteBuffer byteBuffer = mapedFile.sliceByteBuffer();
			long processOffset = mapedFile.getFileFromOffset();
			long mapedFileOffset = 0;
			while (true) {
				// Validate one message unit (CRC check optional)
				DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
				int size = dispatchRequest.getMsgSize();
				// Normal data: advance past this message
				if (size > 0) {
					mapedFileOffset += size;
				}
				// Intermediate file read error (CRC failure or malformed data)
				else if (size == -1) {
					log.info("recover physics file end, " + mapedFile.getFileName());
					break;
				}
				//
				// Come the end of the file, switch to the next file
				// Since the return 0 representatives met last hole, this can
				// not be included in truncate offset
				//
				// Size 0 means the end-of-file blank record was hit: continue
				// with the next mapped file until a CRC failure or until every
				// message has been scanned
				else if (size == 0) {
					index++;
					if (index >= mapedFiles.size()) {
						// Current branch can not happen
						log.info("recover last 3 physics file over, last maped file " + mapedFile.getFileName());
						break;
					} else {
						mapedFile = mapedFiles.get(index);
						byteBuffer = mapedFile.sliceByteBuffer();
						processOffset = mapedFile.getFileFromOffset();
						mapedFileOffset = 0;
						log.info("recover next physics file, " + mapedFile.getFileName());
					}
				}
			}
			// End of valid data = fileFromOffset of the last scanned file plus
			// the number of valid bytes read inside it
			processOffset += mapedFileOffset;
			// Restore the queue's committedWhere (i.e. the flushed position)
			this.mapedFileQueue.setCommittedWhere(processOffset);
			// Drop any commit log files (and their MapedFile objects) located
			// after the end of the valid data
			this.mapedFileQueue.truncateDirtyFiles(processOffset);
		}
	}

	/**
	 * Checks one message unit with body reading enabled.
	 *
	 * @see #checkMessageAndReturnSize(java.nio.ByteBuffer, boolean, boolean)
	 */
	public DispatchRequest checkMessageAndReturnSize(java.nio.ByteBuffer byteBuffer, final boolean checkCRC) {
		return this.checkMessageAndReturnSize(byteBuffer, checkCRC, true);
	}

	/**
	 * Checks a single serialized message unit read from the buffer and builds
	 * a DispatchRequest from its header fields. Used on every message during
	 * recovery. The buffer position advances past the message on success.
	 *
	 * @param byteBuffer buffer positioned at the start of a message unit
	 * @param checkCRC whether to verify the body CRC
	 * @param readBody whether to read the body (false skips over it)
	 * @return msgSize 0 when the end-of-file blank magic code is met,
	 *         msgSize > 0 for a normal message, msgSize -1 on checksum
	 *         failure or malformed data
	 */
	public DispatchRequest checkMessageAndReturnSize(java.nio.ByteBuffer byteBuffer, final boolean checkCRC, final boolean readBody) {
		try {
			// Reuse the append callback's scratch buffer as temporary storage
			java.nio.ByteBuffer byteBufferMessage = ((DefaultAppendMessageCallback) this.appendMessageCallback).getMsgStoreItemMemory();
			byte[] bytesContent = byteBufferMessage.array();

			// 1 TOTALSIZE
			int totalSize = byteBuffer.getInt();

			// 2 MAGICCODE (bytes 5-8): distinguishes a real message from the
			// end-of-file filler record
			int magicCode = byteBuffer.getInt();
			switch (magicCode) {
				case MessageMagicCode :
					break;
				case BlankMagicCode :
					// BlankMagicCode (cbd43194) marks the end of the file:
					// report a DispatchRequest with msgSize = 0
					return new DispatchRequest(0);
				default :
					log.warn("found a illegal magic code 0x" + Integer.toHexString(magicCode));
					return new DispatchRequest(-1);
			}

			// 3 BODYCRC
			int bodyCRC = byteBuffer.getInt();

			// 4 QUEUEID
			int queueId = byteBuffer.getInt();

			// 5 FLAG (read to advance the buffer; value unused here — the
			// self-assignment below is a deliberate no-op)
			int flag = byteBuffer.getInt();
			flag = flag + 0;

			// 6 QUEUEOFFSET
			long queueOffset = byteBuffer.getLong();

			// 7 PHYSICALOFFSET
			long physicOffset = byteBuffer.getLong();

			// 8 SYSFLAG
			int sysFlag = byteBuffer.getInt();

			// 9 BORNTIMESTAMP (read to advance the buffer; value unused here)
			long bornTimeStamp = byteBuffer.getLong();
			bornTimeStamp = bornTimeStamp + 0;

			// 10 BORNHOST (IP + PORT, 8 bytes, skipped)
			byteBuffer.get(bytesContent, 0, 8);

			// 11 STORETIMESTAMP
			long storeTimestamp = byteBuffer.getLong();

			// 12 STOREHOST (IP + PORT, 8 bytes, skipped)
			byteBuffer.get(bytesContent, 0, 8);

			// 13 RECONSUMETIMES
			int reconsumeTimes = byteBuffer.getInt();

			// 14 Prepared Transaction Offset
			long preparedTransactionOffset = byteBuffer.getLong();

			// 15 BODY
			int bodyLen = byteBuffer.getInt();
			if (bodyLen > 0) {
				if (readBody) {
					byteBuffer.get(bytesContent, 0, bodyLen);

					if (checkCRC) {
						// Verify the stored CRC against the body just read
						int crc = UtilAll.crc32(bytesContent, 0, bodyLen);
						if (crc != bodyCRC) {
							log.warn("CRC check failed " + crc + " " + bodyCRC);
							return new DispatchRequest(-1);
						}
					}
				} else {
					// Skip over the body without copying it
					byteBuffer.position(byteBuffer.position() + bodyLen);
				}
			}

			// 16 TOPIC
			byte topicLen = byteBuffer.get();
			byteBuffer.get(bytesContent, 0, topicLen);
			String topic = new String(bytesContent, 0, topicLen);

			long tagsCode = 0;
			String keys = "";

			// 17 properties
			short propertiesLength = byteBuffer.getShort();
			if (propertiesLength > 0) {
				byteBuffer.get(bytesContent, 0, propertiesLength);
				String properties = new String(bytesContent, 0, propertiesLength);
				Map<String, String> propertiesMap = MessageDecoder.string2messageProperties(properties);

				keys = propertiesMap.get(MessageConst.PROPERTY_KEYS);
				String tags = propertiesMap.get(MessageConst.PROPERTY_TAGS);
				if (tags != null && tags.length() > 0) {
					tagsCode = MessageExtBrokerInner.tagsString2tagsCode(MessageExt.parseTopicFilterType(sysFlag), tags);
				}

				// Timing message processing: for scheduled-topic messages the
				// tags code carries the delivery timestamp instead
				{
					String t = propertiesMap.get(MessageConst.PROPERTY_DELAY_TIME_LEVEL);
					if (ScheduleMessageService.SCHEDULE_TOPIC.equals(topic) && t != null) {
						int delayLevel = Integer.parseInt(t);

						// Clamp to the maximum configured delay level
						if (delayLevel > this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel()) {
							delayLevel = this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel();
						}

						if (delayLevel > 0) {
							tagsCode = this.defaultMessageStore.getScheduleMessageService().computeDeliverTimestamp(delayLevel, storeTimestamp);
						}
					}
				}
			}

			return new DispatchRequest(//
					topic, // 1
					queueId, // 2
					physicOffset, // 3
					totalSize, // 4
					tagsCode, // 5
					storeTimestamp, // 6
					queueOffset, // 7
					keys, // 8
					sysFlag, // 9
					preparedTransactionOffset// 10
			);
		} catch (BufferUnderflowException e) {
			// Truncated record at the end of the buffer: consume the rest
			byteBuffer.position(byteBuffer.limit());
		} catch (Exception e) {
			byteBuffer.position(byteBuffer.limit());
		}
		// Validation failed or nothing readable: report msgSize = -1
		return new DispatchRequest(-1);
	}

	/*
	 * Recovers the in-memory commit log state after an ABNORMAL broker exit
	 * (crash); invoked during broker startup. Unlike normal recovery, which
	 * starts at the third-from-last file, abnormal recovery walks backwards
	 * from the last file to find one consistent with the checkpoint records.
	 */
	public void recoverAbnormally() {
		// recover by the minimum time stamp
		boolean checkCRCOnRecover = this.defaultMessageStore.getMessageStoreConfig().isCheckCRCOnRecover();
		final List<MapedFile> mapedFiles = this.mapedFileQueue.getMapedFiles();
		if (!mapedFiles.isEmpty()) {
			// Looking beginning to recover from which file: walk backwards
			// from the last mapped file
			int index = mapedFiles.size() - 1;
			MapedFile mapedFile = null;
			for (; index >= 0; index--) {
				mapedFile = mapedFiles.get(index);
				// Check whether this file satisfies the checkpoint-based
				// recovery condition
				if (this.isMapedFileMatchedRecover(mapedFile)) {
					log.info("recover from this maped file " + mapedFile.getFileName());
					break;
				}
			}

			if (index < 0) {
				// No file in the whole queue matched: recover from the first
				index = 0;
				mapedFile = mapedFiles.get(index);
			}

			ByteBuffer byteBuffer = mapedFile.sliceByteBuffer();
			long processOffset = mapedFile.getFileFromOffset();
			long mapedFileOffset = 0;
			while (true) {
				// Validate one message unit (CRC check optional)
				DispatchRequest dispatchRequest = this.checkMessageAndReturnSize(byteBuffer, checkCRCOnRecover);
				int size = dispatchRequest.getMsgSize();
				// Normal data
				if (size > 0) {
					/*
					 * Advance past the message and re-dispatch it so the
					 * DispatchMessageService background thread rebuilds the
					 * ConsumeQueue and index data
					 */
					mapedFileOffset += size;
					this.defaultMessageStore.putDispatchRequest(dispatchRequest);
				}
				// Intermediate file read error (CRC failure or malformed data)
				else if (size == -1) {
					log.info("recover physics file end, " + mapedFile.getFileName());
					break;
				}
				// Come the end of the file, switch to the next file
				// Since the return 0 representatives met last hole, this can
				// not be included in truncate offset
				// Size 0 means the end-of-file blank record was hit: continue
				// with the next mapped file until a CRC failure or until every
				// message has been scanned
				else if (size == 0) {
					index++;
					if (index >= mapedFiles.size()) {
						// The current branch under normal circumstances should
						// not happen
						log.info("recover physics file over, last maped file " + mapedFile.getFileName());
						break;
					} else {
						mapedFile = mapedFiles.get(index);
						byteBuffer = mapedFile.sliceByteBuffer();
						processOffset = mapedFile.getFileFromOffset();
						mapedFileOffset = 0;
						log.info("recover next physics file, " + mapedFile.getFileName());
					}
				}
			}
			// End of valid data = fileFromOffset of the last scanned file plus
			// the number of valid bytes read inside it
			processOffset += mapedFileOffset;
			// Restore the queue's committedWhere (i.e. the flushed position)
			this.mapedFileQueue.setCommittedWhere(processOffset);
			// Remove files beyond processOffset and reset the containing
			// file's wrotePosition/commitPosition to processOffset % fileSize
			this.mapedFileQueue.truncateDirtyFiles(processOffset);

			// Clear ConsumeQueue redundant data beyond the physical offset
			this.defaultMessageStore.truncateDirtyLogicFiles(processOffset);
		}
		// Commitlog case files are deleted: the mapped file queue is empty
		else {
			this.mapedFileQueue.setCommittedWhere(0);
			// Destroy the consume queue physical files and in-memory data
			this.defaultMessageStore.destroyLogics();
		}
	}

	/*
	 * Decides whether abnormal recovery may start from this mapped file: its
	 * first record must be a real message whose store timestamp does not
	 * exceed the relevant checkpoint minimum.
	 */
	private boolean isMapedFileMatchedRecover(final MapedFile mapedFile) {
		final ByteBuffer buffer = mapedFile.sliceByteBuffer();

		// The first record (bytes 5-8) must carry the message magic code,
		// otherwise the file does not begin with a valid message
		if (buffer.getInt(MessageDecoder.MessageMagicCodePostion) != MessageMagicCode) {
			return false;
		}

		// A zero store timestamp means the message was never fully written
		final long storeTimestamp = buffer.getLong(MessageDecoder.MessageStoreTimestampPostion);
		if (storeTimestamp == 0) {
			return false;
		}

		// Choose the checkpoint bound. With the safe message index enabled
		// (messageIndexSafe, default false) the index timestamp joins the
		// minimum — reliable but slower crash recovery; otherwise only the
		// physical and logic queue timestamps are considered.
		final boolean indexSafe = this.defaultMessageStore.getMessageStoreConfig().isMessageIndexEnable()
				&& this.defaultMessageStore.getMessageStoreConfig().isMessageIndexSafe();
		final long minTimestamp = indexSafe
				? this.defaultMessageStore.getStoreCheckpoint().getMinTimestampIndex()
				: this.defaultMessageStore.getStoreCheckpoint().getMinTimestamp();

		if (storeTimestamp <= minTimestamp) {
			log.info("find check timestamp, {} {}", //
					storeTimestamp, //
					UtilAll.timeMillisToHumanString(storeTimestamp));
			return true;
		}

		return false;
	}

	/**
	 * Appends a message to the commit log. Called (indirectly) when the
	 * broker receives a producer message. Handles delayed-message rewriting,
	 * the actual append under lock, the flush-disk policy and SYNC_MASTER
	 * replication.
	 *
	 * @param msg the inner broker message to store
	 * @return the put result, including flush/replication status
	 */
	public PutMessageResult putMessage(final MessageExtBrokerInner msg) {
		// Set the storage time
		msg.setStoreTimestamp(System.currentTimeMillis());
		// Set the message body BODY CRC (consider the most appropriate setting
		// on the client)
		msg.setBodyCRC(UtilAll.crc32(msg.getBody()));
		// Back to Results
		AppendMessageResult result = null;

		StoreStatsService storeStatsService = this.defaultMessageStore.getStoreStatsService();

		String topic = msg.getTopic();
		int queueId = msg.getQueueId();
		long tagsCode = msg.getTagsCode();
		// Only non-transactional and transaction-commit messages may be
		// rewritten as delayed messages
		final int tranType = MessageSysFlag.getTransactionValue(msg.getSysFlag());
		if (tranType == MessageSysFlag.TransactionNotType// non-transactional message
				|| tranType == MessageSysFlag.TransactionCommitType// transaction commit message
		) {
			// Delay Delivery: the "DELAY" property is set on the client via
			// Message.setDelayTimeLevel(int); 0 means deliver immediately,
			// > 0 selects a configured delay level
			if (msg.getDelayTimeLevel() > 0) {
				// Clamp to the maximum configured delay level
				if (msg.getDelayTimeLevel() > this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel()) {
					msg.setDelayTimeLevel(this.defaultMessageStore.getScheduleMessageService().getMaxDelayLevel());
				}

				// Redirect the message to the schedule topic and its
				// per-delay-level queue; the tags code becomes the computed
				// delivery timestamp
				topic = ScheduleMessageService.SCHEDULE_TOPIC;
				queueId = ScheduleMessageService.delayLevel2QueueId(msg.getDelayTimeLevel());
				tagsCode = this.defaultMessageStore.getScheduleMessageService().computeDeliverTimestamp(msg.getDelayTimeLevel(), msg.getStoreTimestamp());

				// Backup real topic, queueId into the message properties
				MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_TOPIC, msg.getTopic());
				MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REAL_QUEUE_ID, String.valueOf(msg.getQueueId()));
				msg.setPropertiesString(MessageDecoder.messageProperties2String(msg.getProperties()));

				// Rewrite the topic to "SCHEDULE_TOPIC_XXXX"
				msg.setTopic(topic);
				// queueId == delayLevel - 1
				msg.setQueueId(queueId);
			}
		}

		long eclipseTimeInLock = 0;
		synchronized (this) {
			long beginLockTimestamp = this.defaultMessageStore.getSystemClock().now();

			// Here settings are stored timestamp, in order to ensure an orderly
			// global
			msg.setStoreTimestamp(beginLockTimestamp);
			// Get or create the last mapped file; a new MapedFile is created
			// when none exists yet or the last one is full
			MapedFile mapedFile = this.mapedFileQueue.getLastMapedFile();

			if (null == mapedFile) {
				log.error("create maped file1 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
				return new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, null);
			}
			// Write the message into the MappedByteBuffer (page cache); a
			// background service flushes it to the physical file later
			result = mapedFile.appendMessage(msg, this.appendMessageCallback);
			switch (result.getStatus()) {
				// Append succeeded: continue with dispatch/flush handling
				case PUT_OK :
					break;
				// The last mapped file had too little space for this message
				case END_OF_FILE :
					// Create a new file, re-write the message
					mapedFile = this.mapedFileQueue.getLastMapedFile();
					if (null == mapedFile) {
						// XXX: warn and notify me
						log.error("create maped file2 error, topic: " + msg.getTopic() + " clientAddr: " + msg.getBornHostString());
						return new PutMessageResult(PutMessageStatus.CREATE_MAPEDFILE_FAILED, result);
					}
					// Retry the append into the fresh file
					result = mapedFile.appendMessage(msg, this.appendMessageCallback);
					break;
				// Any other status is reported back to the caller as an error
				case MESSAGE_SIZE_EXCEEDED :
					return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, result);
				case UNKNOWN_ERROR :
					return new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, result);
				default :
					return new PutMessageResult(PutMessageStatus.UNKNOWN_ERROR, result);
			}
			// Build the dispatch request used to rebuild consume queue and
			// index data
			DispatchRequest dispatchRequest = new DispatchRequest(//
					topic, // 1
					queueId, // 2
					result.getWroteOffset(), // 3 physical start offset of the write
					result.getWroteBytes(), // 4 number of bytes written
					tagsCode, // 5
					msg.getStoreTimestamp(), // 6
					result.getLogicsOffset(), // 7 logic (consume queue) offset
					msg.getKeys(), // 8
					/**
					 * Transaction
					 */
					msg.getSysFlag(), // 9
					msg.getPreparedTransactionOffset());// 10
			// Hand the request to DispatchMessageService's write queue
			this.defaultMessageStore.putDispatchRequest(dispatchRequest);

			eclipseTimeInLock = this.defaultMessageStore.getSystemClock().now() - beginLockTimestamp;
		} // end of synchronized

		if (eclipseTimeInLock > 1000) {
			// XXX: warn and notify me
			log.warn("putMessage in lock eclipse time(ms) " + eclipseTimeInLock);
		}

		PutMessageResult putMessageResult = new PutMessageResult(PutMessageStatus.PUT_OK, result);

		// Statistics
		storeStatsService.getSinglePutMessageTopicSizeTotal(topic).addAndGet(result.getWroteBytes());

		GroupCommitRequest request = null;

		// Synchronization flush: SYNC_FLUSH brokers wait for the group commit
		if (FlushDiskType.SYNC_FLUSH == this.defaultMessageStore.getMessageStoreConfig().getFlushDiskType()) {
			GroupCommitService service = (GroupCommitService) this.flushCommitLogService;
			// Only wait when the message's "WAIT" property is absent or TRUE
			if (msg.isWaitStoreMsgOK()) {
				// nextOffset = wroteOffset + wroteBytes, i.e. the offset that
				// must be flushed before this write is considered durable
				request = new GroupCommitRequest(result.getWroteOffset() + result.getWroteBytes());
				// Queue the request on GroupCommitService.requestsWrite and
				// wake the service thread, which swaps the (empty) read list
				// with the write list before committing
				service.putRequest(request);
				boolean flushOK = request.waitForFlush(this.defaultMessageStore.getMessageStoreConfig().getSyncFlushTimeout());
				if (!flushOK) {
					log.error("do groupcommit, wait for flush failed, topic: " + msg.getTopic() + " tags: " + msg.getTags() + " client address: " + msg.getBornHostString());
					putMessageResult.setPutMessageStatus(PutMessageStatus.FLUSH_DISK_TIMEOUT);
				}
			} else {
				service.wakeup();
			}
		}
		// Asynchronous flush (ASYNC_FLUSH): just nudge the flush thread
		else {
			this.flushCommitLogService.wakeup();
		}

		// Synchronous write double: SYNC_MASTER brokers replicate to the slave
		if (BrokerRole.SYNC_MASTER == this.defaultMessageStore.getMessageStoreConfig().getBrokerRole()) {
			HAService service = this.defaultMessageStore.getHaService();
			// Only wait when the message's "WAIT" property is absent or TRUE
			if (msg.isWaitStoreMsgOK()) {
				// Determine whether to wait: the slave is considered OK when
				// it is not lagging too far behind the master put position
				// (wroteOffset + wroteBytes)
				if (service.isSlaveOK(result.getWroteOffset()/* physical start offset */ + result.getWroteBytes()/* bytes written */)) {
					if (null == request) {
						request = new GroupCommitRequest(result.getWroteOffset()/* physical start offset */ + result.getWroteBytes()/* bytes written */);
					}
					// Register with GroupTransferService so completion of the
					// slave transfer can be observed
					service.putRequest(request);

					service.getWaitNotifyObject().wakeupAll();

					boolean flushOK =
							// TODO
							// Blocks until the HA service completes the
							// transfer or the sync flush timeout elapses
							request.waitForFlush(this.defaultMessageStore.getMessageStoreConfig().getSyncFlushTimeout());
					if (!flushOK) {
						log.error("do sync transfer other node, wait return, but failed, topic: " + msg.getTopic() + " tags: " + msg.getTags() + " client address: " + msg.getBornHostString());
						putMessageResult.setPutMessageStatus(PutMessageStatus.FLUSH_SLAVE_TIMEOUT);
					}
				}
				// Slave problem: the master put position exceeds the slave's
				// acknowledged offset (push2SlaveMaxOffset) by more than the
				// allowed gap (e.g. 256 MB) — replication deemed abnormal
				else {
					// Tell the producer, slave not available
					putMessageResult.setPutMessageStatus(PutMessageStatus.SLAVE_NOT_AVAILABLE);
				}
			}
		}

		return putMessageResult;
	}

	/**
	 * Returns the store timestamp of the message at the given offset, or -1
	 * when the offset is out of range or the message cannot be read.
	 */
	public long pickupStoretimestamp(final long offset, final int size) {
		if (offset <= this.getMinOffset()) {
			return -1;
		}

		final SelectMapedBufferResult result = this.getMessage(offset, size);
		if (result == null) {
			return -1;
		}

		try {
			// The store timestamp lives at a fixed position within the
			// serialized message layout
			return result.getByteBuffer().getLong(MessageDecoder.MessageStoreTimestampPostion);
		} finally {
			result.release();
		}
	}

	/**
	 * Reads {@code size} bytes of message data starting at the given global
	 * commit log offset.
	 *
	 * @param offset global starting offset to read from
	 * @param size number of bytes to read
	 * @return the selected buffer, or null when the offset maps to no file
	 */
	public SelectMapedBufferResult getMessage(final long offset, final int size) {
		int mapedFileSize = this.defaultMessageStore.getMessageStoreConfig().getMapedFileSizeCommitLog();
		// Locate the mapped file containing the global offset; fall back to
		// the first file only when reading from the very beginning (the
		// redundant "? true : false" ternary was removed)
		MapedFile mapedFile = this.mapedFileQueue.findMapedFileByOffset(offset, 0 == offset);
		if (mapedFile != null) {
			// offset is global across all commit log files; the in-file read
			// position is offset % mapedFileSize
			int pos = (int) (offset % mapedFileSize);
			return mapedFile.selectMapedBuffer(pos, size);
		}

		return null;
	}

	/**
	 * Returns the topic-queueid to next logic offset table.
	 */
	public HashMap<String, Long> getTopicQueueTable() {
		return this.topicQueueTable;
	}

	/**
	 * Replaces the topic-queueid to next logic offset table.
	 */
	public void setTopicQueueTable(HashMap<String, Long> topicQueueTable) {
		this.topicQueueTable = topicQueueTable;
	}

	/**
	 * Destroys every mapped file in the queue and releases its resources.
	 */
	public void destroy() {
		this.mapedFileQueue.destroy();
	}

	/*
	 * Appends raw binary message data starting at the given physical offset.
	 */
	public boolean appendData(long startOffset, byte[] data) {
		synchronized (this) {
			// Get (or create) the mapped file covering startOffset: a new one
			// is created when none exists or the last one is full
			final MapedFile lastFile = this.mapedFileQueue.getLastMapedFile(startOffset);
			if (lastFile == null) {
				log.error("appendData getLastMapedFile error  " + startOffset);
				return false;
			}

			// Write the bytes into the mapped buffer; flushing happens later
			return lastFile.appendMessage(data);
		}
	}

	/**
	 * Retries deletion of the first (oldest) commit log file.
	 */
	public boolean retryDeleteFirstFile(final long intervalForcibly) {
		return this.mapedFileQueue.retryDeleteFirstFile(intervalForcibly);
	}

	/**
	 * Marker base class for the commit log flush service threads
	 * (synchronous group commit or asynchronous real-time flush).
	 */
	abstract class FlushCommitLogService extends ServiceThread {
	}

	/**
	 * Asynchronous flush service: periodically flushes the commit log mapped
	 * buffers to disk (ASYNC_FLUSH broker mode).
	 */
	class FlushRealTimeService extends FlushCommitLogService {
		private static final int RetryTimesOver = 3;
		// Last time a thorough (flushLeastPages == 0) flush was forced
		private long lastFlushTimestamp = 0;
		private long printTimes = 0;

		public void run() {
			CommitLog.log.info(this.getServiceName() + " service started");

			while (!this.isStoped()) {
				boolean flushCommitLogTimed = CommitLog.this.defaultMessageStore.getMessageStoreConfig().isFlushCommitLogTimed();

				int interval = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushIntervalCommitLog();
				int flushPhysicQueueLeastPages = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushCommitLogLeastPages();

				int flushPhysicQueueThoroughInterval = CommitLog.this.defaultMessageStore.getMessageStoreConfig().getFlushCommitLogThoroughInterval();

				boolean printFlushProgress = false;

				// Periodically force a thorough flush and print progress
				long currentTimeMillis = System.currentTimeMillis();
				if (currentTimeMillis >= (this.lastFlushTimestamp + flushPhysicQueueThoroughInterval)) {
					this.lastFlushTimestamp = currentTimeMillis;
					flushPhysicQueueLeastPages = 0;
					printFlushProgress = ((printTimes++ % 10) == 0);
				}

				try {
					// Either sleep for a fixed interval or wait until woken by
					// a producer, depending on configuration
					if (flushCommitLogTimed) {
						Thread.sleep(interval);
					} else {
						this.waitForRunning(interval);
					}

					if (printFlushProgress) {
						this.printFlushProgress();
					}
					// Flush the commit log (default interval is 1 second)
					CommitLog.this.mapedFileQueue.commit(flushPhysicQueueLeastPages);
					// storeTimestamp is updated after a successful commit; use
					// it to advance the checkpoint's physical message timestamp
					long storeTimestamp = CommitLog.this.mapedFileQueue.getStoreTimestamp();
					if (storeTimestamp > 0) {
						CommitLog.this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(storeTimestamp);
					}
				} catch (Exception e) {
					CommitLog.log.warn(this.getServiceName() + " service has exception. ", e);
					this.printFlushProgress();
				}
			}

			// Normal shutdown, to ensure that all the flush before exit
			boolean result = false;
			for (int i = 0; i < RetryTimesOver && !result; i++) {
				result = CommitLog.this.mapedFileQueue.commit(0);
				CommitLog.log.info(this.getServiceName() + " service shutdown, retry " + (i + 1) + " times " + (result ? "OK" : "Not OK"));
			}

			this.printFlushProgress();

			CommitLog.log.info(this.getServiceName() + " service end");
		}

		@Override
		public String getServiceName() {
			// Fix: previously returned FlushCommitLogService.class's simple
			// name, which mislabeled this thread in logs and thread dumps
			return FlushRealTimeService.class.getSimpleName();
		}

		private void printFlushProgress() {
			CommitLog.log.info("how much disk fall behind memory, " + CommitLog.this.mapedFileQueue.howMuchFallBehind());
		}

		@Override
		public long getJointime() {
			return 1000 * 60 * 5;
		}
	}

	/**
	 * A producer's request to have the commit log flushed (or replicated) up
	 * to {@code nextOffset}. The producer blocks in {@link #waitForFlush(long)}
	 * until the flush service reports the outcome via
	 * {@link #wakeupCustomer(boolean)} or the timeout elapses.
	 */
	public class GroupCommitRequest {
		// Offset (wroteOffset + wroteBytes) that must be durable before this
		// request is considered satisfied
		private final long nextOffset;
		private final CountDownLatch countDownLatch = new CountDownLatch(1);
		private volatile boolean flushOK = false;

		public GroupCommitRequest(long nextOffset) {
			this.nextOffset = nextOffset;
		}

		public long getNextOffset() {
			return nextOffset;
		}

		/**
		 * Called by the flush/transfer service to report the outcome and
		 * release the waiting producer thread.
		 */
		public void wakeupCustomer(final boolean flushOK) {
			this.flushOK = flushOK;
			this.countDownLatch.countDown();
		}

		/**
		 * Blocks until the service reports a result or the timeout elapses.
		 *
		 * @param timeout maximum wait in milliseconds
		 * @return the reported flush result; false on timeout or interrupt.
		 *         Fix: the previous "awaitReturned || flushOK" reported
		 *         success whenever the latch fired, even when the service had
		 *         signalled a FAILED flush via wakeupCustomer(false).
		 */
		public boolean waitForFlush(long timeout) {
			try {
				this.countDownLatch.await(timeout, TimeUnit.MILLISECONDS);
				return this.flushOK;
			} catch (InterruptedException e) {
				// Preserve the interrupt status for the caller
				Thread.currentThread().interrupt();
				return false;
			}
		}
	}

	/**
	 * GroupCommit Service
	 */
	class GroupCommitService extends FlushCommitLogService {
		private volatile List<GroupCommitRequest> requestsWrite = new ArrayList<GroupCommitRequest>();
		private volatile List<GroupCommitRequest> requestsRead = new ArrayList<GroupCommitRequest>();

		/**
		 * Queues a flush request and wakes the service thread if it is
		 * waiting. Requests accumulate in the write list, which the service
		 * thread swaps with the (empty) read list before committing.
		 */
		public void putRequest(final GroupCommitRequest request) {
			synchronized (this) {
				this.requestsWrite.add(request);
				// hasNotified (from ServiceThread) avoids redundant notify()
				// calls while the service thread is already awake
				if (!this.hasNotified) {
					this.hasNotified = true;
					this.notify();
				}
			}
		}
		// 将写队列的数据与读队列的数据交互（读队列的数据肯定是空）
		private void swapRequests() {
			List<GroupCommitRequest> tmp = this.requestsWrite;
			this.requestsWrite = this.requestsRead;
			this.requestsRead = tmp;
		}

		private void doCommit() {
			if (!this.requestsRead.isEmpty()) {
				// 遍历读队列的数据
				for (GroupCommitRequest req : this.requestsRead) {
					// There may be a message in the next file, so a maximum of
					// two times the flush
					boolean flushOK = false;
					for (int i = 0; (i < 2) && !flushOK; i++) {
						// 检查MapedFileQueue.committedWhere（刷盘刷到哪里的记录）是否大于等于GroupCommitRequest.nextOffset，
						// 若是表示该请求消息表示nextOffset之前的消息已经被刷盘
						flushOK = (CommitLog.this.mapedFileQueue.getCommittedWhere() >= req.getNextOffset());

						if (!flushOK) {
							// 进行刷盘操作
							CommitLog.this.mapedFileQueue.commit(0/* flushLeastPages */);
						}
					}

					req.wakeupCustomer(flushOK);
				}

				long storeTimestamp = CommitLog.this.mapedFileQueue.getStoreTimestamp();
				if (storeTimestamp > 0) {
					// 更新StoreCheckpoint.physicMsgTimestamp变量值（checkpoint文件内容中其中一个值）
					CommitLog.this.defaultMessageStore.getStoreCheckpoint().setPhysicMsgTimestamp(storeTimestamp);
				}
				// 清空读请求队列requestRead
				this.requestsRead.clear();
			} else {
				// Because of individual messages is set to not sync flush, it
				// will come to this process
				CommitLog.this.mapedFileQueue.commit(0);
			}
		}

		public void run() {
			CommitLog.log.info(this.getServiceName() + " service started");

			while (!this.isStoped()) {
				try {
					this.waitForRunning(0);
					this.doCommit();
				} catch (Exception e) {
					CommitLog.log.warn(this.getServiceName() + " service has exception. ", e);
				}
			}

			// Under normal circumstances shutdown, wait for the arrival of the
			// request, and then flush
			try {
				Thread.sleep(10);
			} catch (InterruptedException e) {
				CommitLog.log.warn("GroupCommitService Exception, ", e);
			}

			synchronized (this) {
				this.swapRequests();
			}

			this.doCommit();

			CommitLog.log.info(this.getServiceName() + " service end");
		}

		@Override
		protected void onWaitEnd() {
			this.swapRequests();
		}

		@Override
		public String getServiceName() {
			return GroupCommitService.class.getSimpleName();
		}

		@Override
		public long getJointime() {
			return 1000 * 60 * 5;
		}
	}

	/**
	 * Serializes a MessageExtBrokerInner into the commit-log wire format and
	 * copies it into the mapped file's ByteBuffer.
	 *
	 * NOTE(review): the scratch buffers below are mutable shared state, so this
	 * callback is not thread-safe — presumably doAppend is always invoked under
	 * the commit log's put-message lock; confirm against the caller.
	 */
	class DefaultAppendMessageCallback implements AppendMessageCallback {
		// File at the end of the minimum fixed length empty:
		// 4 bytes TOTALSIZE + 4 bytes BlankMagicCode marking end-of-file.
		private static final int END_FILE_MIN_BLANK_LENGTH = 4 + 4;
		// Scratch buffer used by MessageDecoder.createMessageId to build the msgId.
		private final ByteBuffer msgIdMemory;
		// Store the message content (fully serialized) before the bulk copy
		// into the mapped file.
		private final ByteBuffer msgStoreItemMemory;
		// The maximum length of the message
		private final int maxMessageSize;

		// size: maximum serialized message length; the scratch buffer reserves
		// END_FILE_MIN_BLANK_LENGTH extra bytes for the end-of-file marker.
		DefaultAppendMessageCallback(final int size) {
			this.msgIdMemory = ByteBuffer.allocate(MessageDecoder.MSG_ID_LENGTH);
			this.msgStoreItemMemory = ByteBuffer.allocate(size + END_FILE_MIN_BLANK_LENGTH);
			this.maxMessageSize = size;
		}

		public ByteBuffer getMsgStoreItemMemory() {
			return msgStoreItemMemory;
		}

		/**
		 * Serializes {@code msg} (a MessageExtBrokerInner) into the mapped-file
		 * buffer starting at its current position.
		 *
		 * @param fileFromOffset global physical offset of the mapped file's start
		 * @param byteBuffer     the mapped file's buffer to append into
		 * @param maxBlank       bytes remaining in the mapped file
		 * @param msg            the message to append (cast to MessageExtBrokerInner)
		 * @return PUT_OK on success, MESSAGE_SIZE_EXCEEDED if too large, or
		 *         END_OF_FILE when the message does not fit in this file
		 */
		public AppendMessageResult doAppend(final long fileFromOffset, final ByteBuffer byteBuffer, final int maxBlank, final Object msg) {
			// STORETIMESTAMP + STOREHOSTADDRESS + OFFSET <br>
			MessageExtBrokerInner msgInner = (MessageExtBrokerInner) msg;
			// PHY OFFSET
			long wroteOffset = fileFromOffset + byteBuffer.position();
			String msgId = MessageDecoder.createMessageId(this.msgIdMemory, msgInner.getStoreHostBytes(), wroteOffset);

			// Record ConsumeQueue information
			String key = msgInner.getTopic() + "-" + msgInner.getQueueId();
			Long queueOffset = CommitLog.this.topicQueueTable.get(key);
			if (null == queueOffset) {
				queueOffset = 0L;
				CommitLog.this.topicQueueTable.put(key, queueOffset);
			}

			// Transaction messages that require special handling
			final int tranType = MessageSysFlag.getTransactionValue(msgInner.getSysFlag());
			switch (tranType) {
				// Prepared and Rollback message is not consumed, will not enter the
				// consumer queue
				case MessageSysFlag.TransactionPreparedType :
				case MessageSysFlag.TransactionRollbackType :
					queueOffset = 0L;
					break;
				case MessageSysFlag.TransactionNotType :
				case MessageSysFlag.TransactionCommitType :
				default :
					break;
			}

			/**
			 * Serialize message
			 */
			// NOTE(review): getBytes() uses the platform default charset here
			// and in getTopic().getBytes() below — presumably consistent with
			// the decoding side; confirm against MessageDecoder.
			final byte[] propertiesData = msgInner.getPropertiesString() == null ? null : msgInner.getPropertiesString().getBytes();
			final int propertiesLength = propertiesData == null ? 0 : propertiesData.length;

			final byte[] topicData = msgInner.getTopic().getBytes();
			final int topicLength = topicData == null ? 0 : topicData.length;

			final int bodyLength = msgInner.getBody() == null ? 0 : msgInner.getBody().length;

			final int msgLen = 4 // 1 TOTALSIZE
					+ 4 // 2 MAGICCODE
					+ 4 // 3 BODYCRC
					+ 4 // 4 QUEUEID
					+ 4 // 5 FLAG
					+ 8 // 6 QUEUEOFFSET
					+ 8 // 7 PHYSICALOFFSET
					+ 4 // 8 SYSFLAG
					+ 8 // 9 BORNTIMESTAMP
					+ 8 // 10 BORNHOST
					+ 8 // 11 STORETIMESTAMP
					+ 8 // 12 STOREHOSTADDRESS
					+ 4 // 13 RECONSUMETIMES
					+ 8 // 14 Prepared Transaction Offset
					+ 4 + bodyLength // 15 BODY (length prefix + bytes)
					+ 1 + topicLength // 16 TOPIC (length prefix + bytes)
					+ 2 + propertiesLength // 17 PROPERTIES (length prefix + bytes)
					+ 0;

			// Exceeds the maximum message
			// Reject if the total serialized length exceeds the configured
			// maximum message size (512K by default, per the original comment).
			if (msgLen > this.maxMessageSize) {
				CommitLog.log.warn("message size exceeded, msg total size: " + msgLen + ", msg body size: " + bodyLength + ", maxMessageSize: " + this.maxMessageSize);
				return new AppendMessageResult(AppendMessageStatus.MESSAGE_SIZE_EXCEEDED);
			}

			// Determines whether there is sufficient free space
			// If the message plus the 8-byte end-of-file marker does not fit in
			// the space left in this mapped file, pad the file out and signal
			// END_OF_FILE so the caller rolls to the next file.
			if ((msgLen + END_FILE_MIN_BLANK_LENGTH) > maxBlank) {
				// NOTE(review): this assumes maxBlank <= msgStoreItemMemory's
				// capacity (maxMessageSize + 8) — confirm callers guarantee it,
				// otherwise limit(maxBlank) throws.
				this.resetMsgStoreItemMemory(maxBlank);
				// 1 TOTALSIZE
				this.msgStoreItemMemory.putInt(maxBlank);
				// 2 MAGICCODE
				this.msgStoreItemMemory.putInt(CommitLog.BlankMagicCode);
				// 3 The remaining space may be any value
				//

				// Here the length of the specially set maxBlank
				// Fill all remaining space in the file with the blank record.
				byteBuffer.put(this.msgStoreItemMemory.array(), 0, maxBlank);
				return new AppendMessageResult(AppendMessageStatus.END_OF_FILE, wroteOffset, maxBlank, msgId, msgInner.getStoreTimestamp(), queueOffset);
			}

			// Initialization of storage space
			this.resetMsgStoreItemMemory(msgLen);
			// 1 TOTALSIZE
			this.msgStoreItemMemory.putInt(msgLen);
			// 2 MAGICCODE
			this.msgStoreItemMemory.putInt(CommitLog.MessageMagicCode);
			// 3 BODYCRC
			this.msgStoreItemMemory.putInt(msgInner.getBodyCRC());
			// 4 QUEUEID
			this.msgStoreItemMemory.putInt(msgInner.getQueueId());
			// 5 FLAG
			this.msgStoreItemMemory.putInt(msgInner.getFlag());
			// 6 QUEUEOFFSET
			this.msgStoreItemMemory.putLong(queueOffset);
			// 7 PHYSICALOFFSET
			this.msgStoreItemMemory.putLong(fileFromOffset + byteBuffer.position());
			// 8 SYSFLAG
			this.msgStoreItemMemory.putInt(msgInner.getSysFlag());
			// 9 BORNTIMESTAMP
			this.msgStoreItemMemory.putLong(msgInner.getBornTimestamp());
			// 10 BORNHOST
			this.msgStoreItemMemory.put(msgInner.getBornHostBytes());
			// 11 STORETIMESTAMP
			this.msgStoreItemMemory.putLong(msgInner.getStoreTimestamp());
			// 12 STOREHOSTADDRESS
			this.msgStoreItemMemory.put(msgInner.getStoreHostBytes());
			// 13 RECONSUMETIMES
			this.msgStoreItemMemory.putInt(msgInner.getReconsumeTimes());
			// 14 Prepared Transaction Offset
			this.msgStoreItemMemory.putLong(msgInner.getPreparedTransactionOffset());
			// 15 BODY
			this.msgStoreItemMemory.putInt(bodyLength);
			if (bodyLength > 0)
				this.msgStoreItemMemory.put(msgInner.getBody());
			// 16 TOPIC
			this.msgStoreItemMemory.put((byte) topicLength);
			this.msgStoreItemMemory.put(topicData);
			// 17 PROPERTIES
			this.msgStoreItemMemory.putShort((short) propertiesLength);
			if (propertiesLength > 0)
				this.msgStoreItemMemory.put(propertiesData);

			// Write messages to the queue buffer
			// Bulk-copy the whole serialized record into the MapedFile's
			// mappedByteBuffer in one shot.
			byteBuffer.put(this.msgStoreItemMemory.array(), 0, msgLen);

			AppendMessageResult result = new AppendMessageResult(AppendMessageStatus.PUT_OK, wroteOffset, msgLen, msgId, msgInner.getStoreTimestamp(), queueOffset);

			switch (tranType) {
				case MessageSysFlag.TransactionPreparedType :
				case MessageSysFlag.TransactionRollbackType :
					break;
				case MessageSysFlag.TransactionNotType :
				case MessageSysFlag.TransactionCommitType :
					// The next update ConsumeQueue information
					CommitLog.this.topicQueueTable.put(key, ++queueOffset);
					break;
				default :
					break;
			}

			return result;
		}

		// Rewinds the scratch buffer (flip() sets position back to 0) and caps
		// its writable window at exactly `length` bytes for the next record.
		private void resetMsgStoreItemMemory(final int length) {
			this.msgStoreItemMemory.flip();
			this.msgStoreItemMemory.limit(length);
		}
	}

	/**
	 * Drops the logical queue-offset entry tracked for the given topic/queue
	 * pair, so the next append to that queue starts its offset from zero.
	 * (The historical typo "Queur" is preserved: the name is public API.)
	 *
	 * @param topic   topic whose queue entry is removed
	 * @param queueId queue id within the topic
	 */
	public void removeQueurFromTopicQueueTable(final String topic, final int queueId) {
		// Key format matches the one built in DefaultAppendMessageCallback.doAppend.
		final String queueKey = topic + "-" + queueId;
		synchronized (this) {
			this.topicQueueTable.remove(queueKey);
		}

		log.info("removeQueurFromTopicQueueTable OK Topic: {} QueueId: {}", topic, queueId);
	}
}
