/*
 * Copyright (c) 2011-2018, Meituan Dianping. All Rights Reserved.
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.unidal.cat.message.storage.local;

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;

import io.netty.buffer.ByteBuf;
import org.unidal.cat.message.storage.Bucket;
import org.unidal.cat.message.storage.FileType;
import org.unidal.cat.message.storage.PathBuilder;
import org.unidal.cat.message.storage.internals.ByteBufCache;
import org.unidal.cat.message.storage.internals.DefaultBlock;
import org.unidal.lookup.annotation.Inject;
import org.unidal.lookup.annotation.Named;

import com.dianping.cat.Cat;
import com.dianping.cat.config.server.ServerConfigManager;
import com.dianping.cat.message.internal.MessageId;

/**
 * Local-disk {@link Bucket} implementation. One bucket instance is backed by two files
 * for a given (domain, ip, hour) tuple:
 * <ul>
 * <li>a <b>data</b> file (see {@link DataHelper}): an append-only sequence of
 * length-prefixed message blocks;</li>
 * <li>an <b>index</b> file (see {@link IndexHelper}): fixed-size segments mapping a
 * {@link MessageId} to a 64-bit address in the data file.</li>
 * </ul>
 * An index entry packs two values into one {@code long}: the upper bits hold the block's
 * start offset in the data file, and the low 24 bits hold the message's offset inside the
 * unpacked block (see {@link #get(MessageId)} and {@code IndexHelper.write}).
 * <p>
 * NOTE(review): the constants {@code SEGMENT_SIZE}, {@code ENTRY_PER_SEGMENT},
 * {@code MESSAGE_PER_SEGMENT} and {@code BYTE_PER_MESSAGE} are not declared in this file —
 * presumably inherited from the {@link Bucket} interface; confirm their values there
 * (comments below assume 32K segments with 4096 8-byte entries, per the original notes).
 */
@Named(type = Bucket.class, value = "local", instantiationStrategy = Named.PER_LOOKUP)
public class LocalBucket implements Bucket {

	@Inject("local")
	private PathBuilder m_builder;

	@Inject
	private ByteBufCache m_bufCache;

	@Inject
	private ServerConfigManager m_config;

	// Helper around the data file (length-prefixed message blocks).
	private DataHelper m_data = new DataHelper();

	// Helper around the index file (segment-based id -> data-address mapping).
	private IndexHelper m_index = new IndexHelper();

	// Whether index writes may fall back to the NIO FileChannel when no in-memory
	// segment is available; refreshed from server config in initialize().
	private boolean m_nioEnabled = true;

	// True when opened for writing; decides whether close() flushes dirty segments.
	private boolean m_writeMode;

	// Counts index writes that missed the segment cache, for sampled event logging.
	private AtomicInteger m_count = new AtomicInteger();

	@Override
	public synchronized void close() {
		if (m_index.isOpen()) {
			m_data.close();

			if (m_writeMode) {
				// flush dirty in-memory segments back to the index file before closing
				m_index.flushAndClose();
			} else {
				m_index.close();
			}
		}
	}

	@Override
	public void flush() {
		// Flush the buffered data-file output stream; errors are logged, not propagated.
		try {
			m_data.m_out.flush();
		} catch (Exception e) {
			Cat.logError(e);
		}
	}

	/**
	 * Looks up a message by id: reads its packed address from the index, loads the
	 * containing block from the data file and unpacks the message from it.
	 *
	 * @return the message bytes, or {@code null} when the id is not indexed or the
	 *         block cannot be read
	 */
	@Override
	public ByteBuf get(MessageId id) throws IOException {
		long address = m_index.read(id);

		if (address <= 0) {
			return null;
		} else {
			// low 24 bits: offset of the message inside the unpacked block
			int segmentOffset = (int) (address & 0xFFFFFFL);
			// remaining high bits: start offset of the block in the data file
			long dataOffset = address >> 24;
			byte[] data = m_data.read(dataOffset);

			if (data != null) {
				DefaultBlock block = new DefaultBlock(id, segmentOffset, data);

				return block.unpack(id);
			} else {
				return null;
			}
		}
	}

	/**
	 * Opens (creating if needed) the index and data files for the given
	 * (domain, ip, hour) tuple.
	 */
	@Override
	public boolean initialize(String domain, String ip, int hour, boolean writeMode) throws IOException {
		m_nioEnabled = m_config.getStroargeNioEnable();
		long timestamp = hour * 3600 * 1000L;
		Date startTime = new Date(timestamp);
		File indexPath = new File(m_builder.getPath(domain, startTime, ip, FileType.INDEX));
		File dataPath = new File(m_builder.getPath(domain, startTime, ip, FileType.DATA));

		m_writeMode = writeMode;
		// A "header" is the smallest index unit: 4096 segments, of which the first is
		// the level-1 index and the remaining ones are level-2 index segments.
		m_index.init(indexPath);
		m_data.init(dataPath);
		return true;
	}

	/**
	 * Appends one block of batched message data and indexes every message in it.
	 *
	 * @param data     the packed block to append to the data file
	 * @param mappings per-message offsets within this block, keyed by message id
	 */
	@Override
	public synchronized void puts(ByteBuf data, Map<MessageId, Integer> mappings) throws IOException {
		// current write position of the data file; becomes the block address of this batch
		long dataOffset = m_data.getDataOffset();

		// writing the data file is the simple part: a single sequential append
		m_data.write(data);

		for (Map.Entry<MessageId, Integer> e : mappings.entrySet()) {
			MessageId id = e.getKey();
			int offset = e.getValue();
			// Writing the index is the complex part: it maintains both the level-1 and
			// level-2 indexes. E.g. for the first batch ever written, dataOffset is 4, so
			// every entry of this batch stores 4 in its high bits and the per-message
			// offset in the low 3 bytes.
			m_index.write(id, dataOffset, offset);
		}
	}

	@Override
	public String toString() {
		return String.format("%s[%s]", getClass().getSimpleName(), m_data.getPath());
	}

	/**
	 * Wraps the data file: sequential, buffered, length-prefixed appends plus random-access
	 * reads. Layout: a 4-byte magic (-1) header, then repeated [int length][payload] records.
	 * NOTE(review): reads go through m_file while writes are buffered in m_out, so a block
	 * is only readable from disk after a flush — presumably callers rely on flush()/close().
	 */
	private class DataHelper {
		private File m_path;

		// random-access handle used for reads
		private RandomAccessFile m_file;

		// logical end-of-file: offset at which the next block will be written
		private long m_offset;

		// buffered append stream used for writes
		private DataOutputStream m_out;

		private void close() {
			try {
				if (m_out != null) {
					m_out.close();
				}
			} catch (IOException e) {
				Cat.logError(e);
			}

			try {
				m_file.close();
			} catch (IOException e) {
				Cat.logError(e);
			}

			m_file = null;
		}

		private long getDataOffset() {
			return m_offset;
		}

		private File getPath() {
			return m_path;
		}

		private void init(File dataPath) throws IOException {
			m_path = dataPath;
			m_path.getParentFile().mkdirs();

			m_file = new RandomAccessFile(m_path, "rw"); // read-write
			m_offset = m_path.length();
			m_out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(m_path, true), (int) SEGMENT_SIZE));

			if (m_offset == 0) {
				// brand-new file: write the 4-byte magic code (-1)
				m_out.writeInt(-1);
				m_offset += 4;
			}
		}

		/**
		 * Reads the length-prefixed block starting at the given offset, or returns
		 * {@code null} when the offset is out of range or the length is invalid.
		 */
		private byte[] read(long dataOffset) throws IOException {
			if (dataOffset < m_offset) {
				m_file.seek(dataOffset);

				int len = m_file.readInt();

				if (len > 0) {
					byte[] data = new byte[len];

					m_file.readFully(data);

					return data;
				}
			}
			return null;
		}

		private void write(ByteBuf data) throws IOException {
			int len = data.readableBytes();

			m_out.writeInt(len); // write the length prefix first
			data.readBytes(m_out, len); // then the payload
			m_offset += len + 4; // advance the logical end-of-file
		}
	}

	/**
	 * Wraps the index file. The file is a sequence of "smallest index units" (headers),
	 * each made of ENTRY_PER_SEGMENT segments of SEGMENT_SIZE bytes: the first segment of
	 * a unit is the level-1 index, the rest are level-2 segments. The level-1 index maps
	 * (ip, baseIndex) to a level-2 segment number; each level-2 segment slot (picked by a
	 * message's index/rollId) stores the packed data-file address of that message.
	 */
	private class IndexHelper {

		private RandomAccessFile m_file;

		private File m_path;

		private FileChannel m_indexChannel;

		private Header m_header = new Header();

		// per-ip (hex string) caches of recently used level-2 segments
		private Map<String, SegmentCache> m_caches = new LinkedHashMap<String, SegmentCache>();

		private void close() {
			try {
				m_indexChannel.close();
				m_file.close();
			} catch (IOException e) {
				Cat.logError(e);
			}
			m_file = null;
			m_caches.clear();
		}

		private void flushAndClose() {
			try {
				// write the dirty level-1 segment and all cached level-2 segments back
				m_header.m_segment.flushAndClose();

				for (SegmentCache cache : m_caches.values()) {
					cache.flushAndClose();
				}
			} catch (IOException e) {
				Cat.logError(e);
			}

			if (m_nioEnabled) {
				try {
					m_indexChannel.force(false);
					m_indexChannel.close();
				} catch (IOException e) {
					Cat.logError(e);
				}
			}

			try {
				m_file.close();
			} catch (IOException e) {
				Cat.logError(e);
			}

			m_file = null;
			m_caches.clear();
		}

		/**
		 * Returns the cached segment with the given id for this ip, creating the per-ip
		 * cache on demand. May return {@code null} when the segment was already evicted
		 * (see {@link SegmentCache#findOrCreateNextSegment(long)}).
		 */
		private Segment getSegment(String ip, long id) throws IOException {
			SegmentCache cache = m_caches.get(ip);

			if (cache == null) {
				cache = new SegmentCache();
				m_caches.put(ip, cache);
			}

			return cache.findOrCreateNextSegment(id);
		}

		/**
		 * Opens the index file and replays every existing header (smallest index unit)
		 * to rebuild the in-memory (ip, baseIndex) -> segment mapping table.
		 */
		private void init(File indexPath) throws IOException {
			m_path = indexPath;
			m_path.getParentFile().mkdirs();

			// read-write without meta sync
			m_file = new RandomAccessFile(m_path, "rw");
			m_indexChannel = m_file.getChannel();

			long size = m_file.length();
			// ENTRY_PER_SEGMENT segments of SEGMENT_SIZE bytes form one index unit
			// (e.g. 4096 * 32K = 128M); count how many complete-or-partial units exist.
			int totalHeaders = (int) Math.ceil((size * 1.0 / (((long) ENTRY_PER_SEGMENT) * SEGMENT_SIZE)));

			if (totalHeaders == 0) { // empty file: still load (and initialize) the first unit
				totalHeaders = 1;
			}

			// Each header is one smallest index unit: 1 level-1 segment + the level-2 segments.
			for (int i = 0; i < totalHeaders; i++) {
				m_header.load(i);
			}
		}

		private boolean isOpen() {
			return m_file != null;
		}

		/**
		 * Resolves the packed data address for a message id, preferring the in-memory
		 * segment cache and falling back to a direct file read. Returns -1 when absent.
		 */
		private long read(MessageId id) throws IOException {
			int index = id.getIndex();
			long position = m_header.getOffset(id.getIpAddressValue(), index, false);

			int segmentId = (int) (position / SEGMENT_SIZE);
			int offset = (int) (position % SEGMENT_SIZE);
			Segment segment = getSegment(id.getIpAddressInHex(), segmentId);

			if (segment != null) {
				try {
					return segment.readLong(offset);
				} catch (EOFException e) {
					// ignore it
				}
			} else if (position > 0) {
				m_file.seek(position);

				return m_file.readLong();
			}

			return -1;
		}

		/*
		 * Batch-friendly index write. The processor hands over a whole batch of messages:
		 * the data file takes one sequential write, and the index entries for the batch
		 * are written here one by one, mostly hitting in-memory segments.
		 *
		 * Index layout (per smallest index unit): the level-1 index has 4096 slots. The
		 * first slot holds the magic code -1; the remaining 4095 slots correspond
		 * one-to-one with the 4095 level-2 segments. Each level-2 segment in turn has
		 * 4096 slots, one per message index (rollId) of the same ip. For example:
		 * - message ip=1.1.1.1, rollId=1: a level-1 slot records (1.1.1.1, baseIndex 0),
		 *   and slot #1 of the first level-2 segment stores the message's data pointer;
		 * - another message ip=1.1.1.1, rollId=2: level-1 unchanged, slot #2 of the same
		 *   level-2 segment stores its data pointer;
		 * - message ip=2.2.2.2, rollId=1: the next level-1 slot records the new ip, and
		 *   the next level-2 segment stores its data pointer.
		 * In short: the level-1 index maps (ip, baseIndex) to a level-2 segment number;
		 * each level-2 slot (chosen by rollId) stores a message's packed data pointer.
		 */
		/**
		 * Writes one index entry.
		 *
		 * @param id           the message id
		 * @param blockAddress data-file offset at which the containing block starts
		 *                     (the data offset captured before the batch was appended)
		 * @param blockOffset  offset of this message inside the unpacked block
		 */
		private void write(MessageId id, long blockAddress, int blockOffset) throws IOException {
			// byte position of this message's slot inside its level-2 segment; this call
			// also creates the level-1 entry and the in-memory mapping on first use
			long position = m_header.getOffset(id.getIpAddressValue(), id.getIndex(), true);

			long address = position / SEGMENT_SIZE; // level-2 segment number
			int offset = (int) (position % SEGMENT_SIZE); // slot offset within that segment
			Segment segment = getSegment(id.getIpAddressInHex(), address);
			// pack: high bits = block start address, low 24 bits = offset inside the block
			long value = (blockAddress << 24) + blockOffset;

			if (segment != null) {
				// normal path: write the packed pointer into the cached segment slot
				segment.writeLong(offset, value);
			} else {
				// segment cache miss (e.g. segment already evicted); log a sampled event
				// and fall back to writing straight through the channel/file
				if (m_count.incrementAndGet() % 1000 == 0) {
					Cat.logEvent("AbnormalBlock", id.getDomain());
				}
				if (m_nioEnabled) {
					m_indexChannel.position(position);

					ByteBuffer buf = ByteBuffer.allocate(8);
					buf.putLong(value);
					buf.flip();
					m_indexChannel.write(buf);
				} else {
					m_file.seek(position);
					m_file.writeLong(value);
				}
			}
		}

		/**
		 * In-memory view of the level-1 indexes of all loaded index units, plus the
		 * (ip -> baseIndex -> level-2 segment number) mapping table rebuilt from them.
		 */
		private class Header {
			// ip -> (baseIndex -> level-2 segment number), rebuilt from all level-1 indexes
			private Map<Integer, Map<Integer, Integer>> m_table = new LinkedHashMap<Integer, Map<Integer, Integer>>();

			// next level-2 segment number to hand out
			private int m_nextSegment;

			// the level-1 segment (first segment) of the most recently loaded index unit
			private Segment m_segment;

			// write cursor (in bytes) into the current level-1 segment
			private int m_offset;

			/**
			 * Looks up — and, when {@code createIfNotExists}, allocates and persists — the
			 * level-2 segment number for (ip, index), where index is the baseIndex
			 * (message index / MESSAGE_PER_SEGMENT).
			 */
			private Integer findSegment(int ip, int index, boolean createIfNotExists) throws IOException {
				Map<Integer, Integer> map = m_table.get(ip);

				if (map == null && createIfNotExists) { // first index write ever seen for this ip
					map = new HashMap<Integer, Integer>();
					m_table.put(ip, map);
				}

				Integer segmentId = map == null ? null : map.get(index);

				if (segmentId == null && createIfNotExists) {
					// level-1 entry payload: high 32 bits = ip, low 32 bits = baseIndex
					long value = (((long) ip) << 32) + index;

					segmentId = m_nextSegment;
					map.put(index, segmentId);

					// persist the (ip, baseIndex) entry into the level-1 segment and
					// advance past this 8-byte slot (slot 0 holds the -1 magic code)
					m_segment.writeLong(m_offset, value);
					m_offset += 8;

					m_nextSegment++;

					if (m_nextSegment % (ENTRY_PER_SEGMENT) == 0) {
						// this index unit is full: start a new one with a fresh level-1 segment
						// last segment is full, create new one
						m_segment.flushAndClose();
						m_segment = new Segment(m_indexChannel, ((long) m_nextSegment) * SEGMENT_SIZE);

						m_nextSegment++; // skip self head data
						m_segment.writeLong(0, -1); // write magic code
						m_offset = 8;
					}
				}

				return segmentId;
			}

			/**
			 * Maps (ip, message index) to the absolute byte position of the message's
			 * slot in the index file, or -1 when the mapping does not exist and
			 * {@code createIfNotExists} is false.
			 */
			private long getOffset(int ip, int seq, boolean createIfNotExists) throws IOException {
				// which level-2 segment of this ip the message falls into
				int segmentIndex = seq / MESSAGE_PER_SEGMENT;
				// which slot within that segment
				int segmentOffset = (seq % MESSAGE_PER_SEGMENT) * BYTE_PER_MESSAGE;
				// may create the level-1 entry and mapping-table row on first use
				Integer segmentId = findSegment(ip, segmentIndex, createIfNotExists);

				if (segmentId != null) {
					return ((long) segmentId) * SEGMENT_SIZE + segmentOffset;
				} else {
					return -1;
				}
			}

			/**
			 * Loads one index unit: validates/initializes its magic code, then replays
			 * its level-1 entries into the in-memory mapping table.
			 */
			private void load(int headBlockIndex) throws IOException {
				// the first segment of the unit is its level-1 index
				Segment segment = new Segment(m_indexChannel, ((long) headBlockIndex) * ENTRY_PER_SEGMENT * SEGMENT_SIZE);
				long magicCode = segment.readLong();

				if (magicCode == 0) { // fresh (zero-filled) unit: stamp the magic code
					segment.writeLong(0, -1);
				} else if (magicCode != -1) {
					throw new IOException("Invalid index file: " + m_path);
				}

				m_segment = segment;
				m_nextSegment = 1 + ENTRY_PER_SEGMENT * headBlockIndex;
				m_offset = 8;

				// replay the level-1 entries; start at 1 because slot 0 is the magic code
				int readerIndex = 1;

				while (readerIndex < ENTRY_PER_SEGMENT) {
					int ip = segment.readInt(); // first 4 bytes of an 8-byte slot: ip
					int index = segment.readInt(); // last 4 bytes: baseIndex (message index / MESSAGE_PER_SEGMENT)

					readerIndex++;

					if (ip != 0) {
						Map<Integer, Integer> map = m_table.get(ip);

						if (map == null) {
							map = new HashMap<Integer, Integer>();
							m_table.put(ip, map);
						}

						// segment numbers were handed out sequentially at write time,
						// so replaying in order reproduces the same assignment
						Integer segmentNo = map.get(index);

						if (segmentNo == null) {
							segmentNo = m_nextSegment++;

							map.put(index, segmentNo);
						}

						m_offset += 8;
					} else {
						// a zero ip marks the end of the used level-1 entries
						break;
					}
				}
			}
		}

		/**
		 * An in-memory copy of one SEGMENT_SIZE slice of the index file, read on
		 * construction and written back on {@link #flushAndClose()}.
		 */
		private class Segment {
			private FileChannel m_segmentChannel;

			// absolute start position of this segment within the index file
			private long m_address;

			private ByteBuffer m_buf;

			private Segment(FileChannel channel, long address) throws IOException {
				// channel is the single shared channel of the index file; address is the
				// segment's start offset (segment number * SEGMENT_SIZE)
				m_segmentChannel = channel;
				m_address = address;

				m_buf = m_bufCache.get(); // pooled SEGMENT_SIZE buffer
				// m_buf = ByteBuffer.allocate(SEGMENT_SIZE);
				m_buf.mark(); // remember the current position...
				m_segmentChannel.read(m_buf, address);
				m_buf.reset(); // ...and rewind to it after filling the buffer
			}

			private synchronized void flushAndClose() throws IOException {
				if (m_buf != null) {
					int pos = m_buf.position();

					// write the whole buffer back to its slice of the file, then return
					// the buffer to the pool; null m_buf guards against double-close
					m_buf.position(0);
					m_segmentChannel.write(m_buf, m_address);
					m_buf.position(pos);
					m_bufCache.put(m_buf);
					m_buf = null;
				} else {
					Cat.logEvent("CloseBucket", "Duplicate:" + m_path.getAbsolutePath());
				}
			}

			private int readInt() throws IOException {
				return m_buf.getInt();
			}

			private long readLong() throws IOException {
				return m_buf.getLong();
			}

			private long readLong(int offset) throws IOException {
				return m_buf.getLong(offset);
			}

			@Override
			public String toString() {
				return String.format("%s[address=%s]", getClass().getSimpleName(), m_address);
			}

			private void writeLong(int offset, long value) throws IOException {
				m_buf.putLong(offset, value);
			}
		}

		/**
		 * Tiny LRU-ish cache of the most recent segments for one ip. Only segments with
		 * ids beyond the highest seen so far are created; requests for older, evicted
		 * segments return {@code null} (callers then fall back to direct file access).
		 */
		private class SegmentCache {
			private final static int CACHE_SIZE = 2;

			private long m_maxSegmentId;

			private Map<Long, Segment> m_latestSegments = new LinkedHashMap<Long, Segment>();

			private synchronized void flushAndClose() throws IOException {
				for (Segment segment : m_latestSegments.values()) {
					segment.flushAndClose();
				}
				m_latestSegments.clear();
			}

			private Segment findOrCreateNextSegment(long segmentId) throws IOException {
				Segment segment = m_latestSegments.get(segmentId);

				if (segment == null) {
					// only create forward: an id at or below m_maxSegmentId that is no
					// longer cached yields null
					if (segmentId > m_maxSegmentId) {
						if (m_latestSegments.size() >= CACHE_SIZE) {
							removeOldSegment();
						}

						// the index file's channel is shared; the segment maps the slice
						// starting at segmentId * SEGMENT_SIZE
						segment = new Segment(m_indexChannel, segmentId * SEGMENT_SIZE);

						m_latestSegments.put(segmentId, segment);
						m_maxSegmentId = segmentId;
					}
				}

				return segment;
			}

			// Evicts the oldest cached segment (insertion order), flushing it to disk first.
			private void removeOldSegment() throws IOException {
				Entry<Long, Segment> first = m_latestSegments.entrySet().iterator().next();
				Segment segment = m_latestSegments.remove(first.getKey());

				segment.flushAndClose();
			}
		}
	}

}
