#ifndef STORAGE_STATUS_PROXY_H
#define STORAGE_STATUS_PROXY_H

#include <muduo/base/Types.h>
#include <muduo/base/Logging.h>
#include <muduo/base/Atomic.h>
#include <muduo/base/Mutex.h>
#include <muduo/net/TcpConnection.h>

#include <sdfs/common/ToolKit.h>
#include <sdfs/protobuf/Message.h>
#include <sdfs/protobuf/Codec.h>
#include <sdfs/config/TrackerConfig.h>
#include <sdfs/redis/SyncRedisConnector.h>

#include <boost/enable_shared_from_this.hpp>
#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/any.hpp>

#include <set>
#include <vector>

// Read-write lock aliases used to guard the shared server tables below.
typedef boost::shared_mutex Lock;
typedef boost::unique_lock< Lock >  WriteLock;
typedef boost::shared_lock< Lock >  ReadLock;

// NOTE(review): using-directives at namespace scope in a header leak into
// every includer; prefer explicit qualification — confirm impact on
// dependent translation units before changing.
using namespace muduo;
using namespace boost;
using namespace muduo::net;
using namespace sdfs::protobuf;
using namespace sdfs::redis;

namespace sdfs
{

namespace tracker
{
// One pending replication job produced by an upload.  Kept an aggregate
// (no user-declared constructor) so existing brace-initialization in other
// translation units keeps compiling.
struct SyncTask
{
	int uploaderIndex;      // bucket slot of the server that uploaded the file
	muduo::string hashcode; // content hash identifying the file to replicate
	int timestamp;          // seconds since epoch when the upload was recorded
	bool sent[3];           // sent[i]: replica slot i has been notified
	                        // NOTE: members are zeroed only when value-initialized,
	                        // i.e. allocate with `new SyncTask()` (as BucketStatus does)

	// Chronological ordering — resolves the old "TODO: sortable".
	bool operator<(const SyncTask& rhs) const
	{
		return timestamp < rhs.timestamp;
	}
};

typedef shared_ptr<SyncTask> SyncTaskPtr;


class ServerStatus
{
public:
	/// Per-connection record for one storage server: latest heartbeat,
	/// liveness flag and an atomic task counter used for load balancing.
	/// @param idx       slot index inside the owning bucket (0..2)
	/// @param heartbeat first heartbeat received from this server
	/// @param time      receive time of that heartbeat
	/// @param localip   peer ip of the tracker-side connection
	/// @param localport peer port of the tracker-side connection
	ServerStatus(int idx, HeartBeatPtr heartbeat,
		muduo::Timestamp time, muduo::string localip, int localport):
		name_(heartbeat->name().c_str()),
		heartBeat_(heartbeat),
		lastHeartBeatTime_(time),
		port_(0),	// fix: was indeterminate when the heartbeat carried no port
		writable_(true),
		localIp_(localip),
		localPort_(localport),
		idx_(idx),
		status_(true)
	{
		if(heartbeat->has_ip())
			ip_ = heartbeat->ip().c_str();
		if(heartbeat->has_port())
			port_ = heartbeat->port();
	}

	/// True when no heartbeat arrived within heartBeatTimeoutMs.
	bool isExpired()
	{
		double diff = muduo::timeDifference(muduo::Timestamp::now(), lastHeartBeatTime_);
		LOG_DEBUG << "isExpired diff: " << diff;
		return diff >= sdfs::TrackerConfig::heartBeatTimeoutMs()/1000.0;
	}

	/// Refreshes the stored heartbeat payload and its receive time.
	void update(HeartBeatPtr& heartbeat, muduo::Timestamp time)
	{
		heartBeat_ = heartbeat;
		lastHeartBeatTime_ = time;
		if(heartbeat->has_ip())
			ip_ = heartbeat->ip().c_str();
		if(heartbeat->has_port())
			port_ = heartbeat->port();
	}

	/// Current load: number of tasks handed to this server so far.
	int32_t getTaskNum()
	{
		return numTask_.get();
	}

	/// Charges one more task to this server (atomic increment).
	void addTaskNum()
	{
		numTask_.add(1);
	}

	int getGroupIdx()
	{
		return heartBeat_->groupidx();
	}

	muduo::string& getIp()
	{
		return ip_;
	}

	/// Port advertised in the heartbeat; 0 when none was supplied.
	int getPort()
	{
		return port_;
	}

	muduo::string& getName()
	{
		return name_;
	}

	// Load-based ordering: the four operators compare the task counters.
	// (Idiom: dropped the redundant `? true : false` ternaries.)
	bool operator > (ServerStatus& right)
	{
		return this->getTaskNum() > right.getTaskNum();
	}

	bool operator >= (ServerStatus& right)
	{
		return this->getTaskNum() >= right.getTaskNum();
	}

	bool operator < (ServerStatus& right)
	{
		return this->getTaskNum() < right.getTaskNum();
	}

	bool operator <= (ServerStatus& right)
	{
		return this->getTaskNum() <= right.getTaskNum();
	}

	/// Allows this server to take writes again.
	void enableWrite()
	{
		LOG_DEBUG << name_ << " enableWrite";
		writable_ = true;
	}

	/// Excludes this server from write placement.
	void disableWrite()
	{
		LOG_DEBUG << name_ << " disableWrite";	// fix: log was missing the separating space
		writable_ = false;
	}

	bool writable()
	{
		LOG_DEBUG << name_ << " writable: " << writable_;
		return writable_;
	}

	muduo::string& getLocalIp()
	{
		return localIp_;
	}

	int getLocalPort()
	{
		return localPort_;
	}

	/// Slot index inside the owning bucket.
	uint getIndex()
	{
		return idx_;
	}

	/// True while the server is considered alive (see lock()/unlock()).
	bool status()
	{
		return status_;
	}

	/// Marks the server unavailable (its heartbeat timed out).
	void lock()
	{
		status_ = false;
	}

	/// Marks the server available again.
	void unlock()
	{
		status_ = true;
	}
private:
	muduo::string name_;
	HeartBeatPtr heartBeat_;
	Timestamp lastHeartBeatTime_;
	AtomicInt32 numTask_;	// tasks assigned so far (load metric)
	muduo::string ip_;
	int port_;
	bool writable_;
	muduo::string localIp_;
	int localPort_;
	uint idx_;		// slot index inside the bucket
	bool status_;	// false while the heartbeat is expired
};
typedef boost::shared_ptr<ServerStatus> ServerStatusPtr;

class BucketStatus: boost::noncopyable, public boost::enable_shared_from_this<BucketStatus>
{
	typedef std::vector<TcpConnectionPtr> ConnectionStatus;
public:
	/// Manages the (up to 3) replica servers of one bucket: registration,
	/// keepalive fan-out, load-based placement and file-sync dispatch.
	/// Thread-safety: connections_ is guarded by lock_ (shared mutex);
	/// syncTasks_ and lastSyncTime_ by syncMutex_.
	/// (The dead, commented-out redis-journal prototype was removed; see VCS.)
	BucketStatus(muduo::net::EventLoop* loop,
			sdfs::protobuf::ProtobufCodec* codec, int index):
		lastSyncTime_(0),	// fix: was never initialized before isUpToDate() read it
		loop_(loop),
		codec_(codec),
		keepaliveMessage_(new Keepalive()),
		index_(index),
		redisConn_(new SyncRedisConnector(sdfs::TrackerConfig::redisIp().c_str(),
			sdfs::TrackerConfig::redisPort()))
	{
		keepaliveMessage_->set_trackername(sdfs::TrackerConfig::name().c_str());
		redisConn_->connect();
		// Fixed slot table: one (initially empty) slot per replica.
		connections_.assign(3, TcpConnectionPtr());
	}

	/// Number of *registered* storage servers in this bucket.
	/// fix: previously returned connections_.size(), which is always 3
	/// because the slot table is pre-sized in the constructor — that made
	/// the emptiness guard in StorageServerProxy::packageStorageGroup() moot.
	int size()
	{
		ReadLock lock(lock_);
		int live = 0;
		for (uint i = 0; i < connections_.size(); ++i)
		{
			if(connections_.at(i) != NULL)
				live++;
		}
		return live;
	}

	/// Handles a heartbeat: refreshes an already-registered server, or
	/// registers the connection into the first free slot.
	void updateServer(const TcpConnectionPtr& conn,
		 HeartBeatPtr heartbeat, muduo::Timestamp time)
	{
		WriteLock lock(lock_);

		if(!conn->getContext().empty())
		{
			// Known connection: just refresh its heartbeat bookkeeping.
			ServerStatusPtr status = any_cast<ServerStatusPtr>(conn->getContext());
			status->update(heartbeat, time);
		}
		else
		{
			// First heartbeat on this connection: register it.
			muduo::string localip = conn->peerAddress().toIp();
			int localport = networkToHost16(conn->peerAddress().portNetEndian());
			uint idx = 0;
			// fix: bound the scan *before* indexing, so a full bucket cannot
			// reach connections_.at(3) (std::out_of_range with NDEBUG).
			while(idx < connections_.size() && connections_.at(idx) != NULL)
				idx++;
			assert(idx < connections_.size());
			ServerStatusPtr status(
				new ServerStatus(idx, heartbeat, time, localip, localport));
			conn->setContext(status);
			connections_.at(idx) = conn;
			LOG_DEBUG <<  "register new Storage: " << heartbeat->groupidx() << " ["
				<< conn->peerAddress().toIpPort();
		}
		//TODO: check lastSyncTime to confirm whether the storage is up to
		// date; if not, find the closest hashcode and send a FileSyncMessage.
	}

	/// Walks the task list from newest to oldest looking for the entry
	/// stamped @p synctime and returns the entry recorded just before it;
	/// returns the oldest task when synctime < 1, and an empty pointer when
	/// nothing qualifies.
	/// NOTE(review): "before" here follows the original reverse-iteration
	/// order — confirm against the storage-side replay protocol.
	SyncTaskPtr findNextTask(int synctime)
	{
		MutexLockGuard guard(syncMutex_);	// fix: reads raced with addSyncTask()/syncTask()
		SyncTaskPtr task;
		if(syncTasks_.empty())
			return task;
		if(synctime < 1)
			return syncTasks_.front();
		std::vector<SyncTaskPtr>::reverse_iterator it = syncTasks_.rbegin();
		while(it != syncTasks_.rend())
		{
			if( (*it)->timestamp == synctime )
			{
				++it;
				if(it == syncTasks_.rend())
					return task;	// matched the oldest entry: nothing precedes it
				return *it;
			}
			++it;
		}
		return task;
	}

	/// Pushes every pending sync task to each replica that has not yet been
	/// notified; a task is dropped once all three replicas were told.
	void syncTask()
	{
		ReadLock lock(lock_);				// guards connections_
		MutexLockGuard guard(syncMutex_);	// fix: size()/at() raced with addSyncTask()
		for(uint i = 0; i < syncTasks_.size(); ++i)
		{
			SyncTaskPtr task = syncTasks_.at(i);
			for (uint j = 0; j < connections_.size(); ++j)
			{
				TcpConnectionPtr conn = connections_.at(j);
				if(task->sent[j] == false && conn != NULL)
				{
					FileSyncMessage message;
					message.set_id(id_.getAndAdd(1));
					message.add_hashcode(task->hashcode.c_str());
					message.set_synctime(task->timestamp);
					codec_->Send(conn, message);
					task->sent[j] = true;
					LOG_DEBUG << "send: " << conn->peerAddress().toIpPort() << ", hashcode: "
						<< task->hashcode;
				}
			}
			if(task->sent[0] && task->sent[1] && task->sent[2])
			{
				// Everyone has been notified: retire the task.
				syncTasks_.erase(syncTasks_.begin()+i);
				i--;
			}
		}
	}

	/// Sends a Keepalive to every registered server; a server whose
	/// heartbeat has expired is locked (excluded from placement) until its
	/// next heartbeat unlocks it.
	void sendKeepalives()
	{
		ReadLock lock(lock_);
		for(ConnectionStatus::iterator it = connections_.begin();
			it != connections_.end(); ++it)
		{
			if(*it == NULL)
				continue;
			ServerStatusPtr status = any_cast<ServerStatusPtr>((*it)->getContext());
			if(status->isExpired())
			{
				status->lock();
				LOG_ERROR << "Storage: " << status->getName() << " timeout, locked!";
			}
			else
			{
				status->unlock();
			}
			keepaliveMessage_->set_id(id_.getAndAdd(1));
			codec_->Send(*it, *keepaliveMessage_);
		}
	}

	/**************************************************************
	 * when connection is shutdown, removeServer should be called *
	 *************************************************************/
	/// Frees slot @p i; returns false when i is out of range.
	bool removeServer(uint i)
	{
		WriteLock lock(lock_);	// fix: lock was commented out ("fix me"), racing readers
		if(i < connections_.size())
		{
			connections_.at(i).reset();
			assert(connections_.at(i) == NULL);
			LOG_DEBUG << "removeServer: " << i ;
			return true;
		}
		return false;
	}

	/// True when @p conn occupies one of this bucket's slots.
	bool isExist(const TcpConnectionPtr& conn)
	{
		ReadLock lock(lock_);
		for(ConnectionStatus::iterator it = connections_.begin();
			it != connections_.end(); ++it)
		{
			if(*it == conn)
				return true;
		}
		return false;
	}

	/// Picks the least-loaded, unlocked server and charges one task to it.
	/// Returns an empty pointer when no server is available.
	/// fix: the original tracked the *largest* task count (despite `curMin`),
	/// incremented the counter of the last server scanned instead of the
	/// chosen one, and dereferenced a null status on an empty bucket.
	TcpConnectionPtr getConnection()
	{
		ReadLock lock(lock_);
		TcpConnectionPtr conn;
		ServerStatusPtr chosen;
		for(ConnectionStatus::iterator it = connections_.begin();
			it != connections_.end(); ++it)
		{
			if(*it == NULL) continue;
			ServerStatusPtr status = any_cast<ServerStatusPtr>((*it)->getContext());
			if(!status->status())
				continue;	// server locked: heartbeat timed out
			if(chosen == NULL || status->getTaskNum() < chosen->getTaskNum())
			{
				chosen = status;
				conn = *it;
			}
		}
		if(chosen != NULL)
			chosen->addTaskNum();
		return conn;
	}

	/// Picks a storage and returns its status; empty when the bucket has none.
	const ServerStatusPtr pickStorage()
	{
		TcpConnectionPtr conn = getConnection();
		if(conn == NULL)
			return ServerStatusPtr();	// fix: avoided dereferencing a null connection
		return any_cast<ServerStatusPtr>(conn->getContext());
	}

	/// Returns the first registered server that is writable and alive,
	/// charging one task to it; empty pointer when none qualifies.
	/// fix: previously fell through returning the last scanned (and thus
	/// non-writable) status instead of an empty pointer.
	ServerStatusPtr pickWriter()
	{
		ReadLock lock(lock_);
		for(ConnectionStatus::iterator it = connections_.begin();
			it != connections_.end(); ++it)
		{
			if(*it == NULL) continue;

			ServerStatusPtr status = any_cast<ServerStatusPtr>((*it)->getContext());
			if(status->writable() && status->status())
			{
				status->addTaskNum();
				return status;
			}
		}
		return ServerStatusPtr();
	}

	/// Records that @p conn just uploaded @p hashcode: the other replicas
	/// of this bucket must receive a FileSyncMessage later (via syncTask()).
	void addSyncTask(const TcpConnectionPtr& conn,
		const muduo::string& hashcode)
	{
		LOG_DEBUG << "addSyncTask: " << hashcode;

		SyncTaskPtr task(new SyncTask());	// value-init zeroes the sent[] flags
		ServerStatusPtr status = any_cast<ServerStatusPtr>( conn->getContext() );
		assert(status->getIndex() < 3);		// sent[] has exactly 3 slots
		{
			MutexLockGuard guard(syncMutex_);
			lastSyncTime_ = Timestamp::now().secondsSinceEpoch();
			task->uploaderIndex = status->getIndex();
			task->hashcode = hashcode;
			task->timestamp = lastSyncTime_;
			// The uploader already holds the file: mark it as notified.
			task->sent[status->getIndex()] = true;
			syncTasks_.push_back(task);
		}

		// NOTE: do not send the FileSyncMessage here; all sync messages go
		// out on the heartbeat tick, or duplicate sync requests may be sent.
	}

	/// True when @p timestamp matches the newest recorded sync time.
	bool isUpToDate(int timestamp)
	{
		MutexLockGuard guard(syncMutex_);	// fix: lastSyncTime_ is written under this lock
		return lastSyncTime_ == timestamp;
	}

	/// Finds a registered server by name; empty pointer when absent.
	/// fix: previously returned the last scanned status on a miss and
	/// iterated connections_ without holding the read lock.
	ServerStatusPtr getStatus(const muduo::string& name)
	{
		ReadLock lock(lock_);
		for(ConnectionStatus::iterator it = connections_.begin();
			it != connections_.end(); ++it)
		{
			if(*it == NULL) continue;

			ServerStatusPtr status = any_cast<ServerStatusPtr>((*it)->getContext());
			LOG_DEBUG << "compare "<< status->getName() <<" and " << name;
			if(status->getName().compare(name) == 0)
			{
				LOG_DEBUG << "FOUND!";
				return status;
			}
		}
		return ServerStatusPtr();
	}

private:

	ConnectionStatus connections_;			// fixed 3-slot replica table (guarded by lock_)
	std::vector<SyncTaskPtr > syncTasks_;	// pending replication jobs (guarded by syncMutex_)
	MutexLock syncMutex_;
	int lastSyncTime_;						// newest task timestamp (guarded by syncMutex_)
	Lock lock_;
	muduo::net::EventLoop *loop_;
	sdfs::protobuf::ProtobufCodec *codec_;
	KeepalivePtr keepaliveMessage_;			// reused message; only its id changes per send
	int index_;								// this bucket's index within the proxy
	AtomicInt32 id_;						// monotonically increasing message id
	SyncRedisConnectorPtr redisConn_;
};

typedef boost::shared_ptr<BucketStatus> BucketStatusPtr;

class StorageServerProxy: boost::noncopyable
{
public:

	~StorageServerProxy(){}

	/// Routes a heartbeat to the bucket named by heartbeat->groupidx()
	/// (1-based, like addSyncTask()).
	void updateStorageServer(const TcpConnectionPtr& conn,
		HeartBeatPtr heartbeat, Timestamp time)
	{
		int32_t groupidx = heartbeat->groupidx();
		// fix: groupidx was used as an index with no validation; keep the
		// same contract addSyncTask() already asserts.
		assert(groupidx > 0 && groupidx <= bucketNum_);
		buckets_[groupidx-1]->updateServer(conn, heartbeat, time);
	}

	/// Searches every bucket for a server named @p name; empty when absent.
	ServerStatusPtr findStatus(const muduo::string& name)
	{
		ReadLock lock(lock_);
		for (uint i = 0; i < buckets_.size(); ++i)
		{
			ServerStatusPtr status = buckets_[i]->getStatus(name);
			// Verify the name: BucketStatus::getStatus() could hand back a
			// non-matching status on a miss, so non-null alone is not proof.
			if(status != NULL && status->getName().compare(name) == 0)
				return status;
		}
		return ServerStatusPtr();
	}

	/// Frees the bucket slot held by the (shut down) connection.
	void removeStorageServer(const TcpConnectionPtr& conn)
	{
		WriteLock lock(lock_);
		ServerStatusPtr status = any_cast<ServerStatusPtr>(conn->getContext());
		int group = status->getGroupIdx();
		int i = status->getIndex();
		assert(group > 0 && group <= bucketNum_);	// consistency: was hard-coded `< 4`
		buckets_[group-1]->removeServer(i);
	}


	const BucketStatusPtr getBucket(int idx)
	{
		return buckets_[idx];
	}

	BucketStatusPtr operator[](int idx)
	{
		return buckets_[idx];
	}

	/// Picks one storage per bucket for a full replica group; returns an
	/// empty vector unless every bucket has at least one registered server.
	shared_ptr<std::vector<ServerStatusPtr> > packageStorageGroup()
	{
		ReadLock lock(lock_);
		shared_ptr< std::vector<ServerStatusPtr> > addrs(new std::vector<ServerStatusPtr>());
		// Consistency: act on this object's members directly instead of
		// going through the instance_ pointer (identical object).
		if(buckets_[0]->size() > 0 &&
			buckets_[1]->size() > 0 &&
			buckets_[2]->size() > 0)
		{
			for (uint i = 0; i < buckets_.size(); ++i)
			{
				addrs->push_back(buckets_[i]->pickStorage());
			}
		}
		return addrs;
	}

	/// Round-robins across buckets for a writable server, trying each
	/// bucket at most once; empty pointer when no bucket has a writer.
	ServerStatusPtr findWriteStorage()
	{
		ServerStatusPtr status = buckets_[group_.getAndAdd(1) % bucketNum_]->pickWriter();
		int attempts = 0;
		while(status == NULL && attempts < bucketNum_-1)
		{
			status = buckets_[group_.getAndAdd(1) % bucketNum_]->pickWriter();
			attempts++;
		}
		return status;
	}

	/// Heartbeat tick: fan keepalives out to every bucket.
	void onKeepAlive()
	{
		ReadLock lock(lock_);
		for(uint i = 0 ;i < buckets_.size() ; ++i)
		{
			buckets_[i]->sendKeepalives();
		}
	}

	/// Returns the singleton; NULL before the two-argument overload ran.
	static StorageServerProxy* getInstance()
	{
		return instance_;
	}

	/// Lazily creates the singleton.
	/// NOTE(review): not thread-safe; assumed to be first called from the
	/// owning EventLoop thread before any concurrent use — confirm.
	static StorageServerProxy* getInstance(muduo::net::EventLoop* loop,
			sdfs::protobuf::ProtobufCodec* codec)
	{
		if(instance_ == NULL)
		{
			instance_ = new StorageServerProxy(loop, codec);
		}
		return instance_;
	}

	/// Queues @p hashcode for replication inside bucket @p groupidx (1-based).
	void addSyncTask(int groupidx, const TcpConnectionPtr& conn, muduo::string hashcode)
	{
		assert(groupidx > 0 && groupidx <= bucketNum_);
		ReadLock lock(lock_);
		buckets_[groupidx-1]->addSyncTask(conn, hashcode);
	}

	/// Heartbeat tick: flush pending sync tasks in every bucket.
	void syncBucketData()
	{
		ReadLock lock(lock_);
		for (int i = 0; i < bucketNum_; ++i)
		{
			buckets_[i]->syncTask();
		}
	}

private:

	/// Private: construct via getInstance(loop, codec).  Builds one bucket
	/// per replica group and schedules the two periodic ticks.
	StorageServerProxy(muduo::net::EventLoop* loop,
			sdfs::protobuf::ProtobufCodec* codec):
		loop_(loop),
		codec_(codec),
		bucketNum_(3)
	{
		for (int i = 0; i < bucketNum_; ++i)
		{
			BucketStatusPtr bucket(new BucketStatus(loop, codec, i));
			buckets_.push_back(bucket);
		}
		loop_->runEvery(sdfs::TrackerConfig::heartBeatIntervalMs()/1000.0,
			boost::bind(&StorageServerProxy::onKeepAlive, this));
		loop_->runEvery(sdfs::TrackerConfig::heartBeatIntervalMs()/1000.0,
			boost::bind(&StorageServerProxy::syncBucketData, this));
	}



	std::vector<BucketStatusPtr> buckets_;	// one BucketStatus per replica group
	Lock lock_;

	AtomicInt32 group_;						// round-robin cursor for findWriteStorage()
	muduo::net::EventLoop *loop_;
	sdfs::protobuf::ProtobufCodec *codec_;
	int bucketNum_;

	static StorageServerProxy* instance_;
};

// NOTE(review): defining this static member in a header yields duplicate
// symbols if the header is included from more than one translation unit;
// move the definition to a .cc file (or use C++17 `inline`) — confirm how
// many TUs include this header before relying on it.
StorageServerProxy* StorageServerProxy::instance_ = NULL;

}
}
#endif
