#include "congestmonitor.h"
#include "common.h"
#include <cmath>
#include <iomanip>
#include "trend.h"

#define ALPHA 0.9
#define DELAY_THRESHOLD 100//ms
#define DELAY_CONGESTION_THRESHOLD 0.7
#define LOSS_CONGESTION_THRESHOLD 15

// Decide whether lhs is less than rhs on the wrapping (mod 2^32) number
// circle. If lhs sits near UINT_MAX and rhs near 0, lhs is assumed to have
// wrapped and is therefore treated as the smaller value.
bool wrapping_compare_less(uint32 lhs, uint32 rhs)
{
	// Unsigned subtraction gives the distance in each direction around
	// the circle. Whichever walk is shorter reveals the ordering:
	// a shorter walk upwards (lhs -> rhs) means lhs < rhs.
	const uint32 walk_up = rhs - lhs;
	const uint32 walk_down = lhs - rhs;
	return walk_up < walk_down;
}

// Construct an empty delay history; all state is set up by clear().
DelayHist::DelayHist()
{
	clear();
}

// Reset the delay history to its pristine state: empty base-delay and
// current-delay ring buffers, and cleared average/variance statistics.
void DelayHist::clear()
{
	// Base-delay tracking starts uninitialized; the first sample will
	// seed it (see add_sample).
	delay_base_initialized = false;
	delay_base = 0;
	delay_base_time = Now_ms();
	delay_base_idx = 0;
	cur_delay_idx = 0;

	// Zero both circular histories.
	for (size_t n = 0; n < DELAY_BASE_HISTORY; n++)
		delay_base_hist[n] = 0;
	for (size_t n = 0; n < CUR_SAMPLE_SIZE; n++)
		cur_delay_hist[n] = 0;

	// Smoothed statistics and their observed extremes.
	delay_avg = 0.0;
	delay_var = 0.0;
	delay_avg_min = UINT_MAX;
	delay_var_min = UINT_MAX;
	delay_avg_max = 0;
	delay_var_max = 0;
}

// Raise every base delay (history entries and the current base) by `offset`.
// This compensates for clock skew, which we detect by watching the other
// side's changes in its own base delay. The offset is expected to be a
// small "positive" value, never a wrapped-around negative one.
void DelayHist::shift(const uint32 offset)
{
	// assert(offset < 0x10000000);
	size_t idx = 0;
	while (idx < DELAY_BASE_HISTORY) {
		//TODO: add a filter
		delay_base_hist[idx] += offset;
		++idx;
	}
	delay_base += offset;
}

// Record a new raw one-way-delay sample (a wrapping uint32 timestamp
// difference). Maintains the minimum-tracking base-delay history, stores
// the base-relative delay in the current-delay ring buffer, and updates
// the EWMA average/variance plus their observed extremes. Periodically
// (every CLOCK_DRIFT_INTERVAL) rotates the base-delay history to adapt
// to clock drift.
void DelayHist::add_sample(uint32 sample)
{
	// The two clocks (in the two peers) are assumed not to
	// progress at the exact same rate. They are assumed to be
	// drifting, which causes the delay samples to contain
	// a systematic error, either they are under-
	// estimated or over-estimated. This is why we update the
	// delay_base to adjust for this.

	// This means the values will keep drifting and eventually wrap.
	// We can cross the wrapping boundary in two directions, either
	// going up, crossing the highest value, or going down, crossing 0.

	// if the delay_base is close to the max value and sample actually
	// wrapped on the other end we would see something like this:
	// delay_base = 0xffffff00, sample = 0x00000400
	// sample - delay_base = 0x500 which is the correct difference

	// if the delay_base is instead close to 0, and we got an even lower
	// sample (that will eventually update the delay_base), we may see
	// something like this:
	// delay_base = 0x00000400, sample = 0xffffff00
	// sample - delay_base = 0xfffffb00
	// this needs to be interpreted as a negative number and the actual
	// recorded delay should be 0.

	// It is important that all arithmetic that assume wrapping
	// is done with unsigned integers. Signed integers are not guaranteed
	// to wrap the way unsigned integers do. At least GCC takes advantage
	// of this relaxed rule and won't necessarily wrap signed ints.

	// remove the clock offset and propagation delay.
	// delay base is min of the sample and the current
	// delay base. This min-operation is subject to wrapping
	// and care needs to be taken to correctly choose the
	// true minimum.

	// specifically the problem case is when delay_base is very small
	// and sample is very large (because it wrapped past zero), sample
	// needs to be considered the smaller

	if (!delay_base_initialized) {
		// First real measurement: seed the base delay and its entire
		// history with this sample so the min-tracking below starts
		// from a sane value.
		for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) {
			// if we don't have a value, set it to the current sample
			delay_base_hist[i] = sample;
		}
		delay_base = sample;
		delay_base_initialized = true;
	}

	if (wrapping_compare_less(sample, delay_base_hist[delay_base_idx])) {
		// sample is smaller than the current delay_base_hist entry
		// update it
		delay_base_hist[delay_base_idx] = sample;
	}

	// is sample lower than delay_base? If so, update delay_base
	if (wrapping_compare_less(sample, delay_base)) {
		// sample is smaller than the current delay_base
		// update it
		delay_base = sample;
	}

	// this operation may wrap, and is supposed to
	const uint32 delay = sample - delay_base;
	// sanity check. If this is triggered, something fishy is going on
	// it means the measured sample was greater than 32 seconds!
	//		assert(delay < 0x2000000);

	// Store the base-relative delay in the circular history and advance
	// the write index.
	cur_delay_hist[cur_delay_idx] = delay;
	//cur_delay_hist[cur_delay_idx].tick = tick;
	//cur_delay_hist[cur_delay_idx].delay = delay;
	cur_delay_idx = (cur_delay_idx + 1) % CUR_SAMPLE_SIZE;

	//calculate avg and var, do not need to consider clock skew now for it has been corrected.
	// Exponentially-weighted moving average and variance (ALPHA = 0.9,
	// i.e. each new sample contributes 10%). Note the variance term uses
	// the freshly updated average.
	delay_avg = ALPHA * delay_avg + (1-ALPHA) * delay;
	delay_var = ALPHA * delay_var + (1-ALPHA) * (delay-delay_avg)*(delay-delay_avg);
	// Track the extremes ever observed (used elsewhere to normalize into
	// a congestion index; see the commented-out get_congestion_idx).
	delay_avg_min = min(delay_avg, delay_avg_min);
	delay_avg_max = max(delay_avg, delay_avg_max);
	delay_var_min = min(delay_var, delay_var_min);
	delay_var_max = max(delay_var, delay_var_max);

	//LOG4CPLUS_DEBUG(g_logger, "delay_avg = "<< delay_avg <<" ms, " << "delay_var = "<< delay_var);

	uint32 current = Now_ms();
	// once every minute
	if (current - delay_base_time > CLOCK_DRIFT_INTERVAL) {
		delay_base_time = current;
		delay_base_idx = (delay_base_idx + 1) % DELAY_BASE_HISTORY;
		// clear up the new delay base history spot by initializing
		// it to the current sample, then update it 
		delay_base_hist[delay_base_idx] = sample;
		delay_base = delay_base_hist[0];
		// Assign the lowest delay in the last 2 minutes to delay_base
		// (wrapping-aware minimum over the whole history window)
		for (size_t i = 0; i < DELAY_BASE_HISTORY; i++) {
			if (wrapping_compare_less(delay_base_hist[i], delay_base))
				delay_base = delay_base_hist[i];
		}
	}
}

// Return the most recently recorded base-relative delay. cur_delay_idx
// points at the slot for the NEXT sample, so the latest one sits one
// position behind it, wrapping around the ring buffer.
uint32 DelayHist::get_last()
{
	const size_t last = (cur_delay_idx + CUR_SAMPLE_SIZE - 1) % CUR_SAMPLE_SIZE;
	return cur_delay_hist[last];
}

//// TODO: this congestion-index heuristic is crude and needs rework.
//double DelayHist::get_congestion_idx()
//{
//	double avg_ratio = (delay_avg-delay_avg_min)/(delay_avg_max-delay_avg_min);
//	double var_ratio = (delay_var-delay_var_min)/(delay_var_max-delay_var_min);
//	return sqrt(avg_ratio*var_ratio);
//}

//bool DelayHist::get_increasing_trend()
//{
//	assert(TREND_SAMPLE_SIZE < CUR_SAMPLE_SIZE);
//	SpearmanTrendDetector trend;
//	size_t pos = (cur_delay_idx - TREND_SAMPLE_SIZE + CUR_SAMPLE_SIZE) % CUR_SAMPLE_SIZE;
//	for (size_t i = 0; i < TREND_SAMPLE_SIZE; i++) {
//		trend.AddSample(cur_delay_hist[pos].delay);
//		pos = (pos + 1) % CUR_SAMPLE_SIZE;
//	}
//
//	return trend.CalcIncreasingTrend(false);
//}

//uint32 DelayHist::get_value()
//{
//	uint32 value = UINT_MAX;
//	for (size_t i = 0; i < CUR_SAMPLE_SIZE; i++) {
//		value = min(cur_delay_hist[i].delay, value);
//	}
//	// value could be UINT_MAX if we have no samples yet...
//	return value;
//}
//////////////////////////////////////////////////////////////////////////

// Track the given monitor and subscribe to its congestion notifications,
// so correlated (shared-bottleneck) congestion can be detected across links.
void SharedBottleneckMonitor::AddCongestMonitor(CongestMonitor* pCMonitor)
{
	pCMonitor_set_.insert(pCMonitor);
	pCMonitor->AddCongestListener(this);
}

// Forget the given monitor and unsubscribe from its notifications.
// NOTE: "ReomveCongestListener" is misspelled in the declared API; the
// call must match that spelling until the header is fixed.
void SharedBottleneckMonitor::RemoveCongestMonitor(CongestMonitor* pCMonitor)
{
	pCMonitor_set_.erase(pCMonitor);
	pCMonitor->ReomveCongestListener(this);
}

// Callback fired by a registered CongestMonitor when its congestion level
// changes. Intentionally empty for now: shared-bottleneck correlation
// across monitors is not implemented yet.
// TODO: correlate congestion events from the registered monitors.
void SharedBottleneckMonitor::OnStatusChanged(const CongestMonitor* pCMonitor,
							 uint32 delay, 
							 uint32 loss, 
							 ICongestListener::CONGEST_LEVEL level)
{

}
//////////////////////////////////////////////////////////////////////////
// Feed a new metrics sample through the clock-skew filter and return the
// (base-relative) one-way delay. The reverse-path base delay is used to
// detect skew between the two clocks: when it moves, the forward history
// is shifted by the same amount.
uint32 DelayFilter::AddSample(const NtdMetrics* pMetrics)
{
	uint32 prev_delay_base = reverse_delay_hist_.delay_base;

	LOG4CPLUS_TRACE(g_logger, "owd = "<<pMetrics->owd - delay_hist_.delay_base
		<<" "<<pMetrics->owd <<" "<<delay_hist_.delay_base);
	reverse_delay_hist_.add_sample( pMetrics->reverse_owd);

	// the reverse congestion also affects the reverse_owd, we can simply apply the following filter
	// never adjust more than 10 milliseconds
	// Both differences are unsigned and wrap; when the true change is small
	// (<= 10 ms in either direction) exactly one of the two is <= 10, while
	// a larger change makes both huge, so the shift is skipped.
	if (prev_delay_base - reverse_delay_hist_.delay_base <= 10 || 
		reverse_delay_hist_.delay_base - prev_delay_base <= 10 ) 
	{
		delay_hist_.shift(prev_delay_base - reverse_delay_hist_.delay_base);
	}

	delay_hist_.add_sample(pMetrics->owd);
	LOG4CPLUS_DEBUG(g_logger, "delay_avg = "<< delay_hist_.delay_avg 
		<<" [ "<<delay_hist_.delay_avg_min <<", " <<delay_hist_.delay_avg_max <<" ]" 
		<< "delay_var = "<< delay_hist_.delay_var
		<<" [ "<<delay_hist_.delay_var_min <<", " <<delay_hist_.delay_var_max <<" ]");


	// if the delay estimate exceeds the RTT, adjust the base_delay to
	// compensate
	// NOTE(review): shift() only moves the base-delay history, so the value
	// returned below is still the pre-shift one; the correction takes effect
	// on future samples. Presumably intentional — confirm.
	if ( delay_hist_.get_last() > pMetrics->rtt) {
		//assert(false);
		delay_hist_.shift(delay_hist_.get_last() - pMetrics->rtt);
	}

	return delay_hist_.get_last();
}

//////////////////////////////////////////////////////////////////////////

// Construct a monitor for the given conversation and register it with the
// global shared-bottleneck monitor singleton.
CongestMonitor::CongestMonitor(uint16 conv_id):conv_id_(conv_id)
{
	SharedBottleneckMonitor::Instance()->AddCongestMonitor(this);
}

// Deregister from the shared-bottleneck monitor on destruction, matching
// the registration done in the constructor.
CongestMonitor::~CongestMonitor()
{
	SharedBottleneckMonitor::Instance()->RemoveCongestMonitor(this);
}

void CongestMonitor::AddSample(const NtdMetrics* pMetrics)
{
	NtdMetrics metrics = *pMetrics;
	metrics.owd = delay_filter_.AddSample(pMetrics);

	metrics_hist_.AddMetrics(&metrics);

	//Let's store the loss 
	//for each probe packet will have a resp, so we can calculate loss on the sender side, which provides
	//the raw information about pkt loss. Seems tracing each packets and calculate the loss is not necessary,
	//for we don't need to cache burst loss, we just want the loss ratio in a time period, and sampling is enough.
	//TODO: add loss event process

    //TODO: we need to switch to loss pattern detection later, here simple treat loss as a hint of congestion
	if( pMetrics->loss > 0 ){
		LOG4CPLUS_DEBUG(g_logger, ">>>>>>>>>>>>Congested, loss = " << pMetrics->loss);
		NotifyStatusChanged(metrics_hist_.GetLastDelay(), pMetrics->loss, ICongestListener::SEVERE_CONGEST);
	}else{
		//make decision based on delay, we can use avg value here to filter out burst
		if( metrics_hist_.GetLastDelay() >= DELAY_THRESHOLD ){
			//first check var and avg
			//if( delay_hist_.get_congestion_idx() > DELAY_CONGESTION_THRESHOLD )

			bool trend_idx = metrics_hist_.GetDelayIncreasingTrend();
            if( trend_idx )
			{
				LOG4CPLUS_DEBUG(g_logger, ">>>>>>>>>>>>Congested, delay = " << metrics_hist_.GetLastDelay() <<" ms, "
					//<<"idx = " << std::setprecision(3) << delay_hist_.get_congestion_idx());
					<<" increasing : "<< trend_idx);
				NotifyStatusChanged(metrics_hist_.GetLastDelay(), pMetrics->loss, ICongestListener::LIGHT_CONGEST);
			}
		}
	}
}

// Register a listener to receive congestion status notifications
// (duplicates are ignored by the set).
void CongestMonitor::AddCongestListener(ICongestListener* pListener)
{
	pCongListener_set_.insert(pListener);
}

// Deregister a previously added listener.
// NOTE(review): the name is misspelled ("Reomve"); it matches the declared
// API and its callers, so it cannot be renamed here without the header.
void CongestMonitor::ReomveCongestListener(ICongestListener* pListener)
{
	pCongListener_set_.erase(pListener);
}

// Fan a congestion status change out to every registered listener.
void CongestMonitor::NotifyStatusChanged(uint32 delay, 
										 uint32 loss, 
										 ICongestListener::CONGEST_LEVEL level)
{
	typedef std::set<ICongestListener*>::iterator ListenerIter;
	ListenerIter it = pCongListener_set_.begin();
	const ListenerIter last = pCongListener_set_.end();
	while (it != last) {
		(*it)->OnStatusChanged(this, delay, loss, level);
		++it;
	}
}

//////////////////////////////////////////////////////////////////////////
// UDP-flavoured monitor: wires the loss-pattern detector to the shared
// metrics history owned by the base class.
UDPCongestMonitor::UDPCongestMonitor(uint16 conv_id):CongestMonitor(conv_id),loss_detector_(metrics_hist_)
{

}

// Currently a plain pass-through to the base-class sample processing.
// TODO(review): presumably UDP-specific loss handling (loss_detector_)
// will hook in here later.
void UDPCongestMonitor::AddSample(const NtdMetrics* pMetrics)
{
	CongestMonitor::AddSample(pMetrics);
}
//////////////////////////////////////////////////////////////////////////
// TCP-flavoured monitor; no extra state beyond the base class.
TCPCongestMonitor::TCPCongestMonitor(uint16 conv_id):CongestMonitor(conv_id)
{

}

//////////////////////////////////////////////////////////////////////////
// Start with an empty ring buffer; cur_idx is the next write position.
MetricsHist::MetricsHist()
{
	cur_idx = 0;

}

// Store a metrics sample in the circular history, advance the write index,
// and log the sample's fields for diagnostics.
void MetricsHist::AddMetrics(NtdMetrics* pMetrics)
{
	hist[cur_idx] = *pMetrics;
	cur_idx = (cur_idx + 1) % CUR_SAMPLE_SIZE;

	// BUGFIX: the "seq" label used to be glued directly onto the conv_id
	// value (output read like "conv_id = 5seq = 7"); add the missing
	// separator so the log line stays parseable.
	LOG4CPLUS_DEBUG(g_logger, "["<<pMetrics->proto_type<<"]"<<" conv_id = "<< pMetrics->conv_id
		<< " seq = " << pMetrics->seq
		<< " now = "<< Now_ms() <<" ms, send_ts = "<< pMetrics->tick
		<< " ms, rtt = " << pMetrics->rtt <<" ms, rel_owd = " << pMetrics->owd <<" ms "
		<<" recv_rate = " << pMetrics->recv_rate << "Bytes/sec "
		<<" pkt_loss = " << pMetrics->loss);
}

bool MetricsHist::GetDelayIncreasingTrend()
{
	assert(TREND_SAMPLE_SIZE < CUR_SAMPLE_SIZE);
	SpearmanTrendDetector trend;
	size_t pos = (cur_idx - TREND_SAMPLE_SIZE + CUR_SAMPLE_SIZE) % CUR_SAMPLE_SIZE;
	for (size_t i = 0; i < TREND_SAMPLE_SIZE; i++) {
		trend.AddSample(hist[pos].owd);
		pos = (pos + 1) % CUR_SAMPLE_SIZE;
	}

	return trend.CalcIncreasingTrend(false);
}

// Return the one-way delay of the most recently added sample.
uint32 MetricsHist::GetLastDelay()
{
	return hist[GetLastIndex(cur_idx)].owd;
}

// Return the loss delta between the two most recent samples.
// NOTE(review): this presumably assumes `loss` is a cumulative counter and
// that at least two samples exist; with fewer samples or a non-monotonic
// counter the unsigned subtraction can wrap — confirm against the producer.
uint32 MetricsHist::GetLastLoss()
{
	size_t idx_last = GetLastIndex(cur_idx);
	size_t idx_prev = GetLastIndex(idx_last);
	return hist[idx_last].loss - hist[idx_prev].loss;
}

// Loss ratio over the last `perCnt` samples.
// TODO: not implemented yet — always returns 0.
uint32 MetricsHist::GetLastLossRatioByCnt(int32 perCnt)
{
	return 0;
}

// Loss ratio over the last `period` (presumably milliseconds; see the
// 3000 passed by LossPatternDetection::CheckCongestion).
// TODO: not implemented yet — always returns 0.
uint32 MetricsHist::GetLastLossRatioByPeriod(int32 period)
{
	return 0;
}

// Per-window loss ratios, bucketed by sample count.
// TODO: not implemented — returns an empty vector.
// NOTE(review): this is a free function, unlike the MetricsHist::GetLast*
// methods above; it may have been intended as a MetricsHist member — verify
// against the header.
std::vector<uint32> GetLossRatioByCnt(int32 perCnt)
{
	std::vector<uint32> ratioVec;
	return ratioVec;
}
// Per-window loss ratios, bucketed by time period.
// TODO: not implemented — returns an empty vector.
// NOTE(review): free function like GetLossRatioByCnt above; possibly meant
// to be a MetricsHist member — verify against the header.
std::vector<uint32> GetLossRatioByPeriod(int32 period)
{
	std::vector<uint32> ratioVec;
	return ratioVec;
}

//////////////////////////////////////////////////////////////////////////

// Bind the detector to the metrics history it inspects and start in the
// INIT state.
LossPatternDetection::LossPatternDetection(MetricsHist& hist)
	: hist_(hist)
	, status_(INIT)
{
}

// Nothing to release; the referenced MetricsHist is owned elsewhere.
LossPatternDetection::~LossPatternDetection()
{

}

//If loss is detected, we have to try first to see whether there's increasing trend in delay
//If yes, we can declare there's congestion on the link
//Otherwise, we have to watch for a while to filter out some noise
//1. if the loss or delay is unbearable, the link may be already congested, we can add them to 
//bottleneck monitor to see if any correlated link, if so we can adjust bandwidth between them,
//and verify the adjustment later. If no correlation found, we can try compete for the bandwidth,
//and we can watch on the correlation between loss and sending rate
//2. if the loss or delay is bearable, we can do nothing.
void LossPatternDetection::CheckCongestion()
{
	if( status_ == INIT && hist_.GetLastLoss() > 0 ){
		if( hist_.GetDelayIncreasingTrend() ){
			LOG4CPLUS_DEBUG(g_logger, "loss occured, and incresing trend in delay, congested!");
		}else{
			if( hist_.GetLastLossRatioByPeriod(3000) >= LOSS_CONGESTION_THRESHOLD 
				|| hist_.GetLastDelay() >= DELAY_CONGESTION_THRESHOLD )
			{
				//we'll wait for callback to switch status when no correlation found
				status_ = BOTTLENECK_DETECT;
				LOG4CPLUS_DEBUG(g_logger, "unbearable network, try find the correlated links");
			}else{
				LOG4CPLUS_DEBUG(g_logger, "bearable network, do nothing");
			}
		}
	}else if( status_ == BANDWIDTH_COMPETE ){
		//we'll assume turn up our sending rate in this mode, and let's check the correlation between
		//loss and sending rate.

		bool increasing = false;
		if( increasing ){
			//if the loss increase as the sending rate increase, then there's no need
			//to turn up the sending rate, we just roll back to the initial state or we can choose to stop this
			//APP for there's no solution in this case.
            LOG4CPLUS_DEBUG(g_logger, "no effect, let's roll back");
			//TODO: congestion control
		}else{
			//if doesn't, the loss doesn't increase, we're competing with other traffic, good, stay here
		}

	}

}

// Callback entry: invoked when bottleneck detection found no correlated
// links; moves the state machine into bandwidth-compete mode. A no-op in
// any other state.
void LossPatternDetection::SwitchToCompeteMode()
{
	if( status_ != BOTTLENECK_DETECT ){
		return;
	}
	LOG4CPLUS_DEBUG(g_logger, "No correlation found, switch to compete mode");
	status_ = BANDWIDTH_COMPETE;

	//Let's turn up the sending rate, 
	//maybe according to the priority of links, if there are multiple such links
	//TODO: congestion control
}