
#ifndef __SMO_SOFT_INCLUDED
#define __SMO_SOFT_INCLUDED

class CSMO {
protected:
	typedef float _TyFloat; // internal data type

	_TyFloat m_f_C; // box constraint on the Lagrange multipliers (soft margin)
	_TyFloat m_f_tolerance; // KKT violation tolerance
	_TyFloat m_f_epsilon; // numerical round-off threshold
	// learning config

	bool m_b_linear_kernel; // chooses between the linear and the RBF kernel
	_TyFloat m_f_two_sigma_squared; // rbf kernel
	// kernel config

	_TyFloat m_f_offset;									/* threshold */
	std::vector<_TyFloat> m_alpha_list;			/* Lagrange multipliers */
	std::vector<_TyFloat> m_w_list;				/* weight vector: only for linear kernel */
	// output

	std::vector<_TyFloat> m_grad_list; // NOTE(review): never resized in SetModel(), so clearing it in Train() is a no-op; confirm whether it is allocated elsewhere
	std::vector<_TyFloat> m_error_cache; // cached decision function errors for the non-bound points
	std::vector<_TyFloat> m_precomputed_dot_product; // n_point_num x n_point_num
	size_t m_n_pdp_row_length; // for 2D acess to m_precomputed_dot_product
	// intermediate

	CMerseneTwister m_random; // random generator for the SMO working set heuristics

	CDataModel *m_p_data; // training data (not owned)

public:
	/**
	 *	@brief default constructor; sets default learning and kernel parameters
	 *	@note The random generator is seeded with a constant so that training is reproducible.
	 */
	CSMO()
		:m_p_data(0)
	{
		m_f_C = 64;
		m_f_tolerance = .001f;
		m_f_epsilon = .001f;

		m_f_two_sigma_squared = 2;
		m_b_linear_kernel = false;

		m_f_offset = 0;

		m_random.init_genrand(123456); // fixed seed - reproducible runs
	}

	/**
	 *	@brief calculates error rate of the learned function on the training data
	 *	@return Returns the fraction of misclassified training points,
	 *		or 0 if the model contains no points.
	 */
	double f_ErrorRate() const
	{
		size_t n_point_num = m_p_data->n_Point_Num();
		if(!n_point_num)
			return 0; // avoid division by zero on an empty model
		int n_error = 0;
		for(size_t i = 0; i < n_point_num; ++ i) {
			if((f_LearnedFunc(i) > 0) != (n_Target(i) > 0))
				++ n_error; // sign of decision function disagrees with the label
		}
		return double(n_error) / n_point_num;
	}

	/**
	 *	@brief evaluates the decision function on a training point
	 *	@param[in] n_point is zero-based training point index
	 *	@return Returns value of the decision function
	 *		(its sign gives the predicted class).
	 */
	_TyFloat f_LearnedFunc(size_t n_point) const
	{
		if(m_b_linear_kernel)
			return f_learned_func_linear_dense(n_point);
		else
			return f_learned_func_nonlinear(n_point);
	}

	/**
	 *	@brief sets the data model and allocates the internal structures
	 *	@param[in] p_data is pointer to the training data (the caller retains ownership)
	 *	@return Returns true on success, false on not enough memory.
	 *	@note For the nonlinear kernel, this precomputes a dense n x n dot product
	 *		table; impractical one! (there must be threshold for point num)
	 */
	bool SetModel(CDataModel *p_data)
	{
		m_p_data = p_data;
		// set data model

		m_alpha_list.clear();
		m_w_list.clear();
		m_error_cache.clear();
		m_precomputed_dot_product.clear();

		size_t n_point_num = p_data->n_Point_Num();
		size_t n_dimension_num = p_data->n_Dimension_Num();
		// get SVM size

		if(!stl_ut::Resize_To_N(m_alpha_list, n_point_num, 0) ||
		   !stl_ut::Resize_To_N(m_error_cache, n_point_num, 0) ||
		   (m_b_linear_kernel && !stl_ut::Resize_To_N(m_w_list, n_dimension_num, 0)))
			return false;
		// allocate arrays for input data, and for SVM

		m_n_pdp_row_length = 0;
		if(!m_b_linear_kernel) {
			if(stl_ut::Resize_To_N(m_precomputed_dot_product, n_point_num * n_point_num)) {
				m_n_pdp_row_length = n_point_num;
				for(size_t i = 0; i < n_point_num; ++ i) {
					for(size_t j = 0; j < n_point_num; ++ j)
						f_precomputed_dot_product(i, j) = f_DotProductFunc(i, j);
				}
			} else
				return false;
			// precompute dot product on data
		}

		return true;
	}

	/**
	 *	@brief fills a vector with a constant value
	 *	@tparam _TyVec is vector type
	 *	@param[in,out] r_vec is reference to the vector to be filled
	 *		(must be a reference; passing by value would only modify a temporary copy)
	 *	@param[in] value is the fill value (default 0)
	 */
	template <class _TyVec>
	inline void ClearVector(_TyVec &r_vec, int value = 0) // fix: was pass-by-value, silently clearing a copy
	{
		for(size_t i = 0, n = r_vec.size(); i < n; ++ i)
			r_vec[i] = value;
	}

	/**
	 *	@brief trains the SVM using the SMO algorithm
	 *	@param[in] f_C is the box constraint (soft margin parameter)
	 *	@param[in] f_tolerance is KKT violation tolerance
	 *	@param[in] f_epsilon is numerical round-off threshold
	 *	@param[in] n_max_iters is maximal number of outer SMO passes (default 15000)
	 *	@param[in] b_verbose enables progress prints to stdout (default false)
	 *	@return Returns true on convergence, false if the iteration limit was hit.
	 *	@note SetModel() must be called before this.
	 */
	bool Train(_TyFloat f_C, _TyFloat f_tolerance, _TyFloat f_epsilon,
		size_t n_max_iters = 15000, bool b_verbose = false)
	{
		ClearVector(m_w_list);
		ClearVector(m_error_cache);
		// make sure everything is zero

		ClearVector(m_alpha_list, 0);
		ClearVector(m_grad_list, 1);
		m_f_offset = 0;
		// reset the learned state so that repeated Train() calls start from scratch

		m_f_C = f_C;
		m_f_tolerance = f_tolerance;
		m_f_epsilon = f_epsilon;
		// set parameters

		CTimer timer;
		double f_verbose_time = 5;
		// verbose timer

		size_t n_point_num = m_p_data->n_Point_Num();
		// get SVM size

		{
			int n_change_num = 0;
			bool b_examine_all = true; // first pass examines all the points
			size_t n_iter = 0;
			for(; n_change_num || b_examine_all; ++ n_iter) {
				n_change_num = 0;
				if(b_examine_all) {
					for(size_t i = 0; i < n_point_num; ++ i)
						n_change_num += (ExamineExample(int(i)))? 1 : 0;
				} else {
					for(size_t i = 0; i < n_point_num; ++ i) {
						if(m_alpha_list[i] > 0 && m_alpha_list[i] < m_f_C) // only the non-bound points
							n_change_num += (ExamineExample(int(i)))? 1 : 0;
					}
				}
				if(b_examine_all)
					b_examine_all = false;
				else if(!n_change_num)
					b_examine_all = true; // non-bound pass made no progress; fall back to a full pass
				// alternate between full passes and non-bound passes (Platt's outer loop)

				if(b_verbose && timer.f_Time() > f_verbose_time) {
					float f_time = timer.f_Time();
					f_verbose_time = f_time + 1;
					printf("SVM learned %.2f%% of training examples (time: " PRItime ", pass: %u) \r",
						(1 - f_ErrorRate()) * 100, PRItimeparams(f_time), (unsigned int)n_iter); // %u: n_iter is unsigned (was %d with size_t)
				}
				// print error rate

				if(n_iter > n_max_iters)
					return false;
				// we failed to learn in given number of iterations
			}
			// SMO

			if(b_verbose)
				printf("finished after %u iterations\n", (unsigned int)n_iter); // %u: n_iter is unsigned (was %d with size_t)
		}
		// run SMO

		return true;
	}

protected:
	/**
	 *	@brief evaluates dot product of two training points
	 */
	_TyFloat f_DotProductFunc(size_t i, size_t j) const
	{
		return f_dot_product_dense(i, j);
	}

	/**
	 *	@brief evaluates the configured kernel on two training points
	 */
	_TyFloat f_KernelFunc(size_t i, size_t j) const
	{
		if(m_b_linear_kernel)
			return f_DotProductFunc(i, j);
		else
			return f_RBF_Kernel(i, j);
	}

	/**
	 *	@brief examines a single training point and tries to optimize it
	 *		against a second point chosen by heuristics (Platt's inner loop)
	 *	@param[in] i2 is zero-based index of the point to be examined
	 *	@return Returns 1 if a successful optimization step was taken, 0 otherwise.
	 */
	int ExamineExample(int i2)
	{
		int y2 = n_Target(i2);
		_TyFloat f_a2 = m_alpha_list[i2];
		_TyFloat f_err2 = (f_a2 > 0 && f_a2 < m_f_C)? m_error_cache[i2] : f_LearnedFunc(i2) - y2; // cache is only valid for non-bound points
		// get target, alpha and error for i2

		size_t n_point_num = m_p_data->n_Point_Num();

		_TyFloat r2 = f_err2 * y2;
		if((r2 < -m_f_tolerance && f_a2 < m_f_C) || (r2 > m_f_tolerance && f_a2 > 0)) { // KKT conditions violated by more than tolerance
			{
				int i1 = -1;
				_TyFloat f_diff_max = 0;
				for(int k = 0; k < n_point_num; ++ k) {
					if(k != i2 && m_alpha_list[k] > 0 && m_alpha_list[k] < m_f_C) {
						_TyFloat f_err_difference = fabs(f_err2 - m_error_cache[k]); // step size approximation
						if(f_diff_max < f_err_difference) {
							f_diff_max = f_err_difference;
							i1 = k;
						}
					}
				}
				if(i1 != -1) {
					if(TakeStep(i1, i2))
						return 1;
				}
			}
			// if number of non-zero & non-C alpha > 1, use second choice heuristic

			for(int n_start = int(m_random.genrand_real2() * n_point_num),
			   n_end = n_point_num + n_start, k = n_start; k < n_end; ++ k) {
				int i1 = k % n_point_num;
				if(i1 != i2 && m_alpha_list[i1] > 0 && m_alpha_list[i1] < m_f_C) {
					if(TakeStep(i1, i2))
						return 1;
				}
			}
			// loop over all non-zero and non-C alpha, starting at random point

			for(int n_start = int(m_random.genrand_real2() * n_point_num),
			   n_end = n_point_num + n_start, k = n_start; k < n_end; ++ k) {
				int i1 = k % n_point_num;
				if(i1 != i2 && (fabs(m_alpha_list[i1]) < m_f_epsilon ||
				   fabs(m_alpha_list[i1] - m_f_C) < m_f_epsilon)) {
					if(TakeStep(i1, i2))
						return 1;
				}
			}
			// loop over all possible i1, starting at a random point
		}

		return 0;
	}

	/**
	 *	@brief jointly optimizes the pair of Lagrange multipliers alpha[i1], alpha[i2]
	 *		and updates the threshold, the weight vector and the error cache
	 *	@param[in] i1 is zero-based index of the first point
	 *	@param[in] i2 is zero-based index of the second point
	 *	@return Returns true if the multipliers changed, false otherwise.
	 *	@note See http://research.microsoft.com/pubs/68391/smo-book.pdf
	 *		(equation numbers in the comments below refer to that text).
	 */
	bool TakeStep(int i1, int i2)
	{
		if(i1 == i2)
			return false;

		int y1 = n_Target(i1);
		_TyFloat f_a1_old = m_alpha_list[i1];
		_TyFloat f_err1 = (f_a1_old > 0 && f_a1_old < m_f_C)? m_error_cache[i1] : f_LearnedFunc(i1) - y1;
		//
		int y2 = n_Target(i2);
		_TyFloat f_a2_old = m_alpha_list[i2];
		_TyFloat f_err2 = (f_a2_old > 0 && f_a2_old < m_f_C)? m_error_cache[i2] : f_LearnedFunc(i2) - y2;
		// get errors, Lagrange multipliers and targets for both points

		_TyFloat L, H;
		if(y1 == y2) {
			_TyFloat f_sum = f_a1_old + f_a2_old;
			L = max(0, f_sum - m_f_C);
			H = min(m_f_C, f_sum); // equation 12.4
		} else {
			_TyFloat f_diff = f_a1_old - f_a2_old;
			L = max(0, f_diff);
			H = min(m_f_C, m_f_C + f_diff); // equation 12.3
		}
		if(L == H)
			return false; // no room to move along the constraint diagonal
		// Compute L, H

		_TyFloat f_k11 = f_KernelFunc(i1, i1);
		_TyFloat f_k12 = f_KernelFunc(i1, i2);
		_TyFloat f_k22 = f_KernelFunc(i2, i2);
		_TyFloat f_eta = f_k11 + f_k22 - 2 * f_k12; // equation 12.5
		// evaluate kernel, calculate f_eta (second derivative of the objective)

		_TyFloat f_a2_new_clipped; // new alpha2
		if(f_eta > m_f_epsilon) {
			_TyFloat f_a2_new = f_a2_old + y2 * (f_err1 - f_err2) / f_eta; // equation 12.6
			// calculate new alpha

			if(f_a2_new < L)
				f_a2_new_clipped = L;
			else if(f_a2_new > H)
				f_a2_new_clipped = H;
			else
				f_a2_new_clipped = f_a2_new; // equation 12.7
			// clip it
		} else {
			// eta not positive (degenerate case); move to whichever box end has the better objective
			_TyFloat Lobj, Hobj;
			{
				_TyFloat c1 = -f_eta / 2;
				_TyFloat c2 = y2 * (f_err1 - f_err2) + f_eta * f_a2_old;
				Lobj = (c1 * L + c2) * L;
				Hobj = (c1 * H + c2) * H;
			}
			// evaluate objective functions at f_a2_new_clipped = L, f_a2_new_clipped = H

			if(Lobj > Hobj + m_f_epsilon)
				f_a2_new_clipped = L;
			else if(Lobj < Hobj - m_f_epsilon)
				f_a2_new_clipped = H;
			else
				f_a2_new_clipped = f_a2_old;
			// clip it
		}
		// calculate new alpha 2

		_TyFloat f_a1_new;
		{
			int n_sign = y1 * y2;
			f_a1_new = f_a1_old + n_sign * (f_a2_old - f_a2_new_clipped);
			if(f_a1_new < m_f_epsilon) {
				f_a2_new_clipped += n_sign * f_a1_new; // update a2
				f_a1_new = 0;
			} else if(f_a1_new > m_f_C - m_f_epsilon) {
				f_a2_new_clipped += n_sign * (f_a1_new - m_f_C); // update a2
				f_a1_new = m_f_C;
			}
		}
		// update alpha 1 accordingly

		if(f_a2_new_clipped < 1e-8f)
			f_a2_new_clipped = 0;
		else if(f_a2_new_clipped > m_f_C - 1e-8f)
			f_a2_new_clipped = m_f_C;
		if(fabs(f_a2_new_clipped - f_a2_old) < m_f_epsilon * (f_a2_new_clipped + f_a2_old + m_f_epsilon))
			return false; // change too small to be significant
		// make sure alpha 2 is clipped (update of alpha1 might have set it wrong)

		_TyFloat f_delta_offset;
		{
			_TyFloat w1 = y1 * (f_a1_new - f_a1_old);
			_TyFloat w2 = y2 * (f_a2_new_clipped - f_a2_old);

			_TyFloat f_offset_new;
			if(f_a1_new > 0 && f_a1_new < m_f_C)
				f_offset_new = m_f_offset + f_err1 + w1 * f_k11 + w2 * f_k12;
			else if(f_a2_new_clipped > 0 && f_a2_new_clipped < m_f_C)
				f_offset_new = m_f_offset + f_err2 + w1 * f_k12 + w2 * f_k22;
			else {
				_TyFloat b1 = f_err1 + w1 * f_k11 + w2 * f_k12;
				_TyFloat b2 = f_err2 + w1 * f_k12 + w2 * f_k22;
				f_offset_new = m_f_offset + (b1 + b2) / 2; // both at bounds; take the average
			}

			f_delta_offset = f_offset_new - m_f_offset;
			m_f_offset = f_offset_new;
		}
		// Update threshold to reflect change in Lagrange multipliers

		{
			size_t n_point_num = m_p_data->n_Point_Num();
			size_t n_dimension_num = m_p_data->n_Dimension_Num();
			// get SVM size

			_TyFloat t1 = y1 * (f_a1_new - f_a1_old);
			_TyFloat t2 = y2 * (f_a2_new_clipped - f_a2_old);

			if(m_b_linear_kernel) {
				//_ASSERTE(!is_sparse_data && !is_binary);
				for(int i = 0; i < n_dimension_num; ++ i) {
					_TyFloat f_correction = f_Point_Dense(i1, i) * t1 + f_Point_Dense(i2, i) * t2;
					m_w_list[i] += f_correction;
				}
				// update weights
			}
			// Update weight vector to reflect change in a1 & f_a2_new_clipped, if SVM is linear

			{
				for(int i = 0; i < n_point_num; ++ i) {
					if(m_alpha_list[i] > 0 && m_alpha_list[i] < m_f_C) { // only the non-bound points have a valid cache entry
						_TyFloat f_correction = t1 * f_KernelFunc(i1, i) + t2 * f_KernelFunc(i2, i) - f_delta_offset;
						m_error_cache[i] += f_correction;
					}
				}
				m_error_cache[i1] = 0; // both optimized points now lie on the margin
				m_error_cache[i2] = 0;
			}
			// Update error cache using new Lagrange multipliers
		}
		// Update weight vector / error cache to reflect change in a1 & f_a2_new_clipped, if SVM is linear

		m_alpha_list[i1] = f_a1_new;
		m_alpha_list[i2] = f_a2_new_clipped;
		// update Lagrange vector

		return true;
	}

	/*_TyFloat f_dot_product_sparse_binary(int i1, int i2)
	{
		int p1=0, p2=0, dot=0;
		int num1 = ((sparse_binary_vector)sparse_binary_points.elementAt(i1)).id.size();
		int num2 = ((sparse_binary_vector)sparse_binary_points.elementAt(i2)).id.size();

		while (p1 < num1 && p2 < num2) 
		{
			int a1 = object2int(((sparse_binary_vector)sparse_binary_points.elementAt(i1)).id.elementAt(p1));
			int f_a2_new_clipped = object2int(((sparse_binary_vector)sparse_binary_points.elementAt(i2)).id.elementAt(p2));
			if(a1 == f_a2_new_clipped) 
			{
				dot++;
				p1++;
				p2++;
			}
			else if(a1 > f_a2_new_clipped)
				p2++;
			else
				p1++;
		}
		return (_TyFloat)dot;
	}


	_TyFloat f_dot_product_sparse_nonbinary(int i1, int i2)
	{
		int p1=0, p2=0;
		_TyFloat dot = 0;
		int num1 = ((sparse_vector)sparse_points.elementAt(i1)).id.size();
		int num2 = ((sparse_vector)sparse_points.elementAt(i2)).id.size();
 
		while (p1 < num1 && p2 < num2) 
		{
			int a1 = object2int(((sparse_vector)sparse_points.elementAt(i1)).id.elementAt(p1));
			int f_a2_new_clipped = object2int(((sparse_vector)sparse_points.elementAt(i2)).id.elementAt(p2));
			if(a1 == f_a2_new_clipped) 
			{
				_TyFloat val1 = object2float(((sparse_vector)sparse_points.elementAt(i1)).val.elementAt(p1));
				_TyFloat val2 = object2float(((sparse_vector)sparse_points.elementAt(i2)).val.elementAt(p2));
	  
				dot += val1 * val2;
				p1++;
				p2++;
			}
			else if(a1 > f_a2_new_clipped)
				p2++;
			else
				p1++;
		}

		return (_TyFloat)dot;
	}

	_TyFloat f_learned_func_linear_sparse_binary(int k) 
	{
		_TyFloat s = 0;
		int temp =0;
		for (int i=0; i<((sparse_binary_vector)sparse_binary_points.elementAt(k)).id.size(); i++)
  
		{
			temp =object2int(((sparse_binary_vector)sparse_binary_points.elementAt(i)).id.elementAt(i));
			s += m_w_list[temp];
		}

		s -= m_f_offset;
		return s;
	}
	
	_TyFloat f_learned_func_linear_sparse_nonbinary(int k) 
	{
		_TyFloat s = 0;

		for (int i=0; i<((sparse_vector)sparse_points.elementAt(k)).id.size(); i++)
		{
	
			int j = object2int (((sparse_vector)sparse_points.elementAt(k)).id.elementAt(i));
   
			_TyFloat v = object2float (((sparse_vector)sparse_points.elementAt(k)).val.elementAt(i));
	 
			s += m_w_list[j] * v;
		}
		s -= m_f_offset;
		return s;
	}*/

	/**
	 *	@brief evaluates the linear decision function <w, x_k> - b (dense data)
	 */
	_TyFloat f_learned_func_linear_dense(int k) const
	{
		_TyFloat s = 0;
		for(int i = 0, n_dimension_num = m_p_data->n_Dimension_Num(); i < n_dimension_num; ++ i)
			s += m_w_list[i] * f_Point_Dense(k, i);
		return s - m_f_offset;
	}

	/**
	 *	@brief evaluates the kernelized decision function
	 *		sum_i alpha_i y_i K(x_i, x_k) - b (only support vectors contribute)
	 */
	_TyFloat f_learned_func_nonlinear(int k) const
	{
		_TyFloat s = 0;
		for(int i = 0, n_point_num = m_p_data->n_Point_Num(); i < n_point_num; ++ i) {
			if(m_alpha_list[i] != 0) // skip the non-support vectors
				s += m_alpha_list[i] * n_Target(i) * f_KernelFunc(i, k);
		}
		return s - m_f_offset;
	}

	/**
	 *	@brief evaluates the dot product of two dense training points
	 */
	_TyFloat f_dot_product_dense(int i1, int i2) const
	{
		_TyFloat dot = 0;
		for(int i = 0, n_dimension_num = m_p_data->n_Dimension_Num(); i < n_dimension_num; ++ i)
			dot += f_Point_Dense(i1, i) * f_Point_Dense(i2, i);
		return dot;
	}

	/**
	 *	@brief evaluates the RBF kernel exp(-|x1 - x2|^2 / two_sigma_squared),
	 *		using |x1 - x2|^2 = <x1, x1> + <x2, x2> - 2<x1, x2> from the precomputed table
	 */
	_TyFloat f_RBF_Kernel(int i1, int i2) const
	{
		_TyFloat s = f_precomputed_dot_product(i1, i2) * -2;
		s += f_precomputed_dot_product(i1, i1) + f_precomputed_dot_product(i2, i2);
		return _TyFloat(exp(_TyFloat(-s / m_f_two_sigma_squared)));
	}

	/**
	 *	@brief gets the label of a training point
	 *	@note The data model stores the target as floating point; it is
	 *		truncated to int here (labels are presumably +/-1 - confirm).
	 */
	inline int n_Target(int p) const
	{
		return m_p_data->f_Get_Target(p);
	}

	/**
	 *	@brief gets a single coordinate of a dense training point
	 */
	inline float f_Point_Dense(int p, int d) const
	{
		return m_p_data->f_Get_Point(p, d);
	}

	/**
	 *	@brief read access to the precomputed dot product table
	 *	@note The table is symmetric, so the transposed indexing is harmless.
	 */
	inline _TyFloat f_precomputed_dot_product(int p1, int p2) const
	{
		return m_precomputed_dot_product[p1 + m_n_pdp_row_length * p2];
	}

	/**
	 *	@brief write access to the precomputed dot product table
	 */
	inline _TyFloat &f_precomputed_dot_product(int p1, int p2)
	{
		return m_precomputed_dot_product[p1 + m_n_pdp_row_length * p2];
	}
};

#endif //__SMO_SOFT_INCLUDED
