/*
 */

#include "../../UberLame_src/NewFix.h"
#include "../../UberLame_src/CallStack.h"
#include <vector>
#include <string>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <conio.h>
#include "../../UberLame_src/Integer.h"
#include "../../UberLame_src/MinMax.h"
#include "../../UberLame_src/StlUtils.h"
#include "../../UberLame_src/Mersene.h"
#include "../../UberLame_src/Vector.h"
#include "../../UberLame_src/Timer.h"
#include "../../UberLame_src/Dir.h"
#include <cuda.h>
#include <cublas.h>
#include "../CuUtils.h"
#include "SMOonGPU.h"

/**
 *	@brief determines whether a number is a power of two
 *
 *	@param[in] x is the number to be tested
 *
 *	@return Returns true if x is a (positive) power of two, otherwise false.
 *
 *	@note A power of two has exactly one bit set, so x & (x - 1) (which clears
 *		the lowest set bit) is zero iff at most one bit was set. Zero must be
 *		excluded explicitly: 0 & 0xffffffff == 0, yet zero is not a power of
 *		two (the previous version incorrectly reported true for x == 0).
 */
bool b_PowerOfTwo(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

/**
 *	@brief rounds a number up to the nearest power of two
 *
 *	Uses the classic "smear the bits right, then increment" trick: after the
 *	shifts, all bits below the highest set bit are set, so adding one yields
 *	the next power of two. The decrement first makes exact powers of two map
 *	to themselves.
 *
 *	@param[in] x is the number to round up (expected non-negative)
 *
 *	@return Returns the smallest power of two greater than or equal to x
 *		(x == 0 yields 0, matching the original behavior).
 *
 *	@note The smearing is done in unsigned arithmetic: for x == 0 the
 *		decrement produces -1, and right-shifting a negative signed value is
 *		implementation-defined in C++ (the previous version relied on
 *		arithmetic shift behavior).
 */
int n_Nearest_PowerOfTwo(int x)
{
	unsigned int y = (unsigned int)(x) - 1;
	y |= y >> 1;
	y |= y >> 2;
	y |= y >> 4;
	y |= y >> 8;
	y |= y >> 16;
	return (int)(y + 1);
}

/**
 *	@brief calculates the base-two logarithm of a power of two
 *
 *	@param[in] x is the input value; must be a power of two
 *		(checked by assertion in debug builds)
 *
 *	@return Returns log2(x), i.e. the index of the single set bit.
 */
int n_Log2(int x)
{
	_ASSERTE(b_PowerOfTwo(x));
	// count how many times x can be halved before it reaches zero
	int n_result = 0;
	while(x >>= 1)
		++ n_result;
	return n_result;
}

/**
 *	@brief constructor; resolves the CUDA kernel entry points from the given
 *		module and allocates the fixed-size reduction result buffers
 *
 *	@param[in] h_module is a loaded CUDA module (driver API) that must export
 *		the SMO kernels named below, plus ten "Evaluate_NonlinearKernel_<2^i>"
 *		and ten "SimpleReduce_<2^i>" block-size specializations (i = 0 .. 9)
 *
 *	@note On any CUDA failure this prints an error to stderr and calls
 *		exit(-1); it never reports failure to the caller.
 */
CSMOonGPU::CSMOonGPU(CUmodule h_module)
	:m_p_data(0), m_dp_alpha_list(0), m_p_alpha_list(0), m_dp_w_list(0),
	m_dp_error_cache(0), m_p_error_cache(0),
	m_dp_precomputed_kernel(0), m_p_precomputed_kernel(0),
	m_dp_dense_targets(0), m_dp_dense_points(0), m_p_reduction_list(0), m_dp_reduction_list(0), m_dp_reduction_helper(0)
{
	// note all buffer pointers are zero-initialized above so the destructor
	// can safely free only what was actually allocated

	m_f_C = 64; // default regularization constant (overwritten by Train())
	m_f_tolerance = .001f; // default KKT tolerance (overwritten by Train())
	m_f_epsilon = .001f; // default numerical epsilon (overwritten by Train())

	m_b_linear_kernel = false; // default to the nonlinear (RBF) kernel
	m_f_two_sigma_squared = 2; // RBF kernel width parameter (2 * sigma^2)

	m_f_offset = 0; // decision function threshold (the "b" term)

	m_random.init_genrand(123456); // fixed seed - training runs are reproducible

	m_h_module = h_module; // get CUDA module

	if(cuModuleGetFunction(&h_precalc_dp, m_h_module, "PrecalcDotProds") != CUDA_SUCCESS ||
	   cuModuleGetFunction(&h_precalc_rbf_o, m_h_module, "PrecalcKernel_RBF_others") != CUDA_SUCCESS ||
	   cuModuleGetFunction(&h_precalc_rbf_d, m_h_module, "PrecalcKernel_RBF_diagonal") != CUDA_SUCCESS ||
	   cuModuleGetFunction(&h_update_w, m_h_module, "Update_WeightVector") != CUDA_SUCCESS ||
	   cuModuleGetFunction(&h_update_err, m_h_module, "Update_ErrorCache") != CUDA_SUCCESS ||
	   cuModuleGetFunction(&h_eval_nl_kernel, m_h_module, "Evaluate_NonlinearKernel") != CUDA_SUCCESS) {
		fprintf(stderr, "error: cuModuleGetFunction() failed\n");
		exit(-1);
		return;
	}
	// get CUDA functions

	// the reduction kernels are compiled for each power-of-two block size
	// 1 .. 512 (the "_%d" suffix); the right one is picked at runtime via
	// p_kernel_reduction[n_Log2(block_size)] in f_learned_func_nonlinear()
	for(int i = 0; i < 10; ++ i) {
		std::string s_function_name;
		stl_ut::Format(s_function_name, "Evaluate_NonlinearKernel_%d", 1 << i);
		if(cuModuleGetFunction(&p_kernel_reduction[i], h_module, s_function_name.c_str()) != CUDA_SUCCESS) {
			fprintf(stderr, "error: failed find CUDA kernel entry point\n");
			exit(-1);
			return;
		}
		stl_ut::Format(s_function_name, "SimpleReduce_%d", 1 << i);
		if(cuModuleGetFunction(&p_simple_reduction[i], h_module, s_function_name.c_str()) != CUDA_SUCCESS) {
			fprintf(stderr, "error: failed find CUDA kernel entry point\n");
			exit(-1);
			return;
		}
	}
	// get function pointers

	// dp_results_tmp must hold one partial sum per first-level reduction
	// block; the grid size is clamped to 1024 in f_learned_func_nonlinear(),
	// hence the 1024-element allocation
	if(cuMemAlloc(&dp_results, 1000 * sizeof(float)) != CUDA_SUCCESS ||
		cuMemAlloc(&dp_results_tmp, 1024 * sizeof(float)) != CUDA_SUCCESS) { // fixed size
		fprintf(stderr, "error: failed to alloc arrays for reduction kernel\n");
		exit(-1);
		return;
	}
}

/**
 *	@brief destructor; releases all device and host buffers
 *
 *	All pointers are zero-initialized in the constructor and only set on
 *	successful allocation, so the null checks make partial teardown safe.
 */
CSMOonGPU::~CSMOonGPU()
{
	// fixed-size reduction result buffers (allocated in the constructor)
	if(dp_results)
		cuMemFree(dp_results);
	if(dp_results_tmp)
		cuMemFree(dp_results_tmp);

	// per-point Lagrange multipliers (device + host mirror) and weights
	if(m_dp_alpha_list)
		cuMemFree(m_dp_alpha_list);
	if(m_p_alpha_list)
		delete[] m_p_alpha_list;
	if(m_dp_w_list)
		cuMemFree(m_dp_w_list);

	// error cache and precomputed kernel matrix (device + host mirrors)
	if(m_dp_error_cache)
		cuMemFree(m_dp_error_cache);
	if(m_p_error_cache)
		delete[] m_p_error_cache;
	if(m_dp_precomputed_kernel)
		cuMemFree(m_dp_precomputed_kernel);
	if(m_p_precomputed_kernel)
		delete[] m_p_precomputed_kernel;

	// nonlinear-kernel reduction scratch buffers (allocated in SetModel())
	if(m_dp_reduction_list)
		cuMemFree(m_dp_reduction_list);
	if(m_dp_reduction_helper)
		cuMemFree(m_dp_reduction_helper);
	if(m_p_reduction_list)
		delete[] m_p_reduction_list;

	// training data uploaded in SetModel()
	if(m_dp_dense_points)
		cuMemFree(m_dp_dense_points);
	if(m_dp_dense_targets)
		cuMemFree(m_dp_dense_targets);
}

/**
 *	@brief calculates the classification error rate on the training set
 *
 *	A point counts as an error when the sign of the learned decision
 *	function disagrees with the sign of its target.
 *
 *	@return Returns the fraction of misclassified training points in [0, 1];
 *		returns 0 for an empty model (avoids division by zero).
 *
 *	@note Requires SetModel() to have succeeded (m_p_data must be set);
 *		this is an O(n) loop where each f_LearnedFunc() call may involve
 *		GPU work, so it is intended for occasional / verbose use only.
 */
double CSMOonGPU::f_ErrorRate() const
{
	size_t n_point_num = m_p_data->n_Point_Num();
	if(!n_point_num)
		return 0; // empty model; the division below would be undefined
	int n_error = 0;
	for(size_t i = 0; i < n_point_num; ++ i) {
		if((f_LearnedFunc(i) > 0) != (m_p_data->f_Get_Target(i) > 0))
			++ n_error;
	}
	return double(n_error) / n_point_num;
}

/**
 *	@brief evaluates the learned decision function for a training point
 *
 *	@param[in] n_point is a zero-based index of the training point
 *
 *	@return Returns the decision function value (offset already subtracted).
 */
float CSMOonGPU::f_LearnedFunc(size_t n_point) const
{
	// dispatch on the kernel type selected in SetModel()
	return (m_b_linear_kernel)?
		f_learned_func_linear_dense(n_point) :
		f_learned_func_nonlinear(n_point);
}

/**
 *	@brief sets the training data model and uploads it to the GPU
 *
 *	Allocates and zero-initializes all per-point buffers: targets and points
 *	on the device, the error cache and alpha list (device + host mirrors),
 *	the nonlinear-kernel reduction scratch buffers, the linear-kernel weight
 *	vector and the n x n precomputed kernel matrix.
 *
 *	@param[in] p_data is the training data; must be a dense model exposing
 *		single-precision points and integer targets
 *	@param[in] b_use_linear_kernel selects the linear kernel (true)
 *		or the RBF kernel (false)
 *
 *	@return Returns true on success, false on failure (unsupported model or
 *		CUDA allocation / copy error); on failure m_p_data remains null so a
 *		subsequent Train() call fails gracefully.
 */
bool CSMOonGPU::SetModel(CDataModel *p_data, bool b_use_linear_kernel)
{
	m_p_data = 0;
	// clear data model (to mark error)

	if(p_data->b_IsSparse())
		return false;
	// just dense models atm

	CDenseDataModel *p_dense = (CDenseDataModel*)p_data;
	// get dense data model

	if(!p_dense->p_GetPointsS() || !p_dense->p_GetTargetsI())
		return false;
	// points must be single-precision floats, targets must be integers

	size_t n_point_num = p_data->n_Point_Num();
	size_t n_dimension_num = p_data->n_Dimension_Num();
	// get SVM size

	m_b_linear_kernel = b_use_linear_kernel;
	// get type of kernel

	m_n_dimension_num = n_dimension_num;
	if(cuMemAlloc(&m_dp_dense_targets, n_point_num * sizeof(int)) != CUDA_SUCCESS ||
	   cuMemAlloc(&m_dp_dense_points, n_point_num * n_dimension_num * sizeof(float)) != CUDA_SUCCESS ||
	   cuMemcpyHtoD(m_dp_dense_targets, p_dense->p_GetTargetsI(), n_point_num * sizeof(int)) != CUDA_SUCCESS ||
	   cuMemcpyHtoD(m_dp_dense_points, p_dense->p_GetPointsS(), n_point_num * n_dimension_num * sizeof(float)) != CUDA_SUCCESS)
		return false;
	// alloc points, copy data
	// note the last comparison was previously missing the explicit
	// "!= CUDA_SUCCESS" and only worked because CUDA_SUCCESS == 0

	if(cuMemAlloc(&m_dp_error_cache, n_point_num * sizeof(float)) != CUDA_SUCCESS ||
	   cuMemsetD32(m_dp_error_cache, 0, n_point_num) != CUDA_SUCCESS ||
	   !(m_p_error_cache = new(std::nothrow) float[n_point_num]))
		return false;
	memset(m_p_error_cache, 0, n_point_num * sizeof(float));
	// alloc error cache (and clear to all 0-s)

	if(cuMemAlloc(&m_dp_alpha_list, n_point_num * sizeof(float)) != CUDA_SUCCESS ||
	   cuMemsetD32(m_dp_alpha_list, 0, n_point_num) != CUDA_SUCCESS ||
	   !(m_p_alpha_list = new(std::nothrow) float[n_point_num]))
		return false;
	memset(m_p_alpha_list, 0, n_point_num * sizeof(float));
	// alloc alphas (and clear to all 0-s)

	if(!m_b_linear_kernel) {
		int n_reduction_size = n_point_num; // multiply to n_point_num first, then reduce (debug)
		union {
			float f;
			unsigned int ui;
		} one;
		one.f = 1;
		// the helper buffer is filled with 1.0f so that cublasSdot() against
		// it amounts to a plain sum; cuMemsetD32 takes the raw bit pattern,
		// hence the union-based type pun
		if(cuMemAlloc(&m_dp_reduction_list, n_reduction_size * sizeof(float)) != CUDA_SUCCESS ||
		   cuMemAlloc(&m_dp_reduction_helper, n_reduction_size * sizeof(float)) != CUDA_SUCCESS ||
		   !(m_p_reduction_list = new(std::nothrow) float[n_reduction_size]) ||
		   cuMemsetD32(m_dp_reduction_helper, one.ui, n_reduction_size) != CUDA_SUCCESS)
			return false;
	}
	// alloc buffer for vector reduction

	if(m_b_linear_kernel) {
		if(cuMemAlloc(&m_dp_w_list, n_dimension_num * sizeof(float)) != CUDA_SUCCESS ||
		   cuMemsetD32(m_dp_w_list, 0, n_dimension_num) != CUDA_SUCCESS)
			return false;
	}
	// alloc weight list (linear kernels only)

	if(cuMemAlloc(&m_dp_precomputed_kernel, n_point_num * n_point_num * sizeof(float)) != CUDA_SUCCESS ||
	   !(m_p_precomputed_kernel = new(std::nothrow) float[n_point_num * n_point_num]))
		return false;
	m_n_point_num = n_point_num;
	// alloc precalculated kernel (or self dot-products)
	// @todo impractical one! (there must be threshold for n_point_num)
	// @note 15000 x 15000 x sizeof(float) = 858 MB, GeForce GTX 260 has 895.69 MB RAM (without framebuffers, etc.)

	m_p_data = p_data;
	// set data model (success)

	return true;
}

/**
 *	@brief trains the SVM using the SMO algorithm
 *
 *	First precomputes the full kernel matrix on the GPU (dot products, then
 *	optionally the RBF transformation), then runs the sequential minimal
 *	optimization outer loop on the CPU, calling ExamineExample() / TakeStep()
 *	which launch the update kernels.
 *
 *	@param[in] f_C is the SVM regularization constant
 *	@param[in] f_tolerance is the KKT-violation tolerance
 *	@param[in] f_epsilon is the numerical epsilon for alpha comparisons
 *	@param[in] n_max_iters is the maximal number of outer-loop passes
 *	@param[in] b_verbose enables periodic progress output to stdout
 *
 *	@return Returns true on convergence, false if SetModel() was not
 *		(successfully) called or the iteration limit was exceeded.
 */
bool CSMOonGPU::Train(float f_C, float f_tolerance, float f_epsilon, size_t n_max_iters, bool b_verbose)
{
	if(!m_p_data)
		return false;
	// SetModel() either failed, or wasn't called at all

	size_t n_point_num = m_p_data->n_Point_Num();
	size_t n_dimension_num = m_p_data->n_Dimension_Num();
	// get SVM size

	// reset all state so Train() can be called repeatedly on the same model
	cuMemsetD32(m_dp_error_cache, 0, n_point_num);
	memset(m_p_error_cache, 0, n_point_num * sizeof(float));
	cuMemsetD32(m_dp_alpha_list, 0, n_point_num);
	memset(m_p_alpha_list, 0, n_point_num * sizeof(float));
	if(m_b_linear_kernel)
		cuMemsetD32(m_dp_w_list, 0, n_dimension_num);
	m_f_offset = 0;
	// make sure everything is zero

	{
		{
			int bs = 16;
			int gs = (n_point_num + bs - 1) / bs;
			// block size, grid size (16x16 thread blocks over the n x n matrix)

			cuParamsSet4(h_precalc_dp, DevPtr(m_dp_precomputed_kernel), DevPtr(m_dp_dense_points),
				int(n_point_num), int(n_dimension_num));
			cuFuncSetBlockShape(h_precalc_dp, bs, bs, 1);
			cuLaunchGrid(h_precalc_dp, gs, gs);
			// calculate dot products

			/*int n_error_num = 0;
			float *p_dp_test = new(std::nothrow) float[n_point_num * n_point_num];
			cuMemcpyDtoH(p_dp_test, m_dp_precomputed_kernel, n_point_num * n_point_num * sizeof(float));
			for(int x = 0; x < n_point_num; ++ x) {
				for(int y = 0; y < n_point_num; ++ y) {
					float f_cpu = 0;
					for(int i = 0; i < n_dimension_num; ++ i)
						f_cpu += m_p_data->f_Get_Point(x, i) * m_p_data->f_Get_Point(y, i);
					// calculate dp on CPU

					float f_gpu = p_dp_test[x + n_point_num * y];
					if(fabs(f_cpu - f_gpu) > 1e-3f)
						++ n_error_num;
				}
			}
			delete[] p_dp_test;
			fprintf(stderr, "dot product precalc kernel: %d errors\n", n_error_num);*/
			// test whether are dot products calculated correctly (0 errors with threshold 1e-6 on test cloud of 1000 points)
		}
		// calculate self-dot products

		if(!m_b_linear_kernel) {
			// turn the dot-product matrix into the RBF kernel matrix; the
			// off-diagonal and diagonal entries are handled by two separate
			// kernels (the diagonal needs the self-dot products which the
			// off-diagonal pass consumes)
			{
				int bs = 16;
				int gs = (n_point_num + bs - 1) / bs;
				// block size, grid size

				cuParamsSet3(h_precalc_rbf_o, DevPtr(m_dp_precomputed_kernel),
					int(n_point_num), float(m_f_two_sigma_squared));
				cuFuncSetBlockShape(h_precalc_rbf_o, bs, bs, 1);
				cuLaunchGrid(h_precalc_rbf_o, gs, gs);
				// calculate RBF kernel everywhere outside diagonal
			}
			{
				int bs = 256;
				int gs = (n_point_num + bs - 1) / bs;
				// block size, grid size

				cuParamsSet2(h_precalc_rbf_d, DevPtr(m_dp_precomputed_kernel), int(n_point_num));
				cuFuncSetBlockShape(h_precalc_rbf_d, bs, 1, 1);
				cuLaunchGrid(h_precalc_rbf_d, gs, 1);
				// calculate RBF kernel on the diagonal
			}

			/*int n_error_num = 0;
			float *p_dp_test = new(std::nothrow) float[n_point_num * n_point_num];
			cuMemcpyDtoH(p_dp_test, m_dp_precomputed_kernel, n_point_num * n_point_num * sizeof(float));
			for(int x = 0; x < n_point_num; ++ x) {
				for(int y = 0; y < n_point_num; ++ y) {
					float f_cpu11 = 0;
					float f_cpu12 = 0;
					float f_cpu22 = 0;
					for(int i = 0; i < n_dimension_num; ++ i) {
						f_cpu11 += m_p_data->f_Get_Point(x, i) * m_p_data->f_Get_Point(x, i);
						f_cpu12 += m_p_data->f_Get_Point(x, i) * m_p_data->f_Get_Point(y, i);
						f_cpu22 += m_p_data->f_Get_Point(y, i) * m_p_data->f_Get_Point(y, i);
					}
					// calculate dp's on CPU

					float f_cpu = exp(-(f_cpu12 * -2 + f_cpu11 + f_cpu22) / m_f_two_sigma_squared);

					float f_gpu = p_dp_test[x + n_point_num * y];
					if(fabs(f_cpu - f_gpu) > 1e-6f)
						++ n_error_num;
				}
			}
			delete[] p_dp_test;
			fprintf(stderr, "rbf kernel product precalc kernel: %d errors\n", n_error_num);*/
		}
		// modify self dot-products to be a RBF kernel (only when doing non-linear blah)
	}
	// precalculate kernel function

	cuMemcpyDtoH(m_p_precomputed_kernel, m_dp_precomputed_kernel, n_point_num * n_point_num * sizeof(float));
	// copy kernel function to CPU as well
	// (this also synchronizes with the kernel launches above)

	m_f_C = f_C;
	m_f_tolerance = f_tolerance;
	m_f_epsilon = f_epsilon;
	// set parameters

	CTimer timer;
	double f_verbose_time = 5;
	// verbose timer

	{
		// SMO outer loop: alternate between sweeping all points and sweeping
		// only the unbound ones (0 < alpha < C), until a full sweep makes no
		// change (standard SMO convergence criterion)
		int n_change_num = 0;
		bool b_examine_all = true;
		int n_opts = 0;
		for(size_t n_iter = 0; n_change_num || b_examine_all; ++ n_iter) {
			n_change_num = 0;
			if(b_examine_all) {
				for(int i = 0; i < n_point_num; ++ i)
					n_change_num += (ExamineExample(i))? 1 : 0;
			} else { 
				for(int i = 0; i < n_point_num; ++ i) {
					if(m_p_alpha_list[i] > 0 && m_p_alpha_list[i] < m_f_C)
						n_change_num += (ExamineExample(i))? 1 : 0;
				}
			}

			n_opts += n_change_num;

			if(b_examine_all)
				b_examine_all = false;
			else if(!n_change_num)
				b_examine_all = true;

			if(b_verbose && timer.f_Time() > f_verbose_time) {
				float f_time = timer.f_Time();
				f_verbose_time = f_time + 1;
				printf("SVM learned %.2f%% of training examples (time: " PRItime ", pass: %d) \r",
					(1 - f_ErrorRate()) * 100, PRItimeparams(f_time), n_iter);
			}
			// print error rate

			if(n_iter > n_max_iters)
				return false;
			// we failed to learn in given number of iterations

			// NOTE(review): the loop indices above are int while n_point_num
			// is size_t; fine for models below 2^31 points, which the n x n
			// kernel matrix already implies
		}
		// SMO

		if(b_verbose)
			printf("finished after %d iterations\n", n_opts);
	}
	// run SMO

	return true;
}

/**
 *	@brief looks up the kernel function value for a pair of training points
 *
 *	@param[in] i is a zero-based index of the first point
 *	@param[in] j is a zero-based index of the second point
 *
 *	@return Returns K(i, j) from the host-side copy of the precomputed
 *		kernel matrix (filled in Train()).
 */
float CSMOonGPU::f_KernelFunc(size_t i, size_t j) const
{
	size_t n_index = j * m_n_point_num + i; // column j, row i
	return m_p_precomputed_kernel[n_index];
}

/**
 *	@brief evaluates the linear decision function w . x_k - b for point k
 *
 *	@param[in] k is a zero-based index of the training point
 *
 *	@return Returns the decision function value.
 *
 *	@note The dot product is computed by cuBLAS directly on the device
 *		copies of the weight vector and the point data; cublasSdot() blocks
 *		until the result is available on the host.
 */
float CSMOonGPU::f_learned_func_linear_dense(size_t k) const
{
	float gpus = cublasSdot(m_n_dimension_num, (float*)DevPtr(m_dp_w_list), 1,
		((float*)DevPtr(m_dp_dense_points)) + k * m_n_dimension_num, 1);
	// use cublas to calculate a single dot product quickly
	// (points are stored contiguously, one point per m_n_dimension_num floats)

	/*float cpus = 0;
	for(int i = 0, n_dimension_num = m_p_data->n_Dimension_Num(); i < n_dimension_num; ++ i)
		cpus += m_p_w_list[i] * f_Point_Dense(k, i);
	if(fabs(s - gpus) > 1e-3)
		fprintf(stderr, "error: f_learned_func_linear_dense() diverging\n");
	// test corectness using CPU*/
	// untested

	return gpus - m_f_offset;
}

/**
 *	@brief evaluates the nonlinear decision function for point k:
 *		sum_i alpha_i * y_i * K(i, k) - b
 *
 *	The sum is computed on the GPU with a two-level reduction: a first-level
 *	kernel multiplies the kernel-matrix column by alpha * target and reduces
 *	within each block, then (if more than one block was launched) a simple
 *	reduction kernel collapses the per-block partial sums to one value.
 *
 *	@param[in] k is a zero-based index of the training point
 *
 *	@return Returns the decision function value.
 */
float CSMOonGPU::f_learned_func_nonlinear(size_t k) const
{
	const int n_point_num = m_p_data->n_Point_Num();

#if 0
	// reference CPU implementation, kept for debugging
	float cpus = 0;
	for(int i = 0; i < n_point_num; ++ i) {
		if(m_p_alpha_list[i] != 0)
			cpus += m_p_alpha_list[i] * n_Target(i) * f_KernelFunc(i, k);
	}
#endif

	float gpus = 0;
#if 0
	// older GPU path: elementwise-multiply kernel followed by cublasSdot()
	// against an all-ones helper vector (kept for debugging / comparison)
	{
		int bs = 256;
		int gs = (n_point_num + bs - 1) / bs;
		// block size, grid size

		cuParamsSet6(h_eval_nl_kernel, DevPtr(m_dp_reduction_list), DevPtr(m_dp_precomputed_kernel),
			DevPtr(m_dp_alpha_list), DevPtr(m_dp_dense_targets), int(n_point_num), int(k * n_point_num));
		cuFuncSetBlockShape(h_eval_nl_kernel, bs, 1, 1);
		cuLaunchGrid(h_eval_nl_kernel, gs, 1);
		// execute kernel

		gpus/*float gpussum*/ = cublasSdot(n_point_num, (float*)DevPtr(m_dp_reduction_list), 1,
												(float*)DevPtr(m_dp_reduction_helper), 1);

		/*cuMemcpyDtoH(m_p_reduction_list, m_dp_reduction_list, n_point_num * sizeof(float));
		// copy rlist co CPU

		for(int i = 0; i < n_point_num; ++ i)
			gpus += m_p_reduction_list[i];
		// do summation on CPU

		if(fabs(gpus - gpussum) > 1e-3f)
			fprintf(stderr, "error: cpu summation %f vs. gpu sommation %f\n", gpus, gpussum);*/
	}
#endif

	{
		int n = n_point_num;
		int bs = min(n_Nearest_PowerOfTwo(n), 512); // depends on vector length (harris uses min(npot(n/2), 512))
		int gs = min(1024, int(n + (bs * 2) - 1) / (bs * 2));
		//_ASSERTE(b_PowerOfTwo(gs)); // this holds even if vector length isn't power of two

		// the reduction kernel is specialized per block size; pick the
		// variant matching bs (bs is a power of two, so n_Log2 is exact)
		CUfunction h_kernel = p_kernel_reduction[n_Log2(bs)];
		if(gs > 1) {
			cuParamsSet7(h_kernel, DevPtr(dp_results_tmp), int(0),
				DevPtr(m_dp_precomputed_kernel), DevPtr(m_dp_alpha_list), DevPtr(m_dp_dense_targets), int(n_point_num), int(k * n_point_num));
			// output to temp array
		} else {
			cuParamsSet7(h_kernel, DevPtr(dp_results), int(0),
				DevPtr(m_dp_precomputed_kernel), DevPtr(m_dp_alpha_list), DevPtr(m_dp_dense_targets), int(n_point_num), int(k * n_point_num));
			// output directly to result set
		}
		cuFuncSetBlockShape(h_kernel, bs, 1, 1);
		cuLaunchGrid(h_kernel, gs, 1);
		// first level reduction (eats any amount of data via looping)

		if(gs > 1) {
			//_ASSERTE(b_PowerOfTwo(gs));
			// second level: reduce the gs per-block partial sums in a single block
			int n = gs;
			int bs2 = min(n_Nearest_PowerOfTwo(n), 512);
			int gs2 = 1;

			CUfunction h_kernel2 = p_simple_reduction[n_Log2(bs2)];
			cuParamsSet5(h_kernel2, DevPtr(dp_results), int(0), DevPtr(dp_results_tmp), int(0), int(n));
			cuFuncSetBlockShape(h_kernel2, bs2, 1, 1);
			cuLaunchGrid(h_kernel2, gs2, 1);
			// second step reduction
		}

		float f_result;
		cuMemcpyDtoH(&f_result, dp_results, 1 * sizeof(float));
		// copy from cuda (blocking copy; also synchronizes with the launches above)

		gpus = f_result;

		/*if(fabs(cpus - gpus) > 1e-3f)
			fprintf(stderr, "error: cpu summation %f vs. gpu sommation %f\n", cpus, gpus);*/
	}

	return gpus - m_f_offset;
}

/**
 *	@brief gets the target (class label) of a training point
 *
 *	@param[in] i is a zero-based index of the training point
 *
 *	@return Returns the target converted to an integer.
 */
int CSMOonGPU::n_Target(size_t i) const
{
	// the data model exposes targets as floats; convert to int
	// (same truncating conversion the implicit return performed)
	float f_target = m_p_data->f_Get_Target(i);
	return int(f_target);
}

/**
 *	@brief examines a training point and, if it violates the KKT conditions,
 *		tries to optimize it jointly with a second point (SMO inner loop)
 *
 *	Candidate partners are tried in three stages: the second-choice heuristic
 *	(maximal error difference among unbound points), then all unbound points
 *	starting at a random offset, then all remaining points starting at a
 *	random offset. The first TakeStep() that succeeds wins.
 *
 *	@param[in] i2 is a zero-based index of the point to examine
 *
 *	@return Returns 1 if an optimization step was taken, 0 otherwise.
 */
int CSMOonGPU::ExamineExample(int i2)
{
	int y2 = n_Target(i2);
	float f_a2 = m_p_alpha_list[i2];
 	float f_err2 = (f_a2 > 0 && f_a2 < m_f_C)? m_p_error_cache[i2] : f_LearnedFunc(i2) - y2;
	// get target, alpha and error for i2
	// (the error cache is only maintained for unbound alphas; bound ones
	// are recomputed from the learned function)

	size_t n_point_num = m_p_data->n_Point_Num();

	float r2 = f_err2 * y2;
	if((r2 < -m_f_tolerance && f_a2 < m_f_C) || (r2 > m_f_tolerance && f_a2 > 0)) {
		// i2 violates the KKT conditions by more than the tolerance
		{
			int i1 = -1;
			float f_diff_max = 0;
			for(int k = 0; k < n_point_num; ++ k) {
				if(k != i2 && m_p_alpha_list[k] > 0 && m_p_alpha_list[k] < m_f_C) {
					float f_err_difference = fabs(f_err2 - m_p_error_cache[k]); // step size approximation
					if(f_diff_max < f_err_difference) {
						f_diff_max = f_err_difference;
						i1 = k;
					}
				}
			}
			if(i1 != -1) {
				if(TakeStep(i1, i2))
					return 1;
			}
		}
		// if number of non-zero & non-C alpha > 1, use second choice heuristic

		for(int n_start = int(m_random.genrand_real2() * n_point_num),
		   n_end = n_point_num + n_start, k = n_start; k < n_end; ++ k) {
			int i1 = k % n_point_num;
			if(i1 != i2 && m_p_alpha_list[i1] > 0 && m_p_alpha_list[i1] < m_f_C) {
				if(TakeStep(i1, i2))
					return 1;
			}
		}
		// loop over all non-zero and non-C alpha, starting at random point
		// (random start avoids bias towards points at the beginning of the set)

		for(int n_start = int(m_random.genrand_real2() * n_point_num),
		   n_end = n_point_num + n_start, k = n_start; k < n_end; ++ k) {
			int i1 = k % n_point_num;
			if(i1 != i2 && (fabs(m_p_alpha_list[i1]) < m_f_epsilon ||
			   fabs(m_p_alpha_list[i1] - m_f_C) < m_f_epsilon)) {
				if(TakeStep(i1, i2))
					return 1;
			}
		}
		// loop over all possible i1, starting at a random point
	}

	return 0;
}

/**
 *	@brief jointly optimizes the Lagrange multipliers of two training points
 *		(the analytic SMO sub-problem)
 *
 *	Solves the two-variable QP analytically, clips the new alphas to the box
 *	constraints, updates the threshold (offset), and launches GPU kernels to
 *	update the weight vector (linear kernel) and the error cache.
 *
 *	@param[in] i1 is a zero-based index of the first point
 *	@param[in] i2 is a zero-based index of the second point
 *
 *	@return Returns true if the alphas changed (a step was taken),
 *		false if no progress could be made for this pair.
 */
bool CSMOonGPU::TakeStep(int i1, int i2) // http://research.microsoft.com/pubs/68391/smo-book.pdf
{ 
	if(i1 == i2)
		return false;

	int y1 = n_Target(i1);
	float f_a1_old = m_p_alpha_list[i1];
	float f_err1 = (f_a1_old > 0 && f_a1_old < m_f_C)? m_p_error_cache[i1] : f_LearnedFunc(i1) - y1;
	//
	int y2 = n_Target(i2);
	float f_a2_old = m_p_alpha_list[i2];
	float f_err2 = (f_a2_old > 0 && f_a2_old < m_f_C)? m_p_error_cache[i2] : f_LearnedFunc(i2) - y2;
	// get errors, Lagrange multipliers and targets for both points
	// (the error cache is only valid for unbound alphas; bound ones are
	// recomputed from the learned function)

	// the pair must satisfy y1*a1 + y2*a2 = const, so a2 moves on a line
	// segment bounded by [L, H] inside the [0, C] box
	float L, H;
	if(y1 == y2) {
		float f_sum = f_a1_old + f_a2_old;
		L = max(0, f_sum - m_f_C);
		H = min(m_f_C, f_sum); // equation 12.4
	} else {
		float f_diff = f_a1_old - f_a2_old;
		L = max(0, f_diff);
		H = min(m_f_C, m_f_C + f_diff); // equation 12.3
	}
	if(L == H)
		return false;
	// Compute L, H

	float f_k11 = f_KernelFunc(i1, i1);
	float f_k12 = f_KernelFunc(i1, i2);
	float f_k22 = f_KernelFunc(i2, i2);
	float f_eta = f_k11 + f_k22 - 2 * f_k12; // equation 12.5
	// evaluate kernel, calculate f_eta
	// (f_eta is the second derivative of the objective along the constraint line)

	float f_a2_new_clipped; // new alhpa2
	if(f_eta > m_f_epsilon) {
		// normal case: positive curvature, unconstrained optimum exists
		float f_a2_new = f_a2_old + y2 * (f_err1 - f_err2) / f_eta; // equation 12.6
		// calculate new alpha

		if(f_a2_new < L)
			f_a2_new_clipped = L;
		else if(f_a2_new > H)
			f_a2_new_clipped = H;
		else
			f_a2_new_clipped = f_a2_new; // equation 12.7
		// clip it
	} else {
		// degenerate case: non-positive curvature; the optimum lies at one
		// of the segment endpoints, so evaluate the objective at both
		float Lobj, Hobj;
		{
			float c1 = -f_eta / 2;
			float c2 = y2 * (f_err1 - f_err2) + f_eta * f_a2_old;
			Lobj = (c1 * L + c2) * L;
			Hobj = (c1 * H + c2) * H;
		}
		// evaluate objective functions at f_a2_new_clipped = L, f_a2_new_clipped = H

		if(Lobj > Hobj + m_f_epsilon)
			f_a2_new_clipped = L;
		else if(Lobj < Hobj - m_f_epsilon)
			f_a2_new_clipped = H;
		else
			f_a2_new_clipped = f_a2_old;
		// clip it
	}
	// calculate new alpha 2

	float f_a1_new;
	{
		// a1 moves in the opposite direction along the constraint line
		int n_sign = y1 * y2;
		f_a1_new = f_a1_old + n_sign * (f_a2_old - f_a2_new_clipped);
		if(f_a1_new < m_f_epsilon) {
			f_a2_new_clipped += n_sign * f_a1_new; // update a2
			f_a1_new = 0;
		} else if(f_a1_new > m_f_C - m_f_epsilon) {
			f_a2_new_clipped += n_sign * (f_a1_new - m_f_C); // update a2
			f_a1_new = m_f_C;
		}
	}
	// update alpha 1 accordingly

	if(f_a2_new_clipped < 1e-8f)
		f_a2_new_clipped = 0;
	else if(f_a2_new_clipped > m_f_C - 1e-8f)
		f_a2_new_clipped = m_f_C;
	if(fabs(f_a2_new_clipped - f_a2_old) < m_f_epsilon * (f_a2_new_clipped + f_a2_old + m_f_epsilon))
		return false;
	// make sure alpha 2 is clipped (update of alpha1 might have set it wrong)
	// (also reject steps too small to be meaningful progress)

	float f_delta_offset;
	{
		float w1 = y1 * (f_a1_new - f_a1_old);
		float w2 = y2 * (f_a2_new_clipped - f_a2_old);

		// prefer a threshold consistent with an unbound point (where the
		// KKT conditions force the error to zero); otherwise average both
		float f_offset_new;
		if(f_a1_new > 0 && f_a1_new < m_f_C)
			f_offset_new = m_f_offset + f_err1 + w1 * f_k11 + w2 * f_k12;
		else if(f_a2_new_clipped > 0 && f_a2_new_clipped < m_f_C)
			f_offset_new = m_f_offset + f_err2 + w1 * f_k12 + w2 * f_k22;
		else {
			float b1 = f_err1 + w1 * f_k11 + w2 * f_k12;
			float b2 = f_err2 + w1 * f_k12 + w2 * f_k22;
			f_offset_new = m_f_offset + (b1 + b2) / 2;
		}

		f_delta_offset = f_offset_new - m_f_offset;
		m_f_offset = f_offset_new;
	}
	// Update threshold to reflect change in Lagrange multipliers

	{
		size_t n_point_num = m_p_data->n_Point_Num();
		size_t n_dimension_num = m_p_data->n_Dimension_Num();
		// get SVM size

		float t1 = y1 * (f_a1_new - f_a1_old);
		float t2 = y2 * (f_a2_new_clipped - f_a2_old);

		if(m_b_linear_kernel) {
  			/*//_ASSERTE(!is_sparse_data && !is_binary);
			for(int i = 0; i < n_dimension_num; ++ i) {
				float f_correction = f_Point_Dense(i1, i) * t1 + f_Point_Dense(i2, i) * t2;
				m_w_list[i] += f_correction;
			}*/
			// update weights

			{
				int bs = (n_dimension_num > 256)? 256 : n_NearestGEquaPo2(n_dimension_num);
				int gs = (n_dimension_num + bs - 1) / bs;
				// block size, grid size

				cuParamsSet7(h_update_w, DevPtr(m_dp_w_list), DevPtr(m_dp_dense_points),
					int(n_dimension_num), int(i1), int(i2), float(t1), float(t2));
				cuFuncSetBlockShape(h_update_w, bs, 1, 1);
				cuLaunchGrid(h_update_w, gs, 1);
				// update weights on GPU (this is probably causing GPU to slack due to low number
				// of scheduled threads - look into it, switch to CPU if number of dimensions is really small)
			}
		}
		// Update weight vector to reflect change in a1 & f_a2_new_clipped, if SVM is linear

		{
  			/*for(int i = 0; i < n_point_num; ++ i) {
				if(m_alpha_list[i] > 0 && m_alpha_list[i] < m_f_C) {  
					float f_correction = t1 * f_KernelFunc(i1, i) + t2 * f_KernelFunc(i2, i) - f_delta_offset;
					m_error_cache[i] += f_correction;
				}
			}
			m_error_cache[i1] = 0;
			m_error_cache[i2] = 0;*/

			{
				int bs = 256;
				int gs = (n_point_num + bs - 1) / bs;
				// block size, grid size

				cuParamsSet8(h_update_err, DevPtr(m_dp_error_cache), DevPtr(m_dp_precomputed_kernel),
					int(n_point_num), int(i1), int(i2), float(t1), float(t2), float(f_delta_offset));
				cuFuncSetBlockShape(h_update_err, bs, 1, 1);
				cuLaunchGrid(h_update_err, gs, 1);
				// update weights on GPU (this is probably causing GPU to slack due to low number
				// of scheduled threads - look into it, switch to CPU if number of dimensions is really small)

				cuMemcpyDtoH(m_p_error_cache, m_dp_error_cache, n_point_num * sizeof(float));
				// copy to CPU side as well
				// (blocking copy; also synchronizes with the launch above)
			}
		}
		// Update error cache using new Lagrange multipliers
	}
	// Update weight vector / error cache to reflect change in a1 & f_a2_new_clipped, if SVM is linear

	m_p_alpha_list[i1] = f_a1_new;
	m_p_alpha_list[i2] = f_a2_new_clipped;
	// update Lagrange vector

	cuMemcpyHtoD(CUdeviceptr(((float*)DevPtr(m_dp_alpha_list)) + i1), &f_a1_new, sizeof(float));
	cuMemcpyHtoD(CUdeviceptr(((float*)DevPtr(m_dp_alpha_list)) + i2), &f_a2_new_clipped, sizeof(float));
	// this hurts ... but there's no other way really
	// (two 4-byte host-to-device copies per step; keeps the device alphas in sync)

	return true;
}

/**
 *	@brief calculates the smallest power of two greater than or equal to n
 *
 *	@param[in] n is the input value
 *
 *	@return Returns the smallest power of two >= n (1 for n <= 1); once the
 *		doubling wraps past INT_MAX the loop stops, as in the original
 *		overflow guard.
 *
 *	@note The doubling is done via an unsigned shift: signed overflow of
 *		pot *= 2 (which the original relied on to terminate the loop) is
 *		undefined behavior in C++.
 */
int CSMOonGPU::n_NearestGEquaPo2(int n)
{
	int pot = 1;
	while(pot < n && pot > 0)
		pot = int((unsigned int)pot << 1);
	return pot;
}
