# -*- coding: utf-8 -*-
"""
__Author__    :  Icy
__Date__      :  2018/7/18
__File__      :  al.py
__Desc__      :
"""
import pprint

"""
优化损失函数的方法
"""
from numpy import *

# Stochastic gradient descent for a 3-parameter linear model.
class Gradient(object):
	@staticmethod
	def SGD(sample=None, y=None, alpha=0.01, epsilon=0.0001, max_iter=1000, verbose=True):
		"""Fit y_hat = theta0*x0 + theta1*x1 + theta2*x2 by per-sample gradient descent.

		Args:
			sample: list of (x0, x1, x2) feature triples; defaults to the
				built-in demo training set.
			y: list of target outputs, one per sample; defaults to the
				demo targets.
			alpha: learning rate.
			epsilon: convergence threshold — stop once the loss changes by
				less than this between two consecutive passes.
			max_iter: hard cap on the number of passes.  The original code
				defined this bound (`max_itor`) but never used it, so the
				`while True` loop could run forever on non-converging data.
			verbose: print per-pass diagnostics when True (the original
				always printed).

		Returns:
			(theta0, theta1, theta2): the fitted parameters.
		"""
		if sample is None:
			# Demo training set: each sample point has 3 components (x0, x1, x2).
			sample = [(1, 0., 3), (1, 1., 3), (1, 2., 3), (1, 3., 2), (1, 4., 4)]
		if y is None:
			# y[i] is the target output for sample[i].
			y = [95.364, 97.217205, 75.195834, 60.105519, 49.342380]

		# Initialise the parameters and loss bookkeeping.
		theta0 = 0.0
		theta1 = 0.0
		theta2 = 0.0
		error1 = 0.0
		error0 = 0.0
		cnt = 0

		# Iterate up to max_iter passes over the data.
		while cnt < max_iter:
			cnt += 1
			# Hypothesis: y_hat = theta0 + theta1*x[i][1] + theta2*x[i][2]
			for i in range(len(sample)):
				# Residual between the prediction and the target
				# (a scalar; the original kept it in a 2-element list of
				# which only element 0 was ever used).
				diff = (theta0 + theta1 * sample[i][1] + theta2 * sample[i][2]) - y[i]
				# Per-sample parameter update.
				theta0 -= alpha * diff * sample[i][0]
				theta1 -= alpha * diff * sample[i][1]
				theta2 -= alpha * diff * sample[i][2]
			# Accumulated squared loss after this pass.  The 1/2 factor is
			# conventional; only the change between passes matters here.
			error1 = 0.0
			for i in range(len(sample)):
				error1 += ((theta0 + theta1 * sample[i][1] + theta2 * sample[i][2]) - y[i]) ** 2 / 2
			if verbose:
				print(f'error1:{error1}---error0:{error0}')
			# Converged: the loss barely moved between two passes.
			if abs(error1 - error0) < epsilon:
				break
			error0 = error1
			if verbose:
				print(f'theta0:{theta0},'
				      f'theta1:{theta1},'
				      f'theta2:{theta2},'
				      f'最大迭代次数：{cnt},'
				      f'error1:{error1}')
		if verbose:
			print(f'done, theta0:{theta0},'
				      f'theta1:{theta1},'
				      f'theta2:{theta2},'
				      f'最大迭代次数：{cnt},'
				      f'error1:{error1}')
		return theta0, theta1, theta2

"""
分类算法
"""
class SSVM():
	"""Skeleton SVM utilities: kernel evaluation and tab-separated data loading.

	NOTE(review): the original nested these helpers inside ``svm()`` where
	they were unreachable dead code; they are exposed as static methods so
	they can actually be called and tested.  ``svm()`` is kept and still
	returns ``None`` for backward compatibility.
	"""

	@staticmethod
	def calKernelValue(matrix_x, sample_x, kernelOption):
		"""Compute kernel values between every row of matrix_x and sample_x.

		Args:
			matrix_x: numpy matrix of training samples, one per row.
			sample_x: numpy matrix holding a single sample (row vector).
			kernelOption: tuple (kernel_type, kernel_param).  'linear'
				ignores the parameter; 'rbf' uses it as sigma.

		Returns:
			numpy column matrix of kernel values, shape (numSamples, 1).

		Raises:
			ValueError: for an unsupported kernel type (the original
				silently fell through and returned None).
		"""
		kernelType = kernelOption[0]
		numSamples = matrix_x.shape[0]
		kernelValue = mat(zeros((numSamples, 1)))

		if kernelType == 'linear':
			# Bug fix: the original assigned this product to ``kernelType``
			# (shadowing the option string) and never returned anything,
			# so the function always produced None.
			kernelValue = matrix_x * sample_x.T
		elif kernelType == 'rbf':
			# Gaussian kernel K(x, xi) = exp(-||x - xi||^2 / (2*sigma^2)).
			# The original left this branch as a stub holding only a link:
			# https://blog.csdn.net/zouxy09/article/details/17292011
			sigma = kernelOption[1]
			if sigma == 0:
				# Guard against a zero bandwidth (division by zero below).
				sigma = 1.0
			for i in range(numSamples):
				diff = matrix_x[i, :] - sample_x
				kernelValue[i] = exp(diff * diff.T / (-2.0 * sigma ** 2))
		else:
			raise ValueError('unsupported kernel type: %r' % kernelType)
		return kernelValue

	@staticmethod
	def loaddata(filename):
		"""Load a tab-separated dataset: two feature columns, one label column.

		Returns:
			(train_data, label_data): list of (x1, x2) field pairs and the
			matching list of labels.  Fields are kept as strings, as in the
			original code.
		"""
		with open(filename, 'r') as f:
			train_data = []
			label_data = []
			# Bug fix: the original looped over ``f.readline()`` which
			# iterates the CHARACTERS of the first line only; iterate the
			# file object to get one line at a time.
			for line in f:
				fields = line.strip().split('\t')
				train_data.append((fields[0], fields[1]))
				label_data.append(fields[2])
		return train_data, label_data

	@staticmethod
	def svm():
		# Kept for backward compatibility: the original only defined the
		# helpers above locally and implicitly returned None.
		return None


if __name__ == "__main__":
	GD = Gradient()
	GD.SGD()
