import random
import svmMLiA
import numpy as np
from sys import exit
from matplotlib import pyplot as plt


def showResult(alpha,b):
    # Purpose: scatter-plot the training points colored by the sign of the
    # SVM decision function f(x) = w.x + b (green circles: f>0, red stars: f<0).
    # Params: alpha - (m,1) multipliers; b - scalar threshold.
    # return: None (shows a matplotlib window).
    # NOTE(review): relies on the module-level dataArr/labelArr loaded above.
    # Points with f(x) == 0 exactly are not drawn (same as the original).
    dataMat=np.mat(dataArr)
    labelMat=np.mat(labelArr).T
    classify_one=[]
    classify_two=[]
    # BUGFIX: the sample count was read from the unrelated module-level
    # `data` object (data.dataMatrix.shape[0]); use the matrix actually
    # being classified so the function no longer depends on that global.
    for i in range(dataMat.shape[0]):
        judge=classFun(dataMat,alpha,i,labelMat,b)
        if judge>0:
            classify_one.append(dataArr[i])
        elif judge<0:
            classify_two.append(dataArr[i])
    classify_one=np.array(classify_one)
    classify_two=np.array(classify_two)
    # BUGFIX: guard empty classes - indexing a 0-element array with [:,0]
    # raises IndexError when every sample falls on one side.
    if classify_one.size:
        plt.scatter(classify_one[:,0],classify_one[:,1],color='green',marker='o')
    if classify_two.size:
        plt.scatter(classify_two[:,0],classify_two[:,1],color='red',marker='*')
    plt.show()


def selectJrand(i,m):
    # Purpose: draw a random integer index from [0, m) that differs from i,
    # used to pick the partner j of the working pair (a_i, a_j).
    # Params: i - index to avoid; m - number of samples.
    # return: random integer j in [0, m) with j != i.
    candidate=i
    while candidate==i:
        candidate=int(random.uniform(0,m))
    return candidate


def classFun(dataMat,alpha,i,Y,b):
    # Purpose: linear decision function f(x_i) = sum_k alpha_k*y_k*<x_k,x_i> + b.
    # TODO: not kernel-aware; svmMLiA has a kernel-compatible variant.
    # Params: dataMat - (m,n) sample matrix; alpha - (m,1); Y - (m,1) labels;
    #         i - sample index; b - scalar threshold.
    # return: scalar prediction f(x_i).
    coeffs=np.multiply(alpha,Y)        # (m,1): alpha_k * y_k
    weights=coeffs.T*dataMat           # (1,n): linear weight vector w
    sample=dataMat[i,:].T              # (n,1): column vector x_i
    return float(weights*sample)+b
# debug,fname-,describe-:


def SMO(dataArr,labelArr,maxIter,C,tolerance):
    # Purpose: full Platt-style SMO driver - alternates between a sweep over
    # all samples and sweeps over only the non-bound alphas (0 < alpha < C)
    # until a pass changes nothing or maxIter passes are used.
    # Params: dataArr/labelArr - training data; maxIter - max passes;
    #         C - box constraint; tolerance - KKT violation tolerance.
    # return: the trained paraSet (holds alpha and b).
    data=paraSet(tolerance,C,dataArr,labelArr)
    numChange=0
    entireSet=True
    ite=0
    while(ite<maxIter and (numChange>0 or entireSet)):
        numChange=0
        if entireSet:
            # Full pass: examine every sample as the first index.
            for i in range(data.m):
                numChange+=afterconfirm(i,data)
            ite+=1
        else:
            # BUGFIX: the non-bound filter compared alpha against the
            # literal 1 instead of the upper bound C (see the reference
            # form: (alphas > 0) * (alphas < C)).
            condition=np.where((data.alpha>0)&(data.alpha<data.C))[0]
            for i in condition:
                numChange+=afterconfirm(i,data)
            ite+=1
        # Alternate: after a full pass switch to non-bound passes; go back
        # to a full pass once a non-bound pass changes no alpha.
        if entireSet==True:
            entireSet=False
        elif numChange==0:
            entireSet=True
    return data


def SMO_simple(dataArr,labelArr,maxIter,C,tolerance):
    # Purpose: simplified SMO driver - sweeps all samples and pairs each i
    # with a random partner j; stops after maxIter consecutive passes with
    # no alpha change (as in the reference smoSimple).
    # Params: dataArr/labelArr - training data; maxIter - max no-change passes;
    #         C - box constraint; tolerance - KKT violation tolerance.
    # return: the trained paraSet (holds alpha and b).
    data=paraSet(tolerance,C,dataArr,labelArr)
    ite=0
    while(ite<maxIter):
        numChange=0
        for i in range(data.m):
            # BUGFIX: the partner index was the undefined local `j`
            # (silently resolving to the stale module-level j=1); the
            # simplified heuristic picks it at random.
            j=selectJrand(i,data.m)
            numChange+=updateAlpha_simple(data,i,j)
        # BUGFIX: the counter only advanced when alphas changed, so a
        # converged (no-change) pass looped forever. Count consecutive
        # unchanged passes instead, resetting whenever something moves.
        if numChange==0:
            ite+=1
        else:
            ite=0
    return data


def afterconfirm(i,data):
    # Purpose: with the first index i fixed, test whether sample i violates
    # the KKT conditions (within tolerance); if so choose a partner j and
    # optimize the pair.
    # Params: i - candidate first index; data - shared paraSet state.
    # return: number of alphas changed (0 or 2).
    y_i=float(data.labelMatrix[i])
    err_times_y=data.ECache[i]*y_i
    a_i=float(data.alpha[i][0])
    violates_kkt=((err_times_y < -data.tolerant and a_i<data.C)
                  or (err_times_y > data.tolerant and a_i>0))
    if not violates_kkt:
        # Sample i already satisfies KKT within tolerance - nothing to do.
        return 0
    # TODO: selectJ (max-|Ei-Ej| heuristic) has unresolved edge cases,
    # so the random partner is used for now.
    j=selectJrand(i,data.m)
    return updateAlpha(data,i,j)


def selectJ(data,i):
    # Purpose: choose the second working index j - the one with the largest
    # |Ei - Ej| step among samples with a non-zero cached error; falls back
    # to a random index when no candidate gives a positive step.
    # Params: data - shared paraSet state; i - first working index.
    # return: chosen index j.
    Ei=data.ECache[i]
    best_j=-1
    # BUGFIX(idiom): the running maximum was stored in a variable named
    # `max`, shadowing the builtin; renamed to best_delta.
    best_delta=0
    validE=np.nonzero(data.ECache)[0]
    for k in validE:
        if k==i:
            continue
        delta=abs(Ei-data.ECache[k])
        if delta>best_delta:
            best_j=k
            best_delta=delta
    if best_j==-1:
        # No usable cached error - fall back to a random partner.
        print('selectJrand')
        best_j=selectJrand(i,data.m)
    return best_j


def updateAlpha(data,i,j):
    # Purpose: analytically optimize the pair (alpha_i, alpha_j), then
    # update the threshold b and refresh the error cache.
    # Params: data - shared paraSet state; i, j - working pair indices.
    # return: 2 if the pair changed, 0 if the step was skipped
    #         (non-positive eta or empty clipping interval L == H).
    K_ii=data.kernel_ij(i,i)
    K_jj=data.kernel_ij(j,j)
    K_ij=data.kernel_ij(i,j)
    Y_i=float(data.labelMatrix[i])
    Y_j=float(data.labelMatrix[j])
    E_i=float(data.ECache[i])
    E_j=float(data.ECache[j])
    # eta = K_ii + K_jj - 2*K_ij: curvature of the objective along the
    # constraint line; a non-positive value gives no usable step.
    n=K_ii+K_jj-2*K_ij
    if n <= 0:
        print('n <= 0')
        return 0
    # Unclipped optimum; the sign must be Y_i*(E_j-E_i)/eta (the earlier
    # (E_i-E_j) form drove alpha permanently negative - see history note).
    alpha_i_unc=float(data.alpha[i][0])+ Y_i*(E_j-E_i) /n
    # Clip into the feasible box; clipAlpha returns -1 when L == H.
    alpha_i=data.clipAlpha(j,i,alpha_i_unc)
    if alpha_i<0:
        print('alpha_i<0')
        return 0 # L == H: the pair cannot move
    # Keep the equality constraint sum(alpha_k * y_k) unchanged.
    alpha_j=float( data.alpha[j][0]+Y_i*Y_j*(data.alpha[i][0]-alpha_i) )
    # Candidate thresholds from each sample's KKT condition (old alphas
    # are still in data.alpha here - do not reorder past the assignments).
    b_i=-E_i-Y_i*K_ii*(alpha_i-data.alpha[i][0])-Y_j*K_ij*(alpha_j-data.alpha[j][0])+data.b
    b_j=-E_j-Y_i*K_ij*(alpha_i-data.alpha[i][0])-Y_j*K_jj*(alpha_j-data.alpha[j][0])+data.b
    # BUGFIX: b_i/b_j/b_new were computed but data.b was never assigned,
    # so the threshold stayed at its initial value forever. Standard SMO:
    # prefer the bound from an alpha strictly inside (0, C), else average.
    if 0 < alpha_i < data.C:
        data.b=float(b_i)
    elif 0 < alpha_j < data.C:
        data.b=float(b_j)
    else:
        data.b=float(b_i+b_j)/2
    data.alpha[i][0]=alpha_i
    data.alpha[j][0]=alpha_j
    # Refresh cached errors with the new alphas and threshold.
    data.updateEi(i)
    data.updateEi(j)
    return 2


def updateAlpha_simple(data,i,j):
    # Purpose: same analytic pair update as updateAlpha, but the errors are
    # recomputed on the fly instead of read from the cache (the simple
    # driver keeps no error cache in sync).
    # Params: data - shared paraSet state; i, j - working pair indices.
    # return: 2 if the pair changed, 0 if the step was skipped.
    K_ii=data.kernel_ij(i,i)
    K_jj=data.kernel_ij(j,j)
    K_ij=data.kernel_ij(i,j)
    Y_i=float(data.labelMatrix[i])
    Y_j=float(data.labelMatrix[j])
    # Prediction errors computed fresh from the current alpha and b.
    E_i=classFun(data.dataMatrix,data.alpha,i,data.labelMatrix,data.b)-float(data.labelMatrix[i][0])
    E_j=classFun(data.dataMatrix,data.alpha,j,data.labelMatrix,data.b)-float(data.labelMatrix[j][0])
    # eta: curvature of the objective along the constraint line.
    n=K_ii+K_jj-2*K_ij
    if n <= 0:
        print('n <= 0')
        return 0
    # Unclipped optimum; sign must be Y_i*(E_j-E_i)/eta.
    alpha_i_unc=float(data.alpha[i][0])+ Y_i*(E_j-E_i) /n
    # Clip into the feasible box; clipAlpha returns -1 when L == H.
    alpha_i=data.clipAlpha(j,i,alpha_i_unc)
    if alpha_i<0:
        print('alpha_i<0')
        return 0 # L == H: the pair cannot move
    # Keep the equality constraint sum(alpha_k * y_k) unchanged.
    alpha_j=float( data.alpha[j][0]+Y_i*Y_j*(data.alpha[i][0]-alpha_i) )
    b_i=-E_i-Y_i*K_ii*(alpha_i-data.alpha[i][0])-Y_j*K_ij*(alpha_j-data.alpha[j][0])+data.b
    b_j=-E_j-Y_i*K_ij*(alpha_i-data.alpha[i][0])-Y_j*K_jj*(alpha_j-data.alpha[j][0])+data.b
    # BUGFIX: the new threshold was computed but data.b was never assigned.
    if 0 < alpha_i < data.C:
        data.b=float(b_i)
    elif 0 < alpha_j < data.C:
        data.b=float(b_j)
    else:
        data.b=float(b_i+b_j)/2
    data.alpha[i][0]=alpha_i
    data.alpha[j][0]=alpha_j
    return 2
# Load the training set (feature rows and labels) from disk via the
# companion svmMLiA module.
dataArr,labelArr=svmMLiA.loadDataSet('testSet.txt')

class paraSet:
    'Bundle of the parameters used throughout the SVM computation, grouped so they can be passed around as one object'
    def __init__(self,tolerant,C,dataArr,labelArr): 
        self.tolerant=tolerant # hyper-parameter: tolerance for KKT violation
        self.C=C # hyper-parameter: upper bound (box constraint) for every alpha
        self.dataMatrix=np.mat(dataArr)
        self.m,self.n=self.dataMatrix.shape # (m,n) = (sample count, feature dimension)
        self.labelMatrix=(np.mat(labelArr)).T
        self.alpha=np.mat(np.zeros((self.m,1)))
        self.ECache=np.zeros((self.m))
        self.b=0 # initial value of the threshold b
        for i in range(self.m):
            self.updateEi(i) # prime the error cache from the initial alpha/b
    def updateEi(self,i):
        # Recompute and cache the prediction error E_i = f(x_i) - y_i
        # using the current alpha and b.
        Ei=classFun(self.dataMatrix,self.alpha,i,self.labelMatrix,self.b)-float(self.labelMatrix[i][0])
        self.ECache[i]=Ei
    
    
    def kernel_ij(self,i,j):
        # Purpose: kernel value for samples i and j; here a plain linear
        # dot product <x_i, x_j>. Implemented as a method so a subclass
        # can override it with a real kernel.
        # return: scalar kernel value.
        # (history: an earlier version mistakenly used i for both rows)
        return float(self.dataMatrix[i,:]*(self.dataMatrix[j,:]).T)
    
    def clipAlpha(self,i,j,alpha_unc):
        # Purpose: clip the candidate value alpha_unc for alpha[j] into its
        # feasible interval [L, H]; L == H means the pair cannot move.
        # NOTE(review): callers invoke this as clipAlpha(j, i, ...), i.e.
        # this method's `j` parameter is the index whose alpha is being
        # updated - confirm against updateAlpha before refactoring.
        # return: clipped value as a float, or -1 when L == H.
        alpha_1=self.alpha[i]
        alpha_2=self.alpha[j]
        alpha_clip=alpha_unc
        if self.labelMatrix[i]!=self.labelMatrix[j]:
            # Opposite labels: alpha_2 - alpha_1 is constant along the step.
            L=max(0,alpha_2-alpha_1)
            H=min(self.C,self.C+alpha_2-alpha_1)    
        else:
            # Same labels: alpha_2 + alpha_1 is constant along the step.
            L=max(0,alpha_2+alpha_1-self.C)
            H=min(self.C,alpha_2+alpha_1)
        if L==H:
            return -1
        if alpha_unc>H:
            alpha_clip=H
        elif alpha_unc<L:
            alpha_clip=L
        # float() collapses the 1x1 matrix results of the comparisons above.
        return float(alpha_clip)

# Hyper-parameters for the SMO run below.
maxIter=40
C=0.6
tolerance=0.001
#---
# Module-level paraSet built from the loaded data; shared state for the
# SMO helpers (and read as a global in parts of this script).
data=paraSet(tolerance,C,dataArr,labelArr)
i=0
j=1
# Scratch state for debugging updateAlpha interactively.
debug=[]
#a=SMO_simple(dataArr,labelArr,maxIter,C,tolerance)

#updateAlpha(data,32,0)
#exit(0)
# Train with the reference implementation from svmMLiA, then plot the
# resulting classification split.
b,alpha=svmMLiA.smoSimple(dataArr,labelArr,0.6,0.001,40)
#b,alpha=svmMLiA.smoP(dataArr,labelArr,0.6,0.001,40)
#exit(0)
showResult(alpha,b)

# return: