import numpy as np
import adaboost
from matplotlib import pyplot as plt
from sys import exit
from math import log

def stumpClassify(datArr,feature,T,T_exp):
    # Threshold-classify every sample on a single feature column.
    # feature: column index; T: threshold; T_exp: 'lt' (values <= T -> -1)
    #          or anything else, treated as 'gt' (values > T -> -1).
    # return: length-m vector of {-1.0, +1.0} predictions.
    column = datArr[:, feature]
    if T_exp == 'lt':
        negative = column <= T
    else:
        negative = column > T
    return np.where(negative, -1.0, 1.0)


def buildStump(datArr,classLabels,D):
    """Build the best single-level decision tree (stump) under weights D.

    datArr: (m, n) feature array; classLabels: length-m {-1, +1} labels;
    D: length-m sample-weight vector.
    return: (bestTree dict {dim, thresh, ineq}, minError, bestPredic vector)
    """
    m, n = datArr.shape
    minError = float('inf')
    bestTree = {}
    bestPredic = []
    for feature in range(n):
        # np.unique is sorted and drops duplicate candidate thresholds,
        # avoiding redundant evaluations (same result as sorting all values)
        for T in np.unique(datArr[:, feature]):
            for T_exp in ['lt', 'gt']:
                predictResult = stumpClassify(datArr, feature, T, T_exp)
                # weighted misclassification rate: sum of D over wrong samples
                errRatio = np.sum(D[predictResult != classLabels])
                if errRatio < minError:
                    minError = errRatio
                    bestTree = {'dim': feature, 'thresh': T, 'ineq': T_exp}
                    bestPredic = predictResult.copy()
    return bestTree, minError, bestPredic


def adaBoost_ds_train(datArr,classLabels,numIt,D_init):
    """AdaBoost training with decision stumps as the base classifier.

    datArr: (m, n) features; classLabels: length-m {-1, +1} labels.
    numIt: maximum number of base classifiers; D_init: initial sample weights.
    return: ([Gm stump dicts, ...], alpha array) -- ALWAYS of equal length.
    """
    D = D_init.copy()
    A = np.zeros(numIt)
    baseClassify = []
    m = datArr.shape[0]
    fx_current = np.zeros(m)
    for M in range(numIt):
        stump, errorRatio, predict = buildStump(datArr, classLabels, D)
        baseClassify.append(stump)
        # alpha = 1/2 * ln((1 - e) / e); the max() guards against e == 0
        # (note: 0.5, not 1/2 -- integer division would give 0 in Python 2)
        A[M] = 0.5 * log((1.0 - errorRatio) / max(errorRatio, 1e-16))

        # re-weight samples: D <- D * exp(-alpha * y * Gm(x)) / Zm
        Gm = stumpClassify(datArr, stump['dim'], stump['thresh'], stump['ineq'])
        ex = np.exp(-A[M] * Gm * classLabels)
        Zm = np.sum(D * ex)
        D = D / Zm * ex

        # training error of the aggregated classifier sign(f(x))
        fx_current += predict * A[M]
        TE = np.sum(np.sign(fx_current) != classLabels) / float(m)
        print(TE)
        if TE == 0:  # training error can genuinely reach 0
            print('perfect')
            break
    # BUG FIX: on early stop, A still had numIt slots while baseClassify was
    # shorter, which broke the matrix product in adaBoost_ds_predic.
    # Truncate so len(A) == len(baseClassify); a no-op when no early stop.
    return baseClassify, A[:len(baseClassify)]


def adaBoost_ds_predic(G,A,datArr):
    """Final classifier: sign of the alpha-weighted sum of stump outputs.

    G: list of stump dicts {dim, thresh, ineq}; A: per-stump alpha weights
    (trailing entries beyond len(G) are ignored -- training may stop early);
    datArr: (m, n) sample array.
    return: (m, 1) float array with entries in {-1, 0, +1}.
    """
    m = datArr.shape[0]   # number of samples
    n = len(G)            # number of base classifiers
    # robustness: only use the first n alphas so a longer A cannot break
    # the weighted sum (no-op when len(A) == len(G))
    alphas = np.asarray(A).ravel()[:n]
    fx = np.zeros(m)
    for alpha, stump in zip(alphas, G):
        fx += alpha * stumpClassify(datArr, stump['dim'], stump['thresh'], stump['ineq'])
    # Gx = sign(Fx); np.sign maps >0 -> 1, <0 -> -1, 0 -> 0 like the original
    return np.sign(fx).reshape(m, 1)


def gradAscent(dataMat, classLabel, aipha=0.001, maxCycle=500):
    """Batch gradient ascent for logistic-regression weights.

    dataMat: (m, n) feature array; classLabel: length-m labels -- entries
    equal to -1 are remapped to 0 so targets are in {0, 1}.
    aipha: learning rate; maxCycle: number of full-batch iterations.
    return: 1-D weight array of length n.
    """
    X = np.mat(dataMat)                   # (m, n)
    y = np.mat(classLabel.copy()).T       # (m, 1)
    y[y == -1] = 0                        # logistic targets must be {0, 1}
    n = X.shape[1]
    w = np.mat(np.ones((1, n)))           # (1, n) initial weights
    for _ in range(maxCycle):
        # ascend the log-likelihood: gradient = (y - sigmoid(Xw))^T * X
        predicted = sgmoid(X * w.T)       # (m, 1)
        gradient = (y - predicted).T * X  # (1, n)
        w = w + aipha * gradient
    return w.getA()[0, :]


def sgmoid(zVector):
    """Numerically safe logistic sigmoid, elementwise.

    zVector: scalar, vector, or matrix of z = X * theta values.
    return: 1 / (1 + exp(-z)) with the same shape as the input.
    """
    # Clip so np.exp can never overflow (exp(710) overflows a double and
    # raised a RuntimeWarning before). The sigmoid is fully saturated in
    # double precision long before |z| = 709, so results are unchanged.
    z = np.clip(zVector, -709.0, 709.0)
    return 1.0 / (1.0 + np.exp(-z))

def classifyVector(inX,weights):
    """Classify samples with logistic regression (linear score z = X * w).

    inX: (m, n) sample array; weights: 1-D weight array of length n.
    return: (m, 1) matrix of 0/1 labels (probability > 0.5 -> 1, else 0).
    """
    X_mat = np.mat(inX)
    n_features = X_mat.shape[1]
    w = np.mat(weights).reshape(n_features, 1)  # column weight vector
    probs = sgmoid(X_mat * w)                   # (m, 1) probabilities
    # threshold in place; the two masks are disjoint so order is safe
    probs[probs > 0.5] = 1
    probs[probs <= 0.5] = 0
    return probs

def load_data(fileName):
    """Load a tab-separated dataset file.

    Each line holds n feature values followed by the class label, all
    tab-separated. fileName: path to the data file.
    return: (feature array of shape (m, n), label array of length m)
    """
    dataArr = []
    labelArr = []
    with open(fileName) as dataHandle:
        for sampleStr in dataHandle:
            sampleArr = sampleStr.split('\t')
            # last column is the label; the rest are features
            dataArr.append([float(v) for v in sampleArr[:-1]])
            labelArr.append(float(sampleArr[-1]))
    return np.array(dataArr), np.array(labelArr)
# Debug notes (fname-, describe-):
# good: see OneNote => Python section, "Array: difference between shape (m,1) and 1-D (m,)"
# params:

# Script: compare logistic regression against AdaBoost-with-stumps on the
# horse-colic dataset (labels are {-1, +1}).
train_dataArr,train_labelArr=load_data('horseColicTraining2.txt')
test_dataArr,test_label=load_data("horseColicTest2.txt")
numIt=50                    # max number of AdaBoost base classifiers
m=train_dataArr.shape[0]    # number of training samples
m2=test_dataArr.shape[0]    # number of test samples
D=np.ones(m)/m              # uniform initial sample weights

#G,A=adaBoost_ds_train(datArr,classLabels,numIt,D)
#adaBoost_ds_predic()
# content:
#a=stumpClassify(datMat.getA(),0,2,'lt')
##a,b,c=buildStump(datArr,classLabels,D)
#exit(0)
# --- logistic-regression baseline ---
weight=gradAscent(train_dataArr,train_labelArr)    
r1=classifyVector(test_dataArr,weight)
r1[r1==0]=-1                # map {0,1} predictions back to {-1,+1}
r1=r1.getA1()               # flatten the (m2, 1) matrix to a 1-D array
check1=np.zeros(m2)
check1[r1!=test_label]=1
e1=check1.sum()
er1=e1/float(m2)            # logistic-regression test error rate
#exit(0)
# --- AdaBoost with decision stumps ---
G,A=adaBoost_ds_train(train_dataArr,train_labelArr,numIt,D)
r2=adaBoost_ds_predic(G,A,test_dataArr)
# squeeze the (m2, 1) prediction array down to 1-D for elementwise comparison
r2=np.squeeze(r2)
check1=np.zeros(m2)
check1[r2!=test_label]=1
e1=check1.sum()
er2=e1/float(m2)            # AdaBoost test error rate
exit(0)


# NOTE: dead code below -- never reached because of exit(0) above.
# Reference run using the book's adaboost module for comparison.
d,l=adaboost.loadDataSet("horseColicTest2.txt")
d2,l2=adaboost.loadDataSet('horseColicTraining2.txt')
c=adaboost.adaBoostTrainDS(d2,l2)
r3=adaboost.adaClassify(d,c)
# return:rr
exit(0)
#plt.scatter(datMat.getA()[:,0],datMat.getA()[:,1],marker='*')
#plt.show()
