from sys import exit, _getframe
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from math import log
import time




def sgmoid(zVector):
    """Elementwise logistic sigmoid: 1 / (1 + e^-z).

    Accepts a scalar, an ndarray or an np.matrix and returns a result of
    the same shape (NumPy broadcasting handles the single-value case too).
    """
    exp_neg = np.exp(-zVector)
    return 1.0 / (1.0 + exp_neg)


def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Stochastic gradient ascent with a decaying step size.

    Each pass visits every sample exactly once, in uniformly random order
    (sampling without replacement per pass).

    Args:
        dataMatrix: (m, n) ndarray of samples.
        classLabels: length-m sequence of 0/1 labels.
        numIter: number of full passes over the data.
    Returns:
        length-n ndarray of weights.

    Fixes: the previous revision called update_weight(), which is not
    defined anywhere in this module (NameError), and used the position
    inside `dataIndex` directly as a row index into dataMatrix, which
    breaks the without-replacement sampling.
    """
    m, n = dataMatrix.shape
    weights = np.ones(n)  # initialize to all ones
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            # alpha decreases with iteration but never reaches 0
            # because of the 0.0001 floor
            alpha = 4 / (1.0 + j + i) + 0.0001
            # pick a position among the not-yet-visited samples ...
            randPos = int(random.uniform(0, len(dataIndex)))
            # ... and map it to the actual row (bug fix: randPos indexes
            # dataIndex, not dataMatrix)
            sampleIdx = dataIndex[randPos]
            h = sgmoid(sum(dataMatrix[sampleIdx] * weights))
            error = classLabels[sampleIdx] - h
            # direct gradient step (replaces the undefined update_weight helper)
            weights = weights + alpha * error * dataMatrix[sampleIdx]
            del dataIndex[randPos]
    return weights


def gradAscent(dataMat, classLabel, aipha=0.001, maxCycle=500):
    """Batch gradient ascent for logistic-regression weights.

    Args:
        dataMat: m x n design matrix (list or array), e.g. X = [1, x1, x2].
        classLabel: length-m sequence of 0/1 labels.
        aipha: learning rate.
        maxCycle: number of full-batch iterations.
    Returns:
        length-n ndarray of weights.

    Side effect (restored): appends the weight vector after every iteration
    to the module-level list `gradAscent_logWeight` when it exists —
    weight_convergence()/plot_weight_trend rely on that history, and the
    previous revision had the append commented out, leaving the log empty.
    """
    X_ = np.mat(dataMat)          # (m, n)
    Y_ = np.mat(classLabel).T     # (m, 1)
    m, n = X_.shape
    weight_ = np.mat(np.ones((1, n)))  # (1, n)
    # history log is optional so the function also works standalone
    history = globals().get('gradAscent_logWeight')
    for _ in range(maxCycle):
        z_ = X_ * (weight_.T)     # (m, 1)
        sgm_ = sgmoid(z_)
        cost_ = (Y_ - sgm_).T     # (1, m) residuals
        PartDiff_ = cost_ * X_    # (1, n) gradient of the log-likelihood
        weight_ = weight_ + aipha * PartDiff_
        if history is not None:
            history.append(weight_.getA()[0, :])
    return weight_.getA()[0, :]


def stocGradAscent(dataMat, classLabel, maxCycle=500):
    """Stochastic gradient ascent (one sample per step, shuffled passes).

    Args:
        dataMat: m x n samples (same layout as gradAscent).
        classLabel: length-m sequence of 0/1 labels.
        maxCycle: number of full passes over the data.
    Returns:
        length-n ndarray of weights.

    Fix: the previous revision called update_weight(), which is not defined
    anywhere in this module (NameError); restored the direct update that the
    commented-out line documented.
    """
    X = np.array(dataMat)
    # labels are kept as given; the arithmetic below is float regardless
    Y = np.array(classLabel)
    m, n = X.shape
    weight = np.ones(n)
    idx = list(range(m))
    for j in range(maxCycle):
        for i in range(m):
            # step size decays with progress but never reaches 0
            aipha = 4 / (1.0 + j + i) + 0.0001
            index = idx[i]
            sgm = sgmoid((weight * X[index]).sum())
            partDiff = (Y[index] - sgm) * X[index]
            weight = weight + aipha * partDiff
        random.shuffle(idx)  # new visiting order for the next pass
    return weight


def plotBesfFit(weight, X, Y):
    """Scatter both classes and draw the decision line w0 + w1*x1 + w2*x2 = 0.

    weight: length-3 vector [w0, w1, w2].
    X: sample matrix; columns 0 and 1 are used as the plot axes.
        NOTE(review): unlike plotBesfFit_cmp this does not skip a leading
        bias column — confirm callers pass X without the constant x0 column.
    Y: 0/1 labels (same length as X rows).
    """
    positives = X[Y == 1]
    negatives = X[Y == 0]
    lo = min(negatives[:, 0].min(), positives[:, 0].min()) - 1
    hi = max(negatives[:, 0].max(), positives[:, 0].max()) + 1
    xs = np.arange(lo, hi)
    # solve w0 + w1*x + w2*y = 0 for y: the boundary over feature x2
    ys = (-weight[0] - weight[1] * xs) / weight[2]
    plt.plot(negatives[:, 0], negatives[:, 1], 'r*',
             positives[:, 0], positives[:, 1], 'b+',
             xs, ys, 'g--')
    plt.show()


def plotBesfFit_cmp(*weights):
    """Overlay the decision boundaries of several weight vectors for comparison.

    Reads the module-level dataMat/classLabel; assumes each row of X is
    [1, x1, x2] and plots the last two columns (the bias column is dropped).
    Each weight is a length-3 vector [w0, w1, w2].
    """
    X = np.array(dataMat)
    Y = np.array(classLabel)
    positives = (X[Y == 1])[:, -2:]  # drop the constant x0 = 1 column
    negatives = (X[Y == 0])[:, -2:]
    lo = min(negatives[:, 0].min(), positives[:, 0].min()) - 1
    hi = max(negatives[:, 0].max(), positives[:, 0].max()) + 1
    xs = np.arange(lo, hi)
    plt.plot(negatives[:, 0], negatives[:, 1], 'r*',
             positives[:, 0], positives[:, 1], 'b+')
    palette = ['m', 'y', 'k', 'c', 'w', 'b', 'g', 'r']
    for i, w in enumerate(weights):
        # boundary over feature x2: solve w0 + w1*x + w2*y = 0 for y
        ys = (-w[0] - w[1] * xs) / w[2]
        plt.plot(xs, ys,
                 '%s--' % (palette[i % len(palette)]),
                 label='weight%d' % (i + 1))
    plt.legend(loc='upper left', frameon=False)
    plt.show()


def plot_weight_trend(weight):
    """Plot each weight component's trajectory across training iterations.

    weight: sequence of per-iteration weight vectors (cycles x n), e.g. the
    module-level gradAscent_logWeight history.
    """
    # renamed from `log`, which shadowed math.log imported at module top
    history = np.array(weight)
    cycleNum, weightNum = history.shape
    xs = range(cycleNum)
    palette = ['m', 'y', 'k', 'c', 'w', 'b', 'g', 'r']
    for i in range(weightNum):
        plt.plot(xs, history[:, i],
                 '%s-' % palette[i % len(palette)],
                 label='weight%d' % (i + 1))
    plt.legend()
    plt.show()


def normalEquation(dataMat, classLabel):
    """Solve the least-squares normal equation w = (X^T X)^-1 X^T y.

    Args:
        dataMat: m x (n+1) design matrix (list or array).
        classLabel: length-m target values.
    Returns:
        length-(n+1) ndarray of weights.

    Fix: the previous formula inv(X.I*X)*X.I*Y.I multiplied pseudo-inverses
    of both X and Y; since X.I*X is the identity for full-column-rank X,
    it returned the least-squares solution scaled by 1/||Y||^2 — wrong.
    """
    X = np.mat(dataMat)
    Y = np.mat(classLabel).T       # (m, 1) column vector
    weight = (X.T * X).I * X.T * Y  # (n+1, 1)
    return weight.getA()[:, 0]


def cost_function(theta):
    """Logistic-regression negative log-likelihood (cross-entropy) cost.

    Reads the module-level globals DATA_X (m, n) and DATA_Y (m,).

    Args:
        theta: length-n list/ndarray of weights.
    Returns:
        Scalar cost; float('inf') when a sample is predicted with full
        certainty on the wrong side (math.log would raise on log(0)).

    Fix: the y==1 branch previously broke out of the loop whenever
    sgm != 0 — i.e. in the normal case — returning a truncated partial
    sum. The condition was inverted; bailing out is only needed when the
    logarithm's argument is 0.
    """
    theta = np.array(theta)
    m, n = DATA_X.shape
    funcValue = 0.0
    for i in range(m):
        funcZ = (theta * DATA_X[i]).sum()
        sgm = sgmoid(funcZ)
        if DATA_Y[i] == 1:
            if sgm == 0:
                # -log(0) diverges; Python's log cannot return infinity
                return float('inf')
            funcValue += -log(sgm)
        else:
            if sgm == 1:
                return float('inf')
            funcValue += -log(1 - sgm)
    return funcValue


def cost_func_jac(theta):
    """Gradient of cost_function with respect to theta.

    Reads the module-level globals DATA_X and DATA_Y. Returns a length-n
    ndarray. The sign is negated because cost_function is MINIMIZED: the
    ascent direction of the log-likelihood is the descent direction of
    the cost.
    """
    features = np.mat(DATA_X)
    labels = np.mat(DATA_Y).T        # (m, 1)
    w = np.mat(theta)                # (1, n)
    residual = labels - sgmoid(features * w.T)  # (m, 1)
    grad = -(residual.T * features)  # (1, n), minus sign: minimization
    return grad.getA()[0]


def evaluate():
    """Compare the solvers side by side.

    Runs BFGS (scipy.optimize.minimize with the analytic Jacobian), the
    normal equation, batch gradient ascent and stochastic gradient ascent
    on the module-level dataMat/classLabel, prints wall-clock timings for
    the iterative solvers, and overlays all decision boundaries.
    """
    initial_theta = [1, 1, 1]
    learning_rate = 0.001
    stamps = [time.time()]
    res = minimize(cost_function, initial_theta, method='BFGS',
                   jac=cost_func_jac, options={'disp': True})
    bfgs_weight = res.x
    normal_weight = normalEquation(dataMat, classLabel)
    stamps.append(time.time())
    batch_weight = gradAscent(dataMat, classLabel, learning_rate, 500)
    stamps.append(time.time())
    stoch_weight = stocGradAscent(dataMat, classLabel, 500)
    stamps.append(time.time())
    print(stamps[1] - stamps[0], stamps[2] - stamps[1], stamps[3] - stamps[2])
    plotBesfFit_cmp(batch_weight, stoch_weight, bfgs_weight, normal_weight)


def weight_convergence():
    """Run the trainers and plot how the batch-GA weights converge.

    Relies on the module-level gradAscent_logWeight history that
    gradAscent is expected to populate; plot_weight_trend then draws one
    curve per weight component.
    """
    stocGradAscent1(np.array(dataMat), classLabel, 150)
    gradAscent(np.array(dataMat), classLabel, maxCycle=80000)
    plot_weight_trend(gradAscent_logWeight)

def classifyVector(inX, weights):
    """Classify one sample with logistic regression (linear z = inX . weights).

    Args:
        inX: length-n feature vector (ndarray).
        weights: length-n trained weight vector.
    Returns:
        1 when sigmoid(z) > 0.5, otherwise 0.
    """
    z = (inX * weights).sum()
    return 1 if sgmoid(z) > 0.5 else 0


def get_data_Byfile(fileName):
    """Load a tab-separated data file into feature and label arrays.

    Args:
        fileName: path to a file whose columns are tab-separated numbers,
            with the label in the last column.
    Returns:
        (trainX, trainY): float ndarray of all but the last column, and
        the last column cast to int (file contents arrive as strings, and
        label sums in float would accumulate rounding error).
    """
    with open(fileName) as handle:
        rows = [line.strip().split('\t') for line in handle]
    table = np.array(rows, float)
    return table[:, :-1], table[:, -1].astype(int)


def testClonic():
    """Train on horseColicTraining.txt, score horseColicTest.txt, print error %.

    Returns:
        The test-set error percentage (0-100).

    Fix: every assignment to `weight` was commented out in the previous
    revision, so classifyVector received an undefined name (NameError).
    The batch-gradient-ascent training path is restored.
    """
    # 1. load the training and test sets
    trainX, trainY = get_data_Byfile('horseColicTraining.txt')
    testX, testY = get_data_Byfile('horseColicTest.txt')
    # 2. train the model (other solvers — BFGS, normal equation — were
    #    experimented with here; batch gradient ascent is the one kept)
    weight = gradAscent(trainX, trainY)
    # 3. score the held-out set
    errNum = 0
    for x, y in zip(testX, testY):
        if classifyVector(x, weight) != y:
            errNum += 1
    errRatio = errNum / float(testX.shape[0]) * 100
    print(errRatio)
    return errRatio
# --- module-level script setup ---
# NOTE(review): loadDataSet is neither defined nor imported in this file, so
# the next line raises NameError as written — confirm the companion module
# that provides it (and the update_weight helper used above) should be
# imported here.
dataMat, classLabel = loadDataSet()  # presumably rows are [1, x1, x2], ~100 samples — TODO confirm
DATA_X=np.array(dataMat)  # globals consumed by cost_function / cost_func_jac
DATA_Y=np.array(classLabel)
# Per-iteration weight histories. weight_convergence/plot_weight_trend read
# gradAscent_logWeight; the other two lists are never written in this file.
gradAscent_logWeight = []
stocGradAscent1_logWeight = []
stocGradAscent_logWeight = []
# Script entry point: trains and plots weight-convergence trajectories.
weight_convergence()