import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

class LogisticRegression:
    """Logistic regression trained with batch gradient ascent.

    Iterater() learns a weight column vector, test() produces sigmoid
    probabilities for a sample matrix, and prob2()/prob3() report binary /
    one-vs-rest three-class accuracy.
    """

    def Sigmoid(self, x):
        """Element-wise logistic function 1 / (1 + e^-x).

        NOTE: for large negative x, np.exp may warn about overflow and the
        result saturates to 0.0, which is acceptable for this use.
        """
        return 1 / (1 + np.exp(-x))

    def Iterater(self, X, Y):
        """Fit weights by batch gradient ascent on the log-likelihood.

        X: iterable of feature rows, shape (n_samples, n_features).
        Y: iterable of 0/1 labels, length n_samples.
        Returns: weight column vector w with shape (n_features, 1).
        """
        # np.asarray + @ replaces the deprecated np.mat / np.matrix API.
        X = np.asarray(X, dtype=float)
        Y = np.asarray(Y, dtype=float).reshape(-1, 1)
        w = np.ones((X.shape[1], 1))
        # Number of gradient steps.
        iters = 5000
        # Learning rate (the original comment mislabeled this as "precision").
        alpha = 0.001
        # Initial predictions.
        fx = self.Sigmoid(X @ w)
        for _ in range(iters):
            # Residual between labels and current predictions.
            error = Y - fx
            # Gradient-ascent update: w += alpha * X^T (Y - sigmoid(Xw)).
            w = w + alpha * (X.T @ error)
            # Re-predict with the updated weights.
            fx = self.Sigmoid(X @ w)
        return w

    def test(self, x, y, w):
        """Return sigmoid probabilities sigmoid(x @ w) for the test samples.

        y is accepted only for interface compatibility; it is not used.
        """
        x = np.asarray(x, dtype=float)
        return self.Sigmoid(x @ w)

    def prob2(self, y, fx):
        """Print and return binary accuracy; fx >= 0.5 predicts class 1."""
        right = 0
        length = len(fx)
        for i in range(length):
            if fx[i][0] < 0.5:
                if y[i] == 0:
                    right += 1
            else:
                if y[i] == 1:
                    right += 1

        print("正确数为：", right)
        print("正确率为：", right / length,'\n')
        # Returning the accuracy is new but backward-compatible (callers
        # previously ignored the None return).
        return right / length

    def prob3(self, y, fx0, fx1, fx2):
        """Print and return one-vs-rest accuracy over classes 0/1/2.

        y must be a numpy array (its .shape is used); fx0/fx1/fx2 are the
        per-class probability columns produced by test().
        """
        # Size of the test set.
        length = y.shape[0]
        # Number of correct predictions.
        right = 0

        for i in range(length):
            # Predict the class whose one-vs-rest probability is strictly largest.
            if fx0[i][0] > max(fx1[i][0], fx2[i][0]):
                if y[i] == 0:
                    right += 1
            if fx1[i][0] > max(fx0[i][0], fx2[i][0]):
                if y[i] == 1:
                    right += 1
            if fx2[i][0] > max(fx0[i][0], fx1[i][0]):
                if y[i] == 2:
                    right += 1
        print("正确数为：", right)
        print("正确率为：", right / length,'\n')
        # Backward-compatible accuracy return (see prob2).
        return right / length

# Binary classification
def fit2(log, x_train, y_train, x_test, y_test):
    """Run a full binary-classification round with the given model.

    Trains on (x_train, y_train), predicts probabilities for x_test, then
    lets the model print its accuracy against y_test.
    """
    # Train -> predict -> score, in one pipeline.
    log.prob2(y_test, log.test(x_test, y_test, log.Iterater(x_train, y_train)))

# Three-class classification (one-vs-rest)
def fit3(log, data, target):
    """Three-class classification via one-vs-rest logistic regression.

    Splits (data, target) into train/test once, trains one binary classifier
    per class (0, 1, 2), then lets the model print the combined accuracy.
    """
    # Random seed for the split (kept from the original: a random value,
    # so the split itself is still non-deterministic between runs).
    r = int(np.random.rand(1, 1) * 100)
    x_train, x_test, y_train, y_test = train_test_split(data, target, random_state=r)

    # One-vs-rest: derive the binary training labels directly from y_train.
    # The original re-split the full dataset three more times with the same
    # seed just to recover these labels; this yields identical labels with
    # a single split.
    per_class_fx = []
    for positive in (0, 1, 2):
        y_bin = [1 if t == positive else 0 for t in y_train]
        # Train the "class `positive` vs rest" classifier.
        w = log.Iterater(x_train, y_bin)
        # Probabilities that each test sample belongs to `positive`.
        per_class_fx.append(log.test(x_test, y_test, w))

    log.prob3(y_test, per_class_fx[0], per_class_fx[1], per_class_fx[2])


if __name__ == '__main__':
    # Watermelon dataset: rows are [density, sugar content, bias term];
    # label 1 marks a positive example.
    melon_labels = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    melon_features = [
        [0.697, 0.460, 1.0], [0.774, 0.376, 1], [0.634, 0.264, 1], [0.608, 0.318, 1],
        [0.556, 0.215, 1.0], [0.403, 0.237, 1], [0.481, 0.149, 1], [0.437, 0.211, 1],
        [0.666, 0.091, 1.0], [0.243, 0.267, 1], [0.245, 0.057, 1], [0.343, 0.099, 1],
        [0.639, 0.161, 1.0], [0.657, 0.198, 1], [0.360, 0.370, 1], [0.593, 0.042, 1], [0.719, 0.103, 1],
    ]

    # Build the logistic-regression model.
    model = LogisticRegression()

    # Binary task: the watermelon set is tiny, so train and test on the
    # same data.
    print("对西瓜集进行训练并预测：")
    fit2(model, melon_features, melon_labels, melon_features, melon_labels)

    # Three-class task on the iris dataset.
    iris = load_iris()
    print("对鸢尾花集进行训练并预测：")
    fit3(model, iris.data, iris.target)