import numpy as np
from sklearn.linear_model import LogisticRegression as SK_LR
import matplotlib.pyplot as plt


def load_data(file, intercept=True):
    """
    Load a tab-separated data file: every column but the last is a float
    feature, the last column is an integer class label.

    :param file: path to the tab-separated text file
    :param intercept: if True, prepend a constant 1 to every feature row so
                      the first weight acts as the intercept term
    :return: tuple (features ndarray of shape (m, n), labels ndarray of shape (m, 1))
    """
    data_arr = []
    label_arr = []
    with open(file, 'r') as f:
        for line in f:
            # strip() instead of the original line[:-1]: robust to a missing
            # trailing newline on the last line and to '\r\n' line endings
            line = line.strip()
            if not line:
                continue
            arr = [float(num) for num in line.split('\t')]
            features = arr[:-1]
            if intercept:
                features.insert(0, 1)
            data_arr.append(features)
            label_arr.append([int(arr[-1])])

    return np.array(data_arr), np.array(label_arr)


class LogisticRegression(object):
    """Logistic regression trained by (stochastic) gradient ascent, with an
    sklearn comparison helper and a 2-D decision-boundary plot."""

    @staticmethod
    def sigmoid(x):
        """Logistic function: f(x) = 1 / (1 + e^(-x))."""
        return 1 / (1 + np.exp(-x))

    def classify_vector(self, x):
        """Classify a pre-computed linear score x: probability > 0.5 -> class 1."""
        return 1 if self.sigmoid(x) > 0.5 else 0

    @staticmethod
    def regress_line(x, weight):
        """
        Decision boundary of the classifier in the 2-D feature plane.

        The sigmoid f(z) = 1 / (1 + e^(-z)) equals 0.5 at z = 0, so the
        boundary is z = w0 + w1*x + w2*y = 0, i.e. y = (-w0 - w1*x) / w2.

        :param x: x coordinate(s) on the boundary (scalar or ndarray)
        :param weight: weight vector of shape (3,) or column vector (3, 1)
        :return: the corresponding y coordinate(s)
        """
        # ravel() handles both (3,) and (3, 1) weights, replacing the
        # original shape-dependent branching
        w = np.asarray(weight).ravel()
        return (-w[0] - w[1] * x) / w[2]

    def grad_ascent(self, data_mat, label_mat, alpha=0.001, max_cycle=500):
        """
        Batch gradient ascent: each step uses the whole training set.

        :param data_mat: feature matrix of shape (m, n)
        :param label_mat: label column vector of shape (m, 1)
        :param alpha: fixed learning rate
        :param max_cycle: number of full-batch iterations
        :return: weight column vector of shape (n, 1)
        """
        m, n = data_mat.shape
        weight = np.ones((n, 1))
        for _ in range(max_cycle):
            # gradient of the log-likelihood is X^T (y - sigmoid(Xw))
            error = label_mat - self.sigmoid(np.dot(data_mat, weight))
            weight = weight + alpha * np.dot(data_mat.T, error)
        return weight

    def stochastic_grad_ascent(self, data_mat, label_mat, alpha=0.01):
        """
        Stochastic gradient ascent: one update per training row, a single
        pass over the data (the result is generally not optimal).

        :param data_mat: feature matrix of shape (m, n)
        :param label_mat: label column vector of shape (m, 1)
        :param alpha: fixed learning rate
        :return: weight vector of shape (n,)
        """
        m, n = data_mat.shape
        weight = np.ones(n)
        for i in range(m):
            # np.dot of two 1-d vectors is the scalar score; this replaces the
            # original round-trip through a list of single-element columns
            error = label_mat[i][0] - self.sigmoid(np.dot(data_mat[i], weight))
            weight = weight + alpha * error * data_mat[i]
        return weight

    def improved_stochastic_grad_ascent(self, data_mat, label_mat, numiter=500):
        """
        Improved stochastic gradient ascent:
          1. multiple passes over the data (numiter of them)
          2. learning rate decays as training progresses
          3. each update uses a randomly sampled row instead of a fixed order

        :param data_mat: feature matrix of shape (m, n)
        :param label_mat: label column vector of shape (m, 1)
        :param numiter: number of passes
        :return: weight vector of shape (n,)
        """
        m, n = data_mat.shape
        weight = np.ones(n)
        for iter_num in range(numiter):
            for i in range(m):
                # decays with progress but the +0.01 keeps it from reaching 0
                alpha = 4 / (1.0 + iter_num + i) + 0.01
                j = np.random.randint(m)  # sample with replacement
                error = label_mat[j][0] - self.sigmoid(np.dot(data_mat[j], weight))
                weight = weight + alpha * error * data_mat[j]
        return weight

    def train_predict(self, train_data, train_label):
        """
        Train with improved stochastic gradient ascent, then report the
        error rate measured on the same training data.

        :return: training error rate in [0, 1]
        """
        weight = self.improved_stochastic_grad_ascent(train_data, train_label)

        error_count = 0
        for i, row in enumerate(train_data):
            # 1-d dot 1-d gives the scalar score directly
            if self.classify_vector(np.dot(weight, row)) != train_label[i][0]:
                error_count += 1
        error_rate = error_count / len(train_data)
        print('错误率:{}'.format(error_rate))
        return error_rate

    def multi_test(self, train_data, train_label, num_test=10):
        """
        Run train_predict num_test times (training is randomized) and print
        the average error rate.

        :param train_data: feature matrix of shape (m, n)
        :param train_label: label column vector of shape (m, 1)
        :param num_test: number of repetitions
        """
        error_rate_sum = 0
        for _ in range(num_test):
            error_rate_sum += self.train_predict(train_data, train_label)

        print('after {} iterations the avg error is {}'.format(num_test, error_rate_sum / num_test))

    @staticmethod
    def sklearn_lr_predict(data, label):
        """
        Fit sklearn LogisticRegression under several penalty/solver
        combinations and print each model's training error rate.

        :param data: feature matrix of shape (m, n)
        :param label: label column vector of shape (m, 1)
        """
        # sklearn expects a 1-d label array; a column vector triggers a
        # DataConversionWarning (or an error on recent versions)
        y = np.ravel(label)

        models = [
            SK_LR(),
            # BUG FIX: l1 is not supported by the default lbfgs solver on
            # modern sklearn; liblinear is the classic l1-capable solver
            SK_LR(penalty='l1', solver='liblinear'),
            SK_LR(penalty='l2', solver='newton-cg'),
            SK_LR(penalty='l2', solver='lbfgs'),
            SK_LR(penalty='l2', solver='sag'),
            SK_LR(solver='saga'),
        ]

        error_rates = [np.mean(model.fit(data, y).predict(data) != y)
                       for model in models]

        # BUG FIX: the original format string had only 5 placeholders for
        # 6 error rates, silently dropping the saga result
        print('sklearn 错误率:{} {} {} {} {} {}'.format(*error_rates))

    def matplot(self, data_mat, label_mat):
        """
        Scatter the two classes (features are columns 1 and 2; column 0 is
        the intercept) and draw the fitted decision boundary.

        :param data_mat: feature matrix of shape (m, 3) including intercept
        :param label_mat: label column vector of shape (m, 1)
        """
        weight = self.improved_stochastic_grad_ascent(data_mat, label_mat)

        x1, y1, x2, y2 = [], [], [], []
        for i, label in enumerate(label_mat):
            xs, ys = (x1, y1) if int(label[0]) == 1 else (x2, y2)
            xs.append(data_mat[i][1])
            ys.append(data_mat[i][2])

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(x1, y1, s=30, c='red', marker='s')
        ax.scatter(x2, y2, s=30, c='green', marker='s')

        x = np.arange(-3.0, 3.0)
        ax.plot(x, self.regress_line(x, weight))
        plt.show()


def run():
    """Train the hand-rolled classifier on the horse-colic data set and
    compare it against several sklearn configurations."""
    # txt_file = './data/B/chp5/testSet.txt'
    # data_mat, label_mat = load_data(txt_file)
    # logistic_regress.matplot()

    train_file = './data/chp5/horseColicTraining.txt'
    features, labels = load_data(train_file, intercept=False)

    model = LogisticRegression()
    model.multi_test(features, labels)
    # sklearn baseline for comparison
    model.sklearn_lr_predict(features, labels)


if __name__ == '__main__':
    run()
