#!/usr/bin/python
# author dennis
# 2022-07-20
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from collections import defaultdict


def draw(x_test, y_test, y_predict):
    """Scatter-plot the test samples twice: true classes in red, predicted in yellow.

    Each row of x_test is [bias, feature1, feature2]; columns 1 and 2
    supply the plot coordinates.  Assumes at most 3 distinct classes
    (one marker style per class).
    """
    # Group sample rows by their true label and by their predicted label.
    by_truth = defaultdict(list)
    by_pred = defaultdict(list)
    for row, label in zip(x_test, y_test):
        by_truth[label].append(row)
    for row, label in zip(x_test, y_predict):
        by_pred[label].append(row)
    truth_styles = ['ro', 'r+', 'r*']
    pred_styles = ['yo', 'y+', 'y*']
    for i, (_, rows) in enumerate(by_truth.items()):
        pts = np.array(rows)
        plt.plot(pts[:, 1], pts[:, 2], truth_styles[i])
    for i, (_, rows) in enumerate(by_pred.items()):
        pts = np.array(rows)
        plt.plot(pts[:, 1], pts[:, 2], pred_styles[i])
    plt.xlabel('feature1')
    plt.ylabel('feature2')
    plt.show()


def to_one_hot(y):
    """Encode an integer label vector as a one-hot matrix.

    Args:
        y: 1-D numpy integer array with labels in {0, ..., K-1},
           where K = y.max() + 1.

    Returns:
        Float array of shape (len(y), K) with exactly one 1.0 per row.
    """
    n_classes = y.max() + 1
    # Row i of the identity matrix is the one-hot vector for class i,
    # so fancy-indexing the identity by y builds the whole matrix at once.
    return np.eye(n_classes)[y]


def softmax(logits):
    """Row-wise softmax of a 2-D logits array.

    Args:
        logits: array of shape (m, n_classes).

    Returns:
        Array of the same shape; each row is non-negative and sums to 1.
    """
    # Subtract the per-row maximum before exponentiating so that large
    # logits cannot overflow np.exp; the shift cancels in the ratio, so
    # the result is mathematically unchanged.
    exps = np.exp(logits - np.max(logits, axis=1, keepdims=True))
    exp_sums = np.sum(exps, axis=1, keepdims=True)
    return exps / exp_sums


def graDescend(n_inputs, n_outputs, x=None, y_one_hot=None, alpha=0.01, max_cycles=5000):
    """Fit softmax-regression weights by full-batch gradient descent.

    Args:
        n_inputs: number of input columns (features plus bias column).
        n_outputs: number of classes.
        x: design matrix of shape (m, n_inputs).  Defaults to the
           module-level ``x_train`` for backward compatibility.
        y_one_hot: one-hot labels of shape (m, n_outputs).  Defaults to
           the module-level ``Y_train_one_hot``.
        alpha: learning rate.
        max_cycles: number of gradient steps.

    Returns:
        Weight matrix of shape (n_inputs, n_outputs).  Also prints it.
    """
    if x is None:
        x = x_train
    if y_one_hot is None:
        y_one_hot = Y_train_one_hot
    # Number of training samples, used to average the gradient.
    m = len(x)
    weights = np.ones((n_inputs, n_outputs))
    for _ in range(max_cycles):
        logits = np.dot(x, weights)
        # Row-wise softmax, shifted by the row max for numerical stability.
        exps = np.exp(logits - logits.max(axis=1, keepdims=True))
        h = exps / exps.sum(axis=1, keepdims=True)
        # Gradient of the mean cross-entropy loss w.r.t. the weights.
        error = h - y_one_hot
        gradients = x.T.dot(error) / m
        weights = weights - alpha * gradients
    print(weights)
    return weights


def test(weights):
    logits = x_test.dot(weights)
    h = softmax(logits)
    y_predict = np.argmax(h, axis=1)
    accuracy_score = np.mean(y_predict == y_test)
    print(accuracy_score)
    return y_predict


if __name__ == '__main__':
    # Load iris and keep only petal length/width (data columns 2 and 3).
    iris = datasets.load_iris()
    X = iris['data'][:, (2, 3)]
    y = iris['target']
    # Prepend a constant-1 bias column so the bias is learned as a weight.
    X_with_bias = np.c_[np.ones([len(X), 1]), X]
    # 50/50 train/test split; fixed random_state for reproducibility.
    # NOTE: graDescend/test read x_train, x_test, y_test, Y_train_one_hot
    # as module-level globals, so these names must not change.
    x_train, x_test, y_train, y_test = train_test_split(X_with_bias, y, test_size=0.5, random_state=1)
    Y_train_one_hot = to_one_hot(y_train)
    Y_test_one_hot = to_one_hot(y_test)
    # Input dimension is 3 (2 features + 1 bias column).
    n_inputs = x_train.shape[1]
    # Output dimension: one per distinct class (3 for iris).
    n_outputs = len(np.unique(y_train))
    weights = graDescend(n_inputs, n_outputs)
    y_predict = test(weights)
    draw(x_test, y_test, y_predict)
