import nn
import utils
import loss
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

"""
根据基础神经元实现线性回归，目前版本不支持autograd，手动书写误差传播逻辑
"""


def create_data():
    """Return the hard-coded Pokemon toy dataset, scaled down by 100.

    :return
        (x_train, y_train, x_test, y_test) as four 1-D numpy arrays of length 10
    """
    raw = {
        "x_train": [338., 333., 328., 207., 226., 25., 179., 60., 208., 606.],
        "y_train": [640., 633., 619., 393., 428., 27., 193., 66., 226., 1591.],
        "x_test": [330., 316., 305., 220., 242., 24., 176., 74., 161., 619],
        "y_test": [624., 594., 567., 418., 459., 47., 335., 141., 305., 1646],
    }
    # Divide by 100 to keep the values in a range friendly to the manual SGD.
    scaled = {key: np.array(values) / 100 for key, values in raw.items()}
    return scaled["x_train"], scaled["y_train"], scaled["x_test"], scaled["y_test"]


def load_data(args):
    """Load the requested columns from the Pokemon CSV.

    :arg
        args: iterable of column names to extract
    :return
        list of numpy arrays, one per requested column, with NaN replaced by 0
    """
    frame = pd.read_csv("datasets/pokemon/pokemon.csv")
    # fillna(0) so missing attribute values do not poison the training loop.
    return [frame[column].fillna(0).values for column in args]


def issue1(epochs=10000, weight_init=1.5, bias_init=3, alpha_init=1e-5):
    """
    Train a linear-regression model for Pokemon combat power and print the
    per-sample loss values while training.

    :arg
        epochs: number of training epochs
        weight_init: initial weight
        bias_init: initial bias
        alpha_init: initial learning rate

    :return
        net: the trained model
    """
    alpha = utils.Tensor(alpha_init)
    x_train, y_train, x_test, y_test = create_data()
    net = nn.Cell(weight_init, bias_init, inputs_shape=1)  # build the neuron
    los_fun = loss.SquaredLossFunction()
    for _ in range(epochs):
        epoch_errors = []
        for x, y in zip(x_train, y_train):
            pre = net(x)
            residual = pre - y
            # Hand-written SGD step: gradient of the squared loss w.r.t.
            # weight is x * residual, w.r.t. bias it is the residual itself.
            net.weight -= alpha * (x * residual)
            net.bias -= alpha * residual
            epoch_errors.append(los_fun.lossvalue(pre, y))
        print(epoch_errors)
    print("=====================================")
    print(net.weight)
    print(net.bias)
    # Training done; evaluate the fitted line on the held-out test points.
    loss_list = [los_fun.lossvalue(y, net(x)) for x, y in zip(x_test, y_test)]
    print("predict loss value:" + str(loss_list))
    grid = np.arange(0, 10, 0.01)
    fitted = grid * net.weight.data + net.bias.data
    plt.scatter(x_train, y_train)
    plt.plot(grid, fitted)
    plt.show()
    return net


def issue2(care_att, epochs=1000, weight_init=1.0, bias_init=3, alpha_init=1e-3):
    """
    Train a Pokemon combat-power prediction model on the Pokemon dataset.

    :arg
        care_att: attribute column names of interest; a linear model is built
                  from these attributes to the combat power ("base_total")
        epochs: number of training epochs
        weight_init: initial weight
        bias_init: initial bias
        alpha_init: initial learning rate
    :return:
        net: the trained model
    """
    # Bug fix: the original did `care_att.append("base_total")`, mutating the
    # caller's list as a side effect. Build a fresh list instead.
    columns = list(care_att) + ["base_total"]
    data = load_data(columns)
    alpha = utils.Tensor(alpha_init)
    # One weight per input attribute (the last column is the target).
    net = nn.Cell(weight_init=weight_init, bias=bias_init, inputs_shape=len(columns) - 1)
    los_fun = loss.SquaredLossFunction()
    targets = data[-1]  # "base_total" column, the value being predicted
    for i in range(epochs):
        errorList = []
        for j in range(len(data[0])):
            # Assemble the j-th sample from every attribute column.
            inputs = [data[k][j] for k in range(len(data) - 1)]
            pre = net(inputs)
            # Hand-written SGD step: squared-loss gradients w.r.t. weight/bias.
            grad_w = inputs * (pre - targets[j])
            grad_b = pre - targets[j]
            net.weight -= alpha * grad_w
            net.bias -= alpha * grad_b
            errorList.append(los_fun.lossvalue(pre, targets[j]))
        print(errorList)
    print("=====================================")
    print(net.weight)
    print(net.bias)
    return net


if __name__ == "__main__":
    class_name = ["hp", "speed", "defense", "sp_attack", "sp_defense"]
    issue1(alpha_init=3e-3)
    issue2(care_att=class_name, epochs=100, weight_init=2.0, alpha_init=3e-7)
