import torch
import numpy as np
import pandas as pd
import torch.nn.functional as Fun

# 1. Data preparation
# NOTE(review): pd.read_csv defaults to header=0 (first row becomes column
# names). If these .data files are header-less UCI-style splits, one sample
# per file is silently consumed as a header — confirm the file format.
df_train_data = pd.read_csv('abalone_train.data')
df_test_data = pd.read_csv('abalone_test.data')


def _feature_tensor(frame):
    # Columns 1..9 as a float32 tensor; squeeze(dim=1) mirrors the original
    # call (a no-op for a 2-D feature matrix with more than one column).
    values = frame.iloc[:, 1:10].to_numpy(dtype=float)
    return torch.FloatTensor(values).squeeze(dim=1)


def _label_tensor(frame):
    # Encode Sex in place (M/F/I -> 0/1/2) and return it as a LongTensor.
    frame['Sex Code'] = frame['Sex'].map({'M': 0, 'F': 1, 'I': 2})
    return torch.LongTensor(frame['Sex Code'].to_numpy(dtype=int))


# Features must be extracted BEFORE the 'Sex Code' column is appended,
# otherwise iloc[:, 1:10] would pick up the freshly added column.
train_data_x = _feature_tensor(df_train_data)
test_data_x = _feature_tensor(df_test_data)

train_data_y = _label_tensor(df_train_data)
test_data_y = _label_tensor(df_test_data)

input_data = train_data_x
label = train_data_y

if torch.cuda.is_available():
    input_data = input_data.cuda()
    label = label.cuda()
    test_data_x = test_data_x.cuda()


# 2. 定义BP神经网络
class BPNet(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output, dropout=0.5):
        super(BPNet, self).__init__()

        self.dropout = torch.nn.Dropout(dropout)

        self.hidden_1 = torch.nn.Linear(n_feature, n_hidden)  # hidden layer
        self.bn1 = torch.nn.BatchNorm1d(n_hidden)

        self.hidden_2 = torch.nn.Linear(n_hidden, n_hidden // 4)  # hidden layer
        self.bn2 = torch.nn.BatchNorm1d(n_hidden // 4)

        self.hidden_3 = torch.nn.Linear(n_hidden // 4, n_hidden // 16)  # hidden layer
        self.bn3 = torch.nn.BatchNorm1d(n_hidden // 16)

        self.out = torch.nn.Linear(n_hidden // 16, n_output)  # 定义输出层网络

    def forward(self, x):
        x = Fun.relu(self.hidden_1(x))  # relu
        x = self.dropout(self.bn1(x))
        x = Fun.relu(self.hidden_2(x))  # activation function for hidden layer
        x = self.dropout(self.bn2(x))
        x = Fun.relu(self.hidden_3(x))  # activation function for hidden layer
        x = self.dropout(self.bn3(x))
        x = self.out(x)  # 输出层不用激活函数
        return x


# 3. Optimizer and loss function
# n_feature: input feature dim, n_hidden: hidden units, n_output: class count
model = BPNet(n_feature=8, n_hidden=512, n_output=3, dropout=0.3)
if torch.cuda.is_available():
    model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02, weight_decay=1e-5)
loss_func = torch.nn.CrossEntropyLoss()  # standard choice for multi-class

# 4. Training loop
for i in range(20001):
    model.train()  # ensure Dropout/BatchNorm are in training mode each step
    out = model(input_data)  # forward pass on the training set
    loss = loss_func(out, label)  # compare logits against labels

    if i % 1000 == 0:
        if i % 5000 == 0:
            # BUG FIX: the original evaluated the test set while the model
            # was still in train mode, which (a) applied Dropout at test
            # time and (b) updated BatchNorm running statistics with *test*
            # data — a test-set leak. Evaluate in eval mode under no_grad.
            model.eval()
            with torch.no_grad():
                test_out = model(test_data_x)
            prediction = torch.max(test_out, 1)[1]
            if torch.cuda.is_available():
                # move GPU results back to the CPU for numpy
                prediction = prediction.cpu()
            pred_y = prediction.data.numpy()
            target_y = test_data_y.data.numpy()

            accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
            # loss.item() extracts the Python float (avoids formatting a tensor)
            print("iteration: {}, training loss: {:.6f}, accuracy: {:.6f}".format(i, loss.item(), accuracy))
            model.train()  # restore training mode before the update step
        else:
            print("iteration: {}, training loss: {:.6f}".format(i, loss.item()))

    optimizer.zero_grad()  # clear accumulated gradients
    loss.backward()        # back-propagate
    optimizer.step()       # apply the Adam update

# 5. Final predictions
if torch.cuda.is_available():
    torch.cuda.empty_cache()

# BUG FIX: the original ran this forward pass with the model still in train
# mode, so Dropout randomly zeroed activations and BatchNorm used the test
# batch's own statistics — the reported accuracy was noisy and biased.
# Evaluate in eval mode with gradients disabled.
model.eval()
with torch.no_grad():
    # out holds raw logits; Fun.softmax(out) would convert them to probabilities
    out = model(test_data_x)
prediction = torch.max(out, 1)[1]  # [1] -> argmax index, [0] would be the value
if torch.cuda.is_available():
    # move GPU results back to the CPU for numpy
    prediction = prediction.cpu()
pred_y = prediction.data.numpy()
target_y = test_data_y.data.numpy()

# 6. Measure accuracy
accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
print("=> prediction accuracy: {:.6f}".format(accuracy))
print(model)
