import pandas as pd
import numpy as np
import torch
from collections import Counter
from sklearn import datasets
import torch.nn.functional as Fun
import torch.nn.functional as F
import torch.optim as optim
from torch import nn

# 1. Data preparation
data = pd.read_csv('./data.csv')  # load the dataset from a CSV file
# Feature matrix: every row, columns from index 1 onward.
# NOTE(review): this assumes column 0 is an id/index column and that the
# 'labels' column is NOT among columns 1.. — verify against the CSV layout.
x_data = data.values[:, 1:]
y_data = data['labels'].values  # target labels, one per row
label_number = len(set(y_data))  # number of distinct label values
# Cast features to float32 once, directly in NumPy. The previous
# torch.FloatTensor(x_data).numpy() round-trip allocated a tensor only to
# extract the same float32 array back out of it.
X = np.asarray(x_data, dtype=np.float32)
y = y_data


# Split the data into training and test sets
# (e.g. 15 samples -> 12 train / 3 test at the 0.8 ratio).
train_ratio = 0.8
index = np.random.permutation(X.shape[0])  # shuffled row indices; X.shape[0] is the row count
split = int(X.shape[0] * train_ratio)      # boundary between train and test indices
train_index = index[:split]  # indices of the training rows
test_index = index[split:]   # indices of the test rows
X_train, y_train = X[train_index], y[train_index]  # training subset
X_test, y_test = X[test_index], y[test_index]      # test subset

# Define the neural network model
class Net(nn.Module):
    """Three-layer fully connected classifier: input -> hidden -> hidden -> output logits."""

    def __init__(self, input=3, hidden=20, output=3):
        super(Net, self).__init__()
        # Seed the global RNG so every instance starts from identical weights
        # (makes runs reproducible).
        torch.manual_seed(2)
        self.fc1 = nn.Linear(input, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, output)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of feature vectors."""
        hidden1 = F.relu(self.fc1(x))        # ReLU adds the non-linearity that gives the model expressive power
        hidden2 = F.relu(self.fc2(hidden1))
        return self.fc3(hidden2)             # no softmax here: CrossEntropyLoss expects raw logits

# Build the model, the loss function, and the optimizer.
# CrossEntropyLoss plays the role that squared error plays in least squares,
# but for multi-class classification over raw logits.
model = Net(3, 20, 3)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)

# 2. Train the model.
num_epochs = 50

# The whole training set fits in memory, so build the tensors once instead of
# reconstructing the same tensors on every epoch (loop-invariant hoist).
inputs = torch.from_numpy(X_train)   # float32 feature matrix
labels = torch.from_numpy(y_train)   # integer class labels, as CrossEntropyLoss expects

for epoch in range(num_epochs):
    optimizer.zero_grad()            # clear gradients left over from the previous step
    outputs = model(inputs)          # forward pass: raw class scores per sample
    if epoch == 0:
        # One-off debug dump of the untrained model's outputs; replaces the
        # old `is_` flag whose only purpose was to fire on the first iteration.
        print(outputs)
    loss = criterion(outputs, labels)  # how far predictions are from the true labels
    loss.backward()                    # back-propagate gradients
    optimizer.step()                   # gradient-descent parameter update
    if epoch % 10 == 0:                # log progress every 10 epochs
        print("Epoch: %d, Loss: %.4f" % (epoch, loss.item()))


# 3. Evaluate the trained model on the held-out test set.
with torch.no_grad():  # inference only — skip gradient bookkeeping
    inputs = torch.from_numpy(X_test)   # test-set features
    labels = torch.from_numpy(y_test)   # test-set ground-truth labels
    outputs = model(inputs)             # class scores for each test sample
    # argmax over the class dimension gives the predicted label per row
    predictions = outputs.argmax(1)
    # fraction of rows where prediction and label agree
    accuracy = (predictions == labels).float().mean()
    print("X_test: \n", X_test)
    print("labels: \n", labels)
    print("predictions: \n", predictions)
    print("Accuracy: %.2f %%" % (accuracy.item() * 100))


# Example of the first-epoch debug output: each row is one sample's raw scores,
# i.e. [score for label 0, score for label 1, score for label 2].
# tensor([[ -5.0933,   2.8192,  -2.3504],
#         [  1.7075, -27.4904, -12.6824],
#         [  2.3996, -24.5835, -12.2874]])