import torch
import pandas as pd
from torch import nn
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split

# left 是目标值
from NeuralNetworks.HrModel import Model

# Load the HR dataset; the `left` column (did the employee leave?) is the target.
data = pd.read_csv('../dataset/HR_comma_sep.csv')

# One-hot encode the two categorical features (`salary`, `part`), append the
# dummy columns, then drop the original string columns so every feature is numeric.
data = data.join(pd.get_dummies(data.salary))
data = data.join(pd.get_dummies(data.part))
del data['salary']
del data['part']

# Target: `left` as an (n_samples, 1) column vector, as expected by BCELoss.
Y_data = data.left.values.reshape(-1, 1)
# Feature matrix: every column except the target.
# .astype('float32'): recent pandas emits bool dummy columns, which would make
# `.values` an object array that torch.from_numpy rejects — force a float matrix.
X_data = data[[c for c in data.columns if c != 'left']].values.astype('float32')
# Hold out a test split (sklearn default: 25% test).
train_x, test_x, train_y, test_y = train_test_split(X_data, Y_data)

# Convert all four numpy splits to float32 tensors in one pass.
train_y, train_x, test_y, test_x = (
    torch.from_numpy(split).type(torch.float32)
    for split in (train_y, train_x, test_y, test_x)
)
# Training pipeline: batches of 64, reshuffled every epoch.
train_hr_dateset = TensorDataset(train_x, train_y)
train_hr_dataLoader = DataLoader(train_hr_dateset, batch_size=64, shuffle=True)
# Test pipeline: same batch size, no shuffling needed for evaluation.
test_dataset = TensorDataset(test_x, test_y)
test_data_loader = DataLoader(test_dataset, batch_size=64)

# ---- Training ----
epochs = 100  # number of full passes over the training set
model, optim = Model.get_model(0.0001)  # project model + its optimizer (lr=0.0001)
# Binary classification with a sigmoid output layer -> BCELoss.
# NOTE(review): BCELoss assumes model outputs are already in (0, 1) — confirm
# the project model ends in a sigmoid.
loss_fn = nn.BCELoss()
for epoch in range(epochs):
    for x, y in train_hr_dataLoader:
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optim.zero_grad()  # clear gradients accumulated by the previous step
        loss.backward()    # backpropagate
        optim.step()       # update parameters
    # Report epoch losses without tracking gradients.
    with torch.no_grad():
        train_loss = loss_fn(model(train_x), train_y).item()
        # Fix: the test split was prepared but never evaluated — without this,
        # overfitting is invisible in the log.
        test_loss = loss_fn(model(test_x), test_y).item()
        print('epoch: ', epoch, 'loss:  ', train_loss, 'test_loss:  ', test_loss)
