import random
import pandas as pd
from collections import Counter
import torch
import torch.nn as nn
from bert_chinese_encode import get_bert_encode_for_single
from rnn_model import RNN
from torch.optim import SGD

# Load the training data (tab-separated, no header row:
# column 0 = label, column 1 = text — inferred from usage below; TODO confirm)
train_data_path = "./train_data.csv"
train_data= pd.read_csv(train_data_path, header=None, sep="\t")

# Print the positive/negative label distribution (debugging aid)
# print(dict(Counter(train_data[0].values)))

# Convert the DataFrame into a list of [label, text] rows
train_data = train_data.values.tolist()
# print(train_data[:10])

# Learning rate of 0.005 for SGD
learning_rate = 0.005

# Model hyperparameters
input_size = 768  # dimensionality of the BERT encoder output
hidden_size = 128  # hidden-state size (chosen by hand)
n_categories = 2  # number of target classes

rnn = RNN(input_size, hidden_size, n_categories)  # instantiate the model
# outputs, hidden = rnn(input, hidden)
optimizer = SGD(rnn.parameters(), lr=learning_rate)

# Use NLLLoss as the criterion.
# NOTE: NLLLoss expects log-probabilities — CrossEntropyLoss is equivalent
# to Softmax + Log + NLLLoss combined, so the RNN presumably ends with
# LogSoftmax (defined in rnn_model; verify there).
criterion = nn.NLLLoss()


def train(category_tensor, text_tensor):
    """
    Run one training step on a single sample.

    :param category_tensor: ground-truth label tensor, shape (1,)
    :param text_tensor: encoded text; iterated along dim 1, so it is
                        assumed to be (1, seq_len, input_size) — TODO confirm
                        against get_bert_encode_for_single
    :return: tuple of (final model output, loss as a Python float)
    :raises ValueError: if the sequence is empty (the original code would
                        hit a NameError on ``output`` in that case)
    """
    hidden = rnn.init_hidden()
    # Clear gradients accumulated from the previous step; equivalent to
    # rnn.zero_grad() because the optimizer holds exactly rnn.parameters().
    optimizer.zero_grad()

    seq_len = text_tensor.size(1)
    if seq_len == 0:
        # Guard: with no tokens, `output` below would be undefined.
        raise ValueError("text_tensor must contain at least one token")

    # Feed the sequence one token at a time, carrying the hidden state.
    for i in range(seq_len):
        output, hidden = rnn(text_tensor[0][i].unsqueeze(0), hidden)

    # NLLLoss on the output from the final time step only.
    loss = criterion(output, category_tensor)
    loss.backward()
    optimizer.step()

    return output, loss.item()


def random_train_example(train_data):
    """
    Draw one random sample and convert its parts to tensors.

    :param train_data: list of [label, text] rows
    :return: (category, text, category_tensor, text_tensor)
    """
    sample = random.choice(train_data)
    category, text = sample

    # Encode the raw text with the BERT encoder.
    text_tensor = get_bert_encode_for_single(text)
    # Wrap the label as a 1-element LongTensor for NLLLoss.
    category_tensor = torch.tensor([int(category)])

    return category, text, category_tensor, text_tensor

if __name__ == '__main__':
    # Draw a single random sample and run one training step on it.
    label, sentence, label_tensor, sentence_tensor = random_train_example(train_data)
    result = train(label_tensor, sentence_tensor)
    print(result)