import warnings

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_class_weight
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import *
from transformers import BertModel, BertTokenizer

from Classifier import Classifier

warnings.filterwarnings("ignore")


# TensorBoard writer: logs training/validation scalars under ../logs
writer = SummaryWriter('../logs')
# Automatically select CPU or GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the dataset (CSV with a 'txt' text column and an integer 'label' column)
df = pd.read_csv('../datasets/外卖.csv')
print(df.shape)
print(df['txt'].sample(5))
text = df['txt'].values
labels = df['label'].values.astype('int')

# 定义标签编码器
le = LabelEncoder()

# 数据清洗
'''
    数据本来就很干净，这里先不写了
'''

# 数据预处理数据
model_name = '../bert-base-chinese'
model_path = '../bert-base-chinese'
tokenizer = BertTokenizer.from_pretrained(model_name)
# Encode every review into a fixed-length id sequence with [CLS]/[SEP] added,
# truncating long texts and padding short ones to max_length=100.
# Fix: `pad_to_max_length` is a deprecated boolean flag (the string 'right'
# only "worked" by being truthy) — use the current `padding='max_length'` API.
sent_id = [
    tokenizer.encode(t,
                     add_special_tokens=True,
                     max_length=100,
                     truncation=True,
                     padding='max_length')
    for t in tqdm(text)
]

# print("sent_id[0]:", sent_id[0])
# print("sent_id[1]:", sent_id[1])
# print("ids_to_tokens[0]:", tokenizer.convert_ids_to_tokens(sent_id[0]))
# print("ids_to_tokens[1]:", tokenizer.convert_ids_to_tokens(sent_id[1]))

# Build attention masks: 1 for real tokens (id > 0), 0 for padding positions.
attention_masks = [[int(token_id > 0) for token_id in seq] for seq in sent_id]

'''
sent_id is a 2-D list; each inner list is an integer id sequence whose length
equals the encoder's configured max_length.
'''
# print("len(sent_id):", len(sent_id))
# print("len(attention_masks):", len(attention_masks))

# Split ids, masks and labels into train (70%) / validation (30%) sets.
# Fix/robustness: split all three arrays in ONE call so the id/mask/label rows
# are guaranteed to stay aligned.  (The original made two separate calls and
# relied on the shared random_state + stratify producing identical permutations.)
(train_inputs, validation_inputs,
 train_masks, validation_masks,
 train_labels, validation_labels) = train_test_split(
    sent_id, attention_masks, labels,
    random_state=2024, test_size=0.3, stratify=labels)


# Batch the data with DataLoaders.
# For fine-tuning BERT the authors recommend a batch size of 16 or 32.
batch_size = 32

# Training DataLoader: wrap the tensors into a dataset and sample batches randomly.
train_data = TensorDataset(torch.tensor(train_inputs), torch.tensor(train_masks), torch.tensor(train_labels))
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)

# Validation DataLoader: sequential sampling (shuffling is pointless for eval).
validation_data = TensorDataset(torch.tensor(validation_inputs), torch.tensor(validation_masks), torch.tensor(validation_labels))
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)

# Peek at one batch to sanity-check shapes.
# Idiom fix: use the builtin next() instead of calling __next__() directly.
sent_id, mask, target = next(iter(train_dataloader))
# print(type(sent_id),type(mask),type(target))
# print(sent_id.shape,mask.shape,target.shape)


# 加载bert预训练模型
bert_model = BertModel.from_pretrained(model_path)
outputs = bert_model(sent_id, attention_mask=mask, return_dict=False)
'''
隐藏状态是指在特定时间步或层中，隐藏层中每个神经元的激活值。
在循环神经网络（RNN）中，隐藏状态是网络在处理序列数据时，传递到下一个时间步的信息。
在RNN中，隐藏状态是动态的，因为它随着序列的每个元素而更新。
在Transformer模型中，
每个标记（token）的隐藏状态是在自注意力层和前馈网络层处理后得到的表示
'''
# outputs[0]对应于模型最后一层的隐藏状态，这是模型对输入词汇的深层表示
hidden_states = outputs[0]
# outputs[1]对应于第一个令牌（通常是分类标记[CLS]）的隐藏状态
CLS_hidden_state = outputs[1]
# print("outputs:",len(outputs))
# print("outputs[0]:",outputs[0].shape)
# print("outputs[1]--[CLS]:",outputs[1].shape)

'''
以下代码
用于在基于PyTorch的BERT模型中冻结所有参数，使得在训练过程中这些参数不会被更新
'''
for param in bert_model.parameters():
    param.requires_grad = False
model = Classifier(bert_model)
# 如果可用，将模型推送到 GPU
model = model.to(device)
# 将张量推送到 GPU
sent_id = sent_id.to(device)
mask = mask.to(device)
target = target.to(device)
outputs = model(sent_id, mask)
# Adam 优化器
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# 计算类别权重
class_weights = compute_class_weight(class_weight="balanced", classes=np.unique(labels), y=labels)
print("Class Weights:", class_weights)
print("即使我全部选一个的最高正确率：{:.2f}".format(float(np.max(class_weights)/np.sum(class_weights))))
weights = torch.tensor(class_weights, dtype=torch.float)
# 将权重输到 GPU
weights = weights.to(device)
# 定义损失函数
cross_entropy = nn.NLLLoss(weight=weights)

# 计算损失
loss = cross_entropy(outputs, target.long())
# print("Loss:", loss)
# 将初始损失设为无穷大
best_valid_loss = float('inf')
'''
在注意力掩码中，我们的输入是0和1，但是在最终的计算时，会将在将无效位置的注意力权重设置为一个很小的值，通常为负无穷（-inf），以便在计算注意力分数时将其抑制为接近零的概率。
'''


'''
--------------- Training procedure ---------------
'''
# Per-epoch loss history for the training and validation sets
train_losses, valid_losses = [], []

epochs = 10


# ------------------------- training / evaluation loop -------------------------
for epoch in range(epochs):
    print("----------第",epoch+1,"轮次-----------")

    # ---- training phase ----
    model.train()
    train_total_loss, total_accuracy = 0, 0
    total_preds = []  # model outputs collected over the whole epoch
    loop_train = tqdm((train_dataloader), total=len(train_dataloader))
    for step, batch in enumerate(loop_train):
        batch = tuple(t.to(device) for t in batch)
        # batch-local name so the module-level `labels` array is not clobbered
        sent_id, mask, batch_labels = batch
        model.zero_grad()  # clear gradients (equivalent to optimizer.zero_grad() here)
        preds = model(sent_id, mask)
        loss = cross_entropy(preds, batch_labels.long())
        train_total_loss = train_total_loss + loss.item()
        loss.backward()
        optimizer.step()
        total_preds.append(preds.detach().cpu().numpy())

    avg_loss = train_total_loss / len(train_dataloader)
    total_preds = np.concatenate(total_preds, axis=0)
    writer.add_scalar('train_total_loss', train_total_loss, epoch+1)
    train_loss = avg_loss

    # ---- evaluation phase ----
    model.eval()
    val_total_loss, total_accuracy = 0, 0
    total_preds = []
    acc_labels = []  # ground-truth labels collected batch by batch
    loop_valid = tqdm((validation_dataloader), total=len(validation_dataloader))
    for step, batch in enumerate(loop_valid):
        batch = tuple(t.to(device) for t in batch)
        sent_id, mask, batch_labels = batch
        acc_labels.append(batch_labels.tolist())
        with torch.no_grad():
            preds = model(sent_id, mask)
            loss = cross_entropy(preds, batch_labels.long())
            val_total_loss = val_total_loss + loss.item()
            total_preds.append(preds.detach().cpu().numpy())
    avg_loss = val_total_loss / len(validation_dataloader)
    total_preds = np.concatenate(total_preds, axis=0)
    writer.add_scalar('val_loss', val_total_loss, epoch+1)
    valid_loss = avg_loss

    # ---- checkpoint the best model by validation loss ----
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'saved_weights_外卖.pt')

    # ---- bookkeeping and metrics ----
    train_losses.append(train_loss)
    valid_losses.append(valid_loss)
    print(f'\n训练损失: {train_loss:.3f}')
    print(f'验证损失: {valid_loss:.3f}')

    # Flatten the per-batch label lists into one flat list
    flat_labels = [y for batch_ys in acc_labels for y in batch_ys]
    y_pred = np.argmax(total_preds, axis=1)
    y_true = np.array(flat_labels)

    # Number of correct predictions
    correct = int(np.sum(y_pred == y_true))
    # Fix: sklearn's confusion_matrix signature is (y_true, y_pred) —
    # the original passed the predictions first, producing a transposed matrix.
    cm = confusion_matrix(y_true, y_pred)
    print("混淆矩阵：",cm)
    print(f'准确率: {correct/len(total_preds):.2f}')

print("")
# "训练完成！" = "Training finished!"
print("训练完成！")