# -*- coding: utf-8 -*-
from settings.path import *
import torch
from transformers.models import BertModel, BertTokenizer, BertConfig
from tqdm import tqdm

from torch.utils.data import DataLoader, Dataset

import torch.nn as nn

import torch.optim as optim

from sklearn.metrics import classification_report, f1_score, accuracy_score, precision_score

from torch.optim import AdamW

# Ensure the checkpoint output directory exists before training starts.
if not os.path.exists(path_model):
    os.mkdir(path_model)

# Prefer GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# One class name per line in the label file; order defines the label indices.
class_list = [line.strip() for line in open(path_class_txt, encoding='utf-8')]

num_classes = len(class_list)
num_epochs = 2
batch_size = 128
pad_size = 32  # every sentence is padded/truncated to this token length
learning_rate = 5e-5  # learning rate
bert_model = BertModel.from_pretrained(path_bert_base_chinese)  # pretrained model
tokenizer = BertTokenizer.from_pretrained(path_bert_base_chinese)  # tokenizer
bert_config = BertConfig.from_pretrained(path_bert_base_chinese)  # model configuration
hidden_size = 768  # hidden width of bert-base — must match the loaded checkpoint


def load_raw_data(path):
    """Read a tab-separated ``text<TAB>label`` file into (text, label) pairs.

    Blank lines are skipped. Splitting on the LAST tab lets the text itself
    contain tab characters; a line with no tab at all still raises ValueError,
    matching the original behavior.

    Args:
        path: path to a UTF-8 file with one ``text\\tlabel`` pair per line.

    Returns:
        list of (str, int) tuples.
    """
    data = []
    with open(path, 'r', encoding="utf-8") as f:
        for line in tqdm(f, desc='Loading data'):
            line = line.strip()
            if not line:
                continue
            # rsplit so tab characters inside the text do not break parsing.
            text, label = line.rsplit('\t', 1)
            data.append((text, int(label)))

    return data


class TextDataset(Dataset):
    """Thin ``Dataset`` wrapper over an in-memory list of (text, label) pairs."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        text, label = self.data[idx]
        return text, label


def collate_fn(batch):
    """Tokenize a batch of (text, label) pairs into model-ready tensors.

    ``batch_encode_plus`` adds [CLS]/[SEP] automatically and pads or
    truncates every sample to exactly ``pad_size`` tokens.

    Returns:
        (input_ids, attention_mask, labels) tensors for the whole batch.
    """
    texts, labels = zip(*batch)
    encoded = tokenizer.batch_encode_plus(
        list(texts),
        padding='max_length',
        max_length=pad_size,
        truncation=True,
    )
    input_ids = torch.tensor(encoded['input_ids'])
    attention_mask = torch.tensor(encoded['attention_mask'])
    label_tensor = torch.tensor(list(labels))
    return input_ids, attention_mask, label_tensor


def bert_dataloader():
    """Build the train/test/dev DataLoaders from the raw TSV files.

    Only the training split is shuffled; all three loaders share the
    same batch size and the tokenizing ``collate_fn``.

    Returns:
        (train_dataloader, test_dataloader, dev_dataloader)
    """
    specs = [
        (path_train_txt, True),   # shuffle only the training split
        (path_test_txt, False),
        (path_dev_txt, False),
    ]
    loaders = []
    for path, shuffle in specs:
        dataset = TextDataset(load_raw_data(path))
        loaders.append(DataLoader(dataset,
                                  batch_size=batch_size,
                                  shuffle=shuffle,
                                  collate_fn=collate_fn))
    train_dataloader, test_dataloader, dev_dataloader = loaders
    return train_dataloader, test_dataloader, dev_dataloader


class BertClassifier(nn.Module):
    """BERT encoder followed by a single linear classification head."""

    def __init__(self):
        super().__init__()
        self.bert = bert_model
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        """Return unnormalized class logits of shape (batch, num_classes)."""
        encoded = self.bert(input_ids,
                            attention_mask=attention_mask,
                            return_dict=True)
        # pooler_output is the pooled [CLS] representation used for classification.
        return self.fc(encoded.pooler_output)


def bert_train():
    """Fine-tune ``BertClassifier``; evaluate on dev every 100 steps and checkpoint the best model.

    Trains for ``num_epochs`` epochs with AdamW + cross-entropy. Every 100
    optimizer steps the model is scored on the dev set and saved whenever its
    micro-F1 beats the best seen so far. After each epoch a classification
    report over the training predictions is printed.
    """
    train_dataloader, test_dataloader, dev_dataloader = bert_dataloader()
    model = BertClassifier()
    model.to(device)
    optimizer = AdamW(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    # Best dev F1 seen so far — used to decide when to checkpoint.
    best_dev_f1 = 0.0
    for epoch in range(num_epochs):

        total_loss = 0  # running training loss for this epoch
        train_preds, train_labels = [], []  # training-set predictions and gold labels
        for step, batch in enumerate(tqdm(train_dataloader, desc=f'Bert Classifier Training Epoch{epoch + 1}/{num_epochs}')):
            model.train()  # re-enable dropout after any dev evaluation
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            # Forward pass and loss.
            logits = model(input_ids, attention_mask)
            loss = criterion(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            preds = torch.argmax(logits, dim=-1)
            train_preds.extend(preds.tolist())
            train_labels.extend(labels.tolist())
            # BUGFIX: the original condition was `len(batch) % 100 == 0`, which
            # is the constant 3 (the batch tuple size) — so evaluation and
            # checkpointing never ran. Use the step counter instead.
            if (step + 1) % 100 == 0:
                print(f"Epoch {epoch + 1}/{num_epochs}")
                # Average over the steps actually completed, not the whole epoch.
                print(f"Train Loss: {total_loss / (step + 1):.4f}")
                _, f1score, accuracy, precision = model2dev(model, dev_dataloader, device)
                print(f"Dev F1: {f1score:.4f}")
                print(f"Dev Accuracy: {accuracy:.4f}")
                if f1score > best_dev_f1:
                    best_dev_f1 = f1score
                    torch.save(model.state_dict(), path_modol_random_tree_vectorizer)
                    print('模型保存成功！')
        train_report = classification_report(train_labels, train_preds,
                                             target_names=class_list, output_dict=True)

        print(train_report)


def model2dev(model, dataloader, device):
    """Evaluate ``model`` on ``dataloader`` and return metrics.

    Args:
        model: a classifier whose forward(input_ids, attention_mask) returns logits.
        dataloader: yields (input_ids, attention_mask, labels) batches.
        device: torch device to run the evaluation on.

    Returns:
        (report, f1score, accuracy, precision) where ``report`` is the
        sklearn text classification report and the other three are floats
        (F1 and precision are micro-averaged).
    """
    model.to(device)
    # BUGFIX: switch to evaluation mode so dropout/batch-norm layers are
    # disabled — the original evaluated with dropout still active, giving
    # noisy, non-deterministic dev metrics.
    model.eval()

    # Accumulate predictions and gold labels across all batches.
    preds, true_labels = [], []

    # Disable gradient tracking: faster and uses less memory.
    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Bert Classifier Evaluating ......"):
            # Move the batch to the evaluation device.
            input_ids, attention_mask, labels = batch
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)

            # Forward pass.
            logits = model(input_ids, attention_mask)

            # Predicted class = argmax over logits.
            batch_preds = torch.argmax(logits, dim=1)

            # Collect predictions and labels on CPU.
            preds.extend(batch_preds.cpu().numpy())
            true_labels.extend(labels.cpu().numpy())

    # Compute the classification report, F1, accuracy and precision.
    report = classification_report(true_labels, preds)
    f1score = f1_score(true_labels, preds, average='micro')  # micro-averaged F1
    accuracy = accuracy_score(true_labels, preds)
    precision = precision_score(true_labels, preds, average='micro')  # micro-averaged precision

    return report, f1score, accuracy, precision
