from datetime import datetime

import torch
from keyring.core import load_config
from sklearn.metrics import f1_score, accuracy_score, recall_score
from torch import nn
from tqdm import tqdm
from transformers import BertTokenizer, BertModel
from torch.optim.adamw import AdamW

import warnings

from src.Commons.base_config_loader import BaseConfigLoader
from src.Commons.data_util import replace_underscore_and_dot
from src.process_data.bert_process_data import use_get_dataloader

# Silence library warnings globally (e.g. transformers/sklearn deprecation noise)
warnings.filterwarnings("ignore")
# Module-level singletons created at import time: the application config loader
# and the pretrained BERT backbone loaded once from the configured local path.
# NOTE(review): loading BERT at import time makes importing this module slow and
# requires the configured path to exist — confirm this is intentional.
base_config = BaseConfigLoader()
model_bert_src_path = base_config.get("system.resources.bert.models.model_bert_src_path")
bert_model = BertModel.from_pretrained(model_bert_src_path)

class BertClassifierModel(nn.Module):
    """Text classifier: a frozen BERT encoder with a trainable linear head.

    By default every instance shares the module-level ``bert_model`` (loaded
    once at import time), so only the linear head holds trainable parameters.
    """

    def __init__(self, bert=None, num_classes: int = 10):
        """
        :param bert: optional BERT-like encoder exposing ``config.hidden_size``,
            ``parameters()`` and a call returning an object with
            ``pooler_output``; defaults to the shared module-level ``bert_model``
        :param num_classes: number of output classes (default 10, as before)
        """
        super().__init__()
        self.bert = bert if bert is not None else bert_model
        # Classification head over BERT's pooled [CLS] representation
        self.linear = nn.Linear(self.bert.config.hidden_size, num_classes)

        # Freeze all encoder parameters — only the linear head is optimized
        for param in self.bert.parameters():
            param.requires_grad = False

    def forward(self, input_ids, attention_mask):
        """
        :param input_ids: token-id tensor of shape (batch_size, seq_len)
        :param attention_mask: attention-mask tensor of shape (batch_size, seq_len)
        :return: logits tensor of shape (batch_size, num_classes)
        """
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # pooler_output is the [CLS] hidden state after a dense layer + Tanh
        return self.linear(outputs.pooler_output)

def get_bert_model(texts=None, max_length=5):
    """Smoke test: batch-tokenize sample sentences and run the classifier once.

    :param texts: list of raw sentences; defaults to the two demo sentences
    :param max_length: pad/truncate length (default 5 — deliberately tiny demo)
    :return: raw logits tensor of shape (len(texts), num_classes)
    """
    if texts is None:
        texts = ["程序员的时间，是一个很重要的问题", "我爱中国这边热土"]
    tokenizer = BertTokenizer.from_pretrained(model_bert_src_path)
    # FIX: the original passed the list to encode_plus, which interprets a list
    # as a single (pre-tokenized / paired) sequence rather than a batch.
    # Calling the tokenizer directly batch-encodes each sentence separately.
    tokenizer_output = tokenizer(
        texts,
        padding="max_length",
        truncation=True,
        max_length=max_length,
        return_tensors="pt",
        return_special_tokens_mask=True,
    )
    input_ids = tokenizer_output["input_ids"]
    attention_mask = tokenizer_output["attention_mask"]

    model = BertClassifierModel()
    # Inference only: disable dropout and skip gradient tracking
    model.eval()
    with torch.no_grad():
        logits = model(input_ids, attention_mask)
    print(f"logits:{logits}, logits.shape:{logits.shape}")

    probs = logits.softmax(dim=-1)
    print(f"probs:{probs}, probs.shape:{probs.shape}")

    return logits

def model_train(model, dataloader, device, model_save_path):
    """Train the classifier head; checkpoint whenever the rolling F1 improves.

    Metrics (F1/accuracy/recall/mean loss) are computed over a rolling window
    that is reported and reset every 100 batches and on the final batch.

    :param model: BertClassifierModel (BERT weights frozen in its __init__)
    :param dataloader: yields (input_ids, attention_mask, labels) batches
    :param device: torch.device to run training on
    :param model_save_path: template path used to derive the checkpoint path
    """
    model.to(device)
    # Optimize only trainable parameters — the encoder is frozen, so this is
    # just the linear head (lr 2e-3 is reasonable for a head-only fit)
    optimizer = AdamW(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=2e-3,
    )
    criterion = nn.CrossEntropyLoss()
    num_epochs = 1  # same as the original Epoches=2 with range(1, Epoches)
    best_f1 = 0.0  # best rolling-window F1 seen so far
    start_time = datetime.now()
    print(f"模型训练开始时间:{start_time}, 共:{num_epochs} 轮次；{len(dataloader)} 批次，每批样本数：{dataloader.batch_size}")
    for epoch in range(1, num_epochs + 1):
        model.train()
        window_loss = 0.0
        window_samples = 0
        window_preds = []
        window_labels = []
        # NOTE: the original desc embedded a batch counter that tqdm never
        # updated after creation; the desc now reports only epoch/start time.
        for batch_idx, (input_ids, attention_mask, labels) in enumerate(
                tqdm(dataloader, desc=f"bert开始训练，第{epoch}轮, 当前时刻：{datetime.now()}"), start=1):
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            logits = model(input_ids, attention_mask)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            # Weight loss by batch size so the window average is per-sample
            window_loss += loss.item() * len(input_ids)
            window_samples += len(input_ids)
            window_preds.extend(logits.argmax(dim=-1).cpu().tolist())
            window_labels.extend(labels.cpu().tolist())

            # Report every 100 batches or on the final batch. FIX: enumerate
            # starts at 1, so the last index is len(dataloader) — the original
            # compared against len(dataloader) - 1 and skipped the last batch.
            if batch_idx % 100 == 0 or batch_idx == len(dataloader):
                print(f"当前{epoch} 轮次, 批次:{batch_idx}, 批次总损失:{window_loss:.4f}")
                f1 = round(f1_score(window_labels, window_preds, average='macro'), 3)
                acc = round(accuracy_score(window_labels, window_preds), 3)
                recall = round(recall_score(window_labels, window_preds, average='macro'), 3)
                # FIX: the original divided the sample-weighted loss by the
                # batch count; dividing by the sample count gives a true mean.
                loss_avg = round(window_loss / window_samples, 3)
                print(f"当前{epoch} 轮次, 批次:{batch_idx}, 批次F1值:{f1}, 准确率:{acc}, 召回率:{recall}, 平均损失:{loss_avg}")

                # Reset the rolling window
                window_loss = 0.0
                window_samples = 0
                window_preds = []
                window_labels = []

                # Checkpoint on improvement
                if f1 > best_f1:
                    best_f1 = f1
                    # FIX: derive the dated path from the original template each
                    # time — the original mutated model_save_path, re-applying
                    # the substitution to an already-dated path on later saves.
                    save_path = replace_underscore_and_dot(model_save_path, datetime.now().strftime("%Y%m%d"))
                    base_config.set("system.resources.bert.models.model_bert_path", save_path)
                    torch.save(model.state_dict(), save_path)
                    print(f"当前{epoch} 轮次, 批次:{batch_idx}, 模型最佳F1:{best_f1}，模型已保存")

def use_model_train():
    """Wire everything together and launch training on the configured device."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    classifier = BertClassifierModel()
    # Only the training split is used here; test/dev loaders are ignored.
    train_dataloader, _test_dataloader, _dev_dataloader = use_get_dataloader()
    save_path = base_config.get("system.resources.bert.models.model_bert_path")
    model_train(classifier, train_dataloader, device, save_path)


def rf_predict(context: str):
    """Random-forest prediction entry point. Not implemented yet (stub).

    :param context: raw input text to classify
    :return: None (placeholder until the RF pipeline is wired in)
    """
    return None


def _rf_predict(context: str, config: BaseConfigLoader, model_rf_path: str, model_tfidf_vocab_path: str, class_path: str):
    """Internal random-forest prediction helper. Not implemented yet (stub).

    :param context: raw input text to classify
    :param config: application configuration loader
    :param model_rf_path: path to the serialized random-forest model
    :param model_tfidf_vocab_path: path to the fitted TF-IDF vocabulary
    :param class_path: path to the class-label mapping file
    """

    pass



if __name__ == '__main__':
    # Manual smoke-test entry point: uncomment the call you want to exercise.
    # get_bert_model()

    # use_model_train()

    # Currently only reads the configured checkpoint path (no side effects).
    model_bert_path = base_config.get("system.resources.bert.models.model_bert_path")
    # print(f"model_save_path:{model_bert_path}")
    # model_save_path = replace_underscore_and_dot(model_bert_path, datetime.now().strftime("%Y%m%d"))
    # print(f"model_save_path:{model_save_path}")
    # base_config.set("system.resources.bert.models.model_bert_path", model_save_path)
