from typing import Any, Optional

import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.amp import GradScaler, autocast
from torchmetrics import Accuracy, F1Score, Recall
# from sklearn.metrics import f1_score, recall_score, accuracy_score
import tqdm
import swanlab

from dataset import TelecomFraudDataset, TelecomFraudData
from config import Config
from model import FraudClassifier

# Start the swanlab experiment tracker; the run records the optimizer choice,
# the base (pretrained) model name and the classifier class used for this run.
run = swanlab.init(
    project="transformer_telecom_fraud_texts",
    config=dict(
        optimizer=Config.Train.optimizer,
        base_model=Config.Train.model_name,
        model=FraudClassifier,
    ),
)

class TrainTask:
    """Training / evaluation loop for the telecom-fraud text classifier.

    Builds the model, loss, metrics, optimizer and dataloaders from `Config`,
    then alternates train and test epochs, logging step and epoch metrics to
    swanlab. Supports optional float16 mixed-precision training.
    """

    def __init__(self,
                 epochs: int = Config.Train.epochs,
                 batch_size: int = Config.Train.batch_size,
                 device: Optional[Any] = None):
        """
        Args:
            epochs: number of training epochs to run.
            batch_size: batch size for both the train and test dataloaders.
            device: explicit device (str or torch.device). When None, the
                best available backend is auto-detected: cuda -> xpu -> cpu.
        """
        self.epochs = epochs
        self.batch_size = batch_size

        if device is None:
            if torch.cuda.is_available():
                device = 'cuda'
            elif hasattr(torch, 'xpu') and torch.xpu.is_available():
                device = 'xpu'
            else:
                device = 'cpu'

        self.device = torch.device(device) if isinstance(device, str) else device
        self.model = FraudClassifier().to(self.device)

        # Use the normalized self.device everywhere (the raw `device` may
        # still be a plain string at this point).
        self.loss_fn = nn.CrossEntropyLoss().to(self.device)
        self.acc = Accuracy(task='multiclass', num_classes=5).to(self.device)
        self.f1 = F1Score(task='multiclass', num_classes=5, average='macro').to(self.device)
        self.recall = Recall(task='multiclass', num_classes=5, average='macro').to(self.device)

        # Pick the optimizer class and its learning rate from the config.
        optimizer_cls = {
            'AdamW': optim.AdamW,
            'Adam': optim.Adam
        }[Config.Train.optimizer]
        lr = Config.Train.learning_rate_config[Config.Train.optimizer]
        self.optimizer = optimizer_cls(params=self.model.parameters(), lr=lr)
        # Only exercised on the float16 AMP path; harmless to create otherwise.
        self.scaler = GradScaler()

        self.train_dataloader = DataLoader(TelecomFraudDataset('train'), batch_size=batch_size)
        self.test_dataloader = DataLoader(TelecomFraudDataset('test'), batch_size=batch_size)

        # Move the whole test set to the target device once, so every test
        # epoch skips host->device copies.
        # NOTE(review): this keeps the entire test set resident in device
        # memory — acceptable only while the test split stays small.
        self.all_test_data = []
        for encoding in self.test_dataloader:
            self.all_test_data.append((
                encoding['input_ids'].to(self.device),
                encoding['attention_mask'].to(self.device),
                encoding['labels'].to(self.device),
            ))

    def train_epoch(self, epoch: int) -> tuple[float, float]:
        """Run one training epoch.

        Args:
            epoch: 1-based epoch index (used only for progress display).

        Returns:
            (mean loss, mean batch accuracy) over the epoch.
        """
        self.model.train()
        avg_loss: list[float] = []
        avg_acc: list[float] = []
        for step, encoding in enumerate(
                tqdm.tqdm(self.train_dataloader, desc=f'[train] epoch: {epoch}/{self.epochs}', mininterval=0.1),
                start=1):
            self.optimizer.zero_grad()

            encoding: TelecomFraudData = encoding

            input_ids = encoding['input_ids'].to(self.device)
            attention_mask = encoding['attention_mask'].to(self.device)
            labels = encoding['labels'].to(self.device)

            if Config.Train.amp != 'float16':
                # Full-precision (or bfloat16) training.
                pred = self.model(input_ids=input_ids, attention_mask=attention_mask)
                loss = self.loss_fn(pred, labels)
                loss.backward()
                # BUGFIX: clip BEFORE stepping — clipping after
                # optimizer.step() has no effect on the applied update.
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                self.optimizer.step()
            else:
                # float16 mixed-precision training.
                with autocast(device_type=self.device.type, dtype=torch.float16):
                    pred = self.model(input_ids=input_ids, attention_mask=attention_mask)
                    loss = self.loss_fn(pred, labels)

                self.scaler.scale(loss).backward()
                # Unscale first so clipping sees true gradient norms,
                # then clip before scaler.step (per the PyTorch AMP recipe).
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                self.scaler.step(self.optimizer)
                self.scaler.update()

            # Labels appear to be one-hot (argmax over dim=1) — TODO confirm
            # against TelecomFraudDataset.
            acc = self.acc(torch.argmax(pred, dim=1), torch.argmax(labels, dim=1))

            avg_loss.append(float(loss.item()))
            avg_acc.append(float(acc.item()))

            swanlab.log({
                'train_loss': avg_loss[-1],
                'train_acc': avg_acc[-1],
                'train_avg_loss': sum(avg_loss) / step,
                'train_avg_acc': sum(avg_acc) / step
            })

        return sum(avg_loss) / len(self.train_dataloader), sum(avg_acc) / len(self.train_dataloader)

    def train(self):
        """Run the full training loop: one train + one test epoch per epoch,
        logging epoch-level metrics to swanlab."""
        self.model.train()
        self.acc.reset()

        for epoch in range(1, self.epochs + 1):
            train_avg_loss, train_avg_acc = self.train_epoch(epoch)
            test_avg_loss, test_avg_acc, test_avg_f1, test_avg_recall = self.test_epoch(epoch)

            swanlab.log({
                'train_epoch_loss': train_avg_loss,
                'train_epoch_acc': train_avg_acc,
                'test_epoch_loss': test_avg_loss,
                'test_epoch_acc': test_avg_acc,
                'test_epoch_f1': test_avg_f1,
                'test_epoch_recall': test_avg_recall
            })

    def test_epoch(self, epoch: int) -> tuple[float, float, float, float]:
        """Evaluate on the (pre-device-loaded) test set.

        Args:
            epoch: 1-based epoch index (used only for progress display).

        Returns:
            (mean loss, accuracy, macro F1, macro recall) over the test set.
        """
        self.model.eval()

        self.acc.reset()
        self.f1.reset()
        self.recall.reset()

        avg_loss: list[float] = []
        for input_ids, attention_mask, labels in tqdm.tqdm(
                self.all_test_data, desc=f'[test] epoch: {epoch}/{self.epochs}', mininterval=0.1):
            with torch.no_grad():
                pred = self.model(input_ids=input_ids, attention_mask=attention_mask)
                loss = self.loss_fn(pred, labels)

                labels_flatten = torch.argmax(labels, dim=1)
                pred_flatten = torch.argmax(pred, dim=1)

                avg_loss.append(float(loss.item()))

                # Accumulate; final values come from .compute() below.
                self.f1.update(pred_flatten, labels_flatten)
                self.recall.update(pred_flatten, labels_flatten)
                self.acc.update(pred_flatten, labels_flatten)

        result_loss = sum(avg_loss) / len(self.all_test_data)
        result_acc, result_f1, result_recall = map(lambda x: x.compute(), (self.acc, self.f1, self.recall))
        print(f'测试集指标 准确率: {result_acc:.4f} F1: {result_f1:.4f} 召回率{result_recall:.4f}')
        return result_loss, result_acc, result_f1, result_recall

            
# Script entry point: build the training task with config defaults and run it.
if __name__ == '__main__':
    print('开始训练')
    TrainTask().train()
    print('训练完成')
