# train.py
import os
import gc
import torch
import time
from torch.utils.data import DataLoader
from tqdm import tqdm
from src.module import Transformer
from dataset import InMemoryTranslationDataset
from engine.trainer import BaseTrainer
import ijson
from src.tune import HyperparameterTune
from utils.plotting import Plotting
import copy
from src.validate import TranslationValidator


def get_next_run_dir(base_dir='../runs'):
    """Return the path of the next unused ``runN`` directory under *base_dir*.

    Scans existing ``run<number>`` subdirectories and returns ``run{max+1}``
    (``run1`` when none exist). The base directory is created if missing;
    the returned run directory itself is NOT created.
    """
    os.makedirs(base_dir, exist_ok=True)
    highest = 0
    for entry in os.listdir(base_dir):
        if not entry.startswith('run'):
            continue
        if not os.path.isdir(os.path.join(base_dir, entry)):
            continue
        suffix = entry[3:]
        # Non-numeric suffixes (e.g. 'runx') are ignored, as before.
        if suffix.isdigit():
            highest = max(highest, int(suffix))
    return os.path.join(base_dir, f'run{highest + 1}')


# Allocate a fresh run directory once at import time; everything in this
# module (trainer construction in __main__) writes into this single folder.
run_dir = get_next_run_dir()


class TransformerTrainer(BaseTrainer):
    """End-to-end trainer for a Transformer translation model.

    Responsibilities: build the model on top of the vocabularies/dataloaders
    provided by ``BaseTrainer``, delegate optimizer ownership to the
    hyperparameter tuner, run the epoch loop with warmup LR scheduling,
    validate, checkpoint, track the best model and record plotting metrics.
    """

    def __init__(self, config_path, run_dir):
        """
        :param config_path: path to the YAML training configuration file.
        :param run_dir: directory where checkpoints and plots for this run go.
        """
        super().__init__(config_path)
        self.run_dir = run_dir
        self._setup_dirs()
        # Build the Transformer model.
        self.model = self._init_model()
        # The tuner owns and manages the optimizer.
        self.tuner = HyperparameterTune(self.model, self.train_dataloader, self.test_dataloader, self.config)
        self.optimizer = self.tuner.optimizer
        # Linear warmup over the first 4000 steps, constant factor 1.0 after;
        # helps stabilize convergence early in training.
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            lr_lambda=lambda step: min((step + 1) / 4000, 1.0)
        )
        # Label smoothing regularizes; ignore_index=0 assumes index 0 is the
        # padding token — TODO confirm against the vocabulary class.
        self.criterion = torch.nn.CrossEntropyLoss(label_smoothing=0.1, ignore_index=0)
        self.validator = TranslationValidator(
            model=self.model,
            dataloader=self.test_dataloader,
            criterion=self.criterion,
            device=self.device,
            tokenizer=self.tgt_vocab
        )
        self.best_model = None           # deep copy of the best state_dict seen so far
        self.best_score = float('-inf')  # overwritten on the first save_best_model call
        self.plotter = Plotting(self.run_dir)

    def _setup_dirs(self):
        """Create the per-run checkpoints directory."""
        self.checkpoints_dir = os.path.join(self.run_dir, 'checkpoints')
        os.makedirs(self.checkpoints_dir, exist_ok=True)

    def _build_vocab(self):
        """Build source and target vocabularies by streaming the JSON corpora."""

        def build_vocab(files, vocab):
            # ijson streams array items so large corpora never load fully.
            for file in files:
                with open(file, "rb") as f:
                    for item in ijson.items(f, 'item'):
                        if isinstance(item, dict) and "english" in item and "chinese" in item:
                            # NOTE(review): both languages are added to BOTH
                            # vocabularies; confirm this is intended rather
                            # than english->src / chinese->tgt only.
                            vocab.add_sentence(item["english"])
                            vocab.add_sentence(item["chinese"])

        build_vocab([self.config['data']['src_train_file'], self.config['data']['src_test_file']], self.src_vocab)
        build_vocab([self.config['data']['tgt_train_file'], self.config['data']['tgt_test_file']], self.tgt_vocab)

        # Finalize index mappings from the accumulated word counts.
        self.src_vocab.build(sentences=self.src_vocab.word_count.keys())
        self.tgt_vocab.build(sentences=self.tgt_vocab.word_count.keys())

    def _init_model(self):
        """Instantiate the Transformer from config hyperparameters, on the training device."""
        return Transformer(
            len(self.src_vocab.word2idx), len(self.tgt_vocab.word2idx),
            d_model=self.config['d_model'], num_heads=self.config['num_heads'],
            num_layers=self.config['num_layers'], d_ff=self.config['d_ff'],
            dropout=self.config['dropout']
        ).to(self.device)

    def _init_dataloaders(self):
        """Create and return the (train, test) dataloaders."""

        def create_dataloader(file_path):
            dataset = InMemoryTranslationDataset(file_path, self.src_vocab, self.tgt_vocab,
                                                 max_len=self.config['max_len'])
            return DataLoader(dataset, batch_size=self.config['batch_size'], shuffle=True, num_workers=4,
                              collate_fn=InMemoryTranslationDataset.collate_fn)

        # NOTE(review): the test loader also uses shuffle=True; confirm
        # shuffling the evaluation set is intended.
        return create_dataloader(self.config['data']['src_train_file']), create_dataloader(
            self.config['data']['src_test_file'])

    def save_best_model(self, score, save_path, mode='min'):
        """
        Persist the model weights whenever the score improves.

        :param score: current metric (e.g. val_loss or BLEU)
        :param save_path: destination file for the best weights
        :param mode: 'min' (lower is better, e.g. loss) or 'max' (higher is better, e.g. BLEU)
        """
        # Bug fix: best_score is initialized to -inf, so with mode='min' the
        # original comparison (score < -inf) was never true and the best model
        # was NEVER saved. Always accept the very first score.
        if self.best_model is None:
            is_better = True
        elif mode == 'min':
            is_better = score < self.best_score
        else:
            is_better = score > self.best_score
        if is_better:
            self.best_score = score
            self.best_model = copy.deepcopy(self.model.state_dict())
            torch.save(self.best_model, save_path)
            tqdm.write(f"Best model saved with score: {score}")

    def get_predictions(self):
        """Decode the test set with ``model.generate``; return (true, predicted) token-id lists."""
        self.model.eval()
        true_labels, predicted_labels = [], []
        with torch.no_grad():
            for batch in self.test_dataloader:
                src, tgt = batch['src'].to(self.device), batch['tgt'].to(self.device)
                output = self.model.generate(src, self.tgt_vocab)
                true_labels.extend(tgt.cpu().numpy().tolist())
                predicted_labels.extend(output.cpu().numpy().tolist())

        return true_labels, predicted_labels

    def train(self):
        """Run the full training loop: per-epoch SGD, validation, checkpointing, plotting."""
        gc.collect()
        torch.cuda.empty_cache()

        epoch_losses, val_losses, bleu_scores, train_times, learning_rates = [], [], [], [], []
        for epoch in range(self.config['num_epochs']):
            start_time, total_loss = time.time(), 0
            self.model.train()
            pbar = tqdm(self.train_dataloader, desc=f'Epoch {epoch + 1} Training', total=len(self.train_dataloader))

            # (Removed: a per-batch loop that copied every parameter's grad
            # into a dict that was never read — pure overhead.)
            for batch in pbar:
                src, tgt = batch['src'].to(self.device), batch['tgt'].to(self.device)
                src_mask, tgt_mask = self.validator.generate_masks(src, tgt)
                # Teacher forcing: feed tgt[:-1], predict tgt[1:].
                output = self.model(src, tgt[:, :-1], src_mask, tgt_mask[:, :, :-1, :-1])

                # reshape (not view) tolerates non-contiguous model output.
                loss = self.criterion(output.reshape(-1, output.size(-1)), tgt[:, 1:].reshape(-1))

                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                self.optimizer.step()
                self.scheduler.step()  # linear warmup for the first 4000 steps, constant after
                total_loss += loss.item()
                pbar.set_postfix({'train_loss': loss.item()})

            # Validation on the held-out set.
            avg_loss, bleu_score = self.validator.validate()

            # Record wall-clock time and the LR actually used this epoch.
            epoch_time = time.time() - start_time
            current_lr = self.optimizer.param_groups[0]['lr']

            # Bug fix: the decay must compound across epochs. The original
            # recomputed the same constant from the never-updated config every
            # epoch, so the learning rate never decayed past the first step.
            self.config['learning_rate'] = max(1e-5, self.config['learning_rate'] * 0.98)
            self.tuner.update_hyperparameters({"learning_rate": self.config['learning_rate']})
            # NOTE(review): this pickles the whole model object; a state_dict
            # would be more portable, but the format is kept for existing loaders.
            torch.save(self.model, os.path.join(self.checkpoints_dir, f'epoch_{epoch + 1}.pt'))

            # Track best weights by validation loss.
            self.save_best_model(avg_loss, save_path=os.path.join(self.run_dir, "best_model.pt"), mode='min')

            # Record per-epoch metrics for plotting.
            avg_train_loss = total_loss / len(self.train_dataloader)
            epoch_losses.append(avg_train_loss)
            val_losses.append(avg_loss)
            bleu_scores.append(bleu_score)
            train_times.append(epoch_time)
            learning_rates.append(current_lr)
            self.plotter.update(
                epoch_losses=epoch_losses,
                bleu_scores=bleu_scores,
                train_losses=epoch_losses,
                val_losses=val_losses,
                train_accuracies=[],
                val_accuracies=[],
                train_times=train_times,
                learning_rates=learning_rates
            )

        # Save final visualisations, including a confusion matrix built from
        # predicted vs. true token ids on the test set.
        true_labels, predicted_labels = self.get_predictions()
        label_list = self.tgt_vocab.idx2word
        self.plotter.save(self.model, true_labels, predicted_labels, label_list)


if __name__ == '__main__':
    # Hard-coded config path; run_dir was allocated once at module import.
    config_path = r'D:\FengGong_Project\Machine_Trans\config\config.yaml'
    TransformerTrainer(config_path, run_dir=run_dir).train()



