import yaml
import torch
from utils.logger import Logger
from src.dataset import Vocabulary


class BaseTrainer:
    """Abstract base class for a training pipeline.

    On construction it sets up logging, picks a device, loads a YAML
    config, then delegates to subclass hooks to build vocabularies,
    model, optimizer and dataloaders. Subclasses must implement
    ``_build_vocab``, ``_init_model``, ``_init_dataloaders`` and
    ``train``.
    """

    def __init__(self, config_path):
        """Initialize the trainer from a YAML config file.

        Args:
            config_path: Path to a YAML file. Must contain at least a
                ``learning_rate`` key; ``weight_decay`` is optional
                (see ``_init_optimizer``).
        """
        self.logger = Logger("training.log").get_logger()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Lazy %-style args: the message is only formatted if the level is enabled.
        self.logger.info("Using device: %s", self.device)

        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        self.src_vocab, self.tgt_vocab = Vocabulary(), Vocabulary()
        self._build_vocab()

        self.model = self._init_model()
        self.optimizer = self._init_optimizer()

        self.train_dataloader, self.test_dataloader = self._init_dataloaders()

    def _build_vocab(self):
        """Populate ``self.src_vocab`` and ``self.tgt_vocab``. Subclass hook."""
        raise NotImplementedError

    def _init_model(self):
        """Build and return the model instance. Subclass hook."""
        raise NotImplementedError

    def _init_dataloaders(self):
        """Return a ``(train_dataloader, test_dataloader)`` pair. Subclass hook."""
        raise NotImplementedError

    def _init_optimizer(self):
        """Create an AdamW optimizer over ``self.model``'s parameters.

        Reads ``learning_rate`` (required) and ``weight_decay``
        (optional, defaults to 0.01 — the previous hard-coded value)
        from ``self.config``.
        """
        return torch.optim.AdamW(
            self.model.parameters(),
            lr=self.config['learning_rate'],
            weight_decay=self.config.get('weight_decay', 0.01),
        )

    def train(self):
        """Run the training loop. Subclass hook."""
        raise NotImplementedError
