import os
from data import get_data
from eval import get_clf_metrics
from config import get_config
from model import Bert_FNN, Bert_GRU, Bert_LSTM, Bert_BiLSTM, Bert_RNN
from transformers import AutoTokenizer, AutoModel, get_scheduler
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm.auto import tqdm
from utils import StopWatch

class Model(nn.Module):
    """End-to-end trainer for a BERT-family text classifier.

    Wires together a pretrained encoder (BERT or DistilBERT), a task head
    selected by ``args.method``, train/eval dataloaders, an AdamW optimizer
    with a linear LR schedule, and the classification-metrics accumulator.
    """

    def __init__(self, args, logger, datasets_path: str = None, model_path: str = None):
        """Build tokenizer, model, dataloaders, optimizer and scheduler.

        Args:
            args: parsed config namespace; fields read here include
                ``base_model``, ``method``, ``num_classes``, ``device``,
                ``lr``, ``num_epoch``, ``train_batch_size``, ``eval_batch_size``.
            logger: object with an ``info(msg)`` method used for reporting.
            datasets_path: local dataset directory forwarded to ``get_data``.
            model_path: local pretrained-model directory; ``None`` triggers an
                online download (see ``_process_model``).

        Raises:
            ValueError: for an unsupported base model or head method.
        """
        super(Model, self).__init__()
        self.args = args
        self.logger = logger
        self.datasets_path = datasets_path
        self.model_path = model_path

        # Base encoder. Bug fix: the original used ``return ValueError(...)``,
        # which returns the exception instead of raising it (and a non-None
        # return from __init__ would itself raise TypeError at call time).
        if args.base_model in ('bert', 'distilbert'):
            self.tokenizer, self.base_model = self._process_model(
                args.base_model, model_path)
        else:
            raise ValueError("暂不支持该模型")

        # Classification head: dispatch table instead of an if/elif chain.
        # Built here (not at class level) so importing this class does not
        # require the head implementations to resolve at definition time.
        heads = {
            'fnn': Bert_FNN,
            'gru': Bert_GRU,
            'lstm': Bert_LSTM,
            'bilstm': Bert_BiLSTM,
            'rnn': Bert_RNN,
        }
        if args.method not in heads:
            raise ValueError("暂不支持该方法")
        self.model = heads[args.method](self.base_model, args.num_classes)

        self.device = torch.device(self.args.device)
        self.model.to(self.device)

        self._log_args()

        # Train / eval dataloaders built around the tokenizer.
        self.train_dataloader, self.eval_dataloader = get_data(
            self.tokenizer, datasets_path, self.args.train_batch_size, self.args.eval_batch_size)

        # Optimizer, loss, and a linear LR schedule over all training steps.
        self.optimizer = optim.AdamW(self.model.parameters(), lr=args.lr)
        self.criterion = nn.CrossEntropyLoss()

        self.num_training_steps = self.args.num_epoch * \
            len(self.train_dataloader)
        self.lr_scheduler = get_scheduler(
            name="linear", optimizer=self.optimizer, num_warmup_steps=0, num_training_steps=self.num_training_steps
        )

        # Classification metrics accumulator (see eval.get_clf_metrics).
        self.clf_metrics = get_clf_metrics()

    def run(self):
        """Train, then evaluate the best checkpoint, logging both timings."""
        sw = StopWatch()
        self.best_loss = self._train()
        elapsed_time = sw.elapsed_time(unit='m', precision=2)
        self.logger.info(
            f"\r> Training results:\r>>> best loss: {self.best_loss:.4f}\r>>> elapsed_time: {elapsed_time}")

        sw = StopWatch()
        self.matrics = self._eval()
        elapsed_time = sw.elapsed_time(unit='m', precision=2)
        self.logger.info(f"\r> Eval results:\r>>> matrics: {self.matrics}\r>>> elapsed_time: {elapsed_time}")

    def _checkpoint_path(self):
        """Return the best-model checkpoint file path (shared by train/eval)."""
        return os.path.join(
            self.save_model_path,
            self.args.base_model + "_" + self.args.method + '_model.pth')

    def _train(self):
        """Run the training loop and return the best end-of-epoch loss.

        The model state is checkpointed to ``_checkpoint_path()`` whenever the
        last batch loss of an epoch improves on the best seen so far.
        """
        progress_bar = tqdm(range(self.num_training_steps))
        self.model.train()
        # Per-batch loss history, kept on the instance for later inspection.
        self.losses = []

        best_loss = float('inf')

        # Checkpoint directory. Bug fix: os.makedirs(exist_ok=True) instead of
        # exists()+mkdir, which was racy and failed on nested paths.
        os.makedirs('my_awesome_model', exist_ok=True)
        self.save_model_path = "./my_awesome_model"

        for epoch in range(self.args.num_epoch):
            for batch in self.train_dataloader:
                batch = {k: v.to(self.device) for k, v in batch.items()}
                # Labels are stripped from the batch; the rest is model input.
                labels = batch.pop("labels")
                outputs = self.model(**batch)
                loss = self.criterion(outputs, labels)
                self.losses.append(loss.item())
                loss.backward()
                self.optimizer.step()
                self.lr_scheduler.step()
                self.optimizer.zero_grad()
                progress_bar.update(1)
            # (Original gated this on ``(epoch+1) % 1 == 0`` — always true.)
            print(
                f'Epoch [{epoch+1}/{self.args.num_epoch}], Loss: {loss.item():.4f}')
            # NOTE(review): selection compares only the LAST batch's loss of
            # each epoch, which is noisy — an epoch mean would be more robust.
            # The original criterion is kept so checkpoint choice is unchanged.
            if loss.item() < best_loss:
                best_loss = loss.item()
                torch.save(self.model.state_dict(), self._checkpoint_path())
        return best_loss

    def _eval(self):
        """Evaluate the best checkpoint on the eval set; return the metrics."""
        progress_bar = tqdm(range(len(self.eval_dataloader)))
        # Restore the lowest-loss weights. map_location prevents device-
        # mismatch errors when the checkpoint was written on another device.
        self.model.load_state_dict(torch.load(
            self._checkpoint_path(), map_location=self.device))
        # Bug fix: switch to eval mode (disables dropout) and skip gradient
        # tracking during inference.
        self.model.eval()
        with torch.no_grad():
            for batch in self.eval_dataloader:
                batch = {k: v.to(self.device) for k, v in batch.items()}
                labels = batch.pop("labels")
                outputs = self.model(**batch)
                predictions = outputs.argmax(-1).tolist()
                self.clf_metrics.add_batch(
                    predictions=predictions, references=labels.tolist())
                progress_bar.update(1)
        return self.clf_metrics.compute()

    def _log_args(self):
        """Log every attribute of the parsed argument namespace."""
        self.logger.info('> Training arguments:')
        for arg in vars(self.args):
            self.logger.info(f">>> {arg}: {getattr(self.args, arg)}")

    def _process_model(self, model_name: str, model_path: str = None):
        """Load a pretrained tokenizer + encoder, locally or from the hub.

        Args:
            model_name: base-model key; mapped to a hub model id when no
                local path is given.
            model_path: local pretrained-model directory; ``None`` means
                download into ``./models``.

        Returns:
            ``(tokenizer, base_model)`` tuple.

        Raises:
            ValueError: when the online download fails (chained to the cause).
        """
        if model_path is None:
            if model_name == 'bert_base':
                download_model_name = "bert-base-uncased"
            elif model_name == 'bert_large':
                download_model_name = "bert-large-uncased"
            else:
                download_model_name = model_name + "-base-uncased"
            try:
                # Only the hub/network calls belong inside the try block.
                tokenizer = AutoTokenizer.from_pretrained(
                    download_model_name, cache_dir="./models")
                base_model = AutoModel.from_pretrained(
                    download_model_name, cache_dir="./models")
            except Exception as e:
                # Bug fix: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit; narrow it and chain the cause.
                raise ValueError("在线下载预训练模型失败, 请指定本地预训练模型路径") from e
        else:
            tokenizer = AutoTokenizer.from_pretrained(model_path)
            base_model = AutoModel.from_pretrained(model_path)
        return tokenizer, base_model


if __name__ == '__main__':
    # Parse CLI options and build the logger, then hand both to the trainer.
    args, logger = get_config()
    trainer = Model(
        args,
        logger,
        datasets_path='./datasets/imdb',
        model_path='./models/bert-base-uncased',
    )
    trainer.run()
