import torch
import torch.nn as nn
import torch.optim as optim
import os
from metrics import accuracy
from data_loader import load_mnist_data
from model import *

class Trainer(object):
    """Trains a model on MNIST, checkpointing after every epoch and
    resuming from the newest checkpoint when one exists."""

    def __init__(self, output_dir):
        """Set up the (CPU) device and the checkpoint directory.

        Args:
            output_dir: root output directory; checkpoints are written to
                ``<output_dir>/models``.
        """
        super(Trainer, self).__init__()
        self.device = torch.device("cpu")
        self.output_model_dir = os.path.join(output_dir, "models")
        # Create the checkpoint directory eagerly: the resume logic in
        # training() calls os.listdir on it, and save_model writes into it;
        # both would fail on a fresh output_dir otherwise.
        os.makedirs(self.output_model_dir, exist_ok=True)

    def train_loop(self, epoch, train_batch_step, dataloader, model, loss_fn, opt):
        """Run one training epoch over `dataloader`.

        Args:
            epoch: current epoch index (for logging only).
            train_batch_step: global batch counter carried across epochs.
            dataloader: iterable of (data, target) batches.
            model / loss_fn / opt: model, criterion, and optimizer.

        Returns:
            The updated global batch counter.
        """
        model.train()
        losses = []
        accuracies = []
        for batch_idx, (data, target) in enumerate(dataloader):
            # Keep the batch on the same device as the model (no-op on CPU,
            # required if self.device ever becomes a GPU).
            data = data.to(self.device)
            target = target.to(self.device)

            # Forward pass
            output = model(data)
            loss = loss_fn(output, target)
            acc = accuracy(output, target)
            losses.append(loss.item())
            accuracies.append(acc.item())

            # Backward pass
            opt.zero_grad()
            loss.backward()
            opt.step()
            print(f"Epoch:{epoch} Train Batch:{train_batch_step} Loss:{loss.item():.3f}  Accuracy:{acc.item():.3f}")
            train_batch_step += 1
        return train_batch_step

    def training(self, n_epochs, checkpoint_path, lr, weight_decay):
        """Full training entry point: load data, optionally resume, train.

        Args:
            n_epochs: number of ADDITIONAL epochs to train (on top of any
                epochs restored from a checkpoint).
            checkpoint_path: explicit checkpoint to resume from; when None,
                the newest checkpoint in output_model_dir (if any) is used.
            lr: Adam learning rate.
            weight_decay: Adam weight decay.
        """
        # Load data
        train_dataloader, eval_dataloader = load_mnist_data(
            train_batch_size=64,
            test_batch_size=128,
            train_ratio=0.8
        )
        model = LeNet().to(self.device)
        opt = optim.Adam(params=model.parameters(), lr=lr, weight_decay=weight_decay)

        # Checkpoint restore: fall back to the newest saved checkpoint when
        # no explicit path was given.
        start_epoch = 0
        if checkpoint_path is None:
            # Only consider files matching the model_<epoch>.pkl pattern so a
            # stray file cannot break the int(...) parse below.
            checkpoints = [
                name for name in os.listdir(self.output_model_dir)
                if name.startswith("model_") and name.endswith(".pkl")
            ]
            if checkpoints:
                # Ascending sort by the epoch number encoded in the filename;
                # the last entry is the newest checkpoint.
                checkpoints.sort(key=lambda name: int(name.split(".")[0].split("_")[1]))
                checkpoint_path = os.path.join(self.output_model_dir, checkpoints[-1])
        if checkpoint_path is not None and os.path.exists(checkpoint_path):
            checkpoint = torch.load(checkpoint_path, map_location='cpu')
            model.load_state_dict(checkpoint['model'])
            opt.load_state_dict(checkpoint['opt'])
            start_epoch = checkpoint['epoch'] + 1
            # Train n_epochs MORE epochs on top of the restored ones.
            n_epochs = n_epochs + start_epoch
            print(f"参数恢复成功，来源---{checkpoint_path}")

        loss_fn = nn.CrossEntropyLoss()
        print(model)

        train_batch_step = 0
        for epoch in range(start_epoch, n_epochs):
            train_batch_step = self.train_loop(epoch, train_batch_step, train_dataloader, model, loss_fn, opt)
            # Checkpoint after every epoch so training can always resume.
            self.save_model(epoch, model, opt)
        # NOTE: the loop above already saved the final epoch. The previous
        # extra save_model(n_epochs, ...) wrote a second checkpoint labeled
        # n_epochs while holding epoch n_epochs-1 weights, which made a
        # later resume skip an epoch — it has been removed.
        print('训练完成！')

    def save_model(self, epoch, model, opt):
        """Persist epoch index, model weights and optimizer state as
        ``model_<epoch:06d>.pkl`` inside output_model_dir."""
        obj = {
            'epoch': epoch,
            'model': model.state_dict(),
            'opt': opt.state_dict()
        }
        torch.save(
            obj, os.path.join(self.output_model_dir, f"model_{epoch:06d}.pkl")
        )

class Predictor(object):
    """Inference-side counterpart to Trainer.

    Currently a placeholder: prediction logic has not been implemented.
    """

    def __init__(self):
        super(Predictor, self).__init__()

    def predict(self, ):
        # TODO: implement inference (load checkpoint, run forward pass).
        return None

