import json
import os
import torch
import torch.nn as nn
import torch.optim as optim
from .config import *
from .process_bar import ProcessBar
from .log_util import Logger
from .file_utils import *
from .resnet import ResNetSpeaker, ResNet50Custom, ResNet34Custom
from .cnn import CNN18, CNN34, CNN50


# Module-level logger; Trainer.__init__ re-binds it to the model's log path.
logger = Logger('')

# Unpack the training hyperparameters from the shared config dict.
batchSize, epochs, learningRate, weightDecay, stepSize, gamma = (
    trainPram[key]
    for key in ('batchSize', 'epochs', 'learningRate', 'weightDecay', 'stepSize', 'gamma')
)


def _train_one_epoch(model, train_loader, criterion, optimizer, device, pb):
    # Run one optimization pass over train_loader; return the summed batch loss.
    model.train()
    running_loss = 0.0
    for index, (features, labels) in enumerate(train_loader):
        features, labels = features.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(features)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        pb.print(index)
    return running_loss


def _validate(model, val_loader, device):
    # Evaluate top-1 classification accuracy (in %) on val_loader without gradients.
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for features, labels in val_loader:
            features, labels = features.to(device), labels.to(device)
            outputs = model(features)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).int().sum().item()
    # Guard against an empty validation set (avoids ZeroDivisionError).
    return 100.0 * correct / total if total else 0.0


def _save_checkpoint(model, model_root, current_epoch):
    # Persist the model weights and record the last finished epoch for resume support.
    # exist_ok=True avoids the check-then-create race of exists() + makedirs().
    os.makedirs(model_root, exist_ok=True)
    model_path = get_model_path(model_root)
    torch.save(model.state_dict(), model_path)
    print(f'Model saved to {model_path}')
    info = {
        'finished_epoch': current_epoch
    }
    with open(get_model_info_path(model_root), 'w') as f:
        json.dump(info, f)


# Train the model
def train_model(model, train_loader, val_loader, criterion, optimizer, scheduler, device, model_root,
                epochs=epochs, current_epoch=1):
    """Run the training loop from ``current_epoch`` through ``epochs`` (inclusive).

    After every epoch this steps the LR scheduler, evaluates accuracy on
    ``val_loader``, saves the weights under ``model_root``, and writes the
    last finished epoch so an interrupted run can be resumed.

    Args:
        model: network to train (already moved to ``device`` by the caller).
        train_loader: DataLoader of (features, labels) training batches.
        val_loader: DataLoader used for per-epoch accuracy evaluation.
        criterion: loss function taking (logits, class-index labels).
        optimizer: optimizer over ``model.parameters()``.
        scheduler: LR scheduler stepped once per epoch.
        device: torch.device to run on.
        model_root: directory for the checkpoint and resume-info files.
        epochs: total number of epochs (defaults to the config value).
        current_epoch: 1-based epoch to start from (used when resuming).
    """
    rest_epoch = epochs - current_epoch + 1
    for _ in range(rest_epoch):
        sample_amount = len(train_loader)
        log_text = f'Epoch {current_epoch}/{epochs} starts, {sample_amount} batches'
        print(log_text)
        logger.log(log_text)
        pb = ProcessBar(sample_amount)
        running_loss = _train_one_epoch(model, train_loader, criterion, optimizer, device, pb)
        scheduler.step()  # update the learning rate once per epoch
        log_text = f'Epoch {current_epoch}/{epochs}, Loss: {running_loss / len(train_loader)}'
        print('\n', log_text)
        logger.log(log_text)
        # Validate the model
        accuracy = _validate(model, val_loader, device)
        log_text = f'Accuracy on validation set: {accuracy}%'
        print(log_text)
        logger.log(log_text)
        # Save a checkpoint at the end of every epoch
        _save_checkpoint(model, model_root, current_epoch)
        current_epoch = current_epoch + 1


# Main Trainer class: builds the model, optionally resumes, runs training.
class Trainer:
    def __init__(self, model_type, model_root, train_loader, test_loader, resume=False):
        """Store the run configuration and point the module logger at this model's log file.

        Args:
            model_type: one of 'resnet18' / 'resnet34' / 'resnet50' / 'cnn18' /
                'cnn34' / 'cnn50' / 'resnet34_custom' / 'resnet50_custom'.
            model_root: directory for weights, resume info, and logs.
            train_loader: DataLoader of training batches.
            test_loader: DataLoader used as the validation set during training.
            resume: when True, reload saved weights and continue from the
                recorded epoch if a checkpoint exists.
        """
        self.model_type = model_type
        self.resume = resume
        self.model_root = model_root
        self.current_epoch = 1
        self.train_loader = train_loader
        self.test_loader = test_loader

        # Re-bind the module-level logger so train_model() logs to this model's file.
        global logger
        logger = Logger(get_model_log_path(model_root))

    def _build_model(self, device):
        # Instantiate the network selected by self.model_type and move it to device.
        if self.model_type in ('resnet18', 'resnet34', 'resnet50'):
            return ResNetSpeaker(self.model_type).to(device)
        builders = {
            'cnn18': CNN18,
            'cnn34': CNN34,
            'cnn50': CNN50,
            'resnet34_custom': ResNet34Custom,
            'resnet50_custom': ResNet50Custom,
        }
        if self.model_type in builders:
            return builders[self.model_type]().to(device)
        raise ValueError('输入模型未知')

    def _maybe_resume(self, model, device):
        # If resuming and a checkpoint exists, reload the weights and next epoch index.
        model_path = get_model_path(self.model_root)
        if not (self.resume and os.path.exists(model_path)):
            return
        # map_location lets a checkpoint saved on GPU be restored on CPU (and vice versa).
        model.load_state_dict(torch.load(model_path, map_location=device))
        print("Resuming model from", model_path)
        info_path = get_model_info_path(self.model_root)
        with open(info_path, 'r') as f:
            info = json.load(f)
        self.current_epoch = info['finished_epoch'] + 1
        print("Resuming training at epoch", self.current_epoch)

    def train(self):
        """Build the model, optionally resume from a checkpoint, and run training."""
        # Prefer CUDA when it is available.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Training on {device}")

        model = self._build_model(device)
        self._maybe_resume(model, device)

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=learningRate, weight_decay=weightDecay)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=stepSize, gamma=gamma)

        train_model(model, self.train_loader, self.test_loader, criterion, optimizer,
                    scheduler, device, self.model_root, current_epoch=self.current_epoch)
