import torch
from torch.utils.data import DataLoader
from dset import AudioDataset
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from model import AudioClassifier
from funcodec.tasks.gan_speech_codec import GANSpeechCodecTask
import os
from util import ArgumentParserWithUnkown, load_cfg, update_cfg
from module import Audio2Mel

def get_feature_preprocessor(name, params):
    """Instantiate the feature front-end selected by *name*.

    Args:
        name: 'encodec' for the encoder of a pretrained FunCodec GAN speech
            codec, 'mel' for an Audio2Mel spectrogram transform; any other
            value yields no preprocessor.
        params: dict of constructor/loading parameters for the chosen front-end.

    Returns:
        An nn.Module producing features from raw audio, or None.
    """
    if name == 'mel':
        return Audio2Mel(
            sampling_rate=params['sampling_rate'],
            n_fft=params['n_fft'],
            hop_length=params['hop_length'],
            win_length=params['win_length'],
            n_mel_channels=params['n_mel_channels'],
        )
    if name == 'encodec':
        # Only the encoder half of the codec is used as a feature extractor.
        codec, _codec_args = GANSpeechCodecTask.build_model_from_file(
            config_file=params['encodec_config'],
            model_file=params['encodec_model'],
        )
        return codec.encoder
    return None


class Trainer:
    """Supervised trainer for an AudioClassifier.

    Builds model, data loaders, a class-weighted cross-entropy criterion,
    an Adam optimizer and a warmup-then-linear-decay LR schedule from a
    nested config dict, then drives the train/validate/checkpoint loop.
    """

    def __init__(self, cfg, device):
        """
        Args:
            cfg: nested config dict with 'model', 'data' and 'train' sections.
            device: torch device (string or torch.device) for model and batches.
        """
        self.cfg = cfg
        self.device = device
        self.model = self.get_model(cfg['model'])
        self.train_dataloader = self.get_dataloader(cfg['data']['train_flist'], cfg['data']['batch_size'])
        # Validation order is irrelevant; skip the cost/nondeterminism of shuffling.
        self.valid_dataloader = self.get_dataloader(cfg['data']['valid_flist'], cfg['data']['batch_size'],
                                                    shuffle=False)
        self.class_weights = self.calculate_class_weights(self.train_dataloader)
        self.criterion = self.get_criterion(self.class_weights)
        self.optimizer = self.get_optim(self.model.get_params())
        self.scheduler = self.get_scheduler(
            self.optimizer,
            total_steps=len(self.train_dataloader) * cfg['train']['num_epochs'],
            warmup_steps=cfg['train']['warmup_steps'],
        )

    def get_dataloader(self, filelist, batch_size, shuffle=True):
        """Build a DataLoader over an AudioDataset.

        Args:
            filelist: path of the file list consumed by AudioDataset.
            batch_size: batch size.
            shuffle: reshuffle every epoch (default True; pass False for eval).
        """
        dataset = AudioDataset(filelist)
        return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4, pin_memory=True)

    def get_model(self, model_cfg):
        """Assemble feature preprocessor + AudioClassifier and move it to the device."""
        feat_p_cfg = model_cfg['feature_preprocessor']
        feature_preprocessor = get_feature_preprocessor(feat_p_cfg['name'], feat_p_cfg['params'])
        a_c_cfg = model_cfg['audio_classifier']
        audio_classifer = AudioClassifier(feature_preprocessor=feature_preprocessor,
                                          num_classes=a_c_cfg['num_classes'],
                                          in_feats_seq_length=a_c_cfg['in_feats_seq_length'],
                                          in_feats_feature_dim=a_c_cfg['in_feats_feature_dim'])
        return audio_classifer.to(self.device)

    def calculate_class_weights(self, dataloader):
        """Compute inverse-frequency class weights from the label distribution.

        weight[c] = total / (num_classes * count[c]); zero counts are clamped
        to 1 so a class absent from the training set yields a finite weight
        instead of inf (which would poison CrossEntropyLoss with NaNs).

        Returns:
            1-D tensor of shape (num_classes,) on ``self.device``.
        """
        num_classes = self.cfg['model']['audio_classifier']['num_classes']
        class_counts = torch.zeros(num_classes)
        for _, labels in dataloader:
            class_counts += torch.bincount(labels, minlength=num_classes)
        total_samples = class_counts.sum()
        # clamp(min=1) guards against division by zero for missing classes
        class_weights = total_samples / (num_classes * class_counts.clamp(min=1))
        return class_weights.to(self.device)

    def get_criterion(self, class_weights):
        """Class-weighted cross-entropy loss."""
        return torch.nn.CrossEntropyLoss(weight=class_weights)

    def get_optim(self, params=None):
        """Adam optimizer over *params* (defaults to the model's parameters)."""
        if params is None:
            params = self.model.get_params()
        optimizer = optim.Adam(params, lr=self.cfg['train']['learning_rate'],
                               weight_decay=self.cfg['train']['weight_decay'])
        return optimizer

    def get_scheduler(self, optimizer, total_steps, warmup_steps):
        """LambdaLR with linear warmup to the base LR, then linear decay to 0."""
        def lr_lambda(current_step: int):
            if current_step < warmup_steps:
                # linear ramp 0 -> 1 over the warmup phase
                return float(current_step) / float(max(1, warmup_steps))
            # linear decay 1 -> 0 over the remaining steps
            return max(
                0.0,
                float(total_steps - current_step) / float(max(1, total_steps - warmup_steps))
            )
        return LambdaLR(optimizer, lr_lambda)

    def train(self):
        """Run the full training loop with periodic logging, validation and checkpointing."""
        self.model.train()
        training_step = 0
        best_val_loss = float('inf')
        for epoch_iter in range(self.cfg['train']['num_epochs']):
            for in_epoch_step, (inputs, labels) in enumerate(self.train_dataloader):
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                self.optimizer.zero_grad()
                outputs, _ = self.model(inputs)
                loss = self.criterion(outputs, labels)
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()  # Update LR once per optimizer step (per-step schedule)
                training_step += 1
                if training_step % self.cfg['train']['log_interval'] == 0:
                    print(f"step:{training_step}, epoch:{epoch_iter}, training loss:{loss.item():.4f}")
                if training_step % self.cfg['train']['valid_interval'] == 0:
                    valid_loss, acc = self.valid()
                    print(f"step:{training_step}, epoch:{epoch_iter}, training loss:{loss.item():.4f}, valid loss:{valid_loss:.4f}, accuracy:{acc:.4f}")
                    # 'last' is overwritten each time; 'best' keeps step/loss/acc in its name
                    self.save_checkpoint('last', None, None)
                    if valid_loss < best_val_loss:
                        best_val_loss = valid_loss
                        self.save_checkpoint('best', training_step, valid_loss, acc)

    def valid(self):
        """Evaluate on the validation set.

        Returns:
            (average_loss, accuracy) over the whole validation loader.
        """
        self.model.eval()
        total_loss = 0
        correct_predictions = 0
        total_samples = 0

        with torch.no_grad():
            for inputs, labels in self.valid_dataloader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                outputs, _ = self.model(inputs)
                loss = self.criterion(outputs, labels)
                total_loss += loss.item()

                # Calculate accuracy
                _, predicted = torch.max(outputs, 1)
                correct_predictions += (predicted == labels).sum().item()
                total_samples += labels.size(0)

        average_loss = total_loss / len(self.valid_dataloader)
        accuracy = correct_predictions / total_samples

        self.model.train()  # Make sure to switch back to train mode after validation
        print(f'Validation Loss: {average_loss:.4f}, Validation Accuracy: {accuracy:.4f}')
        return average_loss, accuracy

    def save_checkpoint(self, description, step, val_loss, acc=None):
        """Save a model checkpoint.

        Args:
            description: filename prefix ('last', 'best', ...).
            step: training step; pass None (with val_loss=None) for a plain
                '<description>.pt' file that gets overwritten in place.
            val_loss: validation loss embedded in the filename when given.
            acc: optional accuracy also embedded in the filename.
        """
        checkpoint_dir = self.cfg.get('train', {}).get('checkpoint_dir', './checkpoints')
        os.makedirs(checkpoint_dir, exist_ok=True)
        if step is None or val_loss is None:
            checkpoint_path = os.path.join(checkpoint_dir, f"{description}.pt")
        elif acc is not None:
            checkpoint_path = os.path.join(checkpoint_dir, f"{description}_step_{step}_loss_{val_loss:.4f}_acc_{acc:.4f}.pt")
        else:
            checkpoint_path = os.path.join(checkpoint_dir, f"{description}_step_{step}_loss_{val_loss:.4f}.pt")
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'loss': val_loss,
            'step': step
        }, checkpoint_path)
        print(f"Checkpoint saved to {checkpoint_path}")

def parse_args():
    """Parse command-line flags.

    Returns:
        (args, args_dict): the parsed known arguments plus a dict of any
        unknown flags, as produced by ArgumentParserWithUnkown.parse_to_dict().
    """
    parser = ArgumentParserWithUnkown('train args')
    for flag, default in (('--config', './config.yaml'), ('--device', 'cuda:0')):
        parser.add_argument(flag, type=str, default=default)
    return parser.parse_to_dict()


def main():
    """Entry point: load the config, apply CLI overrides, and train."""
    args, overrides = parse_args()
    cfg = load_cfg(args.config)
    update_cfg(cfg, overrides)
    Trainer(cfg, args.device).train()


if __name__ == '__main__':
    main()
