import torch
from torch.utils.data import DataLoader
from dset import AudioDataset
from model import AudioClassifier
from module import Audio2Mel
from util import ArgumentParserWithUnkown, load_cfg
from sklearn.metrics import confusion_matrix
import numpy as np
from funcodec.tasks.gan_speech_codec import GANSpeechCodecTask


def get_feature_preprocessor(name, params):
    """Build the front-end that turns raw audio into model features.

    Args:
        name: Preprocessor identifier, either 'encodec' or 'mel'.
        params: Dict of constructor/checkpoint parameters for the chosen
            preprocessor.

    Returns:
        The encoder module of a pretrained FunCodec GAN speech codec for
        'encodec', an Audio2Mel module for 'mel', or None for any other
        name (the classifier is then fed raw input).
    """
    if name == 'encodec':
        # Restore the pretrained codec and expose only its encoder half.
        codec_model, _codec_args = GANSpeechCodecTask.build_model_from_file(
            config_file=params['encodec_config'],
            model_file=params['encodec_model']
        )
        return codec_model.encoder
    if name == 'mel':
        # Mel-spectrogram front-end configured from the params dict.
        return Audio2Mel(
            sampling_rate=params['sampling_rate'],
            n_fft=params['n_fft'],
            hop_length=params['hop_length'],
            win_length=params['win_length'],
            n_mel_channels=params['n_mel_channels']
        )
    # Unknown name: no preprocessing requested.
    return None

class Evaluator:
    """Evaluates a trained AudioClassifier checkpoint on a test file list.

    Loads the model described by ``cfg['model']`` from a checkpoint, runs it
    over the test set, and prints overall accuracy plus a confusion matrix.
    """

    def __init__(self, cfg, device, checkpoint, test_file):
        """
        Args:
            cfg: Parsed configuration dict (expects 'model' and 'data' keys).
            device: Torch device string, e.g. 'cuda:0' or 'cpu'.
            checkpoint: Path to a checkpoint containing 'model_state_dict'.
            test_file: Path to the test file list consumed by AudioDataset.
        """
        self.cfg = cfg
        self.device = device
        self.model = self.load_model(checkpoint)
        self.test_dataloader = self.get_dataloader(test_file, cfg['data']['batch_size'])

    def get_dataloader(self, filelist, batch_size):
        """Build a non-shuffled DataLoader over the test file list."""
        dataset = AudioDataset(filelist)
        # shuffle=False: evaluation order must be deterministic.
        return DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)

    def load_model(self, checkpoint_path):
        """Instantiate the classifier, restore its weights, and set eval mode.

        Returns:
            The AudioClassifier on ``self.device`` with gradients disabled
            via eval() (dropout/batchnorm in inference mode).
        """
        # Define model architecture from the config.
        feat_p_cfg = self.cfg['model']['feature_preprocessor']
        feature_preprocessor = get_feature_preprocessor(feat_p_cfg['name'], feat_p_cfg['params'])
        a_c_cfg = self.cfg['model']['audio_classifier']
        audio_classifier = AudioClassifier(feature_preprocessor=feature_preprocessor,
                                           num_classes=a_c_cfg['num_classes'],
                                           in_feats_seq_length=a_c_cfg['in_feats_seq_length'],
                                           in_feats_feature_dim=a_c_cfg['in_feats_feature_dim'])
        # map_location remaps storages saved on another device (e.g. a
        # GPU-trained checkpoint evaluated with --device cpu), which would
        # otherwise raise or land tensors on the wrong device.
        checkpoint = torch.load(checkpoint_path, map_location=self.device)
        audio_classifier.load_state_dict(checkpoint['model_state_dict'])
        audio_classifier.to(self.device)
        audio_classifier.eval()
        return audio_classifier

    def evaluate(self):
        """Run inference over the test set; print accuracy and confusion matrix."""
        true_labels = []
        predicted_labels = []

        with torch.no_grad():
            for inputs, labels in self.test_dataloader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)
                # Model returns (logits, extra); only logits are needed here.
                outputs, _ = self.model(inputs)
                # Predicted class = argmax over the class dimension.
                _, predicted = torch.max(outputs, 1)
                true_labels.extend(labels.cpu().numpy())
                predicted_labels.extend(predicted.cpu().numpy())

        if not true_labels:
            # Empty test set: avoid a NaN mean and a degenerate confusion matrix.
            print('Test set is empty; nothing to evaluate.')
            return

        accuracy = np.mean(np.array(true_labels) == np.array(predicted_labels))
        print(f'Test Accuracy: {accuracy:.4f}')

        # Compute confusion matrix
        cm = confusion_matrix(true_labels, predicted_labels)
        print('Confusion Matrix:')
        print(cm)

def parse_args():
    """Parse the evaluation command line.

    Returns:
        A (namespace, dict) pair: the parsed known arguments and the
        dict form produced by the project's parser (unknown args included).
    """
    arg_parser = ArgumentParserWithUnkown('eval args')
    arg_parser.add_argument('--config', type=str, default='./config.yaml')
    arg_parser.add_argument('--device', type=str, default='cuda:0')
    arg_parser.add_argument('--checkpoint', type=str, required=True)
    arg_parser.add_argument('--test_file', type=str, required=True)
    return arg_parser.parse_to_dict()

def main():
    """Entry point: parse CLI args, load config, and run evaluation."""
    cli_args, _cli_dict = parse_args()
    config = load_cfg(cli_args.config)
    Evaluator(config, cli_args.device, cli_args.checkpoint, cli_args.test_file).evaluate()

# Run evaluation only when executed as a script (not on import).
if __name__ == '__main__':
    main()
