import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
import pandas as pd

from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config


def audio_tagging(args):
    """Run audio tagging inference on a single audio clip.

    Args:
        args: Namespace carrying sample_rate, window_size, hop_size,
            mel_bins, fmin, fmax, model_type, checkpoint_path, audio_path,
            cuda, and mode attributes.

    Returns:
        Tuple of (top_probability, top_label): the clipwise probability and
        label of the highest-scoring class.
    """

    # Arguments & parameters
    sample_rate = args.sample_rate
    window_size = args.window_size
    hop_size = args.hop_size
    mel_bins = args.mel_bins
    fmin = args.fmin
    fmax = args.fmax
    model_type = args.model_type
    checkpoint_path = args.checkpoint_path
    audio_path = args.audio_path
    device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')

    classes_num = config.classes_num
    labels = config.labels

    # Model
    # SECURITY NOTE(review): eval() on a CLI-supplied string can execute
    # arbitrary code. Tolerable for a local research script, but a whitelist
    # dict of model classes would be safer if this is ever exposed.
    Model = eval(model_type)
    model = Model(sample_rate=sample_rate, window_size=window_size,
        hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
        classes_num=classes_num, freeze_base=False)

    # map_location keeps CPU-only machines working with GPU-saved checkpoints
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # Parallel
    if 'cuda' in str(device):
        model.to(device)
        print('GPU number: {}'.format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    else:
        print('Using CPU.')

    # Load audio, resampled to the model's expected rate and mixed to mono
    (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)

    waveform = waveform[None, :]    # (1, audio_length)
    waveform = move_data_to_device(waveform, device)

    # Forward
    with torch.no_grad():
        model.eval()
        batch_output_dict = model(waveform, None)

    clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]
    # shape: (classes_num,)

    # Class indices sorted by descending probability
    sorted_indexes = np.argsort(clipwise_output)[::-1]

    if args.mode == 'audio_tagging':
        # Print the top-k class probabilities; guard against models with
        # fewer than 10 classes (the original indexed range(10) blindly).
        for k in range(min(10, classes_num)):
            print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
                clipwise_output[sorted_indexes[k]]))

        # Print the embedding shape if the model exposes one
        if 'embedding' in batch_output_dict.keys():
            embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]
            print('embedding: {}'.format(embedding.shape))

    return clipwise_output[sorted_indexes[0]], np.array(labels)[sorted_indexes[0]]

def evaluate_acc(args):
    """Evaluate tagging accuracy on a random subset of a labelled dataset.

    Reads a CSV expected to contain 'filename' and 'category' columns,
    runs audio_tagging() on up to args.num_samples randomly chosen clips
    from args.audio_root, and prints per-file results plus overall accuracy.

    Args:
        args: Namespace with csv_path, audio_root, num_samples, plus all
            attributes audio_tagging() reads; args.audio_path is set here
            per file before each inference call.
    """
    # Read the CSV metadata file
    df = pd.read_csv(args.csv_path)

    # Randomly select the requested number of samples (fixed seed for
    # reproducibility); fall back to the whole set when it is smaller.
    if len(df) < args.num_samples:
        print(f"Warning: Only {len(df)} samples available, using all")
        selected = df
    else:
        selected = df.sample(n=args.num_samples, random_state=42)

    correct = 0
    total = 0

    # Run inference on each selected sample
    for idx, row in selected.iterrows():
        audio_file = os.path.join(args.audio_root, row['filename'])
        true_label = row['category']

        # audio_tagging() reads the clip path from args, so patch it per file
        args.audio_path = audio_file

        try:
            # Predict the top label for this clip
            _, pred_label = audio_tagging(args)

            # Compare prediction against the ground-truth category
            if pred_label == true_label:
                correct += 1
            total += 1

            print(f"File: {row['filename']} | True: {true_label} | Pred: {pred_label} | {'✓' if pred_label == true_label else '✗'}")

        except Exception as e:
            # Best-effort: report unreadable/broken files but keep evaluating
            print(f"Error processing {audio_file}: {str(e)}")

    # Compute and report accuracy (guard against zero successful runs)
    accuracy = correct / total if total > 0 else 0
    # BUG FIX: original printed a literal "/n"; "\n" is the newline escape.
    print(f"\nEvaluation Complete | Samples: {total} | Correct: {correct} | Accuracy: {accuracy:.4f}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Example of parser. ')
    subparsers = parser.add_subparsers(dest='mode')

    # Model/feature arguments shared by both modes, defined once via a
    # parent parser (the original duplicated all nine definitions).
    # add_help=False avoids clashing with each subparser's own -h/--help.
    common = argparse.ArgumentParser(add_help=False)
    common.add_argument('--sample_rate', type=int, default=32000)
    common.add_argument('--window_size', type=int, default=1024)
    common.add_argument('--hop_size', type=int, default=320)
    common.add_argument('--mel_bins', type=int, default=64)
    common.add_argument('--fmin', type=int, default=50)
    common.add_argument('--fmax', type=int, default=14000)
    common.add_argument('--model_type', type=str, required=True)
    common.add_argument('--checkpoint_path', type=str, required=True)
    common.add_argument('--cuda', action='store_true', default=False)

    # Single-clip tagging mode
    parser_at = subparsers.add_parser('audio_tagging', parents=[common])
    parser_at.add_argument('--audio_path', type=str, required=True)

    # Dataset accuracy evaluation mode
    parser_eval = subparsers.add_parser('evaluate', parents=[common])
    parser_eval.add_argument('--csv_path', type=str, default='C:/code_of_user/PANNs/PANN-ESC50/dataset/esc50.csv')
    parser_eval.add_argument('--audio_root', type=str, default='C:/code_of_user/PANNs/PANN-ESC50/dataset/audio')
    parser_eval.add_argument('--num_samples', type=int, default=30)

    args = parser.parse_args()

    if args.mode == 'audio_tagging':
        audio_tagging(args)

    elif args.mode == 'evaluate':
        evaluate_acc(args)

    else:
        # Reached when no subcommand is given (args.mode is None)
        raise Exception('Invalid mode!')