#!/usr/bin/env python3
"""
Script to re-split the strong_real dataset to match the label distribution of val_weak.tsv

This script:
1. Analyzes the label distribution in val_weak.tsv
2. Merges train_strong_real.tsv and val_strong_real.tsv
3. Samples filenames from the merged dataset to match the val_weak distribution
4. Creates new train_strong_real.tsv and val_strong_real.tsv files
"""

import pandas as pd
import numpy as np
from collections import Counter
import os
import random
from typing import Dict, List, Tuple

def analyze_weak_label_distribution(weak_file: str) -> Dict[str, int]:
    """
    Step 1: Analyze the label distribution in val_weak.tsv

    Args:
        weak_file: Path to val_weak.tsv file

    Returns:
        Dictionary with label counts
    """
    print("Step 1: Analyzing val_weak.tsv label distribution...")

    # Load the weak validation set
    weak_df = pd.read_csv(weak_file, sep='\t')
    print(f"Total files in val_weak: {len(weak_df)}")

    # A row may carry several comma-separated labels; count every one of them.
    label_counts = Counter(
        label.strip()
        for labels_str in weak_df['event_labels']
        for label in labels_str.split(',')
    )

    print("Label distribution in val_weak:")
    for label, count in sorted(label_counts.items()):
        print(f"  {label}: {count}")

    return dict(label_counts)

def merge_strong_datasets(train_strong_file: str, val_strong_file: str) -> pd.DataFrame:
    """
    Step 2: Merge train_strong_real.tsv and val_strong_real.tsv

    Args:
        train_strong_file: Path to train_strong_real.tsv
        val_strong_file: Path to val_strong_real.tsv

    Returns:
        Merged dataframe
    """
    print("\nStep 2: Merging train_strong_real.tsv and val_strong_real.tsv...")

    # Load both halves of the strong dataset
    train_part = pd.read_csv(train_strong_file, sep='\t')
    val_part = pd.read_csv(val_strong_file, sep='\t')

    print(f"Train strong samples: {len(train_part)}")
    print(f"Val strong samples: {len(val_part)}")

    # Stack them with a fresh 0..N-1 index
    combined = pd.concat([train_part, val_part], ignore_index=True)
    print(f"Total merged samples: {len(combined)}")

    return combined

def get_file_label_summary(strong_df: pd.DataFrame) -> Dict[str, set]:
    """
    Get a summary of labels for each file in the strong dataset

    Args:
        strong_df: Strong dataset dataframe with 'filename' and 'event_label'
            columns (one row per event)

    Returns:
        Dictionary mapping filename to set of labels
    """
    file_labels: Dict[str, set] = {}
    # Iterating zipped columns is much faster than DataFrame.iterrows(),
    # which materializes a Series object for every row.
    for filename, label in zip(strong_df['filename'], strong_df['event_label']):
        file_labels.setdefault(filename, set()).add(label)
    return file_labels

def sample_files_by_distribution(file_labels: Dict[str, set], 
                                target_distribution: Dict[str, int],
                                target_total_files: int) -> List[str]:
    """
    Step 3: Sample filenames to match the target label distribution

    Greedily picks files whose labels are all still under their per-label
    targets, then tops up with random leftovers until ``target_total_files``
    files are selected (or the pool is exhausted).

    Args:
        file_labels: Dictionary mapping filename to set of labels
        target_distribution: Target label distribution from val_weak
        target_total_files: Target number of files for new validation set

    Returns:
        List of selected filenames
    """
    print(f"\nStep 3: Sampling files to match target distribution...")
    print(f"Target total files: {target_total_files}")

    # Calculate target proportions
    total_target_labels = sum(target_distribution.values())
    target_proportions = {label: count/total_target_labels 
                         for label, count in target_distribution.items()}

    print("Target proportions:")
    for label, prop in sorted(target_proportions.items()):
        print(f"  {label}: {prop:.3f}")

    # Group files by their label combinations (reported for diagnostics only)
    files_by_labels: Dict[tuple, List[str]] = {}
    for filename, labels in file_labels.items():
        files_by_labels.setdefault(tuple(sorted(labels)), []).append(filename)

    print(f"\nFound {len(files_by_labels)} unique label combinations")

    # Use greedy sampling approach
    selected_files: List[str] = []
    current_label_counts = Counter()
    available_files = list(file_labels.keys())
    random.shuffle(available_files)  # Randomize order

    # Per-label targets scaled to the requested validation size
    # (reuse total_target_labels instead of re-summing the distribution).
    target_val_counts = {label: int(count * target_total_files / total_target_labels) 
                        for label, count in target_distribution.items()}

    print("Target validation label counts:")
    for label, count in sorted(target_val_counts.items()):
        print(f"  {label}: {count}")

    # Greedy selection to match distribution as closely as possible
    for filename in available_files:
        if len(selected_files) >= target_total_files:
            break

        labels_of_file = file_labels[filename]

        # Accept the file only while every one of its labels is still below
        # its target count; labels absent from the target reject the file.
        can_add = all(current_label_counts[label] < target_val_counts.get(label, 0)
                      for label in labels_of_file)

        if can_add:
            selected_files.append(filename)
            current_label_counts.update(labels_of_file)

    # If we haven't reached the target, add more files regardless of perfect
    # distribution match. Membership test against a set: the previous
    # list-based scan made this step O(n^2).
    selected_set = set(selected_files)
    remaining_files = [f for f in available_files if f not in selected_set]
    random.shuffle(remaining_files)

    while len(selected_files) < target_total_files and remaining_files:
        selected_files.append(remaining_files.pop())

    print(f"\nSelected {len(selected_files)} files for new validation set")

    # Show final distribution
    final_label_counts = Counter()
    for filename in selected_files:
        final_label_counts.update(file_labels[filename])

    print("Final validation label distribution:")
    for label in sorted(target_distribution.keys()):
        target_count = target_val_counts.get(label, 0)
        actual_count = final_label_counts[label]
        print(f"  {label}: {actual_count} (target: {target_count})")

    return selected_files

def create_new_splits(merged_df: pd.DataFrame, 
                     val_filenames: List[str],
                     output_dir: str):
    """
    Step 4: Create new train_strong_real.tsv and val_strong_real.tsv files

    Args:
        merged_df: Merged strong dataset
        val_filenames: List of filenames for validation set
        output_dir: Directory to save new files
    """
    print(f"\nStep 4: Creating new dataset splits...")

    # Partition rows by whether their filename belongs to the validation list
    in_val = merged_df['filename'].isin(val_filenames)
    new_val_df = merged_df[in_val].copy()
    new_train_df = merged_df[~in_val].copy()

    print(f"New validation set: {len(new_val_df)} samples from {len(val_filenames)} files")
    print(f"New training set: {len(new_train_df)} samples")

    # Make sure the destination directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Write both splits as tab-separated files
    train_path = os.path.join(output_dir, 'train_strong_real_new.tsv')
    val_path = os.path.join(output_dir, 'val_strong_real_new.tsv')
    new_train_df.to_csv(train_path, sep='\t', index=False)
    new_val_df.to_csv(val_path, sep='\t', index=False)

    print(f"Saved new training set to: {train_path}")
    print(f"Saved new validation set to: {val_path}")

    return new_train_df, new_val_df

def compare_distributions(weak_file: str, new_val_df: pd.DataFrame):
    """
    Step 5: Compare the distributions to verify the result

    Args:
        weak_file: Path to original val_weak.tsv
        new_val_df: New validation strong dataset
    """
    print(f"\nStep 5: Comparing distributions...")

    # Recompute the weak-set label counts (rows carry comma-separated labels)
    weak_df = pd.read_csv(weak_file, sep='\t')
    weak_label_counts = Counter(
        label.strip()
        for labels_str in weak_df['event_labels']
        for label in labels_str.split(',')
    )

    # One row per event in the strong set, so a plain Counter suffices
    new_val_label_counts = Counter(new_val_df['event_label'])

    # Print a side-by-side table of both distributions
    print("\nDistribution comparison:")
    print(f"{'Label':<20} {'Val_Weak':<10} {'New_Val_Strong':<15} {'Ratio':<10}")
    print("-" * 55)

    all_labels = set(weak_label_counts.keys()) | set(new_val_label_counts.keys())
    for label in sorted(all_labels):
        weak_count = weak_label_counts.get(label, 0)
        strong_count = new_val_label_counts.get(label, 0)
        # Guard against labels absent from the weak set
        ratio = strong_count / weak_count if weak_count > 0 else float('inf')
        print(f"{label:<20} {weak_count:<10} {strong_count:<15} {ratio:<10.2f}")

def main():
    """Main function to execute the re-splitting process"""
    print("Starting dataset re-splitting process...")

    # Input/output locations (hard-coded dataset directory)
    base_dir = "/home/shaonian/SED/SED/configs/dataset/desed_tsv"
    weak_file = os.path.join(base_dir, "val_weak.tsv")
    train_strong_file = os.path.join(base_dir, "train_strong_real.tsv")
    val_strong_file = os.path.join(base_dir, "val_strong_real.tsv")
    output_dir = "./"

    # Fix both RNGs so repeated runs produce the same split
    random.seed(42)
    np.random.seed(42)

    try:
        # Step 1: target label distribution from the weak validation set
        weak_distribution = analyze_weak_label_distribution(weak_file)

        # Step 2: pool both strong splits into one dataframe
        strong_merged = merge_strong_datasets(train_strong_file, val_strong_file)

        # Per-file label sets for the merged pool
        per_file_labels = get_file_label_summary(strong_merged)
        print(f"\nTotal unique files in merged dataset: {len(per_file_labels)}")

        # Step 3: size the new strong validation set like the weak one
        weak_df = pd.read_csv(weak_file, sep='\t')
        n_val_files = len(weak_df)

        val_file_list = sample_files_by_distribution(
            per_file_labels, weak_distribution, n_val_files
        )

        # Step 4: write the new train/val split to disk
        new_train_df, new_val_df = create_new_splits(
            strong_merged, val_file_list, output_dir
        )

        # Step 5: sanity-check the resulting distribution
        compare_distributions(weak_file, new_val_df)

        print("\nDataset re-splitting completed successfully!")

    except Exception as e:
        print(f"Error occurred: {str(e)}")
        raise

# Run the full re-splitting pipeline when executed as a script
if __name__ == "__main__":
    main()
