import argparse
import json
import os
from collections import defaultdict

import numpy as np
import pandas as pd
from tqdm import tqdm

def inspect_npz_file(npz_path):
    """
    Inspect a single NPZ file in detail, printing per-array metadata,
    basic statistics, NaN/inf warnings, and a preview of values.

    Args:
        npz_path: Path to the NPZ file

    Returns:
        None. All output goes to stdout; errors are caught and printed.
    """
    print(f"\nInspecting file: {os.path.basename(npz_path)}")
    print("-" * 50)

    try:
        # Context manager ensures the underlying zip handle is closed
        # even if an array fails to load.
        with np.load(npz_path) as data:
            # Print basic file info
            file_size_mb = os.path.getsize(npz_path) / (1024 * 1024)
            print(f"File size: {file_size_mb:.2f} MB")
            print(f"Number of arrays: {len(data.files)}")

            # Print detailed info for each array
            for key in data.files:
                feature = data[key]
                print(f"\nFeature: {key}")
                print(f"  Shape: {feature.shape}")
                print(f"  Data type: {feature.dtype}")
                print(f"  Memory size: {feature.nbytes / (1024 * 1024):.2f} MB")

                # min()/max()/mean() raise on empty arrays, and raise
                # TypeError on non-numeric dtypes, so guard both cases
                # instead of aborting the whole inspection.
                if feature.size == 0:
                    print("  Statistics: (empty array)")
                elif np.issubdtype(feature.dtype, np.number):
                    print("  Statistics:")
                    print(f"    Min: {feature.min()}")
                    print(f"    Max: {feature.max()}")
                    print(f"    Mean: {feature.mean()}")
                    print(f"    Std: {feature.std()}")
                else:
                    print(f"  Statistics: (skipped for dtype {feature.dtype})")

                # NaN/inf only exist for float/complex dtypes; np.isnan
                # raises TypeError on object/string arrays.
                if np.issubdtype(feature.dtype, np.inexact):
                    nan_count = np.isnan(feature).sum()
                    inf_count = np.isinf(feature).sum()
                    if nan_count > 0:
                        print(f"    WARNING: Contains {nan_count} NaN values")
                    if inf_count > 0:
                        print(f"    WARNING: Contains {inf_count} infinite values")

                # Print first few values
                print("  First 5 values:")
                flat_feature = feature.flatten()
                print(f"    {flat_feature[:5]}")

    except Exception as e:
        print(f"Error inspecting file: {str(e)}")

def validate_npz_file(npz_path):
    """
    Validate a single NPZ feature file.

    Checks that the file exists, loads as an NPZ archive, contains at least
    one array, and that float/complex arrays are free of NaN/inf values.

    Args:
        npz_path: Path to the NPZ file

    Returns:
        dict: Validation results with keys 'file', 'status' ('OK' /
        'WARNING' / 'ERROR'), 'error' (None or a '; '-joined message list),
        'feature_types', 'feature_shapes', 'feature_dtypes', 'file_size_mb'.
    """
    result = {
        'file': os.path.basename(npz_path),
        'status': 'OK',
        'error': None,
        'feature_types': [],
        'feature_shapes': {},
        'feature_dtypes': {},
        'file_size_mb': 0
    }

    # Collect every issue instead of overwriting 'error' per key, so a
    # multi-array file reports all problems rather than only the last one.
    issues = []

    try:
        # Check if file exists
        if not os.path.exists(npz_path):
            result['status'] = 'ERROR'
            result['error'] = 'File does not exist'
            return result

        # Get file size
        result['file_size_mb'] = os.path.getsize(npz_path) / (1024 * 1024)

        # Context manager closes the archive's file handle on all paths.
        with np.load(npz_path) as data:
            # Check if file contains any arrays
            if len(data.files) == 0:
                result['status'] = 'ERROR'
                result['error'] = 'No arrays found in NPZ file'
                return result

            # Record feature types and shapes
            result['feature_types'] = data.files
            for key in data.files:
                # NpzFile.__getitem__ always yields an ndarray, so no
                # isinstance check is needed here.
                feature = data[key]
                result['feature_shapes'][key] = feature.shape
                result['feature_dtypes'][key] = str(feature.dtype)

                # NaN/inf checks only apply to float/complex dtypes;
                # np.isnan raises TypeError on object/string arrays.
                if np.issubdtype(feature.dtype, np.inexact):
                    if np.isnan(feature).any():
                        issues.append(f'Feature {key} contains NaN values')
                    elif np.isinf(feature).any():
                        issues.append(f'Feature {key} contains infinite values')

        if issues:
            result['status'] = 'WARNING'
            result['error'] = '; '.join(issues)

    except Exception as e:
        result['status'] = 'ERROR'
        result['error'] = str(e)

    return result

def analyze_feature_shapes(results):
    """
    Summarize the shapes and dtypes observed for each feature type
    across a collection of per-file validation results.

    Args:
        results: List of validation result dicts (see validate_npz_file)

    Returns:
        dict: Mapping of feature type to a summary with 'unique_shapes'
        (stringified distinct shapes), 'shape_count' (number of files
        that contained the feature), and 'dtypes' (distinct dtype names).
    """
    shapes_by_type = defaultdict(list)
    dtypes_by_type = defaultdict(set)

    # Accumulate every observed shape and dtype, keyed by feature name.
    for entry in results:
        dtype_map = entry['feature_dtypes']
        for name, shape in entry['feature_shapes'].items():
            shapes_by_type[name].append(shape)
            if name in dtype_map:
                dtypes_by_type[name].add(dtype_map[name])

    return {
        name: {
            'unique_shapes': list({str(s) for s in observed}),
            'shape_count': len(observed),
            'dtypes': list(dtypes_by_type[name]),
        }
        for name, observed in shapes_by_type.items()
    }

def _print_status_summary(df):
    """Print overall OK/WARNING/ERROR counts for the validated files."""
    print("\nValidation Summary:")
    print("-" * 50)
    print(f"Total files: {len(df)}")
    print(f"OK: {len(df[df['status'] == 'OK'])}")
    print(f"Warnings: {len(df[df['status'] == 'WARNING'])}")
    print(f"Errors: {len(df[df['status'] == 'ERROR'])}")


def _print_shape_report(shape_analysis):
    """Print the per-feature-type shape/dtype report, flagging mismatches."""
    print("\nFeature Shape Analysis:")
    print("-" * 50)
    for feature_type, analysis in shape_analysis.items():
        print(f"\nFeature type: {feature_type}")
        print(f"Number of files: {analysis['shape_count']}")
        print(f"Unique shapes: {analysis['unique_shapes']}")
        print(f"Data types: {analysis['dtypes']}")

        if len(analysis['unique_shapes']) > 1:
            print("WARNING: Inconsistent shapes detected!")


def _print_problem_files(problems):
    """Print per-file details for each row whose status is not OK."""
    print("\nFiles with issues:")
    print("-" * 50)
    for _, row in problems.iterrows():
        print(f"\nFile: {row['file']}")
        print(f"Status: {row['status']}")
        print(f"Error: {row['error']}")
        if row['feature_types']:
            print("Feature types:", row['feature_types'])
            print("Feature shapes:", row['feature_shapes'])


def _save_reports(feature_dir, df, shape_analysis):
    """Persist per-file results (CSV) and shape analysis (JSON) to feature_dir."""
    output_csv = os.path.join(feature_dir, 'feature_validation_results.csv')
    df.to_csv(output_csv, index=False)
    print(f"\nDetailed results saved to: {output_csv}")

    shape_analysis_file = os.path.join(feature_dir, 'feature_shape_analysis.json')
    with open(shape_analysis_file, 'w') as f:
        json.dump(shape_analysis, f, indent=2)
    print(f"Shape analysis saved to: {shape_analysis_file}")


def validate_feature_directory(feature_dir):
    """
    Validate all NPZ feature files in a directory.

    Validates each *.npz file, prints a status summary, a shape report,
    and details of failing files, then writes a CSV of per-file results
    plus a JSON shape analysis into feature_dir.

    Args:
        feature_dir: Directory containing NPZ feature files

    Returns:
        pd.DataFrame: Validation results for all files, or None when the
        directory contains no NPZ files.
    """
    # Find all NPZ files
    npz_files = [f for f in os.listdir(feature_dir) if f.endswith('.npz')]

    if not npz_files:
        print(f"No NPZ feature files found in {feature_dir}")
        return None

    print(f"Found {len(npz_files)} NPZ feature files")

    # Validate each file
    results = [
        validate_npz_file(os.path.join(feature_dir, npz_file))
        for npz_file in tqdm(npz_files, desc="Validating files")
    ]

    df = pd.DataFrame(results)
    shape_analysis = analyze_feature_shapes(results)

    _print_status_summary(df)
    _print_shape_report(shape_analysis)

    # Compute the failing-file subset once instead of three times.
    problems = df[df['status'] != 'OK']
    if len(problems) > 0:
        _print_problem_files(problems)

    _save_reports(feature_dir, df, shape_analysis)

    return df

def main():
    """Parse CLI arguments and dispatch to single-file inspection or
    directory-wide validation; print help when neither mode is given."""
    parser = argparse.ArgumentParser(description='Validate NPZ feature files')
    parser.add_argument('--feature_dir', type=str, help='Directory containing NPZ feature files')
    parser.add_argument('--single_file', type=str, help='Path to a single NPZ file to inspect')

    args = parser.parse_args()

    if args.single_file:
        # Single-file mode takes precedence over directory mode.
        if os.path.exists(args.single_file):
            inspect_npz_file(args.single_file)
        else:
            print(f"Error: File {args.single_file} does not exist")
    elif args.feature_dir:
        validate_feature_directory(args.feature_dir)
    else:
        parser.print_help()

# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main() 