import numpy as np
import os
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

def load_data(data_dir):
    """Load every known climate variable from *data_dir*.

    Looks for seven fixed .npy files (specific humidity, temperature,
    wind, precipitation, pressure, long-/short-wave radiation); files
    that are absent are skipped with a warning.

    Args:
        data_dir: directory containing the .npy files.

    Returns:
        dict mapping variable name -> loaded numpy array (only the
        variables whose file exists).
    """
    filenames = {
        'shum': 'shum.npy',
        'temp': 'temp.npy',
        'wind': 'wind.npy',
        'prec': 'prec.npy',
        'pres': 'pres.npy',
        'lrad': 'lrad.npy',
        'srad': 'srad.npy',
    }

    loaded = {}
    for key, fname in filenames.items():
        full_path = os.path.join(data_dir, fname)
        if not os.path.exists(full_path):
            print(f"Warning: {fname} not found!")
            continue
        arr = np.load(full_path)
        loaded[key] = arr
        print(f"Loaded {key} with shape {arr.shape}")
        # Basic summary statistics (NaN-aware, since raw data may have gaps).
        print(f"  Min: {np.nanmin(arr):.2f}")
        print(f"  Max: {np.nanmax(arr):.2f}")
        print(f"  Mean: {np.nanmean(arr):.2f}")
        print(f"  Std: {np.nanstd(arr):.2f}")

    return loaded

def analyze_missing_values(data):
    """Print missing-value (NaN) statistics for each array.

    Reports the overall NaN count/percentage, the per-time-step counts,
    and the per-grid-cell counts. Assumes each array is 3-D with time on
    axis 0 and two spatial axes (time, lat, lon) — TODO confirm against
    the data files.

    Args:
        data: dict mapping variable name -> numpy array.
    """
    for name, array in data.items():
        # Compute the NaN mask once instead of three full-array scans.
        nan_mask = np.isnan(array)
        missing_count = nan_mask.sum()
        total_elements = array.size
        missing_percentage = (missing_count / total_elements) * 100

        print(f"\nAnalyzing missing values in {name}:")
        print(f"Total missing values: {missing_count}")
        print(f"Missing percentage: {missing_percentage:.2f}%")

        # Temporal distribution: NaN count per time step.
        missing_per_time = nan_mask.sum(axis=(1, 2))
        print("\nMissing values per time step:")
        for t, count in enumerate(missing_per_time):
            if count > 0:
                print(f"Time step {t}: {count} missing values")

        # Spatial distribution: NaN count per grid cell across all time steps.
        missing_per_location = nan_mask.sum(axis=0)
        print("\nMissing values per location:")
        print(f"Max missing count at a location: {np.max(missing_per_location)}")
        print(f"Locations with >50% missing: {np.sum(missing_per_location > array.shape[0]/2)}")

def handle_missing_values(data):
    """Fill NaN values in every array, in place.

    For each missing cell, the fill value is tried in this order:
      1. mean of the previous and next time step's values,
      2. the previous time step's value alone,
      3. the next time step's value alone,
      4. mean of the non-NaN values in the 3x3 spatial neighborhood,
      5. the global nan-mean of the (partially filled) array.

    Assumes each array is 3-D (time, lat, lon) — TODO confirm.

    Args:
        data: dict mapping variable name -> numpy array; arrays are
            modified in place. Returns None.
    """
    for name, array in data.items():
        # Visit only the NaN positions instead of scanning every cell
        # (the original triple loop was O(T*H*W) even with zero NaNs).
        # Filled cells never become NaN again, and np.argwhere yields
        # C order (t, i, j ascending) — the exact visiting order of the
        # original nested loops — so temporal fills observe earlier
        # fills exactly as before and results are unchanged.
        nan_positions = np.argwhere(np.isnan(array))
        if nan_positions.size == 0:
            continue
        print(f"\nHandling missing values in {name}:")

        n_time, n_lat, n_lon = array.shape
        for t, i, j in nan_positions:
            # Temporal neighbors (NaN when out of range or themselves missing).
            prev_val = array[t - 1, i, j] if t > 0 else np.nan
            next_val = array[t + 1, i, j] if t < n_time - 1 else np.nan

            if not np.isnan(prev_val) and not np.isnan(next_val):
                # Both temporal neighbors known: average them.
                array[t, i, j] = (prev_val + next_val) / 2
            elif not np.isnan(prev_val):
                array[t, i, j] = prev_val
            elif not np.isnan(next_val):
                array[t, i, j] = next_val
            else:
                # No temporal information: mean of the 3x3 spatial neighborhood.
                neighbors = []
                for di in [-1, 0, 1]:
                    for dj in [-1, 0, 1]:
                        if (0 <= i + di < n_lat and
                                0 <= j + dj < n_lon and
                                not np.isnan(array[t, i + di, j + dj])):
                            neighbors.append(array[t, i + di, j + dj])
                if neighbors:
                    array[t, i, j] = np.mean(neighbors)
                else:
                    # Last resort: global mean over all known values.
                    array[t, i, j] = np.nanmean(array)

        print(f"Missing values filled using temporal and spatial interpolation")

def normalize_data(data):
    """Standardize every variable to zero mean and unit variance.

    Args:
        data: dict mapping variable name -> numpy array.

    Returns:
        tuple of (dict of standardized arrays with original shapes,
        dict of fitted StandardScaler objects keyed by variable name,
        kept so values can be inverse-transformed later).
    """
    fitted_scalers = {}
    result = {}

    for key, values in data.items():
        # StandardScaler expects a 2-D (n_samples, n_features) input,
        # so treat every cell of the array as one single-feature sample.
        original_shape = values.shape
        flat = values.reshape(-1, 1)

        sc = StandardScaler()
        scaled = sc.fit_transform(flat)

        result[key] = scaled.reshape(original_shape)
        fitted_scalers[key] = sc

        print(f"\nNormalized {key}:")
        print(f"  Original mean: {sc.mean_[0]:.2f}")
        print(f"  Original std: {sc.scale_[0]:.2f}")
        print(f"  Normalized mean: {np.mean(scaled):.2f}")
        print(f"  Normalized std: {np.std(scaled):.2f}")

    return result, fitted_scalers

def visualize_data(data, name, time_step=0):
    """Save a three-panel diagnostic figure for one variable.

    Panels: histogram of raw values, histogram of standardized values,
    and a spatial heat map — all taken at *time_step*. The figure is
    written to '<name>_distribution.png' in the working directory.

    Args:
        data: dict mapping variable name -> numpy array (time first axis).
        name: which variable to plot.
        time_step: index along the time axis to visualize.
    """
    snapshot = data[name][time_step]

    plt.figure(figsize=(15, 5))

    # Panel 1: distribution of the raw values.
    plt.subplot(131)
    plt.hist(snapshot.flatten(), bins=50)
    plt.title(f'Original {name} distribution')

    # Panel 2: distribution after standardization (fits a throwaway scaler).
    scaled, _ = normalize_data({name: data[name]})
    plt.subplot(132)
    plt.hist(scaled[name][time_step].flatten(), bins=50)
    plt.title(f'Normalized {name} distribution')

    # Panel 3: spatial heat map of the raw field.
    plt.subplot(133)
    plt.imshow(snapshot, cmap='viridis')
    plt.colorbar()
    plt.title(f'{name} spatial distribution')

    plt.tight_layout()
    plt.savefig(f'{name}_distribution.png')
    plt.close()

def save_processed_data(data, output_dir):
    """Write each array plus its summary statistics to *output_dir*.

    For every variable this creates '<name>_normalized.npy' (the array)
    and '<name>_stats.npy' (a pickled dict — reload it with
    np.load(..., allow_pickle=True)). The directory is created if needed.

    Args:
        data: dict mapping variable name -> numpy array.
        output_dir: destination directory path.
    """
    os.makedirs(output_dir, exist_ok=True)
    for key, values in data.items():
        np.save(os.path.join(output_dir, f'{key}_normalized.npy'), values)
        # Summary statistics stored alongside the data for quick inspection.
        summary = {
            'mean': np.mean(values),
            'std': np.std(values),
            'min': np.min(values),
            'max': np.max(values),
        }
        np.save(os.path.join(output_dir, f'{key}_stats.npy'), summary)
    print(f"Processed data saved to {output_dir}")

def main():
    """Run the full preprocessing pipeline end to end.

    Loads the raw .npy files from the current directory, analyzes and
    fills missing values, saves one distribution figure per variable,
    standardizes each variable, and writes the results (plus the fitted
    scalers) to 'processed_data/'.
    """
    # Input and output locations
    data_dir = '.'
    output_dir = 'processed_data'
    
    # Load the raw arrays
    print("Loading data...")
    data = load_data(data_dir)
    
    # Report where the NaNs are (per time step and per location)
    print("\nAnalyzing missing values...")
    analyze_missing_values(data)
    
    # Fill NaNs in place (temporal, then spatial interpolation)
    print("\nHandling missing values...")
    handle_missing_values(data)
    
    # Save one distribution figure per variable
    print("\nVisualizing data distributions...")
    for name in data.keys():
        visualize_data(data, name)
    
    # Standardize each variable to zero mean / unit variance
    print("\nNormalizing data...")
    normalized_data, scalers = normalize_data(data)
    
    # Persist the normalized arrays and their summary statistics
    print("\nSaving processed data...")
    save_processed_data(normalized_data, output_dir)
    
    # Persist the fitted scalers for later inverse transforms
    # (pickled dict — reload with np.load(..., allow_pickle=True))
    np.save(os.path.join(output_dir, 'scalers.npy'), scalers)
    print("Data preprocessing completed!")

# Script entry point: run the pipeline only when executed directly.
if __name__ == '__main__':
    main()