import numpy as np
from pathlib import Path
from PIL import Image
import torch
import torchvision.transforms as transforms
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score
from scipy import stats
import json
from typing import List, Tuple, Dict
import warnings
warnings.filterwarnings('ignore')  # NOTE(review): blanket-suppresses ALL warnings; consider narrowing the filter

# Configuration
DATA_ROOT = Path(r'D:/project/dataset/ay_net_dataset/US_dataset')  # folder containing the '*_gt.png' images
TRAIN_RATIO = 0.9  # fraction of samples assigned to training (rest goes to validation)
SEED = 1234  # RNG seed for reproducible clustering and shuffling
TRAIN_FILE = Path('train_files.txt').resolve()  # output: training file list
VAL_FILE = Path('val_files.txt').resolve()  # output: validation file list
STATS_FILE = Path('split_statistics.json').resolve()  # output: JSON report of the split
N_CLUSTERS = 8  # center of the silhouette search range for KMeans cluster count
MIN_CLUSTER_SIZE = 5  # clusters smaller than this are not split (all go to training)
USE_RANDOM_SPLIT = False  # True: simple norm-ordered round-robin split instead of clustering

def extract_comprehensive_features(image_path: Path) -> np.ndarray:
    """Extract a 64-dimensional feature vector describing one image.

    Feature layout (64 values total):
      - per-channel mean / std / max / min ............ 12
      - per-channel 15-bin intensity histograms ....... 45
      - texture: mean/std of 5x5 patch variances ......  2
      - aspect ratio and log(area) ....................  2
      - pairwise RGB channel correlations .............  3

    Args:
        image_path: path to an image file readable by PIL.

    Returns:
        A float ndarray of length 64. On any read/processing error the
        error is printed and a zero vector of the same length is returned,
        so callers always receive a fixed-size array.
    """
    try:
        # Context manager releases the underlying file handle promptly;
        # a bare Image.open leaks it until garbage collection.
        with Image.open(image_path) as img:
            if img.mode != 'RGB':
                img = img.convert('RGB')
            # Convert to a float tensor in [0, 1], shape [C, H, W].
            img_tensor = transforms.ToTensor()(img)

        # 1. Basic per-channel statistics.
        mean_rgb = img_tensor.mean(dim=(1, 2))
        std_rgb = img_tensor.std(dim=(1, 2))
        max_rgb = img_tensor.max(dim=2)[0].max(dim=1)[0]
        min_rgb = img_tensor.min(dim=2)[0].min(dim=1)[0]

        # 2. Normalized 15-bin histogram per channel (15 * 3 = 45 values).
        hist_features = []
        for c in range(3):
            hist = torch.histc(img_tensor[c], bins=15, min=0, max=1)
            hist = hist / (hist.sum() + 1e-8)  # normalize; epsilon avoids divide-by-zero
            hist_features.append(hist.numpy())
        hist_features = np.concatenate(hist_features)

        # 3. Texture: local variance over non-overlapping 5x5 patches.
        kernel_size = 5
        if img_tensor.shape[1] >= kernel_size and img_tensor.shape[2] >= kernel_size:
            unfold = torch.nn.Unfold(kernel_size=kernel_size, stride=kernel_size)
            patches = unfold(img_tensor.unsqueeze(0))  # [1, C*K*K, L]
            patch_vars = patches.var(dim=1)  # [1, L]
            texture_mean = patch_vars.mean().item()
            # std() of a single element is NaN (unbiased estimator); use 0 instead
            # so the feature vector never carries NaN downstream.
            texture_std = patch_vars.std().item() if patch_vars.numel() > 1 else 0.0
        else:
            texture_mean = texture_std = 0.0

        # 4. Spatial features (size and shape of the image).
        h, w = img_tensor.shape[1], img_tensor.shape[2]
        aspect_ratio = w / h
        log_area = np.log(h * w + 1)

        # 5. Color-distribution features: pairwise channel correlations.
        r, g, b = img_tensor[0].flatten(), img_tensor[1].flatten(), img_tensor[2].flatten()
        rg_corr = np.corrcoef(r.numpy(), g.numpy())[0, 1]
        rb_corr = np.corrcoef(r.numpy(), b.numpy())[0, 1]
        gb_corr = np.corrcoef(g.numpy(), b.numpy())[0, 1]

        # A constant channel yields NaN correlation; map those to 0.
        rg_corr = 0.0 if np.isnan(rg_corr) else rg_corr
        rb_corr = 0.0 if np.isnan(rb_corr) else rb_corr
        gb_corr = 0.0 if np.isnan(gb_corr) else gb_corr

        # Assemble the 64-dimensional feature vector.
        features = np.concatenate([
            mean_rgb.numpy(),             # 3 values
            std_rgb.numpy(),              # 3 values
            max_rgb.numpy(),              # 3 values
            min_rgb.numpy(),              # 3 values
            hist_features,                # 45 values (15 * 3)
            [texture_mean, texture_std],  # 2 values
            [aspect_ratio, log_area],     # 2 values
            [rg_corr, rb_corr, gb_corr]   # 3 values
        ])

        return features

    except Exception as e:
        print(f"Error processing {image_path}: {e}")
        # Fixed-size zero fallback keeps downstream np.vstack shapes consistent.
        return np.zeros(64)

def validate_distribution_similarity(train_features: np.ndarray, val_features: np.ndarray) -> Dict:
    """验证训练集和验证集的特征分布相似性"""
    similarity_metrics = {}
    
    for i in range(train_features.shape[1]):
        train_feature = train_features[:, i]
        val_feature = val_features[:, i]
        
        # Kolmogorov-Smirnov测试
        ks_stat, ks_p = stats.ks_2samp(train_feature, val_feature)
        
        # Mann-Whitney U测试
        mw_stat, mw_p = stats.mannwhitneyu(train_feature, val_feature, alternative='two-sided')
        
        similarity_metrics[f'feature_{i}'] = {
            'ks_statistic': float(ks_stat),
            'ks_p_value': float(ks_p),
            'mw_p_value': float(mw_p),
            'train_mean': float(train_feature.mean()),
            'val_mean': float(val_feature.mean()),
            'train_std': float(train_feature.std()),
            'val_std': float(val_feature.std())
        }
    
    # 计算总体相似性得分
    avg_ks_p = np.mean([m['ks_p_value'] for m in similarity_metrics.values()])
    avg_mw_p = np.mean([m['mw_p_value'] for m in similarity_metrics.values()])
    
    return {
        'feature_metrics': similarity_metrics,
        'overall_ks_p_value': float(avg_ks_p),
        'overall_mw_p_value': float(avg_mw_p),
        'distribution_similar': avg_ks_p > 0.05 and avg_mw_p > 0.05
    }

def random_stratified_split(stems: List[str], features: np.ndarray,
                            train_ratio=None) -> Tuple[List[str], List[str]]:
    """Deterministic stratified split based on feature-norm ordering.

    Samples are sorted by the L2 norm of their feature vector and assigned
    round-robin: out of every ``period`` consecutive samples, exactly one
    goes to validation, keeping the norm distribution balanced between the
    two splits.

    Args:
        stems: file stems, parallel to the rows of ``features``.
        features: 2-D feature matrix, one row per stem.
        train_ratio: target training fraction. Defaults to the module-level
            TRAIN_RATIO. (The previous implementation hard-coded a 4:1
            split — i.e. 0.8 — regardless of TRAIN_RATIO.)

    Returns:
        (train_stems, val_stems)
    """
    if train_ratio is None:
        train_ratio = TRAIN_RATIO

    # One validation sample out of every `period`; e.g. ratio 0.8 -> every 5th.
    # Guard against division by zero for ratio == 1.
    period = max(2, round(1.0 / max(1e-9, 1.0 - train_ratio)))

    # The assignment is fully deterministic — no RNG is used, so the old
    # np.random.seed(SEED) call here was dead code and has been removed.
    order = np.argsort(np.linalg.norm(features, axis=1))

    train_stems = []
    val_stems = []
    for pos, idx in enumerate(order):
        if pos % period == period - 1:
            val_stems.append(stems[idx])
        else:
            train_stems.append(stems[idx])

    return train_stems, val_stems

def improved_stratified_split(stems: List[str], features: np.ndarray, clusters: np.ndarray,
                              n_clusters: int, train_ratio=None,
                              min_cluster_size=None) -> Tuple[List[str], List[str]]:
    """Per-cluster stratified split spreading validation samples across
    the range of distances to the global feature centroid.

    Clusters smaller than ``min_cluster_size`` go entirely to training.
    Within each remaining cluster, members are sorted by distance to the
    global centroid and every ``val_step``-th sample is taken for
    validation, so the validation set covers both near-center ("easy")
    and far ("hard") samples.

    Args:
        stems: file stems, parallel to the rows of ``features``.
        features: 2-D feature matrix, one row per stem.
        clusters: cluster id per sample, parallel to ``stems``.
        n_clusters: number of cluster ids to iterate over.
        train_ratio: training fraction; defaults to module-level TRAIN_RATIO.
        min_cluster_size: minimum splittable cluster size; defaults to
            module-level MIN_CLUSTER_SIZE.

    Returns:
        (train_stems, val_stems)
    """
    if train_ratio is None:
        train_ratio = TRAIN_RATIO
    if min_cluster_size is None:
        min_cluster_size = MIN_CLUSTER_SIZE

    train_stems = []
    val_stems = []

    # Distances are measured against the centroid of ALL samples, not the
    # per-cluster centroid, so "difficulty" is globally comparable.
    global_center = features.mean(axis=0)

    for cluster_id in range(n_clusters):
        cluster_indices = np.where(clusters == cluster_id)[0]

        if len(cluster_indices) < min_cluster_size:
            # Too small to split meaningfully: keep the whole cluster for training.
            train_stems.extend([stems[i] for i in cluster_indices])
            continue

        cluster_features = features[cluster_indices]
        cluster_stems = [stems[i] for i in cluster_indices]

        # Rank cluster members by distance to the global centroid.
        distances_to_center = np.linalg.norm(cluster_features - global_center, axis=1)
        sorted_indices = np.argsort(distances_to_center)

        n_val = max(1, int(len(cluster_indices) * (1 - train_ratio)))

        # Pick validation samples evenly along the distance ranking.
        # (n_val >= 1 here, so val_step >= 1 and the modulo is safe.)
        val_step = len(sorted_indices) // n_val if n_val > 0 else len(sorted_indices)
        val_indices = []
        train_indices = []

        for i, idx in enumerate(sorted_indices):
            if len(val_indices) < n_val and i % val_step == 0:
                val_indices.append(idx)
            else:
                train_indices.append(idx)

        # Top up validation from the training tail (farthest samples) if short.
        while len(val_indices) < n_val and train_indices:
            val_indices.append(train_indices.pop())

        train_stems.extend([cluster_stems[i] for i in train_indices])
        val_stems.extend([cluster_stems[i] for i in val_indices])

    return train_stems, val_stems

def create_balanced_split():
    """Build a distribution-balanced train/validation split of the dataset.

    Pipeline:
      1. Scan DATA_ROOT for '*_gt.png' files and extract a feature vector
         per image (the extractor returns zeros on failure).
      2. Standardize features, then split either by a simple norm-ordered
         scheme (USE_RANDOM_SPLIT) or by KMeans with a silhouette-selected
         cluster count followed by per-cluster stratification.
      3. Enforce a minimum validation-set size, shuffle both lists, and
         check that the splits have similar feature distributions
         (KS and Mann-Whitney tests).
      4. Write the train/val file lists and a JSON statistics report.

    Side effects: writes TRAIN_FILE, VAL_FILE and STATS_FILE; prints a
    summary to stdout. Returns None, including on early error exits.
    """
    print(f"Scanning directory: {DATA_ROOT}")

    # Collect (stem, feature-vector) pairs for every ground-truth file.
    data = []
    failed_files = []

    for gt_path in DATA_ROOT.glob('*_gt.png'):
        stem = gt_path.stem[:-3]  # strip the trailing '_gt'
        try:
            features = extract_comprehensive_features(gt_path)
            data.append((stem, features))
        except Exception as e:
            # NOTE(review): the extractor catches its own errors and returns a
            # zero vector, so this branch is mostly defensive.
            failed_files.append(str(gt_path))
            print(f"Failed to process {gt_path}: {e}")

    if not data:
        print("Error: No valid files found. Check the DATA_ROOT path.")
        return

    print(f"Successfully processed {len(data)} samples, failed: {len(failed_files)}")

    # Assemble the feature matrix (one row per sample).
    stems = [item[0] for item in data]
    features = np.vstack([item[1] for item in data])

    # Global feature statistics, recorded in the JSON report.
    global_feature_stats = {
        'overall_min_value': float(features.min()),
        'overall_max_value': float(features.max()),
        'overall_mean_value': float(features.mean()),
        'overall_std_value': float(features.std()),
        'feature_dimension': int(features.shape[1]),
        'min_values': features.min(axis=0).tolist(),
        'max_values': features.max(axis=0).tolist(),
        'mean_values': features.mean(axis=0).tolist(),
        'std_values': features.std(axis=0).tolist(),
        # Names mirror the layout produced by extract_comprehensive_features.
        'feature_names': [
            'mean_r', 'mean_g', 'mean_b',
            'std_r', 'std_g', 'std_b', 
            'max_r', 'max_g', 'max_b',
            'min_r', 'min_g', 'min_b'
        ] + [f'hist_r_{i}' for i in range(15)] + [f'hist_g_{i}' for i in range(15)] + [f'hist_b_{i}' for i in range(15)] + [
            'texture_mean', 'texture_std',
            'aspect_ratio', 'log_area',
            'rg_corr', 'rb_corr', 'gb_corr'
        ]
    }

    # Standardize features so clustering/distance computations are scale-free.
    scaler = StandardScaler()
    features_scaled = scaler.fit_transform(features)

    if USE_RANDOM_SPLIT:
        print("Using random stratified split...")
        train_stems, val_stems = random_stratified_split(stems, features_scaled)
        best_n_clusters = 0
        best_score = 0
    else:
        # Search for the cluster count with the best silhouette score.
        best_score = -1
        best_n_clusters = N_CLUSTERS

        # NOTE(review): range upper bound is exclusive, so N_CLUSTERS+2 is the
        # largest value actually tried; the range may be empty for tiny datasets,
        # in which case the N_CLUSTERS default is used.
        for n in range(max(3, N_CLUSTERS-3), min(len(data)//6, N_CLUSTERS+3)):
            if n >= len(data):
                continue
            kmeans = KMeans(n_clusters=n, random_state=SEED, n_init=10)
            cluster_labels = kmeans.fit_predict(features_scaled)
            score = silhouette_score(features_scaled, cluster_labels)
            
            if score > best_score:
                best_score = score
                best_n_clusters = n
        
        print(f"Optimal number of clusters: {best_n_clusters} (silhouette score: {best_score:.3f})")
        
        # Final clustering with the selected cluster count.
        np.random.seed(SEED)
        kmeans = KMeans(n_clusters=best_n_clusters, random_state=SEED, n_init=20)
        clusters = kmeans.fit_predict(features_scaled)
        
        # Per-cluster stratified split.
        train_stems, val_stems = improved_stratified_split(stems, features_scaled, clusters, best_n_clusters)
    
    # Ensure the validation set holds at least 15% of samples (and >= 10).
    min_val_samples = max(10, int(len(data) * 0.15))
    if len(val_stems) < min_val_samples:
        print(f"Warning: Validation set too small ({len(val_stems)}), adjusting...")
        # Move randomly chosen samples from training into validation.
        additional_needed = min_val_samples - len(val_stems)
        np.random.seed(SEED)
        additional_samples = np.random.choice(len(train_stems), additional_needed, replace=False)
        
        # Pop from highest index first so earlier indices stay valid.
        for idx in sorted(additional_samples, reverse=True):
            val_stems.append(train_stems.pop(idx))
    
    # Shuffle both lists reproducibly.
    np.random.seed(SEED)
    np.random.shuffle(train_stems)
    np.random.shuffle(val_stems)
    
    # Map stems back to row indices for the similarity check.
    # NOTE(review): stems.index(...) is O(n) per lookup (O(n^2) overall) and
    # assumes stems are unique; a stem->index dict would be safer and faster.
    train_indices = [stems.index(s) for s in train_stems]
    val_indices = [stems.index(s) for s in val_stems]
    
    similarity_results = validate_distribution_similarity(
        features_scaled[train_indices], 
        features_scaled[val_indices]
    )
    
    # Write the train/val file lists (one absolute path per line).
    # NOTE(review): the written paths carry no file extension — confirm the
    # consumer appends e.g. '_gt.png' itself.
    try:
        with open(TRAIN_FILE, 'w', encoding='utf-8') as f:
            for stem in train_stems:
                abs_path = DATA_ROOT / f"{stem}"
                f.write(f"{abs_path}\n")
        print(f"Saved training files to: {TRAIN_FILE}")
        
        with open(VAL_FILE, 'w', encoding='utf-8') as f:
            for stem in val_stems:
                abs_path = DATA_ROOT / f"{stem}"
                f.write(f"{abs_path}\n")
        print(f"Saved validation files to: {VAL_FILE}")
        
    except Exception as e:
        print(f"Error saving file lists: {e}")
        return
    
    # Assemble the JSON statistics report.
    statistics = {
        'total_samples': len(data),
        'train_samples': len(train_stems),
        'val_samples': len(val_stems),
        'train_ratio': len(train_stems) / len(data),
        'failed_files': failed_files,
        'split_method': 'random_stratified' if USE_RANDOM_SPLIT else 'improved_clustering',
        'n_clusters_used': best_n_clusters,
        'silhouette_score': float(best_score),
        'global_feature_statistics': global_feature_stats,
        'distribution_similarity': {
            'feature_metrics': similarity_results['feature_metrics'],
            'overall_ks_p_value': float(similarity_results['overall_ks_p_value']),
            'overall_mw_p_value': float(similarity_results['overall_mw_p_value']),
            'distribution_similar': bool(similarity_results['distribution_similar'])
        }
    }
    
    try:
        with open(STATS_FILE, 'w', encoding='utf-8') as f:
            json.dump(statistics, f, indent=2, ensure_ascii=False)
        print(f"Saved statistics to: {STATS_FILE}")
        
    except Exception as e:
        print(f"Error saving statistics: {e}")
        return
    
    # Print a human-readable summary.
    print(f"\nSplit complete:")
    print(f"- Training files: {len(train_stems)} ({len(train_stems)/len(data):.1%})")
    print(f"- Validation files: {len(val_stems)} ({len(val_stems)/len(data):.1%})")
    print(f"- Split method: {'Random stratified' if USE_RANDOM_SPLIT else 'Improved clustering'}")
    print(f"- Distribution similarity: {'✓' if similarity_results['distribution_similar'] else '✗'}")
    print(f"- Overall KS test p-value: {similarity_results['overall_ks_p_value']:.4f}")
    print(f"- Overall MW test p-value: {similarity_results['overall_mw_p_value']:.4f}")
    
    # Global feature statistics summary.
    print(f"\nGlobal feature statistics:")
    print(f"- Feature vector dimension: {global_feature_stats['feature_dimension']}")
    print(f"- Overall min value: {global_feature_stats['overall_min_value']:.6f}")
    print(f"- Overall max value: {global_feature_stats['overall_max_value']:.6f}")
    print(f"- Overall mean value: {global_feature_stats['overall_mean_value']:.6f}")
    print(f"- Overall std value: {global_feature_stats['overall_std_value']:.6f}")
    
    # Per-cluster train/val counts (clustering path only; `clusters` is
    # undefined when USE_RANDOM_SPLIT is True, hence the guard).
    if not USE_RANDOM_SPLIT:
        print(f"\nCluster distribution:")
        for cluster_id in range(best_n_clusters):
            train_count = sum(1 for i in train_indices if clusters[i] == cluster_id)
            val_count = sum(1 for i in val_indices if clusters[i] == cluster_id)
            total_cluster = train_count + val_count
            
            if total_cluster > 0:
                print(f"Cluster {cluster_id:2d}: Total={total_cluster:3d}, "
                      f"Train={train_count:3d} ({train_count/total_cluster:.1%}), "
                      f"Val={val_count:3d} ({val_count/total_cluster:.1%})")
    
    print(f"\nStatistics saved to: {STATS_FILE}")
    print(f"\nRecommendations:")
    print(f"- Try setting USE_RANDOM_SPLIT=True for simpler splitting")
    print(f"- Consider using cross-validation if validation set is still problematic")
    print(f"- Monitor both train and validation metrics during training")
if __name__ == '__main__':
    # Script entry point: run the full split pipeline.
    create_balanced_split()