import os
import yaml
from collections import defaultdict
from tqdm import tqdm
import numpy as np
import shutil
from ultralytics.data.dataset import YOLODataset

def _load_class_names(DATA_YAML_PATH):
    """Load class names from the dataset .yaml file.

    Args:
        DATA_YAML_PATH (str): Path to the data .yaml file containing a
            'names' field, given either as a list or an {id: name} mapping.

    Returns:
        dict[int, str]: Mapping of class ID to class name; empty when the
            'names' field is missing or empty.

    Raises:
        SystemExit: When the file is missing or cannot be parsed as YAML
            (exits with status 1 so shell callers can detect the failure;
            the original bare exit() reported success status 0 on error).
    """
    class_names = {}
    try:
        with open(DATA_YAML_PATH, 'r', encoding='utf-8') as f:
            # An empty YAML file parses to None; fall back to {} so the
            # .get() below cannot raise AttributeError.
            data_yaml = yaml.safe_load(f) or {}

        names_list = data_yaml.get('names', [])
        if isinstance(names_list, dict):
            # Mapping form {id: name}: normalize keys to int.
            for idx, name in names_list.items():
                class_names[int(idx)] = name
        elif isinstance(names_list, list):
            # List form: position is the class ID.
            for idx, name in enumerate(names_list):
                class_names[idx] = name

        if not class_names:
            print(f"警告: {DATA_YAML_PATH} 中未找到 'names' 字段或为空。将仅显示类别ID。")
        else:
            print(f"从 {DATA_YAML_PATH} 读取到 {len(class_names)} 个类别。")
            for idx in sorted(class_names.keys()):
                print(f"  ID {idx}: {class_names[idx]}")

    except FileNotFoundError:
        print(f"错误: 找不到 {DATA_YAML_PATH} 文件。请检查路径并确保文件存在。")
        raise SystemExit(1)
    except yaml.YAMLError as e:
        print(f"错误: 解析 {DATA_YAML_PATH} 失败: {e}")
        raise SystemExit(1)
    return class_names

def _get_subsets_to_scan(DATASET_ROOT, subsets_to_scan=None):
    """Determine which dataset subsets should be scanned.

    Args:
        DATASET_ROOT (str): Root directory of the dataset.
        subsets_to_scan (list[str] | None): Subsets requested by the caller;
            defaults to ['train', 'val', 'test'] when None.

    Returns:
        list[str]: A NEW list of subset names; 'test' is appended when a
            test/labels directory exists on disk even if not requested.
            (The original mutated the caller's list in place.)
    """
    if subsets_to_scan is None:
        subsets_to_scan = ['train', 'val', 'test']

    # Copy so the caller's list (e.g. DatasetBalancer.subset) is never mutated.
    subsets_to_scan = list(subsets_to_scan)

    # The original condition `labels or (images and labels)` reduces to just
    # the labels directory existing — images alone never sufficed, and
    # _scan_subset skips any subset without a labels directory anyway.
    test_labels_path = os.path.join(DATASET_ROOT, 'test', 'labels')
    if os.path.exists(test_labels_path) and 'test' not in subsets_to_scan:
        subsets_to_scan.append('test')

    return subsets_to_scan

def _scan_subset(subset_name, DATASET_ROOT, subset_class_counts, total_class_counts, subset_image_counts):
    """Scan one dataset subset's label files and tally class-instance counts.

    Args:
        subset_name (str): Subset directory name, e.g. 'train'.
        DATASET_ROOT (str): Root directory of the dataset.
        subset_class_counts (dict): subset -> class_id -> count; updated in place.
        total_class_counts (dict): class_id -> count over all subsets; updated in place.
        subset_image_counts (dict): subset -> number of image files; updated in place.

    Returns:
        list[dict]: One entry per readable label file, each with 'filename'
            and 'cls' (list of int class IDs). Returns [] when the labels
            directory is missing — the original returned None there, which
            made the return type inconsistent ([] is equally falsy for the
            existing `if not subset_labels` caller check).
    """

    labels_dir = os.path.join(DATASET_ROOT, subset_name, 'labels')
    images_dir = os.path.join(DATASET_ROOT, subset_name, 'images')

    if not os.path.exists(labels_dir):
        print(f"警告: 找不到 {subset_name} 标签目录: {labels_dir}，跳过此子集。")
        return []

    print(f"\n正在扫描 {subset_name} 标签文件...")

    if os.path.exists(images_dir):
        # str.endswith accepts a tuple of suffixes — one C-level call per file.
        image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')
        subset_image_counts[subset_name] = len(
            [f for f in os.listdir(images_dir) if f.lower().endswith(image_exts)])

    label_files = [f for f in os.listdir(labels_dir) if f.endswith('.txt')]
    all_labels = []
    for label_filename in tqdm(label_files, desc=f"处理 {subset_name} 标签"):
        label_path = os.path.join(labels_dir, label_filename)
        label_info = {
            'filename': label_filename,
            'cls': []
        }
        try:
            # YOLO label format: each line is "<class_id> x y w h"; only the
            # class ID is needed here.
            with open(label_path, 'r', encoding='utf-8') as f:
                for line in f:
                    parts = line.strip().split()
                    if not parts:
                        continue
                    try:
                        class_id = int(parts[0])
                    except ValueError:
                        print(f"警告: 文件 {label_filename} 包含无效的类别ID: '{parts[0]}'")
                        continue
                    label_info['cls'].append(class_id)
                    subset_class_counts[subset_name][class_id] += 1
                    total_class_counts[class_id] += 1
            all_labels.append(label_info)
        except Exception as e:
            # Best-effort scan: report the unreadable file and keep going.
            print(f"错误: 读取标签文件 {label_path} 失败: {e}")
            continue
    return all_labels

# YOLOWeightedDataset 类，继承自 YOLODataset
# 平衡权重的核心代码
class YOLOWeightedDataset(YOLODataset):
    """YOLO dataset wrapper that oversamples rare classes.

    Each label receives a weight equal to the mean inverse-frequency of the
    classes it contains; sample_indices() then draws label indices with
    probability proportional to those weights, so under-represented classes
    are sampled more often.
    """

    def __init__(self, labels, class_names, *args, mode="train", **kwargs):
        """Build per-label sampling weights from class frequencies.

        Args:
            labels (list[dict]): Entries with 'filename' and 'cls' keys, as
                produced by _scan_subset.
            class_names (dict[int, str]): Mapping of class ID to name.
            mode (str): 'train' (or any string containing it) marks training mode.

        NOTE(review): YOLODataset.__init__ is deliberately NOT called —
        this class only reuses the label-weighting logic and never loads
        images through the parent. Confirm no parent method relying on
        parent-initialized state is ever invoked.
        """
        self.labels = labels
        self.data = {"names": class_names}
        self.train_mode = "train" in mode

        self.count_instances()
        # Inverse-frequency weighting: rare classes get large weights.
        class_weights = np.sum(self.counts) / self.counts
        # Aggregates the per-class weights of one label into a scalar.
        self.agg_func = np.mean

        self.class_weights = np.array(class_weights)
        self.weights = self.calculate_weights()
        self.probabilities = self.calculate_probabilities()

    def count_instances(self):
        """Count instances per class into self.counts (zeros clamped to 1)."""
        num_classes = len(self.data["names"])
        counts = [0] * num_classes
        for label in self.labels:
            for class_id in np.array(label['cls']).astype(int):
                # Skip IDs outside the declared class range instead of
                # crashing with IndexError on malformed label files.
                if 0 <= class_id < num_classes:
                    counts[class_id] += 1
                else:
                    print(f"警告: 忽略超出范围的类别ID: {class_id}")

        self.counts = np.array(counts)
        # Clamp zero counts to 1 to avoid division by zero in the
        # inverse-frequency weight computation.
        self.counts = np.where(self.counts == 0, 1, self.counts)

    def calculate_weights(self):
        """Return one aggregated weight per label (1 for empty labels)."""
        weights = []
        num_classes = len(self.class_weights)
        for label in self.labels:
            cls = np.array(label['cls']).astype(int)
            # Keep only in-range IDs so indexing class_weights is safe,
            # mirroring the guard in count_instances.
            cls = cls[(cls >= 0) & (cls < num_classes)]

            if cls.size == 0:
                # Neutral weight for labels with no (valid) instances.
                weights.append(1)
                continue

            weight = self.agg_func(self.class_weights[cls])
            weights.append(weight)
        return weights

    def calculate_probabilities(self):
        """Normalize self.weights into a probability distribution."""
        total_weight = sum(self.weights)
        probabilities = [w / total_weight for w in self.weights]
        return probabilities

    def sample_indices(self, num_samples):
        """Draw weighted samples (with replacement) over the labels.

        Args:
            num_samples (int): Number of indices to draw.

        Returns:
            np.ndarray: Indices into self.labels, e.g. [0, 1, 1, 2, 2, 1, 0]
                when there are three labels.
        """
        return np.random.choice(len(self.labels), size=num_samples, p=self.probabilities)

class DatasetBalancer:
    """Balance a YOLO-format dataset by weighted resampling of rare classes.

    Pipeline (see run()): load class names from the yaml, scan each subset's
    label files, compute the total sampling budget, then copy weighted-sampled
    image/label pairs into output_dir.
    """

    def __init__(self, dataset_root, data_yaml_path, output_dir, subset=None):
        """Store configuration and initialize the counting structures.

        Args:
            dataset_root (str): Root directory containing the subset folders
                (each expected to hold 'images' and 'labels' subdirectories).
            data_yaml_path (str): Path to the dataset .yaml with a 'names' field.
            output_dir (str): Destination directory for the balanced dataset.
            subset (list[str] | None): Subset names to process; defaults to
                ['valid'] when None. A None sentinel replaces the original
                mutable default list, which was shared across instances and
                actually mutated by _get_subsets_to_scan.
        """
        self.dataset_root = dataset_root
        self.data_yaml_path = data_yaml_path
        self.output_dir = output_dir
        self.subset = ['valid'] if subset is None else subset
        self.class_names = {}
        self.subsets_to_scan = []
        # subset -> class_id -> instance count
        self.subset_class_counts = defaultdict(lambda: defaultdict(int))
        # class_id -> instance count over all subsets
        self.total_class_counts = defaultdict(int)
        # subset -> number of image files found
        self.subset_image_counts = defaultdict(int)
        # subset -> list of label dicts produced by _scan_subset
        self.all_subset_labels = {}

    def load_class_and_subset_info(self):
        """Read class names from the yaml and resolve the subsets to scan."""
        self.class_names = _load_class_names(self.data_yaml_path)
        self.subsets_to_scan = _get_subsets_to_scan(self.dataset_root, subsets_to_scan=self.subset)

    def scan_dataset_subsets(self):
        """Scan every subset, filling the count dicts and all_subset_labels."""
        for subset_name in self.subsets_to_scan:
            labels = _scan_subset(subset_name, self.dataset_root, self.subset_class_counts, self.total_class_counts, self.subset_image_counts)
            self.all_subset_labels[subset_name] = labels

    def calculate_num_samples(self):
        """Set num_samples_total = (rarest class count) * (number of classes)."""
        if self.total_class_counts:
            min_count = min(self.total_class_counts.values())
            num_classes = len(self.class_names)
            self.num_samples_total = min_count * num_classes
            print(f"类别数：{num_classes}，最小样本数为：{min_count}，因此将总计采样: {self.num_samples_total} 次")
        else:
            # Nothing was scanned; sampling will be a no-op.
            self.num_samples_total = 0

    def sample_and_copy_files(self):
        """Weighted-sample labels per subset and copy image/label pairs.

        NOTE(review): indices are drawn with replacement but files keep their
        original names, so a label sampled k times is still written only
        once — confirm this overwrite behavior is intended.
        """
        os.makedirs(self.output_dir, exist_ok=True)
        for subset_name in self.subsets_to_scan:
            subset_labels = self.all_subset_labels[subset_name]
            if not subset_labels:
                continue

            weighted_dataset = YOLOWeightedDataset(subset_labels, self.class_names, mode=subset_name)
            sampled_indices = weighted_dataset.sample_indices(self.num_samples_total)

            src_images_dir = os.path.join(self.dataset_root, subset_name, 'images')
            src_labels_dir = os.path.join(self.dataset_root, subset_name, 'labels')

            # Build a stem -> image filename map ONCE per subset. The original
            # re-listed the images directory for every sampled index (O(n*m))
            # and blindly assumed every image shares the extension of the
            # first listed file.
            image_by_stem = {os.path.splitext(f)[0]: f for f in os.listdir(src_images_dir)}

            output_subset_dir = os.path.join(self.output_dir, subset_name)
            # makedirs creates output_subset_dir itself as a parent.
            os.makedirs(os.path.join(output_subset_dir, 'images'), exist_ok=True)
            os.makedirs(os.path.join(output_subset_dir, 'labels'), exist_ok=True)

            for idx in tqdm(sampled_indices, desc=f"从目录 {subset_name} 平衡采样"):
                label_filename = subset_labels[idx]['filename']
                stem = os.path.splitext(label_filename)[0]

                shutil.copyfile(
                    os.path.join(src_labels_dir, label_filename),
                    os.path.join(output_subset_dir, 'labels', label_filename))

                image_filename = image_by_stem.get(stem)
                if image_filename is None:
                    # Warn and continue instead of aborting the whole run on
                    # a single missing image (the original crashed here).
                    print(f"警告: 找不到与 {label_filename} 对应的图片文件，已跳过。")
                    continue
                shutil.copyfile(
                    os.path.join(src_images_dir, image_filename),
                    os.path.join(output_subset_dir, 'images', image_filename))

    def run(self):
        """Execute the full balancing pipeline and report progress."""
        class Colors:
            BLUE = '\033[92m'
            RESET = '\033[0m'
        print(f"\n\n{Colors.BLUE} *** 欢迎使用数据集类别数量分析工具 作者: marss ***{Colors.RESET}")
        print(f"\n -- 正在采样目录: {self.dataset_root} ---")

        # --- step 1: load class names and subset info ---
        self.load_class_and_subset_info()

        # --- step 2: scan subsets and collect per-class counts ---
        self.scan_dataset_subsets()

        # --- step 3: compute the total number of balanced samples ---
        self.calculate_num_samples()

        # --- step 4: sample balanced files and copy to the output dir ---
        self.sample_and_copy_files()

        print("\n样本平衡采样完成，平衡后的数据集已保存到: ", self.output_dir)


if __name__ == "__main__":
    # Entry point: balance the example dataset laid out under 'datasets'.
    config = {
        'dataset_root': 'datasets',
        'data_yaml_path': 'hhf.yaml',
        'output_dir': 'datasets_balanced',
        'subset': ['train', 'valid', 'test'],
    }
    DatasetBalancer(**config).run()