import argparse
import os
import sys
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from typing import Union, Optional, List

import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from tqdm import tqdm

from yms_zsl.models.HSAZLM import Encoder
from yms_zsl.tools.tools import generate_images_dataframe, get_device


def create_encoder(encoder_path):
    """Build an Encoder and load pretrained weights from *encoder_path* (CPU map)."""
    state_dict = torch.load(encoder_path, map_location='cpu', weights_only=True)
    model = Encoder()
    model.load_state_dict(state_dict)
    return model


def decea_extract_features(encoder, loader, device):
    """Extract features from *loader*, grouped per integer label.

    Returns a dict mapping label -> list of per-sample feature vectors (NumPy).
    """
    encoder.eval()
    per_class = defaultdict(list)
    with torch.no_grad():
        progress = tqdm(loader, desc="Extracting features", colour='blue', file=sys.stdout)
        for images, labels in progress:
            batch_feats = encoder(images.to(device)).cpu().numpy()
            batch_labels = labels.cpu().numpy()
            for lbl, vec in zip(batch_labels, batch_feats):
                per_class[int(lbl)].append(vec)
    return per_class


def cnn_extract_features(encoder, loader, device):
    """Extract features from *loader* as two flat lists.

    Returns (features, labels): parallel lists of per-sample NumPy feature
    vectors and their labels, in loader order.
    """
    encoder.eval()
    feature_list, label_list = [], []
    with torch.no_grad():
        for images, labels in tqdm(loader, desc="Extracting features", colour='blue', file=sys.stdout):
            batch_out = encoder(images.to(device))
            # Move to CPU/NumPy and accumulate sample-by-sample.
            feature_list.extend(batch_out.cpu().numpy())
            label_list.extend(labels.cpu().numpy())

    return feature_list, label_list


def calculate_label_means(features_dict):
    """Return the per-class mean feature vectors, stacked in ascending label order."""
    means = []
    for label in sorted(features_dict):
        means.append(np.mean(features_dict[label], axis=0))
    return np.array(means)


# Binary semantic-attribute vectors, one per fault class.
# From the layout visible here, dims 0-2 track the numeric size code in the key
# (007 / 014 / 021) and dims 3-5 track the prefix (B / IR / OR); 'No-0' is the
# all-zero vector. NOTE(review): this interpretation is inferred from the key
# naming — confirm against the HSAZLM semantic-attribute definition.
sa_matrix = {
        'B-007': np.array([1, 0, 0, 1, 0, 0]),
        'B-014': np.array([0, 1, 0, 1, 0, 0]),
        'B-021': np.array([0, 0, 1, 1, 0, 0]),
        'IR-007': np.array([1, 0, 0, 0, 1, 0]),
        'IR-014': np.array([0, 1, 0, 0, 1, 0]),
        'IR-021': np.array([0, 0, 1, 0, 1, 0]),
        'OR-007': np.array([1, 0, 0, 0, 0, 1]),
        'OR-014': np.array([0, 1, 0, 0, 0, 1]),
        'OR-021': np.array([0, 0, 1, 0, 0, 1]),
        'No-0': np.array([0, 0, 0, 0, 0, 0])
    }

def extract_features_by_class(
    images_df,
    encoder,
    transform,
    device,
    batch_size=32
) -> dict:
    """
    Extract features per class (grouped by the "新类别名" column), average them
    per class, and return the averages as NumPy arrays.

    Args:
        images_df: DataFrame with columns "新类别名" (class name) and
            "图片路径" (image path). -- assumed schema; see generate_images_dataframe
        encoder: feature network, called as encoder(batch) -> [B, feature_dim].
        transform: torchvision-style transform applied to each PIL image.
        device: torch device the encoder lives on.
        batch_size: images per forward pass.

    Returns:
        dict mapping class name -> mean feature vector (NumPy, shape [feature_dim]).
        Classes with no readable images are skipped with a warning.
    """
    # Unique class names (stable processing order).
    unique_classes = images_df["新类别名"].unique()
    # Plain dict: defaultdict(np.ndarray) was a latent bug — np.ndarray() takes
    # no zero-arg form, so any missing-key access would raise TypeError instead
    # of KeyError. Keys are always assigned explicitly below.
    class_avg_features_np = {}

    # Switch to inference mode once, not inside the batch loop.
    encoder.eval()

    for cls_name in tqdm(unique_classes, desc="按类别提取特征→求平均→转NumPy"):
        # Select every row belonging to the current class.
        cls_df = images_df[images_df["新类别名"] == cls_name].reset_index(drop=True)
        cls_img_paths = cls_df["图片路径"].tolist()
        total_cls_imgs = len(cls_img_paths)
        if total_cls_imgs == 0:
            tqdm.write(f"⚠️  类别 {cls_name} 无有效图片，跳过")
            continue

        # Per-batch CPU feature tensors for this class.
        cls_batch_features = []

        for batch_start in tqdm(
            range(0, total_cls_imgs, batch_size),
            desc=f"  类别{cls_name}（共{total_cls_imgs}张）",
            leave=False
        ):
            # Slicing clamps automatically at the list end.
            batch_img_paths = cls_img_paths[batch_start:batch_start + batch_size]

            # Load + preprocess; unreadable images are skipped with a warning.
            batch_imgs = []
            for img_path in batch_img_paths:
                try:
                    img_pil = Image.open(img_path).convert("RGB")
                    batch_imgs.append(transform(img_pil))
                except Exception as e:
                    tqdm.write(f"❌ 读取图片 {img_path} 失败：{str(e)}，跳过该图片")

            if not batch_imgs:
                continue

            # Forward pass without gradients; keep results on CPU.
            batch_tensor = torch.stack(batch_imgs, dim=0).to(device)
            with torch.no_grad():
                batch_features = encoder(batch_tensor)  # [batch, feature_dim]
            cls_batch_features.append(batch_features.cpu())

        if not cls_batch_features:
            tqdm.write(f"⚠️  类别 {cls_name} 无有效特征，跳过")
            continue

        # Concatenate all batches, average over images, convert to NumPy.
        cls_all_features = torch.cat(cls_batch_features, dim=0)  # [num_valid_imgs, feature_dim]
        class_avg_features_np[cls_name] = torch.mean(cls_all_features, dim=0).numpy()

    return class_avg_features_np


def merge_feature_with_sa(
        class_feature_dict: dict,
        sa_matrix: dict,
) -> dict:
    """
    Horizontally concatenate each class's semantic-attribute vector onto its
    average feature vector.

    Steps:
    1. Match the full class name to an sa_matrix key (e.g. "B-007英寸" -> "B-007").
    2. Concatenate feature + SA vector (e.g. 512 + 6 = 518 dims).

    Args:
        class_feature_dict: class name -> average feature (NumPy, shape [feature_dim]).
        sa_matrix: core class name -> semantic attributes (NumPy, shape [sa_dim]).

    Returns:
        class name -> np.hstack([feature, sa]) (NumPy, shape [feature_dim + sa_dim]).

    Raises:
        KeyError: if a class name cannot be matched to any sa_matrix key.
    """
    import re

    # Plain dict: defaultdict(np.ndarray) was a latent bug — np.ndarray() takes
    # no zero-arg form, so a missing-key access would raise TypeError.
    class_feat_sa_dict = {}

    # Compiled once (previously re-imported `re` and re-scanned the pattern on
    # every call of the nested helper). Covers all sa_matrix key formats.
    core_name_pattern = re.compile(r'(B-\d{3}|IR-\d{3}|OR-\d{3}|No-\d+)')

    def extract_core_cls_name(full_cls_name: str) -> str:
        """Return the sa_matrix key embedded in *full_cls_name*, or the name unchanged."""
        match = core_name_pattern.search(full_cls_name)
        return match.group(1) if match else full_cls_name

    for full_cls_name, cls_avg_feat in class_feature_dict.items():
        core_cls_name = extract_core_cls_name(full_cls_name)
        if core_cls_name not in sa_matrix:
            # Fail loudly with context instead of a bare KeyError on the core name.
            raise KeyError(
                f"No semantic attributes for class '{full_cls_name}' "
                f"(core name '{core_cls_name}')"
            )
        sa_vec = sa_matrix[core_cls_name]
        class_feat_sa_dict[full_cls_name] = np.hstack([cls_avg_feat, sa_vec])
    return class_feat_sa_dict




def semantics(configs):
    """Generate and save per-class semantic-attribute feature vectors.

    Loads the pretrained encoder from <save_dir>/checkpoints/encoder.pth,
    extracts the mean feature of each training class, appends its binary
    semantic-attribute vector, and writes one .npy file per class under
    <save_dir>/attributes/semantic_attribute/.

    Args:
        configs: SemanticsConfig-like object providing device, transform,
            data_dir, save_dir, train_class, factor_index_map_path and
            ignore_factors.
    """
    device = configs.device
    transform = configs.transform
    save_dir = configs.save_dir

    images_df = generate_images_dataframe(
        root_dir=configs.data_dir,
        image_subdir='train',
        class_list_path=configs.train_class,
        factor_index_map_path=configs.factor_index_map_path,
        ignore_factors=configs.ignore_factors
    )

    # Consistency: reuse the shared helper instead of duplicating the
    # state-dict loading logic inline.
    encoder = create_encoder(os.path.join(save_dir, 'checkpoints', 'encoder.pth'))
    encoder.to(device)

    class_feature_dict = extract_features_by_class(
        images_df=images_df,
        encoder=encoder,
        transform=transform,
        device=device,
        batch_size=512  # tune to available GPU memory
    )
    class_feat_hsa_dict = merge_feature_with_sa(
        class_feature_dict=class_feature_dict,
        sa_matrix=sa_matrix,
    )

    # Bug fix: the output directory was never created, so np.save failed on a
    # fresh run with FileNotFoundError.
    out_dir = os.path.join(save_dir, 'attributes', 'semantic_attribute')
    os.makedirs(out_dir, exist_ok=True)
    for cls_name, feat in class_feat_hsa_dict.items():
        save_path = os.path.join(out_dir, f"{cls_name}.npy")
        np.save(save_path, feat)
        print(f"💾 已保存：{save_path}")


def parse_args(args=None):
    """Build the CLI parser for HSA-matrix generation and parse *args*.

    A falsy *args* (None or []) parses an empty argument list, i.e. every
    option keeps its default — sys.argv is deliberately never consulted.
    """
    arg_parser = argparse.ArgumentParser(description='Generate HSA matrix')
    arg_parser.add_argument('--batch_size', type=int, default=100)
    arg_parser.add_argument('--data_dir', default=r'/data/coding/CRWU/D0')
    arg_parser.add_argument('--encoder_path', default='/data/coding/output/train_D0/models/decae.pt')
    arg_parser.add_argument('--sa_path', default='/data/coding/CRWU/predicate-matrix-binary.txt')
    arg_parser.add_argument('--save_path', default='/data/coding/output/train_D0')
    arg_parser.add_argument('--feature_extractor_path',
                            default=r'/data/coding/output/train_D0/models/feature_extractor.pt')
    cli_args = args if args else []
    return arg_parser.parse_args(cli_args)

@dataclass
class SemanticsConfig:
    """Runtime configuration for `semantics`: device, image transform, and data/output paths."""
    # Device and image transform (default_factory defers construction so
    # nothing runs at module-import time).
    device: torch.device = field(
        default_factory=get_device,  # lazily pick CPU/GPU per instance
        metadata={"desc": "训练使用的设备（CPU/GPU）"},
        repr=False
    )
    transform: transforms.Compose = field(
        default_factory=lambda: transforms.Compose([  # fresh Compose per instance
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            # NOTE(review): ImageNet mean/std — presumably matches the encoder's
            # training preprocessing; confirm.
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
        metadata={"desc": "图像预处理变换管道"},
        repr=False
    )
    # -------------------------- Path parameters --------------------------
    data_dir: Union[str, Path] = r'D:\Code\2-ZSL\0-data\CWRU\dataset'
    save_dir: Union[str, Path] = r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-3'
    train_class: Union[str, Path] = r'D:\Code\2-ZSL\0-data\CWRU\dataset\seen_classes.txt'
    # Optional: default is None, so the annotation must admit it.
    factor_index_map_path: Optional[Union[str, Path]] = None
    ignore_factors: Optional[List[str]] = None

if __name__ == '__main__':
    # Hard-coded experiment paths; edit here when running a different dataset/output.
    run_kwargs = {
        'data_dir': r'D:\Code\2-ZSL\0-data\data\data',
        'save_dir': r'D:\Code\2-ZSL\1-output\论文实验结果\对比方法\CWRU\HSAZLM\H01\exp-2',
        'train_class': r'D:\Code\2-ZSL\0-data\data\data/seen_classes.txt',
        'factor_index_map_path': r'D:\Code\2-ZSL\0-data\data\data\factor_index_map.txt',
        'ignore_factors': ['Operating Condition'],
    }
    semantics(SemanticsConfig(**run_kwargs))
