import argparse
import os
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Union, Optional, List

import numpy as np
import pandas as pd
import torch
from PIL import Image
from sklearn.metrics import classification_report
from torchvision import transforms
from tqdm import tqdm

from yms_zsl.models.HSAZLM import FCNN
from yms_zsl.tools.tools import generate_images_dataframe, get_device


def write_classification_report_to_txt(
        report_str: str,
        output_dir: str,
        encoding: str = "utf-8",
        append: bool = False,
        add_context: Optional[dict] = None
) -> str:
    """
    Write a string-formatted classification report to a TXT file.

    Args:
        report_str: classification report string (e.g. the output of
            sklearn's ``classification_report``)
        output_dir: full output file path; missing parent directories are
            created automatically
        encoding: file encoding (default "utf-8", avoids mojibake for
            non-ASCII text)
        append: True = append to the end of the file, False = overwrite
            (default False)
        add_context: extra context (experiment config, dataset info, ...)
            as a dict; when given, it is written before the report together
            with a generation timestamp

    Returns:
        The path of the written TXT file (same as ``output_dir``), so the
        caller can print or reuse it.
    """
    # Auto-create the parent directory — the original code promised this in
    # the docstring but never did it, so open() raised for fresh run dirs.
    parent_dir = os.path.dirname(output_dir)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    # Build the optional context header (timestamp + user key/value pairs).
    context_str = ""
    if add_context and isinstance(add_context, dict):
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        context_str += f"=== 分类报告生成时间: {current_time} ===\n"
        for key, value in add_context.items():
            context_str += f"{key}: {value}\n"
        # Separator line for readability.
        context_str += "-" * 80 + "\n\n"

    mode = "a" if append else "w"  # "a" = append, "w" = overwrite
    with open(output_dir, mode, encoding=encoding) as f:
        f.write(context_str)  # context header first (may be empty)
        f.write(report_str)   # then the report itself
        f.write("\n\n")       # trailing blank lines separate repeated appends

    print(f"✅ 分类报告已成功写入: {output_dir}")
    return output_dir


def predict(configs):
    """
    Run zero-shot classification on the validation images and save results.

    For every class listed in the evaluation dataframe, loads a precomputed
    semantic-attribute vector, pushes image batches through the FCNN feature
    extractor, and assigns each sample to the class whose attribute vector is
    closest in Euclidean distance. Writes a sklearn classification report
    (TXT) and a per-sample prediction table (CSV) into ``configs.save_dir``.

    Args:
        configs: a PredictConfig-like object providing device, transform,
            data_dir, save_dir, train_class, factor_index_map_path,
            ignore_factors, batch_size and fcnn_channels.
    """
    device = configs.device
    transform = configs.transform
    save_dir = configs.save_dir

    # Build the evaluation dataframe; column names (图片路径/新类别名/标注类别ID)
    # are produced by generate_images_dataframe.
    images_df = generate_images_dataframe(
        root_dir=configs.data_dir,
        image_subdir='val',
        class_list_path=configs.train_class,
        factor_index_map_path=configs.factor_index_map_path,
        ignore_factors=configs.ignore_factors
    )
    classes = images_df['新类别名'].drop_duplicates().tolist()
    total_samples = len(images_df)
    pred_labels_list: List[int] = []

    # Load one semantic-attribute vector per class and stack them into a
    # single tensor of shape [num_classes, feature_dim].
    semantic_attributes = []
    for cls_name in classes:
        npy_path = os.path.join(save_dir, 'attributes', 'semantic_attribute', f"{cls_name}.npy")
        semantic_attributes.append(np.load(npy_path, allow_pickle=True))
    attributes = torch.tensor(np.array(semantic_attributes, dtype=np.float32), dtype=torch.float32).to(
        device)  # shape: [num_classes, feature_dim]

    batch_size = configs.batch_size
    model = FCNN(configs.fcnn_channels)
    # map_location so a checkpoint saved on GPU also loads on CPU-only hosts.
    model.load_state_dict(
        torch.load(os.path.join(save_dir, 'checkpoints', 'FCnn.pth'), map_location=device))
    model.to(device)
    model.eval()  # inference mode: freeze dropout / use running BN statistics

    with torch.no_grad():  # no autograd bookkeeping needed for prediction
        for batch_start in tqdm(range(0, total_samples, batch_size)):
            batch_end = min(batch_start + batch_size, total_samples)
            batch_df = images_df.iloc[batch_start:batch_end]

            # 1. Read and preprocess the batch images.
            batch_imgs = []
            for img_path in batch_df["图片路径"].tolist():
                # Context manager closes the underlying file handle promptly,
                # avoiding "too many open files" on large validation sets.
                with Image.open(img_path) as img_pil:
                    batch_imgs.append(transform(img_pil.convert("RGB")))
            batch_tensor = torch.stack(batch_imgs, dim=0).to(device)  # [batch, C, H, W]

            # 2. Forward pass → image features, then nearest-attribute match.
            outputs = model(batch_tensor)  # [batch, feature_dim]
            distances = torch.cdist(outputs, attributes, p=2)  # [batch, num_classes]
            # Index of the minimum distance is the predicted class id.
            _, predicted = torch.min(distances, dim=1)
            pred_labels_list.extend(predicted.cpu().numpy())

    # Assemble results: original dataframe + predicted class ids.
    pred_df = pd.DataFrame({"类别预测ID": pred_labels_list})
    result_df = pd.concat([images_df, pred_df], axis=1)

    # Classification report: ground-truth ids vs predicted ids.
    report_str = classification_report(
        images_df['标注类别ID'].values,
        pred_labels_list,
        target_names=classes,
        digits=4
    )

    write_classification_report_to_txt(
        report_str=report_str,
        output_dir=os.path.join(save_dir, 'classification_report.txt'),
    )

    print(report_str)
    result_df.to_csv(os.path.join(save_dir, 'classification_report.csv'), index=False, encoding='utf-8-sig')


def parse_args(args=None):
    """
    Parse command-line options for the prediction script.

    Args:
        args: explicit argument list (useful for tests); ``None`` falls back
            to ``sys.argv[1:]``, the standard argparse behavior.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default=r'/data/coding/data')
    parser.add_argument('--fcnn_path', default=r'/data/coding/results/train_D0/models/best_fcnn.pt')
    parser.add_argument('--hsa_path', default=r'/data/coding/results/train_D0/HSA.mat')
    parser.add_argument('--feature_path', default=r'/data/coding/results/train_D0/models/feature_extractor.pt')
    parser.add_argument('--cnn_path', default=r'/data/coding/results/train_D0/models/best_cnn.pt')
    parser.add_argument('--output_dir', default=r'/data/coding/results/train_D0/predict')
    parser.add_argument('--batch_size', type=int, default=32)
    # Pass `args` straight through: argparse treats None as "read sys.argv".
    # The old `args if args else []` silently discarded real CLI arguments
    # whenever the function was called with no arguments.
    return parser.parse_args(args)


@dataclass
class PredictConfig:
    """Configuration bundle consumed by predict(): device, preprocessing,
    data/checkpoint paths and inference hyper-parameters."""

    # Inference device (CPU/GPU); resolved per instance via get_device().
    device: torch.device = field(
        default_factory=get_device,
        metadata={"desc": "训练使用的设备（CPU/GPU）"},
        repr=False
    )
    # Image preprocessing pipeline: resize to 32x32, tensor conversion,
    # ImageNet-style mean/std normalization.
    transform: transforms.Compose = field(
        default_factory=lambda: transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
        metadata={"desc": "图像预处理变换管道"},
        repr=False
    )
    # -------------------------- Path parameters --------------------------
    data_dir: Union[str, Path] = r'D:\Code\2-ZSL\0-data\CWRU\dataset'  # dataset root (str or Path both accepted)
    save_dir: Union[str, Path] = r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-3'  # holds checkpoints/attributes; reports written here
    train_class: Union[str, Path] = r'D:\Code\2-ZSL\0-data\CWRU\dataset\seen_classes.txt'  # class-list text file
    factor_index_map_path: Optional[Union[str, Path]] = None  # optional factor→index map file
    ignore_factors: Optional[List[str]] = None  # factor names to drop when building the dataframe
    batch_size: int = 128  # inference batch size
    fcnn_channels: int = 518  # FCNN channel/feature dimension


if __name__ == '__main__':
    # Script entry point: build a prediction config for this experiment run
    # and execute inference end-to-end.
    run_options = PredictConfig(
        data_dir=r'D:\Code\2-ZSL\0-data\data\data',
        save_dir=r'D:\Code\2-ZSL\1-output\论文实验结果\对比方法\CWRU\HSAZLM\H06\exp-1',
        train_class=r'D:\Code\2-ZSL\0-data\data\data/unseen_classes.txt',
        factor_index_map_path=r'D:\Code\2-ZSL\0-data\data\data\factor_index_map.txt',
        ignore_factors=['Operating Condition'],
    )
    predict(run_options)
