import os

import numpy as np
import pandas as pd
import torch
from PIL import Image
from sklearn.metrics import classification_report
from torch.utils import data
from torchvision import transforms
from tqdm import tqdm

from zsl_ma.models.CNN import create_resnet, CNN
from zsl_ma.models.VAE import Encoder
from zsl_ma.models.projection import FeatureProjectionModel, AttributeProjectionModel
from zsl_ma.tools.plot import npy_dim_reduction_visualization, visualize_mean_features, visualize_features, \
    visualize_multi_features, plot_confusion_matrix, visualize_from_csv
from zsl_ma.tools.predict_untils import euclidean_predict, cls_predict, extract_features
from zsl_ma.models.DisentangledModel import DisentangledModel
from zsl_ma.tools.predict_untils import disent_predict
from zsl_ma.tools.tool import get_device, extract_class_features, save_class_mean_features, concat_fault_features, \
    generate_image_dataframe, write_list_to_file
from zsl_ma.train.train_disent import process_and_save_disent

# -------------------------- 1. Basic configuration (paths / preprocessing / device) --------------------------

# NOTE(review): hard-coded absolute Windows paths — must be adjusted per machine.
data_dir = r'D:\Code\2-ZSL\0-data\HOB\HOB'
save_dir = r'D:\Code\2-ZSL\1-output\论文实验结果\H01\exp-2'
# type_feat = np.load(os.path.join(save_dir, 'attributes', 'overall_feature_extraction', 'val-Operating Condition.npy'), allow_pickle=True)
# degree_feat = np.load(os.path.join(save_dir, 'attributes', 'overall_feature_extraction', 'val-Fault Location.npy'), allow_pickle=True)
# combined_feat = np.concatenate([type_feat, degree_feat], axis=1)
# np.save(os.path.join(save_dir, 'attributes', 'em', '验证集拼接.npy'), combined_feat)

# Image preprocessing shared by all inference paths in this script:
# resize to 64x64, convert to tensor, then normalize with the standard
# ImageNet channel statistics (torchvision convention).
transform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

device = get_device()  # automatically select GPU/CPU
# numpy_data = np.load(os.path.join(save_dir, "attributes", "overall_feature_extraction", 'val-Fault Location.npy'))
# model  = AttributeProjectionModel(512, num_classes=4)
# model.load_state_dict(torch.load(os.path.join(save_dir, "checkpoints", "semantic_projection.pth")))
# model = model.to(device)
# tensor_data = torch.from_numpy(numpy_data).float()
# dataset = data.TensorDataset(tensor_data)
# data_loader = data.DataLoader(
#         dataset,
#         batch_size=1000,
#         shuffle=False,  # 推理时通常不打乱数据
#     )
# all_outputs = []
# with torch.no_grad():  # 推理时不需要计算梯度
#     for batch in data_loader:
#         inputs = batch[0]  # 获取批次数据
#         inputs = inputs.to(device)
#         outputs,_ = model(inputs)  # 模型前向传播
#         all_outputs.append(outputs)
# final_output = torch.cat(all_outputs, dim=0)
# np.save(os.path.join(save_dir, "attributes", "em", '验证集公共空间语义嵌入.npy'), final_output.detach().cpu().numpy())
# model = DisentangledModel([2, 4], 512)
# model.load_state_dict(torch.load(os.path.join(save_dir, 'checkpoints','disent.pth')))
# model.to(device)
# model = Encoder()
# model.load_state_dict(torch.load(os.path.join(save_dir, 'checkpoints','encoder.pth')))
# model = model.to(device)
# model = create_resnet(weight_path=os.path.join(save_dir, 'checkpoints', 'cnn.pth'))
# model = FeatureProjectionModel(embed_dim=512)
# model.load_state_dict(torch.load(os.path.join(save_dir, 'checkpoints', 'feature_projection.pth')))
# from torchvision.models.feature_extraction import create_feature_extractor
# target_layers = {
#     'avgpool': 'output',
# }
# model = create_feature_extractor(model, return_nodes=target_layers)
# model.to(device)
# model.eval()
# # --------------------- 1. 新增：定义钩子函数和存储容器 ---------------------
# # 存储每个批次的shared_backbone输出
# shared_backbone_outputs_total = []
# # 临时存储单批次的shared_backbone输出（每次前向传播后清空，避免数据叠加）
# shared_backbone_batch = []
#
# # 钩子函数：捕获shared_backbone的输出并存入临时容器
# def hook_fn(module, input, output):
#     shared_backbone_batch.append(output)  # 保存当前批次的shared_backbone输出
#
# test_list = ['验证集解耦']
# for test in tqdm(test_list):
#     df = pd.read_csv(os.path.join(save_dir, f'{test}.csv'))
#     total_samples = len(df)
#     all_outputs = []
#     batch_size = 512
#
#     # --------------------- 2. 新增：注册钩子（循环内、前向传播前注册） ---------------------
#     # 为model.shared_backbone注册正向钩子，返回钩子对象（后续需移除）
#     hook = model.shared_backbone.register_forward_hook(hook_fn)
#
#     for batch_start in tqdm(range(0, total_samples, batch_size)):
#         batch_end = min(batch_start + batch_size, total_samples)
#         batch_df = df.iloc[batch_start:batch_end]
#
#         # 1. 读取并预处理图片：保持原逻辑
#         batch_imgs = []
#         for img_path in batch_df["图片路径"].tolist():
#             img_pil = Image.open(img_path).convert("RGB")
#             img_tensor = transform(img_pil)
#             batch_imgs.append(img_tensor)
#         batch_tensor = torch.stack(batch_imgs, dim=0).to(device)  # shape: [batch_size, C, H, W]
#
#         # --------------------- 3. 新增：清空单批次临时容器（避免上一批次数据残留） ---------------------
#         shared_backbone_batch.clear()
#
#         # 2. 模型前向传播：保持原逻辑（前向传播时会触发钩子，捕获shared_backbone输出）
#         with torch.no_grad():
#             outputs = model(batch_tensor)  # 触发钩子，shared_backbone输出存入shared_backbone_batch
#             all_outputs.append(outputs)
#
#             # --------------------- 4. 新增：提取当前批次的shared_backbone输出，存入总容器 ---------------------
#             # shared_backbone_batch[0] 即为当前批次的输出（钩子单次调用存1个结果）
#             batch_shared_output = shared_backbone_batch[0].flatten(start_dim=1, end_dim=-1).detach().cpu()  # 转移到CPU，脱离计算图
#             shared_backbone_outputs_total.append(batch_shared_output)
#
#     # --------------------- 5. 新增：处理并保存shared_backbone的最终输出 ---------------------
#     # 拼接所有批次的shared_backbone输出
#     final_shared_backbone_output = torch.cat(shared_backbone_outputs_total, dim=0).numpy()
#     # 保存shared_backbone输出（路径可根据需求调整）
#     np.save(os.path.join(save_dir, "attributes", "em", f'验证集共享卷积层特征.npy'), final_shared_backbone_output)
#
#     # --------------------- 6. 新增：移除钩子（循环结束后必做，避免内存泄漏） ---------------------
#     hook.remove()



# test_list = ['1HP-预测结果']
# for test in tqdm(test_list):
#     df = pd.read_csv(os.path.join(save_dir, f'{test}.csv'))
#     total_samples = len(df)
#     all_outputs = []
#     batch_size = 512
#     for batch_start in tqdm(range(0, total_samples, batch_size)):
#         batch_end = min(batch_start + batch_size, total_samples)
#         batch_df = df.iloc[batch_start:batch_end]
#
#         # 1. 读取并预处理图片：保持原逻辑
#         batch_imgs = []
#         for img_path in batch_df["图片路径"].tolist():
#             img_pil = Image.open(img_path).convert("RGB")
#             img_tensor = transform(img_pil)
#             batch_imgs.append(img_tensor)
#         batch_tensor = torch.stack(batch_imgs, dim=0).to(device)  # shape: [batch_size, C, H, W]
#
#         # 2. 模型前向传播：保持原逻辑（获取图片特征）
#         with torch.no_grad():
#             outputs = model(batch_tensor)  # shape: [batch_size, feature_dim]
#             # all_outputs.append(outputs['output'].flatten(start_dim=1, end_dim=-1))
#             all_outputs.append(outputs)
#     final_output = torch.cat(all_outputs, dim=0)
#     final_output = final_output.detach().cpu().numpy()
#     np.save(os.path.join(save_dir, "attributes", "em", f'不可见类-特征嵌入.npy'), final_output)
# model = CNN(4)
# model.load_state_dict(torch.load(os.path.join(save_dir, 'checkpoints', 'cnn.pth')))
# from torchvision.models.feature_extraction import create_feature_extractor
# target_layers = {
#     'shared_backbone': 'output',
# }
# model = create_feature_extractor(model, return_nodes=target_layers).to(device)
# model = model.to(device)
# image_df, maper = generate_image_dataframe(
#         data_dir, 'train',
#         os.path.join(data_dir, 'unseen_classes.txt'), os.path.join(data_dir,'factor_index_map.txt'),
#         ignore_factors=None,
#         need_parse_factors=True
#     )
# image_df.to_csv(os.path.join(save_dir, 'unseen.csv'), index=False, encoding='utf-8-sig')
# test_list = ['unseen_classes']
# for test_img in test_list:
#     df, metrics = euclidean_predict(
#         model,
#         data_dir,
#         os.path.join(save_dir, 'attributes', 'semantic_embed'),
#         os.path.join(data_dir, f'{test_img}.txt'),
#         os.path.join(data_dir, 'factor_index_map.txt'),
#         device,
#         transform,
#         subdir='val',
#         ignore_factors=['Operating Condition'],
#         batch_size=1000
#     )
#     result_path = os.path.join(save_dir, f'{test_img}-预测结果.csv')
#     df.to_csv(result_path, index=False, encoding='utf-8-sig')
#     print(metrics)

# visualize_multi_features([os.path.join(save_dir, 'attributes', 'em', '训练集语义嵌入.npy'),
#     os.path.join(save_dir, 'attributes', 'em', f'不可见类-特征嵌入.npy')],
#     [os.path.join(save_dir, f'训练集解耦.csv'),
#         os.path.join(save_dir, f'不可见类预测结果.csv')],
#     ['Fault Location','类别名称'],  # 'Fault Location',
#  ['o', 's'],
#     os.path.join(save_dir, 'images', f'不可见类公共空间嵌入可视化-1.jpg'),
#                          os.path.join(save_dir, 'images',f'不可见类公共空间嵌入可视化-1.csv' ),
#     ['centers_only','original']  # 'centers_only',
# )
# Re-render the unseen-class common-space embedding plot from the previously
# exported CSV (skips recomputing the embedding/dimensionality reduction).
# Note: dropped the pointless f-string prefixes (no placeholders, ruff F541);
# the string contents are unchanged.
visualize_from_csv(
    os.path.join(save_dir, 'images', '不可见类公共空间嵌入可视化-1.csv'),
    os.path.join(save_dir, 'images', '不可见类公共空间嵌入可视化-1.jpg'),
)
# visualize_multi_features([os.path.join(save_dir, 'attributes', 'overall_feature_extraction', 'val-Operating Condition.npy')],
#     [os.path.join(save_dir, f'val_disent.csv')],
#     ['Operating Condition'],  # 'Fault Location',
#  ['o'],
#     os.path.join(save_dir, 'images', f'验证集工况解耦可视化.jpg'),
#     ['combined']
# )


# cls_predict(model=model,
#             data_dir=data_dir,
#             test_image_class=os.path.join(data_dir, f'unseen_classes.txt'),
#                 device=device,
#                 transform=transform,
#                 image_subdir='val',
#                 batch_size=1000,
#             factor_index_map_path=os.path.join(data_dir, f'factor_index_map.txt'),
#             ignore_factors=['Operating Condition'])
# image_df, feature_matrix=extract_features(model=model,
#                                           device=device,
#                                           transform=transform,
#                                           csv_path=os.path.join(data_dir, 'train_disent.csv'),
#                  )
# np.save(os.path.join(save_dir, '1HP-feature_matrix.npy'), feature_matrix)
# image_df.to_csv(os.path.join(save_dir, '1HP-feature_matrix.csv'), index=False, encoding='utf-8-sig')
# df, dis, features = disent_predict(
#         model=model,
#         data_dir=data_dir,
#         device=device,
#         transform=transform,
#         image_subdir='val',  # 区分训练/验证数据
#         class_list_path=r'D:\Code\2-ZSL\0-data\HOB\dataset\1HP.txt',
#         factor_index_map_path=r'D:\Code\2-ZSL\0-data\HOB\dataset\factor_index_map.txt',
#         ignore_factors=None,
#         batch_size=1000
#     )
#

# train_csv=process_and_save_disent(model, data_dir, device, transform, split='train',
#                         train_class=os.path.join(data_dir, 'seen_classes.txt'),
#                         factor_index_map_path=os.path.join(data_dir, 'factor_index_map.txt'),
#                         save_dir=save_dir,
#                         ignore_factors=None, batch_size=100)
#
# val_csv=process_and_save_disent(model, data_dir, device, transform, split='val',
#                         train_class=os.path.join(data_dir, 'seen_classes.txt'),
#                         factor_index_map_path=os.path.join(data_dir, 'factor_index_map.txt'),
#                         save_dir=save_dir,
#                         ignore_factors=None, batch_size=100)
# #
# # # -------------------------- 2. 定义全局路径常量（集中管理，避免重复拼接） --------------------------
# # 原始NPY特征文件根目录（train/val的类别NPY均存于此）
# OVERALL_FEAT_DIR = os.path.join(save_dir, "attributes", "overall_feature_extraction")
# # 提取后语义特征保存根目录（自动匹配train/val子目录）
# SEMANTIC_EMBED_ROOT = os.path.join(save_dir, "attributes", "train_semantic_embed")
# # 类别均值特征保存目录（save_class_mean_features的输出目录）
# AVG_DISENT_FEAT_DIR = os.path.join(save_dir, "attributes", "avg_disent_feats")
# # 特征拼接结果保存目录（concat_fault_features的输出目录）
# SEMANTIC_ATTR_SAVE_DIR = os.path.join(save_dir, "attributes", "semantic_attribute")
#
# # -------------------------- 3. 批量提取类别特征：调用 extract_class_features --------------------------
# # 功能：按CSV的类别列（Fault Location/Fault Size）划分NPY特征，保存到对应目录
# # 任务参数：(数据集划分, CSV路径, 类别列名)
# extract_tasks = [
#         # ("train", train_csv, 'Operating Condition'),
#         ("train", train_csv, "Fault Location"),
#         # ("train", train_csv, "Fault Size"),
#         # ("val", val_csv, 'Operating Condition'),
#         ("val", val_csv, "Fault Location"),
#         # ("val", val_csv, "Fault Size")
#     ]
#
# for split, csv_path, class_col in extract_tasks:
#     # 构建当前任务的NPY特征路径（格式：{划分}-{类别列名}.npy）
#     npy_file_path = os.path.join(OVERALL_FEAT_DIR, f"{split}-{class_col}.npy")
#     # 构建特征保存目录（格式：语义特征根目录/{划分}）
#     feat_save_dir = os.path.join(SEMANTIC_EMBED_ROOT, split, class_col)
#
#     # 调用特征提取函数
#     extract_class_features(
#         csv_path=csv_path,  # 对应数据集的类别CSV
#         npy_base_dir=npy_file_path,  # 原始NPY特征文件路径
#         name=class_col,  # 双重角色：CSV列名 + NPY文件名核心
#         save_dir=feat_save_dir  # 提取后特征的保存目录
#     )

# -------------------------- 4. 批量计算类别均值：调用 save_class_mean_features --------------------------
# 功能：针对val集NPY特征，按类别计算均值并保存
# 仅需遍历两个类别列名
# save_tasks = ["Fault Location", "Fault Size"]
# save_tasks = ["Fault Location"]
# train_csv = os.path.join(save_dir, 'train_disent.csv')
# for class_col in save_tasks:
#     # 构建val集当前类别的NPY路径（格式：val-{类别列名}.npy）
#     val_npy_path = os.path.join(OVERALL_FEAT_DIR, f"train-{class_col}.npy")
#
#     # 调用均值计算函数
#     save_class_mean_features(
#         encoding_path=val_npy_path,  # val集NPY特征（形状：[样本数, 特征维度]）
#         csv_path=train_csv,  # val集类别CSV
#         show_feature=class_col,  # 分类依据的类别列名
#         save_npy_path=AVG_DISENT_FEAT_DIR  # 均值特征保存目录
#     )
# FAULT_LOCATIONS = ["B007", "IR007", "OR007",
#                         "B014", "IR014", "OR014",
#                         "B021", "IR021", "OR021", "No"]
    # 故障尺寸：普通位置（B/IR/OR）对应3个尺寸，No位置对应1个尺寸（0inch）
    # FAULT_SIZES = {
    #     "B": ["007inch", "014inch", "021inch"],
    #     "IR": ["007inch", "014inch", "021inch"],
    #     "OR": ["007inch", "014inch", "021inch"],
    #     "No": ["0inch"]
    # }

    # 遍历所有故障位置+尺寸组合，批量拼接特征
# for loc in FAULT_LOCATIONS:
#     loc_feat_path = os.path.join(AVG_DISENT_FEAT_DIR, f"Fault Location_{loc}.npy")
#         # 调用特征拼接函数
#     concat_fault_features(
#             [loc_feat_path],  # 故障位置特征路径
#             SEMANTIC_ATTR_SAVE_DIR  # 拼接结果保存目录
# )
# import os
#
# from zsl_ma.tools.plot import visualize_mean_features
# save_dir = r'D:\Code\2-ZSL\1-output\特征解耦结果\HOB\exp-3'
# spilt = 'val'
# show_feature='Fault Location'
# visualize_features(os.path.join(save_dir, 'attributes', 'em', 'train-feature-em.npy'),
#                         os.path.join(save_dir, f'train_disent.csv'), 'Fault Location',
#                         os.path.join(save_dir, 'images',f'feature_projection_space.jpg'))
# npy_dim_reduction_visualization(r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-4\attributes\semantic_embed', 'tsne')
# concat_fault_features([r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-6\attributes\avg_disent_feats\Operating Condition_0HP.npy',
#     r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-4\attributes\avg_disent_feats\Fault Location_B.npy',
#                        r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-4\attributes\avg_disent_feats\Fault Size_007inch.npy'],
#                       r'D:\Code\2-ZSL\1-output\特征解耦结果\exp-6\attributes\semantic_attribute')
# visualize_mean_features(os.path.join(save_dir, 'attributes', 'overall_feature_extraction','val-Operating Condition.npy'),
#                    os.path.join(save_dir, '验证集解耦.csv'),
#                    'Operating Condition',
#                    os.path.join(save_dir, 'images', '验证集-故障工况可视化.jpg'))
# save_dir = '/data/coding/output/T06/1HP-预测结果.csv'
# df = pd.read_csv(os.path.join(save_dir, 'unseen_classes-预测结果.csv'))
# y_ture = df['标注类别ID'].tolist()
# y_pred = df['类别预测ID'].tolist()
# classes = df['标注类别ID'].drop_duplicates().tolist()
# plot_confusion_matrix(y_true=y_ture, y_pred=y_pred, classes=classes,
#                       save_path=os.path.join(save_dir, 'images', '1-confusion_matrix.jpg'), title=None)
# res = classification_report(y_ture, y_pred, digits=4)
# print(res)

# 1. 读取CSV文件（替换为你的CSV路径）
# input_csv = r"D:\Code\2-ZSL\1-output\论文实验结果\H01\exp-2\1HP-预测结果.csv"  # 输入CSV路径
# df = pd.read_csv(input_csv)
#
# # 2. 定义映射关系（现有列值 -> 新列值）
# mapping = {
#     "1-BackWear": "1-Flank wear",
#     "1-FrontWear": "1-Rake face wear",
#     # "0-No": "0-Normal",
# "1-No": "1-Normal",
#     "1-ToothFracture": "1-Tooth breakage"
# }
#
# # 3. 新增列（替换"现有列名"为实际的列名，如"缺陷类型"）
# # 假设现有列的列名为"OriginalType"，新列名为"TranslatedType"
# existing_col = "标注类别名称"  # 现有列的列名（必须替换为你的实际列名）
# new_col = "类别名称"           # 新列的列名（可自定义）
#
# # 根据现有列的值映射生成新列
# df[new_col] = df[existing_col].map(mapping)
#
# # 4. 保存修改后的CSV（可覆盖原文件或保存为新文件）
# output_csv = r"D:\Code\2-ZSL\1-output\论文实验结果\H01\exp-2\不可见类预测结果.csv"  # 输出CSV路径（如需覆盖原文件，可设为input_csv）
# df.to_csv(output_csv, index=False, encoding='utf-8-sig')  # index=False 不保存索引列
#
# print(f"处理完成！已在{output_csv}中新增列'{new_col}'")