import os
import sys
from typing import Tuple, List, Any

import torch
import torchvision.models as models
import torchvision.transforms as transforms
from torch.nn import Sequential
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
from torchvision.models import ResNet
from tqdm import tqdm
import logging
import torchvision
import pandas as pd

# Logging setup
from util.file_util import get_static_file_path
from util.log_util import get_logger

# Configure project-wide logging at INFO level (project helper).
get_logger(logging.INFO)

# Device placement: assigned in __main__ (CUDA when available, else CPU).
# Stays None if this module is imported without running the script.
device = None

# Image preprocessing pipeline aligned with the pretrained ResNets;
# assigned in __main__ — None until then.
transform = None

# 自定义数据集
class PatchDataset(Dataset):
    def __init__(self, img_dir, transform=None):
        self.img_dir = img_dir
        self.files = [f for f in os.listdir(img_dir) if f.endswith('.png') or f.endswith('.jpg')]
        self.transform = transform

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        img_name = self.files[idx]
        img_path = os.path.join(self.img_dir, img_name)
        image = Image.open(img_path).convert("RGB")
        if self.transform:
            image = self.transform(image)
        return image, img_name

# 获取某个svs文件下面所有patch的特征
def getAllFeatures(dataloader: DataLoader, feature_extractor: Sequential) -> Tuple[List[Any], List[Any]]:
    # 提取特征并保存为CSV和NPY
    all_features = []
    all_names = []

    # 提取并保存特征
    with torch.no_grad():
        for images, names in tqdm(dataloader):
            images = images.to(device)
            features = feature_extractor(images)  # 输出 shape: [batch_size, 2048, 1, 1]
            features = features.squeeze(-1).squeeze(-1).cpu().numpy()  # shape: [batch_size, 2048]
            # 收集所有特征用于CSV
            all_features.extend(features)
            all_names.extend(names)
    return all_names, all_features

def getResnetModel(resnetName: str) -> "ResNet":
    """Build a ResNet of the requested depth and load its local weights.

    Args:
        resnetName: network depth as a string — one of "18", "34", "50",
            "101", "152".

    Returns:
        The ResNet with weights loaded from ../../static/model/resnet/.

    Exits the process with status 1 when the name is not recognized.
    """
    supported = ("18", "34", "50", "101", "152")
    if resnetName not in supported:
        logging.error("resnetName输入错误，请输入: 18 或者 34 或者 50 或者 101 或者152")
        sys.exit(1)  # non-zero status signals abnormal termination
    # Build the architecture without downloading weights (weights=None),
    # then load the matching state dict from the local checkpoint directory.
    resnet = getattr(models, f"resnet{resnetName}")(weights=None)
    state_dict = torch.load(
        f"../../static/model/resnet/resnet{resnetName}.pth",
        map_location='cpu',
        weights_only=False,
    )
    resnet.load_state_dict(state_dict)
    return resnet


def start(path: str, resnetName: str):
    """Extract features for every patch image under *path* and persist them.

    Writes per-image .npy files into <path>/npy_<resnetName> and one CSV
    into <path>/csv_<resnetName>.
    """
    # Output locations for the two feature formats.
    npy_dir = os.path.join(path, "npy_" + resnetName)
    csv_dir = os.path.join(path, "csv_" + resnetName)
    # Create the output directories; no error when they already exist.
    for out_dir in (npy_dir, csv_dir):
        os.makedirs(out_dir, exist_ok=True)

    # Step 1: dataset + loader — batches of 32, kept in file order
    # (shuffle=False) so features stay aligned with their filenames.
    dataloader = DataLoader(PatchDataset(path, transform), batch_size=32, shuffle=False)

    # Step 2: build the requested ResNet and strip its final fc layer so the
    # network outputs pooled feature vectors instead of class logits.
    resnetModel = getResnetModel(resnetName)
    feature_extractor = torch.nn.Sequential(*list(resnetModel.children())[:-1]).to(device)
    feature_extractor.eval()

    # Step 3: run the extraction over all batches.
    names, features = getAllFeatures(dataloader, feature_extractor)

    # Steps 4-5: persist as per-image .npy files and as a single CSV.
    saveNpyFile(names, features, npy_dir)
    saveCSVFile(names, features, csv_dir)

def saveNpyFile(names: List, features: List, npy_dir: str):
    """Save one .npy file per image, named after the image with a .npy suffix.

    Args:
        names: image filenames (e.g. "patch_0.png").
        features: feature vectors aligned element-wise with *names*.
        npy_dir: destination directory (must already exist).
    """
    for name, feature in zip(names, features):
        # splitext swaps the extension robustly; the old chained str.replace
        # could also rewrite ".png"/".jpg" substrings in the middle of a name.
        stem, _ = os.path.splitext(name)
        np.save(os.path.join(npy_dir, stem + ".npy"), feature)
    logging.info("npy文件保存完成！")
    
def saveCSVFile(names: List, features: List, csv_dir: str):
    """Persist the extracted features as a single CSV file.

    Rows are indexed by image filename; columns are named
    feature_0 .. feature_{D-1}, where D is the feature dimension.
    """
    frame = pd.DataFrame(features, index=names)
    n_dims = frame.shape[1]
    frame.columns = ["feature_{}".format(col) for col in range(n_dims)]
    destination = os.path.join(csv_dir, "features.csv")
    frame.to_csv(destination, index_label="image_name")
    logging.info("特征已保存为CSV文件: %s", destination)

# 目的：获取每个svs所有patch的特征
# Purpose: extract the features of every patch of each SVS slide directory.
if __name__ == '__main__':
    # Step 1: assign the module-level device (GPU when available, else CPU).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logging.info(f"Using device: {device}")

    # ResNet depth used for feature extraction: 18 / 34 / 50 / 101 / 152.
    resnetName = "18"

    # Step 2: image preprocessing aligned with the ImageNet-pretrained ResNets.
    transform = transforms.Compose([
        # Resize to the 224x224 input size the pretrained models expect.
        transforms.Resize((224, 224)),
        # PIL image / numpy array -> float tensor, pixels scaled to [0.0, 1.0].
        transforms.ToTensor(),
        # Per-channel normalization with the ImageNet statistics, matching the
        # distribution the ResNets were trained on.
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    # Raw string: "\z" and "\s" are invalid escape sequences (same runtime
    # value, but the plain literal triggers a DeprecationWarning).
    listPath = get_static_file_path(r"D:\zm_scientific_research\supervised_cutting")
    for path in listPath:
        path = os.path.join(path, "valid")
        logging.info("提取 %s 文件的特征", path)
        start(path, resnetName)
