import cv2  # 导入cv2模块,用于读取和处理图像
import numpy as np  # 导入numpy模块,主要用于数值计算
import os  # 导入os模块,用于处理文件和目录
from os.path import exists  # 从os.path模块导入exists函数,用于检测文件或目录是否存在
from imutils import paths  # 导入imutils中的paths工具,用于获取文件路径
import pickle  # 导入pickle模块,用于序列化和反序列化Python对象结构
from tqdm import tqdm  # 导入tqdm模块,用于在循环中添加进度条
import zipfile
import io

import logging  # 用于记录日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') # 配置logging

# Helper: file size in megabytes (float).
def get_size(file):
    """
    Return the size of a file in megabytes.

    Parameters:
    file (str): path to the file.

    Returns:
    float: file size in MB.
    """
    # 1 MB = 1024 * 1024 = 1048576 bytes
    return os.path.getsize(file) / 1048576.0

# Helper: infer the class label (dog=1, cat=0) from an image path.
def _infer_label_from_path(path: str) -> int:
    """从文件名或其父目录名推断标签：包含 'dog' 为 1，包含 'cat' 为 0。"""
    lower = path.replace("\\", "/").lower()
    base = os.path.basename(lower)
    name_wo_ext = os.path.splitext(base)[0]
    parts = lower.split('/')
    # 先看文件名前缀 cat./dog.
    if name_wo_ext.startswith('dog'):
        return 1
    if name_wo_ext.startswith('cat'):
        return 0
    # 再看路径层级中是否出现目录名 dog/cat
    if any(p == 'dog' or p.startswith('dog_') for p in parts):
        return 1
    if any(p == 'cat' or p.startswith('cat_') for p in parts):
        return 0
    # 默认按非 dog 处理为 cat=0
    return 0


def createXY(train_folder, dest_folder, method='vgg', batch_size=64):
    """
    Build the feature matrix X and label vector y from a cat/dog image dataset.

    The data source may be either a directory of images or a path to a .zip
    archive.  Results are cached as pickle files (X.pkl / y.pkl) in
    dest_folder; a valid existing cache is loaded and returned directly.

    Parameters:
    train_folder (str): directory of images, or a path to a .zip archive.
    dest_folder (str): directory where X.pkl / y.pkl are stored.
    method (str): 'vgg' (VGG16 max-pooled features from 224x224 RGB) or
        'flat' (32x32 grayscale pixels flattened to a 1024-vector).
    batch_size (int): number of images processed per batch.

    Returns:
    tuple (np.ndarray, np.ndarray): (X, y).

    Raises:
    ValueError: if method is neither 'vgg' nor 'flat'.
    ImportError: if method='vgg' and TensorFlow/Keras is not installed.
    """
    # Fail fast on an unsupported method; the original code would otherwise
    # crash later with an UnboundLocalError on `model` / `img`.
    if method not in ('vgg', 'flat'):
        raise ValueError(f"method must be 'vgg' or 'flat', got {method!r}")

    x_file_path = os.path.join(dest_folder, "X.pkl")  # cached feature file
    y_file_path = os.path.join(dest_folder, "y.pkl")  # cached label file

    # If a cache exists, try to load it; ignore it when empty or corrupted.
    if os.path.exists(x_file_path) and os.path.exists(y_file_path):
        logging.info("X和y已经存在，直接读取")
        x_size = get_size(x_file_path)
        y_size = get_size(y_file_path)
        logging.info(f"X文件大小:{x_size:.2f}MB")
        logging.info(f"y文件大小:{y_size:.2f}MB")
        try:
            if x_size > 0 and y_size > 0:
                with open(x_file_path, "rb") as fx:
                    X = pickle.load(fx)
                with open(y_file_path, "rb") as fy:
                    y = pickle.load(fy)
                if isinstance(X, np.ndarray) and isinstance(y, np.ndarray) and X.size > 0 and y.size > 0:
                    return X, y
                logging.warning("检测到缓存 X/y 为空或异常，忽略并重新构建……")
            else:
                logging.warning("检测到缓存文件大小为 0，忽略并重新构建……")
        except Exception as e:
            logging.warning(f"读取缓存失败（将重新构建）：{e}")

    logging.info("读取所有图像，生成X和y")

    X = []  # accumulated feature rows
    y = []  # accumulated labels

    if method == 'vgg':
        # Lazy import so that 'flat' mode never touches TensorFlow.
        try:
            from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input  # type: ignore
            from tensorflow.keras.preprocessing import image as keras_image  # type: ignore
        except Exception as e:
            raise ImportError("使用 method='vgg' 需要安装 TensorFlow/Keras。建议在仅用 flat 时无需安装。") from e
        model = VGG16(weights='imagenet', include_top=False, pooling="max")
        logging.info("完成构建 VGG16 模型")
    else:
        model = None
        preprocess_input = None  # unused in 'flat' mode
        keras_image = None       # unused in 'flat' mode

    def _decode(source):
        """Decode one image; return an array, or None when the image is
        unreadable.  `source` is a filesystem path (str) or raw bytes
        read from a ZIP member."""
        if method == 'vgg':
            stream = io.BytesIO(source) if isinstance(source, bytes) else source
            img = keras_image.load_img(stream, target_size=(224, 224))
            return keras_image.img_to_array(img)
        if isinstance(source, bytes):
            img = cv2.imdecode(np.frombuffer(source, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
        else:
            img = cv2.imread(source, cv2.IMREAD_GRAYSCALE)
        if img is None:
            # cv2.imread/imdecode return None for unreadable/corrupted files;
            # skip them instead of crashing in cv2.resize.
            return None
        return cv2.resize(img, (32, 32))

    def _featurize(images):
        """Turn a list of decoded images into a 2-D feature array."""
        batch = np.array(images)
        if method == 'vgg':
            return model.predict(preprocess_input(batch), verbose=0)
        return batch.reshape(batch.shape[0], -1)

    def _consume(entries, get_source):
        """Iterate `entries` in batches; `get_source(entry)` yields the raw
        image source (path or bytes).  Appends to the outer X / y lists."""
        num_batches = len(entries) // batch_size + (1 if len(entries) % batch_size else 0)
        for idx in tqdm(range(num_batches), desc="读取图像"):
            start = idx * batch_size
            end = min((idx + 1) * batch_size, len(entries))
            batch_images, batch_labels = [], []
            for entry in entries[start:end]:
                img = _decode(get_source(entry))
                if img is None:
                    continue  # unreadable image: skip, keeping X/y aligned
                batch_images.append(img)
                batch_labels.append(_infer_label_from_path(entry))
            if not batch_images:
                continue  # entire batch was unreadable
            X.extend(_featurize(batch_images))
            y.extend(batch_labels)

    # Two data sources: a ZIP archive or a directory tree.
    if os.path.isfile(train_folder) and train_folder.lower().endswith(".zip"):
        with zipfile.ZipFile(train_folder, 'r') as zf:
            names = [n for n in zf.namelist()
                     if n.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')) and not n.endswith('/')]
            _consume(names, zf.read)
    else:
        image_paths = list(paths.list_images(train_folder))
        _consume(image_paths, lambda p: p)

    logging.info(f"X.shape: {np.shape(X)}")
    logging.info(f"y.shape: {np.shape(y)}")
    if len(X) == 0 or len(y) == 0:
        logging.warning("未从数据源读取到任何有效图像，请检查路径与文件命名（cat.* / dog.*）。")

    # Persist the freshly built arrays so subsequent calls hit the cache.
    os.makedirs(dest_folder, exist_ok=True)
    X = np.array(X)
    y = np.array(y)
    with open(x_file_path, "wb") as fx:
        pickle.dump(X, fx, protocol=pickle.HIGHEST_PROTOCOL)
    with open(y_file_path, "wb") as fy:
        pickle.dump(y, fy, protocol=pickle.HIGHEST_PROTOCOL)

    return X, y

