import os
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models
from sklearn.utils import shuffle

# Dataset locations (absolute Windows paths — edit to match the local machine).
annotations_path = 'F:/OneDrive/Desktop/Annotations'  # Pascal-VOC style XML annotation files (001.xml .. 285.xml)
images_path = 'F:/baiduwangpan/10.21PIC'  # corresponding JPEG images (001.jpg .. 285.jpg)

# 解析XML文件以获取标签
def parse_xml(xml_file):
    """Parse a Pascal-VOC style annotation file.

    Args:
        xml_file: Path to the XML annotation file.

    Returns:
        A list of ``(label, xmin, ymin, xmax, ymax)`` tuples, one per
        ``<object>`` element, with coordinates truncated to ``int``.
    """
    root = ET.parse(xml_file).getroot()

    boxes = []
    for obj_node in root.iter('object'):
        name = obj_node.find('name').text
        bb = obj_node.find('bndbox')
        # Coordinates may be stored as floats in the XML; parse as float
        # first, then truncate to integer pixel positions.
        coords = tuple(
            int(float(bb.find(tag).text))
            for tag in ('xmin', 'ymin', 'xmax', 'ymax')
        )
        boxes.append((name,) + coords)

    return boxes

# 加载数据集
def load_data(annotations_path, images_path):
    """Load the image dataset and its first-object labels.

    Args:
        annotations_path: Directory containing ``NNN.xml`` annotation files.
        images_path: Directory containing the matching ``NNN.jpg`` images.

    Returns:
        A tuple ``(X, y)`` where ``X`` is an ``np.ndarray`` of
        224x224 BGR images and ``y`` is a parallel list of
        ``(label, xmin, ymin, xmax, ymax)`` tuples (same length as ``X``).

    Bug fix: previously an image whose annotation file contained zero
    <object> entries was appended to X with no matching entry in y,
    desynchronizing the two lists (and breaking the length assert in
    train_model). Such images are now skipped entirely.
    """
    X = []
    y = []

    # Files are numbered 001 .. 285; skip any missing index.
    for i in range(1, 286):
        xml_file = os.path.join(annotations_path, f"{i:03d}.xml")
        if not os.path.exists(xml_file):
            continue
        boxes = parse_xml(xml_file)

        # Keep X and y in lockstep: only use images that have at least
        # one annotated object.
        if not boxes:
            continue

        image_file = os.path.join(images_path, f"{i:03d}.jpg")
        if not os.path.exists(image_file):
            continue

        image = cv2.imread(image_file)
        image = cv2.resize(image, (224, 224))  # match the model's input size
        X.append(image)

        # Only the first bounding box is used as the image label
        # (assumes one object of interest per image).
        label, xmin, ymin, xmax, ymax = boxes[0]
        y.append((label, xmin, ymin, xmax, ymax))

    return np.array(X), y

# 创建模型
def create_model(num_classes):
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(224, 224, 3)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

# 训练模型
def train_model(X, y):
    # 将标签转为适合模型格式
    labels = [label for label, _, _, _, _ in y]
    unique_labels = list(set(labels))
    label_to_index = {label: idx for idx, label in enumerate(unique_labels)}
    y_labels = np.array([label_to_index[label] for label, _, _, _, _ in y])

    # 打乱 X 和 y_labels 的顺序
    X, y_labels = shuffle(X, y_labels, random_state=42)

    # 归一化
    X = X / 255.0

    # 使用ImageDataGenerator进行数据增强
    datagen = ImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        vertical_flip=True,
        validation_split=0.2
    )

    # 确保数据量相匹配
    assert len(X) == len(y_labels), "X and y_labels must have the same length"

    # 创建模型
    model = create_model(len(unique_labels))

    # 学习率调度
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.0001)

    model.fit(datagen.flow(X, y_labels, subset='training', batch_size=32),
              validation_data=datagen.flow(X, y_labels, subset='validation'),
              epochs=50,
              callbacks=[reduce_lr])
    return model, unique_labels

# 主程序
if __name__ == '__main__':
    X, y = load_data(annotations_path, images_path)
    model, unique_labels = train_model(X, y)

    # 评估新的报纸图片
    for i in range(1, 286):  # 001.jpg到285.jpg
        img_file = os.path.join(images_path, f"{i:03d}.jpg")
        if not os.path.exists(img_file):
            continue

        img = cv2.imread(img_file)
        img = cv2.resize(img, (224, 224))  # 根据模型输入要求
        img = img / 255.0  # 归一化
        img = np.expand_dims(img, axis=0)  # 增加batch维度

        predictions = model.predict(img)
        predicted_label_idx = np.argmax(predictions)
        predicted_label = unique_labels[predicted_label_idx]

        print(f"Predictions for {i:03d}.jpg: {predicted_label} (Probability: {np.max(predictions):.4f})")