# coding=utf-8
# Image segmentation
import os
# Fully disable the GPU to avoid CUDA-related errors (must be set before importing TensorFlow)
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import random
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose
from tensorflow.keras.layers import MaxPooling2D, Cropping2D, Concatenate
from tensorflow.keras.layers import Lambda, Activation, BatchNormalization, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from PIL import ImageOps
from IPython.display import Image,display
import numpy as np

print("GPU已禁用，将使用CPU进行训练")

# GPU memory-growth configuration.
# NOTE(review): CUDA_VISIBLE_DEVICES='-1' is exported above before TensorFlow
# is imported, so this list is expected to be empty and the no-GPU branch
# normally runs; kept for environments where that env var is removed.
gpus = tf.config.list_physical_devices('GPU')
if not gpus:
    print("未检测到GPU设备，将使用CPU进行训练")
else:
    try:
        # Grow GPU memory on demand instead of reserving all of it up front.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print(f"检测到 {len(gpus)} 个GPU设备，已启用内存增长模式")
    except RuntimeError as e:
        print(f"GPU配置错误: {e}")

def downsampling_block(input_tensor, filters, dropout_rate=0.0):
    """One U-Net encoder level: two Conv-BN-ReLU(-Dropout) stages + 2x2 max pool.

    Args:
        input_tensor: feature map entering this level.
        filters: number of convolution filters for both stages.
        dropout_rate: dropout applied after each stage; 0 disables it.

    Returns:
        (pooled, features) tuple — `pooled` feeds the next encoder level,
        `features` is the pre-pooling tensor kept for the decoder skip connection.
    """
    features = input_tensor
    for _ in range(2):
        features = Conv2D(filters, kernel_size=(3, 3), padding='same',
                          kernel_regularizer=l2(1e-4))(features)
        features = BatchNormalization()(features)
        features = Activation('relu')(features)
        if dropout_rate > 0:
            features = Dropout(dropout_rate)(features)
    return MaxPooling2D(pool_size=(2, 2))(features), features

def upsampling_block(input_tensor, skip_tensor, filters, dropout_rate=0.0):
    """One U-Net decoder level: 2x upsample, merge with skip, two conv stages.

    Args:
        input_tensor: feature map from the previous (deeper) decoder level.
        skip_tensor: matching encoder feature map for the skip connection.
        filters: number of convolution filters.
        dropout_rate: dropout applied after each stage; 0 disables it.

    Returns:
        The merged and convolved feature map.
    """
    # Double spatial resolution with a stride-2 transposed convolution.
    up = Conv2DTranspose(filters, kernel_size=(2, 2), strides=(2, 2), padding='same')(input_tensor)

    # Center-crop the skip tensor if its spatial size exceeds the upsampled one.
    _, up_h, up_w, _ = up.shape
    _, skip_h, skip_w, _ = skip_tensor.shape
    dh = skip_h - up_h
    dw = skip_w - up_w
    if dh or dw:
        crop = ((dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2))
        skip = Cropping2D(cropping=crop)(skip_tensor)
    else:
        skip = skip_tensor

    # Fuse decoder and encoder features along the channel axis.
    merged = Concatenate()([up, skip])
    for _ in range(2):
        merged = Conv2D(filters, kernel_size=(3, 3), padding='same',
                        kernel_regularizer=l2(1e-4))(merged)
        merged = BatchNormalization()(merged)
        merged = Activation('relu')(merged)
        if dropout_rate > 0:
            merged = Dropout(dropout_rate)(merged)
    return merged

def unet(imagesize, classes, features=64, depth=4, dropout_rate=0.1):
    """Build a U-Net segmentation model.

    Args:
        imagesize: (height, width) tuple of the input images.
        classes: number of output classes (softmax channels).
        features: filters at the first encoder level; doubled at each level.
        depth: number of downsampling/upsampling levels.
        dropout_rate: dropout applied after each conv stage; 0 disables it.

    Returns:
        A keras.Model mapping (H, W, 3) images to per-pixel class probabilities.
    """
    inputs = keras.Input(shape=imagesize + (3,))
    x = inputs

    # Encoder: keep each level's pre-pooling tensor for the skip connections.
    skips = []
    for _ in range(depth):
        x, skip = downsampling_block(x, features, dropout_rate)
        skips.append(skip)
        features *= 2

    # Bottleneck: two Conv-BN-ReLU stages. Dropout is guarded by rate > 0,
    # consistent with downsampling_block/upsampling_block (previously it was
    # applied unconditionally here, adding a no-op layer when rate == 0).
    for _ in range(2):
        x = Conv2D(filters=features, kernel_size=(3, 3), padding='same',
                   kernel_regularizer=l2(1e-4))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        if dropout_rate > 0:
            x = Dropout(dropout_rate)(x)

    # Decoder: walk the skip connections in reverse, halving filters per level.
    for i in reversed(range(depth)):
        features //= 2
        x = upsampling_block(x, skips[i], features, dropout_rate)

    # 1x1 convolution to per-class logits, then softmax over the class axis.
    x = Conv2D(filters=classes, kernel_size=(1, 1), padding='same')(x)
    outputs = Activation('softmax')(x)
    return keras.Model(inputs=inputs, outputs=outputs)

def create_model(image_size, batch_size, num_classes):
    """Create a U-Net segmentation model.

    Args:
        image_size: (height, width) of the input images.
        batch_size: unused here; kept in the signature for caller compatibility.
        num_classes: number of segmentation classes.

    Returns:
        An uncompiled keras.Model.
    """
    # Smaller initial feature count (32) keeps memory usage down at depth 4.
    model = unet(image_size, num_classes, features=32, depth=4, dropout_rate=0.1)
    # NOTE(review): removed a dead try/except that wrapped only `pass` — the
    # optional keras.utils.plot_model visualization it guarded was already
    # commented out (pydot/graphviz not available).
    return model

class OxfordPets(keras.utils.Sequence):
    """Keras Sequence yielding (image, mask) batches from paired path lists."""

    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        # Batch size, target (height, width), and the paired path lists.
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths

    def __len__(self):
        # Number of full batches per epoch; a trailing partial batch is dropped.
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Return batch `idx` as (x, y) arrays."""
        start = idx * self.batch_size
        stop = start + self.batch_size
        image_paths = self.input_img_paths[start:stop]
        mask_paths = self.target_img_paths[start:stop]

        # Images: float32, shape (B, H, W, 3). Values are raw 0-255 pixel
        # intensities — NOTE(review): no normalization is applied here.
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        for j, path in enumerate(image_paths):
            x[j] = load_img(path, target_size=self.img_size)

        # Masks: uint8 labels, shape (B, H, W, 1).
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype='uint8')
        for j, path in enumerate(mask_paths):
            mask = load_img(path, target_size=self.img_size, color_mode='grayscale')
            y[j] = np.expand_dims(mask, 2)
        return x, y

def load_data(path, batch_size, img_size):
    """Build train/validation OxfordPets generators from the dataset at `path`.

    Args:
        path: dataset root; expects `segdata/images/` (.jpg inputs) and
            `segdata/annotations/trimaps/` (.png masks) underneath.
        batch_size: batch size for both generators.
        img_size: (height, width) to which images and masks are resized.

    Returns:
        (train_gen, val_gen) tuple of OxfordPets sequences; the last 1000
        shuffled pairs are held out for validation.
    """
    # Input images, sorted so they pair index-for-index with the sorted masks.
    input_dir = path + 'segdata/images/'
    input_img_path = sorted(
        os.path.join(input_dir, fname)
        for fname in os.listdir(input_dir)
        if fname.endswith('.jpg'))

    # Trimap annotations; skip hidden files (e.g. macOS "._" resource forks).
    target_dir = path + 'segdata/annotations/trimaps/'
    target_img_path = sorted(
        os.path.join(target_dir, fname)
        for fname in os.listdir(target_dir)
        if fname.endswith('.png') and not fname.startswith('.'))

    # Shuffle both lists with the same fixed seed so image/mask pairs stay
    # aligned. (Fixed: the original shuffled twice back-to-back — a copy-paste
    # duplicate; one seeded shuffle per list is sufficient.)
    random.Random(1337).shuffle(input_img_path)
    random.Random(1337).shuffle(target_img_path)

    # Hold out the last `val_samples` pairs for validation.
    val_samples = 1000
    train_input_img_paths = input_img_path[:-val_samples]
    train_target_img_paths = target_img_path[:-val_samples]
    val_input_img_paths = input_img_path[-val_samples:]
    val_target_img_paths = target_img_path[-val_samples:]

    train_gen = OxfordPets(batch_size, img_size, train_input_img_paths, train_target_img_paths)
    val_gen = OxfordPets(batch_size, img_size, val_input_img_paths, val_target_img_paths)
    return train_gen, val_gen

if __name__ == '__main__':
    # Training configuration (batch size reduced to fit in memory).
    image_size = (160, 160)
    batch_size = 16
    num_classes = 4

    model = create_model(image_size, batch_size, num_classes)
    train_gen, val_gen = load_data('D:/project/dataset/', batch_size, image_size)

    # Adam with a small learning rate; sparse labels, so no one-hot encoding.
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])

    # Cap the steps per epoch/validation pass rather than using the full
    # generator length each epoch.
    model.fit(train_gen, epochs=30, validation_data=val_gen,
              steps_per_epoch=100, validation_steps=50)
    model.save('D:/project/heimabigdata/计算机视觉(CV)/lesson5/图像分割/model.h5')