import numpy as np
import tensorflow as tf
import os
import re
from PIL import Image
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
def get_next_image_number(folder_path, new_name_prefix, processed_tag):
    """Return the next free image number in *folder_path*.

    Scans the folder for files named ``{new_name_prefix}_<number>{processed_tag}...``
    and returns the highest number found plus one (1 when nothing matches).

    Args:
        folder_path: Directory whose file names are scanned.
        new_name_prefix: Prefix used when the images were saved.
        processed_tag: Tag appended after the number (e.g. '_processed').

    Returns:
        int: highest matched number + 1.
    """
    # re.escape keeps prefixes/tags containing regex metacharacters literal;
    # the original interpolated them raw, which broke on e.g. '.' or '+'.
    pattern = re.compile(
        rf"{re.escape(new_name_prefix)}_(\d+){re.escape(processed_tag)}"
    )
    max_number = 0
    for file_name in os.listdir(folder_path):
        match = pattern.match(file_name)
        if match:
            max_number = max(max_number, int(match.group(1)))
    return max_number + 1


def batch_rename_crop_tag_images(folder_path, saved_path, new_name_prefix, crop_box,
                                 processed_tag='_processed'):
    """Crop, resize to 80x80 and renumber every image in *folder_path*.

    Each result is saved into *saved_path* (created if missing) as
    ``{new_name_prefix}_{n}{processed_tag}.png`` with ``n`` counting from 0.

    Args:
        folder_path: Directory containing the source images.
        saved_path: Destination directory for the processed images.
        new_name_prefix: Prefix for the generated file names.
        crop_box: 4-tuple (left, upper, right, lower) passed to PIL's crop.
        processed_tag: Tag inserted before the '.png' extension.
    """
    # Create the output folder when it does not exist yet.
    os.makedirs(saved_path, exist_ok=True)
    # Numbering restarts at 0 on every run; get_next_image_number() could be
    # used here instead to continue an existing sequence.
    next_image_number = 0
    # Require the leading dot so e.g. 'x.apng' is not treated as a .png file.
    image_files = [
        f for f in os.listdir(folder_path)
        if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif'))
    ]
    for image_file in image_files:
        with Image.open(os.path.join(folder_path, image_file)) as img:
            # Crop, then normalize everything to 80x80.
            cropped_img = img.crop(crop_box).resize((80, 80))
            new_file_name = f"{new_name_prefix}_{next_image_number}{processed_tag}.png"
            next_image_number += 1
            # Save the cropped image under its new sequential name.
            cropped_img.save(os.path.join(saved_path, new_file_name))
            print(f"Processed {image_file} -> {new_file_name}")


def load_images(file_path, p):
    """Load *p* randomly chosen images from *file_path* as a float32 tensor.

    Every image is forced to 3-channel RGB: grayscale images are replicated
    across three channels and an alpha channel, when present, is dropped.

    Args:
        file_path: Directory containing the image files.
        p: Number of images to sample without replacement.  np.random.choice
           raises ValueError when fewer than *p* images are available.

    Returns:
        tf.Tensor of shape (p, H, W, 3), dtype float32.  NOTE(review): all
        sampled images are assumed to share the same H x W — confirm upstream.
    """
    # Match extensions case-insensitively and require the leading dot so
    # e.g. 'shot.apng' is not mistaken for a .png file.
    image_files = [
        os.path.join(file_path, f)
        for f in os.listdir(file_path)
        if f.lower().endswith(('.png', '.jpg', '.jpeg'))
    ]
    selected_images = np.random.choice(image_files, p, replace=False)
    images = []
    for image_file in selected_images:
        with Image.open(image_file) as img:
            img_array = np.array(img)
            # Grayscale -> replicate into three identical channels.
            if img_array.ndim == 2:
                img_array = np.stack((img_array,) * 3, axis=-1)
            # RGBA -> drop the alpha channel.
            if img_array.shape[-1] == 4:
                img_array = img_array[..., :3]
            images.append(img_array)
    # Convert to a TensorFlow tensor.
    return tf.convert_to_tensor(images, dtype=tf.float32)
def data_only_mix(x_row_data, y_row_data):
    """Shuffle-split raw data into train (80%), test (10%) and valid (10%) sets.

    Returns:
        (x_train, x_test, x_valid, y_train, y_test, y_valid)
    """
    # First cut: 80% for training, 20% held out for test + validation.
    first_cut = train_test_split(x_row_data, y_row_data,
                                 test_size=0.20, random_state=42)
    x_train_raw, x_holdout, y_train_raw, y_holdout = first_cut
    # Second cut: halve the held-out portion into test and validation.
    second_cut = train_test_split(x_holdout, y_holdout,
                                  test_size=0.5, random_state=42)
    x_test_raw, x_valid_raw, y_test_raw, y_valid_raw = second_cut
    return x_train_raw, x_test_raw, x_valid_raw, y_train_raw, y_test_raw, y_valid_raw
def data_load_and_mixed(data_dir_path, crop_box):
    """Load every sub-dataset under *data_dir_path*, shuffle and split it.

    Each sub-folder is expected to contain a ``data.mat`` file (key
    ``'row_data'``, the histogram rows) and a ``ground_truth`` image folder.
    The ground-truth image is cropped with *crop_box*, normalized and
    repeated once per data row so x and y stay aligned.

    Args:
        data_dir_path: Directory whose sub-folders each hold one dataset.
        crop_box: 4-tuple crop box forwarded to batch_rename_crop_tag_images.

    Returns:
        (x_train, x_test, x_valid, y_train, y_test, y_valid); the x arrays
        carry a trailing channel axis added via np.expand_dims.

    Raises:
        FileNotFoundError: when no sub-folder contained a ``data.mat``
        (previously this fell through to a NameError on ``x_raw``).
    """
    x_chunks, y_chunks = [], []
    for dirs in os.listdir(data_dir_path):
        sub_folder_path = os.path.join(data_dir_path, dirs)
        if not os.path.isdir(sub_folder_path):
            continue
        data_file_path = os.path.join(sub_folder_path, 'data.mat')
        img_folder_path = os.path.join(sub_folder_path, 'ground_truth')
        tof_ground_truth_file_path = os.path.join(sub_folder_path, 'processed')
        # Crop/resize the ground-truth image, then load and normalize it.
        batch_rename_crop_tag_images(img_folder_path, tof_ground_truth_file_path,
                                     'ground_truth', crop_box)
        y_data = img_normalization2array(load_images(tof_ground_truth_file_path, 1))
        if os.path.isfile(data_file_path):
            data = loadmat(data_file_path)['row_data']
            # One copy of the ground-truth image per histogram row.
            x_chunks.append(data)
            y_chunks.append(np.repeat(y_data, data.shape[0], axis=0))
            print(f"已导入 {data_file_path}")
        else:
            print(f"File not found in {sub_folder_path}")
            print("结束数据导入")
    if not x_chunks:
        raise FileNotFoundError(f"no 'data.mat' found under {data_dir_path}")
    # Stack once at the end instead of np.vstack inside the loop (quadratic).
    x_raw = np.vstack(x_chunks)
    y_raw = np.vstack(y_chunks)

    # Extra shuffle kept from the original; train_test_split shuffles again.
    indices = np.arange(x_raw.shape[0])
    np.random.shuffle(indices)
    x_raw = x_raw[indices]
    y_raw = y_raw[indices]

    # Reuse the shared helper for the 80/10/10 train/test/valid split instead
    # of duplicating its logic here.
    (x_train_raw, x_test_raw, x_valid_raw,
     y_train_raw, y_test_raw, y_valid_raw) = data_only_mix(x_raw, y_raw)

    # Add the trailing channel axis expected by the network inputs.
    x_train_raw = np.expand_dims(x_train_raw, axis=-1)
    x_valid_raw = np.expand_dims(x_valid_raw, axis=-1)
    x_test_raw = np.expand_dims(x_test_raw, axis=-1)
    return x_train_raw, x_test_raw, x_valid_raw, y_train_raw, y_test_raw, y_valid_raw

# 定义训练的函数，包括模型训练、计时等，返回训练好的模型以及训练的历史信息
#def train(train_inputs,train_labels,val_inputs,val_labels,epochs,batch,
#        kern_int_e, kern_int_d, kern_reg=None, kse=7, ksd=5,feats=8):
#
#    optimizer = tf.keras.optimizers.Adam()
#    model = simulation_nn(feats, kern_int_e, kern_int_d, kern_reg, kse, ksd)
#    model.compile(loss=ssim_loss,
#                  optimizer=optimizer,
#                  metrics=['accuracy'])
#    start = time.time()
#    history = model.fit(train_inputs, train_labels, epochs=epochs, batch_size=batch,
#                        validation_data=(val_inputs, val_labels), verbose=1)
#    print(history.history)
#    end = time.time()
#    print('comp. time =', end - start)
#    acc = history.history['accuracy']
#    val_acc = history.history['val_accuracy']
#    loss = history.history['loss']
#    val_loss = history.history['val_loss']
#    acc_loss_log={'acc':acc,'val_acc':val_acc,'loss':loss,'val_loss':val_loss}
#
#    return model, history.history['loss'], history.history['val_loss'],acc_loss_log

# Module-level record of the most recent normalization maximum.  A dedicated
# name replaces the previous ``global max``, which shadowed the built-in
# ``max`` function and left array2img broken until normalization ran.
_norm_max = 1.0

def img_normalization2array(img):
    """Convert *img* to float32 and scale it into [0, 1] by its maximum.

    The maximum is remembered in the module-level ``_norm_max`` so that
    :func:`array2img` can undo the scaling later.

    Args:
        img: Anything np.array accepts (PIL image, array, tensor).

    Returns:
        np.ndarray (float32) scaled so its maximum is 1.0.
    """
    global _norm_max
    image_array = np.array(img, dtype=np.float32)
    peak = np.max(image_array)
    # Guard against an all-zero image: dividing by 0 would produce NaNs.
    _norm_max = peak if peak > 0 else 1.0
    return image_array / _norm_max

def array2img(array):
    """Rescale a [0, 1] array by the last normalization maximum into a PIL image.

    NOTE(review): values are cast to uint16, which assumes the original pixel
    data fit in 16 bits — confirm against the image source.
    """
    image_array = (array * _norm_max).clip(0, _norm_max).astype(np.uint16)
    # Convert the NumPy array back into a PIL image.
    img = Image.fromarray(image_array)
    return img

def ssim_loss(y_true, y_pred):
    """Structural-similarity loss: 1 - SSIM over the first channel.

    ``[..., :1]`` keeps the channel axis so the tensors stay 4-D
    ([batch, H, W, 1]).  The previous ``[:, :, :, 0]`` slice dropped it,
    leaving 3-D tensors whose leading (batch) axis tf.image.ssim would read
    as image height, since it treats the last three dims as H, W, C.

    Returns:
        Per-sample loss tensor of shape [batch]; smaller is better.
    """
    y_true = y_true[..., :1]
    y_pred = y_pred[..., :1]
    # SSIM is a similarity (larger = more similar), so invert it for a loss.
    ssim_value = tf.image.ssim(y_true, y_pred, max_val=1.0)
    return 1 - ssim_value