import os
import cv2
import numpy as np
import pandas as pd
from PIL import Image, ImageOps
from pathlib import Path
import shutil
import random
def del_file(path):
    """Delete every entry inside *path*: plain files are unlinked,
    directories are removed recursively. *path* itself is kept."""
    for entry in Path(path).glob('*'):
        if entry.is_file():
            entry.unlink()
        else:
            shutil.rmtree(entry)


def get_bounding_box(box, img_w=1600, img_h=256):
    """Convert polygon vertices to a normalized YOLO box (cx, cy, w, h).

    Args:
        box: vertex coordinates, anything NumPy can coerce and reshape
            to (-1, 2) rows of [x, y] (e.g. a cv2 contour/approx array).
        img_w: image width used for normalization (default 1600).
        img_h: image height used for normalization (default 256).

    Returns:
        Tuple (center_x, center_y, width, height), each normalized to [0, 1].
    """
    # Flatten to a 2-D array where every row is one [x, y] vertex.
    box = np.array(box).reshape(-1, 2)

    # Axis-aligned extremes of the polygon.
    x_min, y_min = np.min(box, axis=0)
    x_max, y_max = np.max(box, axis=0)

    # Center: midpoint of the extremes, then normalized by the image size
    # (the factor 2 here is the midpoint average, not part of normalization).
    center_x = (x_min + x_max) / (2 * img_w)
    center_y = (y_min + y_max) / (2 * img_h)

    # BUG FIX: width/height were previously divided by 2*dim as well,
    # which halved every box; the normalized extent is (max - min) / dim.
    width = (x_max - x_min) / img_w
    height = (y_max - y_min) / img_h

    return center_x, center_y, width, height


def mask_pil2xy(mask_pil, ImageId, ClassId, save_txtfolder):
    """Extract defect regions from a binary PIL mask and append one YOLO
    line per region to <save_txtfolder>/<ImageId stem>.txt.

    The file is opened in append mode so multiple class masks of the same
    image accumulate in a single label file.
    """
    # Binarize the mask (values > 200 -> 255) before contour extraction.
    mask_np = np.array(mask_pil)
    _, binary_image = cv2.threshold(mask_np, 200, 255, cv2.THRESH_BINARY)

    # External contours only; compressed point representation.
    contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    min_contour_points = 3  # fewer points cannot enclose an area

    yolo_lines = []
    for contour in contours:
        # Skip degenerate contours.
        if len(contour) < min_contour_points:
            continue
        # Simplify the contour; tolerance is 1% of its perimeter.
        approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
        if len(approx) < min_contour_points:
            continue
        center_x, center_y, width, height = get_bounding_box(approx)
        # CSV ClassId is 1-based; YOLO class indices are 0-based.
        yolo_lines.append(f"{ClassId-1} {center_x} {center_y} {width} {height}" + "\n")

    label_name = os.path.splitext(ImageId)[0] + ".txt"
    save_path = os.path.join(save_txtfolder, label_name)
    with open(save_path, 'a') as f:
        f.writelines(yolo_lines)


def rle2mask(rle, imgshape):
    """Decode a run-length-encoded string into a PIL mask image.

    Args:
        rle: whitespace-separated "start length start length ..." pairs.
        imgshape: (height, width) used to size the flat mask buffer.

    Returns:
        A PIL image with mask pixels at 255 and background at 0,
        rotated/flipped so the column-major RLE data displays upright.
    """
    height, width = imgshape[0], imgshape[1]

    # Mark every run in a flat buffer, then fold it into a 2-D array.
    flat = np.zeros(width * height, dtype=np.uint8)
    runs = np.asarray([int(tok) for tok in rle.split()])
    for start, length in zip(runs[0::2], runs[1::2]):
        flat[int(start):int(start) + int(length)] = 1

    mask_pil = Image.fromarray(flat.reshape(height, width) * 255)
    # Rotate 90° CCW then flip vertically; together these transpose the
    # array, converting the column-major run layout to image orientation.
    mask_pil = mask_pil.rotate(90, expand=True)
    return ImageOps.flip(mask_pil)

def split_dataset(base_dir, train_ratio=0.8):
    """Split <base_dir>/sample/{images,labels} into train/ and val/ subsets.

    Files are MOVED (not copied) into
    <base_dir>/sample/{train,val}/{images,labels}, and the emptied source
    folders are removed afterwards.

    Args:
        base_dir: directory that contains the "sample" folder.
        train_ratio: fraction of images assigned to the training set.
    """
    sample_dir = os.path.join(base_dir, "sample")
    images_dir = os.path.join(sample_dir, "images")
    labels_dir = os.path.join(sample_dir, "labels")

    # Collect image files (.jpg / .png), shuffle, and cut at the boundary.
    image_files = sorted(f for f in os.listdir(images_dir) if f.endswith(('.jpg', '.png')))
    random.shuffle(image_files)
    num_train = int(len(image_files) * train_ratio)
    train_files = image_files[:num_train]
    val_files = image_files[num_train:]

    for files, subset in ((train_files, "train"), (val_files, "val")):
        _move_subset(files, images_dir, labels_dir, os.path.join(sample_dir, subset))

    # Remove the now-empty source folders; tolerate leftovers (e.g. a
    # stray label whose image file was missing would make rmdir fail).
    for leftover in (images_dir, labels_dir):
        try:
            os.removedirs(leftover)
        except OSError:
            pass
    print(f"数据集划分完成：训练集 {len(train_files)} 张，验证集 {len(val_files)} 张。")


def _move_subset(files, images_dir, labels_dir, subset_dir):
    """Move the given images (and matching .txt labels, if present) into
    <subset_dir>/images and <subset_dir>/labels, creating them as needed."""
    dst_images = os.path.join(subset_dir, "images")
    dst_labels = os.path.join(subset_dir, "labels")
    os.makedirs(dst_images, exist_ok=True)
    os.makedirs(dst_labels, exist_ok=True)
    for name in files:
        shutil.move(os.path.join(images_dir, name), os.path.join(dst_images, name))
        label_name = os.path.splitext(name)[0] + ".txt"  # image name -> label name
        label_path = os.path.join(labels_dir, label_name)
        if os.path.exists(label_path):
            shutil.move(label_path, os.path.join(dst_labels, label_name))
# --- Paths: reset the output workspace ------------------------------------
save_txtfolder = "../data/sample/labels"      # YOLO label files (*.txt)
os.makedirs(save_txtfolder, exist_ok=True)
save_ImageFloder = "../data/sample/images"    # training images copied here
os.makedirs(save_ImageFloder, exist_ok=True)
del_file(save_ImageFloder)  # empty the images folder
del_file(save_txtfolder)    # empty the label folder (files are opened in append mode)
del_file("../data/sample/train")  # clear previous train split
del_file("../data/sample/val")    # clear previous val split

save_MaskFolder = "../data/steel_data/mask_images"
os.makedirs(save_MaskFolder, exist_ok=True)
del_file(save_MaskFolder)  # empty the mask_images folder


print("已将原有数据删除")
print("数据集正在更新")

# --- Read the annotation CSV and keep only rows that carry a mask ---------
csv_path = "../data/steel_data/train.csv"
df = pd.read_csv(csv_path)
df_train = df[df['EncodedPixels'].notnull()].reset_index(drop=True)

# --- For each annotated row: build a mask, a YOLO label, and an image copy.
# itertuples avoids the per-row .iloc lookups of an index-based loop.
for row in df_train.itertuples(index=False):
    ImageId = row.ImageId    # image file name, e.g. "0002cc93b.jpg"
    ClassId = row.ClassId    # 1-based defect class id
    maskName = os.path.splitext(ImageId)[0] + ".jpg"  # mask file name

    # Decode the RLE into a mask image (column-major data for 1600x256 images
    # — TODO confirm against the dataset spec) and derive YOLO label lines.
    mask_pil = rle2mask(row.EncodedPixels, (1600, 256))
    mask_pil2xy(mask_pil, ImageId, ClassId, save_txtfolder)
    # Save the mask image.
    mask_pil.save(os.path.join(save_MaskFolder, maskName))

    # Copy the original training image next to its generated label.
    src_path = os.path.join("../data/steel_data/train_images", ImageId)
    dest_path = os.path.join("../data/sample/images", ImageId)
    shutil.copy(src_path, dest_path)
print("Process finished with exit code 0")

# Automatically split the prepared sample set into train and val.
split_dataset("../data")