import os
import requests
from sklearn.model_selection import train_test_split
import pickle
from utils import extract_zip
def download_file(url, dest_path, timeout=30):
    """Download *url* to *dest_path* unless the file already exists.

    Parameters
    ----------
    url : str
        Source URL to fetch.
    dest_path : str
        Local path the downloaded file is written to.
    timeout : float, optional
        Seconds to wait for the server before giving up (default 30).
        Without a timeout, ``requests.get`` can block indefinitely.

    Returns
    -------
    bool
        True if the file exists locally after the call (just downloaded or
        already present), False if the server answered with a non-200 status.
    """
    if os.path.exists(dest_path):
        # File is already on disk; skip the network round trip.
        print(f"File {dest_path} already exists. Skipping download.")
        return True
    # Use a context manager so the connection is released even on errors,
    # and stream the body so large archives are not held in memory at once.
    with requests.get(url, stream=True, timeout=timeout) as response:
        if response.status_code != 200:
            return False
        with open(dest_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        return True


def prepare_dataset(config):
    """Download, extract, and split the dataset described by *config*.

    Creates the dataset directory, downloads and extracts the zip archive,
    splits the image/annotation file paths into train/valid/test partitions,
    and caches the split in a pickle file so subsequent runs reuse the same
    partition.

    Parameters
    ----------
    config : dict
        Expects keys: DATASET_PATH, SPLIT_RATE_TEST, SPLIT_RATE_VALID,
        IMAGE_PATH, MASK_PATH, RANDOM_SEED, CUTOFF_RATE, SPLIT_FILE_IDX.
    """
    # Target directory for the dataset.
    target_directory = config['DATASET_PATH']

    # Create the target directory if it does not exist yet.
    if not os.path.exists(target_directory):
        os.makedirs(target_directory)
        print(f"Directory created at {target_directory}")
    else:
        print(f"Directory already exists at {target_directory}")

    # Change the current working directory to the target directory.
    # NOTE(review): this mutates process-wide state; any relative paths used
    # later (e.g. SPLIT_FILE_IDX) resolve against it.
    os.chdir(target_directory)
    print("Current working directory:", os.getcwd())

    # Download the dataset (a torrent file kept in the folder is an
    # alternative download route). The source archive
    # "Uno Cards.v2-raw.voc.zip" is saved as train.zip; the rest of its data
    # is not used here because there are too many images.
    dataset_url = 'https://public.roboflow.com/ds/h3iAkaXWJi?key=H6IvXrMvC6'
    images_tar_path = os.path.join(target_directory, 'train.zip')

    print("Downloading images dataset...")
    if download_file(dataset_url, images_tar_path):
        print("Images downloaded successfully")
        print("Extracting images...")
        extract_zip(images_tar_path, target_directory)
        print("Images extracted")
    else:
        print("Failed to download dataset")

    # Fraction of the data held out as the final test set.
    test_split_rate = config['SPLIT_RATE_TEST']
    # Fraction of the remaining (train) data held out as the validation set.
    valid_split_rate = config['SPLIT_RATE_VALID']

    image_dir = config['IMAGE_PATH']
    label_dir = config['MASK_PATH']
    random_seed = config['RANDOM_SEED']
    cutoff_rate = config['CUTOFF_RATE']

    # Sort both listings so images and annotations line up pairwise by
    # filename. Hidden files (e.g. macOS "._*" resource forks) are skipped in
    # BOTH listings; previously only the label listing filtered them, which
    # could misalign the image/annotation pairing.
    img_paths_sorted = sorted(
        os.path.join(image_dir, name)
        for name in os.listdir(image_dir)
        if not name.startswith('.') and name.endswith('.jpg')
    )
    # Keep only the leading fraction of the data to limit dataset size.
    img_paths_cutoff = img_paths_sorted[:int(len(img_paths_sorted) * cutoff_rate)]

    # Matching list of annotation (Pascal VOC XML) paths.
    label_paths_sorted = sorted(
        os.path.join(label_dir, name)
        for name in os.listdir(label_dir)
        if not name.startswith('.') and name.endswith('.xml')
    )
    label_paths_cutoff = label_paths_sorted[:int(len(label_paths_sorted) * cutoff_rate)]

    # Split into (train+valid) vs test ...
    train_valid_imgs, test_imgs, train_valid_masks, test_masks = train_test_split(
        img_paths_cutoff, label_paths_cutoff,
        test_size=test_split_rate, random_state=random_seed)
    # ... then split train+valid into train vs valid.
    train_imgs, valid_imgs, train_masks, valid_masks = train_test_split(
        train_valid_imgs, train_valid_masks,
        test_size=valid_split_rate, random_state=random_seed)

    # Cache the split to a pickle file so later runs reuse the same partition.
    split_idx_file = config["SPLIT_FILE_IDX"]
    if not os.path.exists(split_idx_file):
        with open(split_idx_file, 'wb') as f:
            pickle.dump({
                'train_imgs': train_imgs,
                'test_imgs': test_imgs,
                'valid_imgs': valid_imgs,
                'train_masks': train_masks,
                'test_masks': test_masks,
                'valid_masks': valid_masks}, f)
        print(f"File '{split_idx_file}' has been created and data saved.")
    else:
        # The file already exists; keep the previously saved split.
        print(f"File '{split_idx_file}' already exists. No need to create again.")



# Script entry point: load the XML configuration and run dataset preparation.
from config import get_config_from_xml

if __name__ == "__main__":
    config_path = "../configurations/config_fastrcnn_bce_epoch_50.xml"
    configuration = get_config_from_xml(config_path)
    prepare_dataset(configuration)
