import yaml
import os
import pickle
import numpy as np
import sys
import shutil
import csv
from my_io import mkdir, readcsv

def check_dataset_size(dataset_path):
    '''
    Verify that every per-frame data folder in the dataset contains exactly
    the number of files recorded as Frames_Size in README.csv.

    Checks lidar_bin, radar_bin, and every camera subfolder; raises
    AssertionError on the first mismatch. Returns the frame count.
    '''
    readme = os.path.join(dataset_path, "README.csv")
    assert os.path.exists(readme)
    meta = readcsv(readme)
    assert 'Frames_Size' in meta
    frames = int(meta['Frames_Size'])

    lidar_count = len(os.listdir(os.path.join(dataset_path, "lidar_bin")))
    assert frames == lidar_count, "Frames_Size<{}> != lidar_bin_size<{}>".format(frames, lidar_count)

    radar_count = len(os.listdir(os.path.join(dataset_path, "radar_bin")))
    assert frames == radar_count, "Frames_Size<{}> != radar_bin_size<{}>".format(frames, radar_count)

    # NOTE: the label_txt count check is intentionally disabled.
    camera_root = os.path.join(dataset_path, "camera")
    for camera_id in os.listdir(camera_root):
        cam_count = len(os.listdir(os.path.join(camera_root, camera_id)))
        assert frames == cam_count, "Frames_Size<{}> != {}_size<{}>".format(frames, camera_id, cam_count)

    return frames

def check_sample_size(dataset_path):
    '''
    Verify that samples/label_txt holds exactly Samples_Size files as
    recorded in README.csv; raises AssertionError otherwise.
    Returns the sample count.
    '''
    readme = os.path.join(dataset_path, "README.csv")
    assert os.path.exists(readme)
    meta = readcsv(readme)
    assert 'Samples_Size' in meta
    samples = int(meta['Samples_Size'])

    # NOTE: the samples/lidar_bin count check is intentionally disabled.
    label_count = len(os.listdir(os.path.join(dataset_path, "samples/label_txt")))
    assert samples == label_count, "Samples_Size<{}> != label_txt_size<{}>".format(samples, label_count)

    return samples

def check_dataset_is_splited(dataset_path):
    '''
    Return True when the dataset's samples have already been split:
    samples/split_sets must exist, contain train.txt / test.txt / val.txt,
    and the total number of lines across the three files must equal the
    Samples_Size recorded in README.csv (via check_sample_size).

    Propagates AssertionError from check_sample_size when README.csv is
    missing or inconsistent.
    '''
    # Fix: the original bound this to a local named Frames_Size even though
    # check_sample_size returns the *sample* count — renamed for clarity.
    samples_size = check_sample_size(dataset_path)

    split_path = os.path.join(dataset_path, "samples/split_sets")
    if not os.path.exists(split_path):
        return False

    split_size = 0
    for data_type in ("train", "test", "val"):
        data_type_txt = os.path.join(split_path, data_type + ".txt")
        if not os.path.exists(data_type_txt):
            return False
        # Fix: the original called f.close() after the with block; the
        # context manager already closes the file, so that call was dead.
        with open(data_type_txt, "r") as f:
            split_size += len(f.readlines())

    return split_size == samples_size



