import os
from pprint import pprint
from my_io import readcsv

if __name__ == '__main__':
    # Root folder that holds one sub-directory per recorded dataset.
    datasets_root = '/media/adt/T7/ZWH/docker/files/data/motion/datasets'
    # Validate with an explicit raise rather than `assert`, which is
    # silently stripped when Python runs with -O.
    if not os.path.exists(datasets_root):
        raise FileNotFoundError(f'{datasets_root} does not exist!')

    # Only entries whose name starts with '__' are treated as datasets,
    # listed in sorted (deterministic) order.
    dataset_paths = [
        name for name in sorted(os.listdir(datasets_root))
        if name.startswith('__')
    ]

    # Column-oriented table of per-dataset info; the trailing spaces in the
    # keys are intentional (they double as fixed-width column labels).
    datasets_info = {
        'bag_name':[],
        'samples ':[],
        'split   ':[],
    }

    # Total sample count assigned to each split across all datasets.
    data_splits = {
        'train':0,
        'val':0,
        'test':0,
    }
    for dataset_path in dataset_paths:
        read_me = os.path.join(datasets_root, dataset_path, 'README.csv')
        read_me_data = readcsv(read_me)

        # The README may label the split column in either order; detect which
        # variant is present so the ratio parts map onto the right split
        # names. (The two original branches were duplicates differing only in
        # the key string and the name order.)
        for split_key, split_names in (
            ('Split(train:val:test)', ('train', 'val', 'test')),
            ('Split(train:test:val)', ('train', 'test', 'val')),
        ):
            if split_key in read_me_data:
                break
        else:
            raise ValueError('Split(train:val:test) not recognized!')

        datasets_info['bag_name'].append(read_me_data['Bag_Name'])
        datasets_info['samples '].append(read_me_data['Samples_Size'])

        # Ratio string looks like '(a:b:c)': strip the surrounding
        # parentheses and parse the integer parts.
        ratio = read_me_data[split_key]
        parts = [int(part) for part in ratio[1:-1].split(':')]
        # Assign the whole bag to the split owning the largest share
        # (ties go to the earliest-listed split, as before). Avoids the
        # original's shadowing of the builtin `id`.
        largest = max(parts)
        split = split_names[parts.index(largest)]
        datasets_info['split   '].append(split)
        data_splits[split] += largest


    def print_datainfo(datasets_info):
        """Print datasets_info as an aligned table with a leading row number."""
        header = ['No', 'bag_name', 'samples', 'split']

        n_rows = len(datasets_info['bag_name'])
        # Serial column is just wide enough for the largest row number.
        widths = [len(str(n_rows))]

        # Each data column is as wide as its header or its widest cell,
        # whichever is larger (empty columns fall back to the header width).
        for col_idx, key in enumerate(datasets_info):
            title = header[col_idx + 1]
            if datasets_info[key]:
                widest_cell = max(len(str(cell)) for cell in datasets_info[key])
            else:
                widest_cell = 0
            widths.append(max(len(title), widest_cell))

        # Header row, then one numbered row per dataset, all left-aligned
        # and padded to the per-column widths computed above.
        print("  ".join(f"{h:<{w}}" for h, w in zip(header, widths)))

        for row_idx in range(n_rows):
            cells = [str(row_idx + 1)]  # 1-based serial number
            cells.extend(str(datasets_info[key][row_idx]) for key in datasets_info)
            print("  ".join(f"{c:<{w}}" for c, w in zip(cells, widths)))

    # Render the per-dataset table, then the aggregated per-split sample
    # totals. (Dead `pass` and commented-out code removed.)
    print_datainfo(datasets_info)
    pprint(data_splits, indent=4)
