# parallel_read_parts.py
import glob
import os
import time
from concurrent.futures import ProcessPoolExecutor
from datetime import timedelta

import pandas as pd

def read_part(path):
    """Load one CSV shard into a DataFrame using the fixed behavior-log schema.

    The shard has no header row; columns are assigned explicitly.
    """
    column_names = ['user_id', 'item_id', 'category_id', 'behavior_type', 'timestamp']
    frame = pd.read_csv(
        path,
        header=None,
        names=column_names,
        encoding='utf-8',
    )
    return frame

def read_parts_parallel(parts_dir):
    """Read every ``part_*.csv`` shard under *parts_dir* in parallel and concat them.

    Parameters
    ----------
    parts_dir : str
        Directory containing the shards produced by the split step.

    Returns
    -------
    pandas.DataFrame
        All shards concatenated with a fresh RangeIndex.

    Raises
    ------
    FileNotFoundError
        If no ``part_*.csv`` files are found (pd.concat([]) would otherwise
        fail with an opaque ValueError).
    """
    # perf_counter is the monotonic clock meant for elapsed-time measurement.
    start = time.perf_counter()
    # os.path.join is portable; the original hard-coded a Windows backslash,
    # which silently finds zero files on POSIX systems.
    pattern = os.path.join(parts_dir, 'part_*.csv')
    parts = sorted(glob.glob(pattern))
    print("找到分片数:", len(parts))
    if not parts:
        raise FileNotFoundError(f"no part_*.csv files found in {parts_dir!r}")
    # One worker process per shard read; read_part is picklable (top-level def).
    with ProcessPoolExecutor() as ex:
        dfs = list(ex.map(read_part, parts))
    df = pd.concat(dfs, ignore_index=True)
    print("总行数:", df.shape[0])
    print("耗时:", timedelta(seconds=time.perf_counter() - start))
    return df

def main():
    """Script entry point: parallel-read the pre-split shards and preview them."""
    # Run split_file first to produce the shards, then read them in parallel here.
    parts_dir = r'D:\data\公开数据集\淘宝用户行为数据\split_parts'
    frame = read_parts_parallel(parts_dir)
    print(frame.head())


if __name__ == '__main__':
    main()
