calculation / parq.py
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm  # tqdm provides a progress bar for the chunk loop
def process_chunk(chunk, idx):
    # Process a single chunk here:
    # e.g., select the desired columns and convert them to Parquet
    selected_columns = ['instruction', 'output']
    new_df = chunk[selected_columns].copy()

    # Convert to an Arrow table and write it out as a Parquet file
    parquet_file_path = './output_chunk_{}.parquet'.format(idx)
    table = pa.Table.from_pandas(new_df)
    pq.write_table(table, parquet_file_path)
csv_file_path = './input.csv'
chunksize = 1_000_000  # e.g., split the CSV into chunks of 1,000,000 rows
df_chunks = pd.read_csv(csv_file_path, chunksize=chunksize)
for idx, chunk in tqdm(enumerate(df_chunks)):
    process_chunk(chunk, idx)
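

# The block below is an optional, illustrative sketch (not part of the original
# script): one way to read the generated Parquet chunks back into a single
# DataFrame to spot-check the output. It assumes the default
# './output_chunk_*.parquet' file names produced above.
import glob

parquet_paths = sorted(glob.glob('./output_chunk_*.parquet'))
combined = pd.concat(
    (pd.read_parquet(path, columns=['instruction', 'output']) for path in parquet_paths),
    ignore_index=True,
)
print(combined.shape)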