import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm


def process_chunk(chunk, idx):
    # Keep only the columns we need from this chunk.
    selected_columns = ['instruction', 'output']
    new_df = chunk[selected_columns].copy()

    # Write this chunk out as its own Parquet file.
    parquet_file_path = './output_chunk_{}.parquet'.format(idx)
    table = pa.Table.from_pandas(new_df)
    pq.write_table(table, parquet_file_path)


csv_file_path = './input.csv'
chunksize = 100000000  # rows per chunk read from the CSV
df_chunks = pd.read_csv(csv_file_path, chunksize=chunksize)

# Stream the CSV chunk by chunk so the whole file never has to fit in memory.
for idx, chunk in tqdm(enumerate(df_chunks)):
    process_chunk(chunk, idx)
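
# For reference: a minimal sketch of reading the per-chunk Parquet files back
# into a single DataFrame. This is an illustrative addition, not part of the
# original script; it assumes the output_chunk_*.parquet files are in the
# current directory and that the combined result fits in memory.
#
# import glob
# import pandas as pd
#
# chunk_files = sorted(glob.glob('./output_chunk_*.parquet'))
# combined = pd.concat(
#     (pd.read_parquet(f) for f in chunk_files),
#     ignore_index=True,
# )
# print(combined.shape)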