OzoneAsai committed on
Commit
ef90e54
1 Parent(s): 42366bf

Upload 3 files

Files changed (3)
  1. ap.py +41 -0
  2. paShow.py +13 -0
  3. parq.py +23 -0
ap.py ADDED
@@ -0,0 +1,41 @@
+ import csv
+ import time
+ from tqdm import tqdm
+
+ dataset = []
+
+ min_value = -10000
+ max_value = 10000
+
+ # Calculate the total number of combinations
+ total_combinations = (max_value - min_value + 1) ** 2
+
+ # Save the dataset to a CSV file
+ output_file = "output_dataset.csv"
+ with open(output_file, mode="w", newline="", encoding="utf-8") as file:
+     writer = csv.writer(file)
+     writer.writerow(["instruction", "output"])  # Write header
+
+     with tqdm(total=total_combinations, desc="Generating Dataset") as pbar:
+         start_time = time.time()
+         for a in range(min_value, max_value + 1):
+             for b in range(min_value, max_value + 1):
+                 # Generate the instruction
+                 if b < 0:
+                     instruction = f"{a}-({abs(b)})"
+                 else:
+                     instruction = f"{a}+{b}"
+
+                 # Calculate the output
+                 output = a + b
+
+                 # Write the row to the CSV file in real-time
+                 writer.writerow([instruction, str(output)])
+
+                 pbar.update(1)
+
+         end_time = time.time()
+         elapsed_time = end_time - start_time
+         print(f"Total time taken: {elapsed_time:.2f} seconds")
+
+ print(f"Dataset saved to {output_file}.")
paShow.py ADDED
@@ -0,0 +1,13 @@
+ import pandas as pd
+ import pyarrow.parquet as pq
+
+ parquet_file_path = './output_chunk_0.parquet'  # Path of the Parquet file to read
+
+ # Read the Parquet file
+ table = pq.read_table(parquet_file_path)
+
+ # Convert the PyArrow Table to a pandas DataFrame
+ df = table.to_pandas()
+
+ # Display the loaded data (e.g., the first 10 rows)
+ print(df.head(10))
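For reference, the same result can be had in a single call with pandas, which delegates to pyarrow as its Parquet engine when pyarrow is installed; a minimal equivalent sketch:

import pandas as pd

# Equivalent to pq.read_table(path).to_pandas()
df = pd.read_parquet('./output_chunk_0.parquet')
print(df.head(10))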
parq.py ADDED
@@ -0,0 +1,23 @@
+ import pandas as pd
+ import pyarrow as pa
+ import pyarrow.parquet as pq
+ from tqdm import tqdm  # Import tqdm for the progress bar
+
+ def process_chunk(chunk, idx):
+     # Process each chunk here:
+     # select the desired columns and convert them to Parquet
+
+     selected_columns = ['instruction', 'output']
+     new_df = chunk[selected_columns].copy()
+
+     # Convert to a Parquet file
+     parquet_file_path = './output_chunk_{}.parquet'.format(idx)
+     table = pa.Table.from_pandas(new_df)
+     pq.write_table(table, parquet_file_path)
+
+ csv_file_path = './input.csv'
+ chunksize = 100000000  # Number of rows per chunk (here 100,000,000)
+ df_chunks = pd.read_csv(csv_file_path, chunksize=chunksize)
+
+ for idx, chunk in tqdm(enumerate(df_chunks)):
+     process_chunk(chunk, idx)
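To confirm the split worked, a small sketch (not part of the commit) that lists the output_chunk_*.parquet files written above and reads each row count from the Parquet footer metadata, without loading the column data itself:

import glob
import pyarrow.parquet as pq

for path in sorted(glob.glob('./output_chunk_*.parquet')):
    # metadata.num_rows comes from the file footer, so no column data is read
    print(path, pq.ParquetFile(path).metadata.num_rows)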