Upload code-data.py
Browse files- code-data.py +56 -0
code-data.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datasets import load_dataset
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import pyarrow as pa
|
| 4 |
+
import pyarrow.parquet as pq
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
# Load the CodeSearchNet dataset; swap "python" for "java", "go", etc.
# to pull a different language split.
dataset = load_dataset("code_search_net", "python")

# Inspect the field layout by dumping the first training sample.
print(dataset["train"][0])
|
| 12 |
+
|
| 13 |
+
# Tokenise code on whitespace and cut it into fixed-length chunks.
def split_code_into_chunks(code, chunk_length):
    """Split *code* into whitespace-token chunks of at most *chunk_length* tokens.

    Parameters
    ----------
    code : str
        Source code text; tokenised naively with ``str.split()`` (splits on
        any run of whitespace, so original formatting is not preserved).
    chunk_length : int
        Maximum number of tokens per chunk; must be a positive integer
        (``range`` raises ``ValueError`` on a zero step).

    Returns
    -------
    list[str]
        Chunks re-joined with single spaces. Empty or whitespace-only
        input yields an empty list; the final chunk may be shorter than
        *chunk_length*.
    """
    words = code.split()
    # Slice the token list in fixed-size strides and re-join each window.
    return [' '.join(words[i:i + chunk_length])
            for i in range(0, len(words), chunk_length)]
|
| 24 |
+
|
| 25 |
+
# Chunk every code sample in a dataset split.
def split_code_samples(dataset, chunk_length=32):
    """Flatten *dataset* into a single list of chunk_length-token code chunks.

    Each sample is expected to carry its source under the
    'func_code_string' key (the CodeSearchNet schema).
    """
    all_chunks = []
    for sample in dataset:
        source = sample['func_code_string']
        all_chunks.extend(split_code_into_chunks(source, chunk_length))
    return all_chunks
|
| 36 |
+
|
| 37 |
+
# Build 32-, 64-, 128- and 256-token chunk lists from the training split.
train_split = dataset['train']
code_chunks_32 = split_code_samples(train_split, chunk_length=32)
code_chunks_64 = split_code_samples(train_split, chunk_length=64)
code_chunks_128 = split_code_samples(train_split, chunk_length=128)
code_chunks_256 = split_code_samples(train_split, chunk_length=256)
|
| 42 |
+
|
| 43 |
+
# Persist a list of code chunks as a Parquet file.
def save_to_parquet(data, filename):
    """Write *data* to *filename* as a snappy-compressed Parquet file.

    Parameters
    ----------
    data : list[str]
        Code-chunk strings; stored in a single column named "code".
    filename : str
        Destination path for the Parquet file.
    """
    df = pd.DataFrame(data, columns=["code"])
    # Uses pyarrow (imported at module top) as the Parquet engine.
    df.to_parquet(filename, engine='pyarrow', compression='snappy')
    # Bug fix: the original f-string had no placeholder and printed the
    # literal "(unknown)" instead of the actual output path.
    print(f"Saved {filename} as Parquet file.")
|
| 51 |
+
|
| 52 |
+
# Persist each chunk-length variant as its own Parquet file.
for _chunks, _out_path in (
    (code_chunks_32, "code_chunks_32.parquet"),
    (code_chunks_64, "code_chunks_64.parquet"),
    (code_chunks_128, "code_chunks_128.parquet"),
    (code_chunks_256, "code_chunks_256.parquet"),
):
    save_to_parquet(_chunks, _out_path)
|