Tasks: Translation
Modalities: Text
Formats: json
Languages: English
Size: 10K - 100K
Tags: code
License: (not specified)

Dataset generation script:
import base64
import json
import os
import random

from random_data import generate_random_byte_array
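# Note: `random_data` is a local helper module not included in this card.
# A minimal stand-in with the same call signature (an assumption, not the
# real module) would be:
#
#     def generate_random_byte_array(length, seed):
#         rng = random.Random(seed)
#         return bytes(rng.getrandbits(8) for _ in range(length))
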
def base64_encode_byte_array(byte_array):
    # Encode raw bytes as an ASCII-safe Base64 string.
    return base64.b64encode(byte_array).decode('utf-8')
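# Quick sanity check: the two bytes 0x00 0xFF encode to 'AP8='.
assert base64_encode_byte_array(b'\x00\xff') == 'AP8='
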
def generate_dataset_item(seed):
    # Derive every random choice from a seed offset so items are reproducible.
    length = random.Random(seed + 1000).randint(0, 127)
    byte_array = generate_random_byte_array(length=length, seed=seed + 1001)

    input_formats = [
        'hex',
        'json',
    ]
    input_format = random.Random(seed + 1002).choice(input_formats)
    names_hex = [
        'Hexadecimal',
        'hexadecimal',
        'hex',
        'Hex',
        'HEX',
    ]
    names_json = [
        'Json',
        'json',
        'JSON',
    ]
    name_input = None
    if input_format == 'hex':
        name_input = random.Random(seed + 1003).choice(names_hex)
    elif input_format == 'json':
        name_input = random.Random(seed + 1004).choice(names_json)
    name_outputs = [
        'base64',
        'Base64',
        'BASE64',
    ]
    name_output = random.Random(seed + 1005).choice(name_outputs)
    # Phrase the task several ways so the instruction text varies.
    instructions = [
        f'Encode {name_input} to {name_output}',
        f'encode {name_input} to {name_output}',
        f'convert {name_input} to {name_output}',
        f'Convert {name_input} to {name_output}',
        f'Transform {name_input} to {name_output}',
        f'transform {name_input} to {name_output}',
        f'Change {name_input} to {name_output}',
        f'change {name_input} to {name_output}',
        f'{name_input} to {name_output}',
        f'{name_output} from {name_input}',
    ]
    instruction = random.Random(seed + 1006).choice(instructions)
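    # The chosen instruction might read, e.g., 'Convert hex to Base64'.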
    output = base64_encode_byte_array(byte_array)

    # Render the same bytes in the chosen input representation.
    input_value = None
    if input_format == 'hex':
        input_value = byte_array.hex()
    elif input_format == 'json':
        input_value = json.dumps(list(byte_array), separators=(',', ':'))
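    # e.g. the bytes b'\x01\xff' render as '01ff' (hex) or '[1,255]' (JSON).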
    item = {
        'instruction': instruction,
        'input': input_value,
        'output': output,
    }
    return item
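# Each item is a pure function of its seed (assuming random_data's generator
# is itself seed-deterministic), so the dataset is reproducible:
assert generate_dataset_item(0) == generate_dataset_item(0)
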
def generate_dataset(max_num_samples=1000, max_byte_size=1024*1024, seed_start=500000000):
    # Accumulate items until either the sample cap or the serialized-size
    # budget (bytes of JSON) is reached.
    dataset = []
    dataset_byte_size = 0
    for i in range(max_num_samples):
        item = generate_dataset_item(seed_start + i)
        item_bytes = len(json.dumps(item))
        if dataset_byte_size + item_bytes > max_byte_size:
            break
        dataset_byte_size += item_bytes
        dataset.append(item)
    return dataset
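# A tight byte budget truncates the dataset early; for example:
#
#     small = generate_dataset(max_num_samples=100, max_byte_size=2048)
#     assert sum(len(json.dumps(it)) for it in small) <= 2048
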
dataset = generate_dataset(
    max_num_samples=50000,
    max_byte_size=1024*1024*20,
)

# Save the dataset as JSON Lines: one JSON object per line.
filename = 'data.jsonl'
with open(filename, 'w') as f:
    for item in dataset:
        f.write(json.dumps(item) + '\n')

# Summary
file_size = os.path.getsize(filename)
print(f"Generated {len(dataset)} samples, saved to {filename}, file size: {file_size} bytes.")