sa8 committed
Commit 21be7bb · 1 Parent(s): df2d2e9

Upload split_tokens.py

Files changed (1)
  1. split_tokens.py +102 -0
split_tokens.py ADDED
@@ -0,0 +1,102 @@
+ import json
+ import re
+ 
+ ## Split the code into multiple chunks so that each one is not too long.
+ 
+ def count_tokens(code):
+     # Approximate the token count by the character length of the code.
+     # (A word-based count, e.g. re.findall(r'\b\w+\b', code), was considered
+     # but is left disabled; the rest of the script works in characters.)
+     return len(code)
+ 
+ def split_code(code, max_tokens=2048):
+     # Split the code into smaller chunks based on classes, functions,
+     # and other logical units.
+     class_pattern = re.compile(r'(?:class|struct|interface)\s+\w+\s*[\(\{]')
+     function_pattern = re.compile(r'(?:def|function|func|fn)\s+\w+\s*\([^)]*\)\s*\{')
+     import_pattern = re.compile(r'(?:import|include|require)\s+')
+     comment_pattern = re.compile(r'(?://|#).*')
+ 
+     # Remove comments from the code.
+     code = comment_pattern.sub('', code)
+ 
+     # Find the starting points of classes, functions, and other logical units.
+     class_starts = [match.start() for match in class_pattern.finditer(code)]
+     function_starts = [match.start() for match in function_pattern.finditer(code)]
+     import_starts = [match.start() for match in import_pattern.finditer(code)]
+     logical_units = sorted(class_starts + function_starts + import_starts + [len(code)])
+ 
+     chunks = []
+     start_index = 0
+     for end_index in logical_units:
+         chunk = code[start_index:end_index].strip()
+         if chunk:
+             token_count = count_tokens(chunk)
+             if token_count <= max_tokens:
+                 chunks.append(chunk)
+             else:
+                 # If the chunk is too long, split it into smaller sub-chunks.
+                 sub_chunks = split_chunk(chunk, max_tokens)
+                 chunks.extend(sub_chunks)
+         start_index = end_index
+ 
+     return chunks
+ 
+ def split_chunk(chunk, max_tokens):
+     # Split a chunk into fixed-size sub-chunks based on the maximum token limit.
+     sub_chunks = []
+     start_index = 0
+     while start_index < len(chunk):
+         end_index = start_index + max_tokens
+         sub_chunk = chunk[start_index:end_index].strip()
+         sub_chunks.append(sub_chunk)
+         start_index = end_index
+     return sub_chunks
+ 
+ def find_split_index(code, max_tokens):
+     # Find the index at which to split a code chunk, counting
+     # whitespace-separated tokens. (Currently unused: split_chunk
+     # slices by character length instead.)
+     token_count = 0
+     for i, char in enumerate(code):
+         if char.isspace():
+             token_count += 1
+         if token_count > max_tokens:
+             return i
+     return len(code)
+ 
+ def process_json_file(input_file, output_file):
+     # Read the input JSON file.
+     with open(input_file, 'r') as file:
+         data = json.load(file)
+ 
+     # Process each entry in the JSON data.
+     output_data = []
+     for entry in data:
+         code = entry['content']
+         token_count = count_tokens(code)
+         if code != "":
+             if token_count > 2048:
+                 # Split the code into smaller chunks.
+                 chunks = split_code(code)
+ 
+                 # Create separate entries for each chunk.
+                 for chunk in chunks:
+                     output_data.append(json.dumps({"text": chunk}))
+                     # Sanity check: report the first chunk that is still
+                     # over the limit and stop processing this entry.
+                     if len(chunk) > 2048:
+                         print("Chunk len: ", len(chunk))
+                         print(chunk)
+                         break
+             else:
+                 # Create a single entry for the code.
+                 output_data.append(json.dumps({"text": code}))
+ 
+     # Save the output as JSON Lines: one object per line, no square brackets.
+     with open(output_file, 'w') as file:
+         file.write('\n'.join(output_data))
+ 
+     print(f"Processing completed. Results saved to '{output_file}'.")
+ 
+ # Specify the input and output file paths.
+ input_file = '../zkml-dataset/dataset.json'
+ output_file = 'tokenized_code_data.json'
+ 
+ # Process the JSON file.
+ process_json_file(input_file, output_file)