Refactor parse_files
Browse files
app.py
CHANGED
@@ -9,7 +9,8 @@
 model = T5ForConditionalGeneration.from_pretrained("mamiksik/CommitPredictorT5PL", revision="fb08d01")
 
 
-def parse_files(accumulator, patch):
+def parse_files(patch):
+    accumulator = []
     lines = patch.splitlines()
 
     filename_before = None
@@ -43,14 +44,11 @@ def parse_files(accumulator, patch):
 
         accumulator.append(line)
 
-    return accumulator
+    return '\n'.join(accumulator)
 
 
 def predict(patch, max_length, min_length, num_beams, prediction_count):
-    accumulator = []
-    parse_files(accumulator, patch)
-    input_text = '\n'.join(accumulator)
-
+    input_text = parse_files(patch)
     with torch.no_grad():
         token_count = tokenizer(input_text, return_tensors="pt").input_ids.shape[1]