Merge branch 'zyf'
The zyf branch has some mtp, so merge it into the main branch to manage them.
This view is limited to 50 files because the commit contains too many changes.
- Code-Code/Clone-detection-BigCloneBench/code/eval.sh +19 -0
- Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh +3 -0
- Code-Code/Clone-detection-BigCloneBench/code/evaluator.py +53 -0
- Code-Code/Clone-detection-BigCloneBench/code/model.py +62 -0
- Code-Code/Clone-detection-BigCloneBench/code/run.py +649 -0
- Code-Code/Clone-detection-BigCloneBench/code/train.log +0 -0
- Code-Code/Clone-detection-BigCloneBench/code/train.sh +18 -0
- Code-Code/Clone-detection-BigCloneBench/dataset.zip +3 -0
- Code-Code/Clone-detection-BigCloneBench/model/epoch_1/subject_model.pth +3 -0
- Code-Code/Clone-detection-BigCloneBench/model/epoch_2/subject_model.pth +3 -0
- Code-Code/Clone-detection-POJ-104/code/eval.sh +17 -0
- Code-Code/Clone-detection-POJ-104/code/evaluate.sh +6 -0
- Code-Code/Clone-detection-POJ-104/code/evaluator.py +64 -0
- Code-Code/Clone-detection-POJ-104/code/extract_answers.py +39 -0
- Code-Code/Clone-detection-POJ-104/code/model.py +48 -0
- Code-Code/Clone-detection-POJ-104/code/run.py +632 -0
- Code-Code/Clone-detection-POJ-104/code/test.sh +17 -0
- Code-Code/Clone-detection-POJ-104/code/train.sh +18 -0
- Code-Code/Clone-detection-POJ-104/dataset.zip +3 -0
- Code-Code/Clone-detection-POJ-104/model/epoch_0/subject_model.pth +3 -0
- Code-Code/Clone-detection-POJ-104/model/epoch_1/subject_model.pth +3 -0
- Code-Code/CodeCompletion-token/code/beam.py +118 -0
- Code-Code/CodeCompletion-token/code/dataset.py +261 -0
- Code-Code/CodeCompletion-token/code/eval.sh +20 -0
- Code-Code/CodeCompletion-token/code/evaluate.sh +3 -0
- Code-Code/CodeCompletion-token/code/evaluator.py +36 -0
- Code-Code/CodeCompletion-token/code/model.py +68 -0
- Code-Code/CodeCompletion-token/code/run_lm.py +728 -0
- Code-Code/CodeCompletion-token/code/train.sh +31 -0
- Code-Code/CodeCompletion-token/data.zip +3 -0
- Code-Code/CodeCompletion-token/model/javaCorpus/epoch_1/subject_model.pth +3 -0
- Code-Code/CodeCompletion-token/model/javaCorpus/epoch_2/subject_model.pth +3 -0
- Code-Code/CodeCompletion-token/model/javaCorpus/epoch_3/subject_model.pth +3 -0
- Code-Code/CodeCompletion-token/model/javaCorpus/epoch_4/subject_model.pth +3 -0
- Code-Code/CodeCompletion-token/model/javaCorpus/epoch_5/subject_model.pth +3 -0
- Code-Code/Defect-detection/code/eval.sh +18 -0
- Code-Code/Defect-detection/code/evaluate.sh +1 -0
- Code-Code/Defect-detection/code/evaluator.py +52 -0
- Code-Code/Defect-detection/code/model.py +45 -0
- Code-Code/Defect-detection/code/run.py +598 -0
- Code-Code/Defect-detection/code/train.sh +17 -0
- Code-Code/Defect-detection/dataset.zip +3 -0
- Code-Code/Defect-detection/model/epoch_1/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_2/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_3/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_4/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_5/subject_model.pth +3 -0
- Code-Code/code-refinement/code/bleu.py +134 -0
- Code-Code/code-refinement/code/eval.sh +17 -0
- Code-Code/code-refinement/code/evaluate.sh +3 -0
Code-Code/Clone-detection-BigCloneBench/code/eval.sh
ADDED
@@ -0,0 +1,19 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+    --output_dir=../model \
+    --model_type=roberta \
+    --config_name=microsoft/codebert-base \
+    --model_name_or_path=microsoft/codebert-base \
+    --tokenizer_name=roberta-base \
+    --do_eval \
+    --do_test \
+    --train_data_file=../dataset/train.txt \
+    --eval_data_file=../dataset/valid.txt \
+    --test_data_file=../dataset/valid.txt \
+    --epoch 2 \
+    --block_size 400 \
+    --train_batch_size 16 \
+    --eval_batch_size 32 \
+    --learning_rate 5e-5 \
+    --max_grad_norm 1.0 \
+    --evaluate_during_training \
+    --seed 123456
Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh
ADDED
@@ -0,0 +1,3 @@
+python evaluator.py \
+    -a ../dataset/valid.txt \
+    -p ../model/predictions.txt
Code-Code/Clone-detection-BigCloneBench/code/evaluator.py
ADDED
@@ -0,0 +1,53 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import logging
+import sys
+from sklearn.metrics import recall_score,precision_score,f1_score
+
+def read_answers(filename):
+    answers={}
+    with open(filename) as f:
+        for line in f:
+            line=line.strip()
+            idx1,idx2,label=line.split()
+            answers[(idx1,idx2)]=int(label)
+    return answers
+
+def read_predictions(filename):
+    predictions={}
+    with open(filename) as f:
+        for line in f:
+            line=line.strip()
+            idx1,idx2,label=line.split()
+            predictions[(idx1,idx2)]=int(label)
+    return predictions
+
+def calculate_scores(answers,predictions):
+    y_trues,y_preds=[],[]
+    for key in answers:
+        if key not in predictions:
+            logging.error("Missing prediction for ({},{}) pair.".format(key[0],key[1]))
+            sys.exit()
+        y_trues.append(answers[key])
+        y_preds.append(predictions[key])
+    scores={}
+    scores['Recall']=recall_score(y_trues, y_preds)
+    scores['Precision']=precision_score(y_trues, y_preds)
+    scores['F1']=f1_score(y_trues, y_preds)
+    return scores
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for BigCloneBench dataset.')
+    parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.")
+    parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
+
+
+    args = parser.parse_args()
+    answers=read_answers(args.answers)
+    predictions=read_predictions(args.predictions)
+    scores=calculate_scores(answers,predictions)
+    print(scores)
+
+if __name__ == '__main__':
+    main()
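As a sanity check on the metric computation above, here is a minimal self-contained sketch; the index pairs and labels are invented for illustration, and each line of the real answers/predictions files is just "<idx1> <idx2> <label>":

from sklearn.metrics import recall_score, precision_score, f1_score

# Toy stand-ins for what read_answers(...) / read_predictions(...) build
answers     = {('13', '8837'): 0, ('13', '1543'): 1, ('77', '20'): 1}
predictions = {('13', '8837'): 0, ('13', '1543'): 1, ('77', '20'): 0}

y_trues = [answers[key] for key in answers]
y_preds = [predictions[key] for key in answers]
print({'Recall': recall_score(y_trues, y_preds),
       'Precision': precision_score(y_trues, y_preds),
       'F1': f1_score(y_trues, y_preds)})
# -> Recall 0.5, Precision 1.0, F1 ~0.667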
Code-Code/Clone-detection-BigCloneBench/code/model.py
ADDED
@@ -0,0 +1,62 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import torch
+import torch.nn as nn
+import torch
+from torch.autograd import Variable
+import copy
+import torch.nn.functional as F
+from torch.nn import CrossEntropyLoss, MSELoss
+
+class RobertaClassificationHead(nn.Module):
+    """Head for sentence-level classification tasks."""
+
+    def __init__(self, config):
+        super().__init__()
+        self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+        self.out_proj = nn.Linear(config.hidden_size, 2)
+
+    def forward(self, features, **kwargs):
+        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
+        x = x.reshape(-1,x.size(-1)*2)  # re-pair the two snippet vectors per example
+        x = self.dropout(x)
+        x = self.dense(x)
+        x = torch.tanh(x)
+        x = self.dropout(x)
+        x = self.out_proj(x)
+        return x
+
+class Model(nn.Module):
+    def __init__(self, encoder,config,tokenizer,args):
+        super(Model, self).__init__()
+        self.encoder = encoder
+        self.config=config
+        self.tokenizer=tokenizer
+        self.classifier=RobertaClassificationHead(config)
+        self.args=args
+
+
+    def forward(self, input_ids=None,labels=None, return_vec=None):
+        input_ids=input_ids.view(-1,self.args.block_size)
+        outputs = self.encoder(input_ids=input_ids,attention_mask=input_ids.ne(1))
+
+        if return_vec:
+            return outputs.pooler_output
+
+        outputs = outputs[0]
+        logits=self.classifier(outputs)
+        prob=F.softmax(logits, dim=-1)
+
+        if labels is not None:
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(logits, labels)
+            return loss,prob
+        else:
+            return prob
+
+
+
+
+
+
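The pairing trick above is easy to misread, so here is a shape-only sketch with random tensors standing in for the encoder (the batch, block_size, and hidden_size values are made up for illustration):

import torch

batch, block_size, hidden = 2, 4, 8
pair_ids = torch.randint(0, 100, (batch, 2 * block_size))  # each example holds both snippets' ids
flat_ids = pair_ids.view(-1, block_size)                   # (2*batch, block_size): one snippet per row
features = torch.randn(flat_ids.size(0), block_size, hidden)  # stand-in for encoder output
cls_vecs = features[:, 0, :]                               # one <s> vector per snippet: (2*batch, hidden)
paired   = cls_vecs.reshape(-1, hidden * 2)                # (batch, 2*hidden), consumed by the head
print(pair_ids.shape, flat_ids.shape, cls_vecs.shape, paired.shape)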
Code-Code/Clone-detection-BigCloneBench/code/run.py
ADDED
@@ -0,0 +1,649 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
+GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
+using a masked language modeling (MLM) loss.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import pickle
+import random
+import re
+import shutil
+import json
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
+from torch.utils.data.distributed import DistributedSampler
+
+try:
+    from torch.utils.tensorboard import SummaryWriter
+except:
+    from tensorboardX import SummaryWriter
+
+from tqdm import tqdm, trange
+import multiprocessing
+from model import Model
+
+cpu_cont = 16
+from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+                          BertConfig, BertForMaskedLM, BertTokenizer,
+                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+                          RobertaConfig, RobertaModel, RobertaTokenizer,
+                          DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+
+logger = logging.getLogger(__name__)
+
+MODEL_CLASSES = {
+    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
+    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
+    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
+    'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
+    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+}
+
+def get_example(item):
+    url1,url2,label,tokenizer,args,cache,url_to_code=item
+    if url1 in cache:
+        code1=cache[url1].copy()
+    else:
+        try:
+            code=' '.join(url_to_code[url1].split())
+        except:
+            code=""
+        code1=tokenizer.tokenize(code)
+    if url2 in cache:
+        code2=cache[url2].copy()
+    else:
+        try:
+            code=' '.join(url_to_code[url2].split())
+        except:
+            code=""
+        code2=tokenizer.tokenize(code)
+
+    return convert_examples_to_features(code1,code2,label,url1,url2,tokenizer,args,cache)
+
+
+class InputFeatures(object):
+    """A single training/test features for a example."""
+    def __init__(self,
+                 input_tokens,
+                 input_ids,
+                 label,
+                 url1,
+                 url2
+
+    ):
+        self.input_tokens = input_tokens
+        self.input_ids = input_ids
+        self.label=label
+        self.url1=url1
+        self.url2=url2
+
+def convert_examples_to_features(code1_tokens,code2_tokens,label,url1,url2,tokenizer,args,cache):
+    #source
+    code1_tokens=code1_tokens[:args.block_size-2]
+    code1_tokens =[tokenizer.cls_token]+code1_tokens+[tokenizer.sep_token]
+    code2_tokens=code2_tokens[:args.block_size-2]
+    code2_tokens =[tokenizer.cls_token]+code2_tokens+[tokenizer.sep_token]
+
+    code1_ids=tokenizer.convert_tokens_to_ids(code1_tokens)
+    padding_length = args.block_size - len(code1_ids)
+    code1_ids+=[tokenizer.pad_token_id]*padding_length
+
+    code2_ids=tokenizer.convert_tokens_to_ids(code2_tokens)
+    padding_length = args.block_size - len(code2_ids)
+    code2_ids+=[tokenizer.pad_token_id]*padding_length
+
+    source_tokens=code1_tokens+code2_tokens
+    source_ids=code1_ids+code2_ids
+    return InputFeatures(source_tokens,source_ids,label,url1,url2)
+
+class TextDataset(Dataset):
+    def __init__(self, tokenizer, args, file_path='train', block_size=512,pool=None):
+        postfix=file_path.split('/')[-1].split('.txt')[0]
+        self.examples = []
+        index_filename=file_path
+        logger.info("Creating features from index file at %s ", index_filename)
+        url_to_code={}
+        with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:
+            for line in f:
+                line=line.strip()
+                js=json.loads(line)
+                url_to_code[js['idx']]=js['func']
+
+        data=[]
+        cache={}
+        f=open(index_filename)
+        with open(index_filename) as f:
+            for line in f:
+                line=line.strip()
+                url1,url2,label=line.split('\t')
+                if url1 not in url_to_code or url2 not in url_to_code:
+                    continue
+                if label=='0':
+                    label=0
+                else:
+                    label=1
+                data.append((url1,url2,label,tokenizer, args,cache,url_to_code))
+        if 'test' not in postfix:
+            data=random.sample(data,int(len(data)*0.1))
+
+        self.examples=pool.map(get_example,tqdm(data,total=len(data)))
+        if 'train' in postfix:
+            for idx, example in enumerate(self.examples[:3]):
+                logger.info("*** Example ***")
+                logger.info("idx: {}".format(idx))
+                logger.info("label: {}".format(example.label))
+                logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
+                logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
+
+
+
+    def __len__(self):
+        return len(self.examples)
+
+    def __getitem__(self, item):
+
+        return torch.tensor(self.examples[item].input_ids),torch.tensor(self.examples[item].label)
+
+
+def load_and_cache_examples(args, tokenizer, evaluate=False,test=False,pool=None):
+    dataset = TextDataset(tokenizer, args, file_path=args.test_data_file if test else (args.eval_data_file if evaluate else args.train_data_file),block_size=args.block_size,pool=pool)
+    return dataset
+
+def set_seed(seed=42):
+    random.seed(seed)
+    os.environ['PYTHONHASHSEED'] = str(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed(seed)
+    torch.backends.cudnn.deterministic = True
+
+def train(args, train_dataset, model, tokenizer,pool):
+    """ Train the model """
+
+    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
+
+    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
+    args.max_steps=args.epoch*len(train_dataloader)
+    args.save_steps=len(train_dataloader)
+    args.warmup_steps=len(train_dataloader)
+    args.logging_steps=len(train_dataloader)
+    args.num_train_epochs=args.epoch
+    model.to(args.device)
+    # Prepare optimizer and schedule (linear warmup and decay)
+    no_decay = ['bias', 'LayerNorm.weight']
+    optimizer_grouped_parameters = [
+        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+         'weight_decay': args.weight_decay},
+        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+    ]
+    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
+                                                num_training_steps=args.max_steps)
+    if args.fp16:
+        try:
+            from apex import amp
+        except ImportError:
+            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+    # multi-gpu training (should be after apex fp16 initialization)
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    # Distributed training (should be after apex fp16 initialization)
+    if args.local_rank != -1:
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
+                                                          output_device=args.local_rank,
+                                                          find_unused_parameters=True)
+
+    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+    scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
+    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
+    if os.path.exists(scheduler_last):
+        scheduler.load_state_dict(torch.load(scheduler_last))
+    if os.path.exists(optimizer_last):
+        optimizer.load_state_dict(torch.load(optimizer_last))
+    # Train!
+    logger.info("***** Running training *****")
+    logger.info("  Num examples = %d", len(train_dataset))
+    logger.info("  Num Epochs = %d", args.num_train_epochs)
+    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
+                args.train_batch_size * args.gradient_accumulation_steps * (
+                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+    logger.info("  Total optimization steps = %d", args.max_steps)
+
+    global_step = args.start_step
+    tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
+    best_mrr=0.0
+    best_f1=0
+    # model.resize_token_embeddings(len(tokenizer))
+    model.zero_grad()
+    set_seed(args.seed)  # Added here for reproducibility (even between python 2 and 3)
+
+    for idx in range(args.start_epoch, int(args.num_train_epochs)):
+        bar = tqdm(train_dataloader,total=len(train_dataloader))
+        tr_num=0
+        train_loss=0
+        for step, batch in enumerate(bar):
+            inputs = batch[0].to(args.device)
+            labels=batch[1].to(args.device)
+            model.train()
+            loss,logits = model(inputs,labels)
+
+
+            if args.n_gpu > 1:
+                loss = loss.mean()  # mean() to average on multi-gpu parallel training
+            if args.gradient_accumulation_steps > 1:
+                loss = loss / args.gradient_accumulation_steps
+
+            if args.fp16:
+                with amp.scale_loss(loss, optimizer) as scaled_loss:
+                    scaled_loss.backward()
+                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+            else:
+                loss.backward()
+                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+            tr_loss += loss.item()
+            tr_num+=1
+            train_loss+=loss.item()
+            if avg_loss==0:
+                avg_loss=tr_loss
+            avg_loss=round(train_loss/tr_num,5)
+            bar.set_description("epoch {} loss {}".format(idx,avg_loss))
+
+
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                optimizer.step()
+                optimizer.zero_grad()
+                scheduler.step()
+                global_step += 1
+                output_flag=True
+                avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
+                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
+                    logging_loss = tr_loss
+                    tr_nb=global_step
+
+                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
+
+                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
+                        results = evaluate(args, model, tokenizer,pool=pool,eval_when_training=True)
+                        # Save model checkpoint
+
+                    if results['eval_f1']>best_f1:
+                        best_f1=results['eval_f1']
+                        logger.info("  "+"*"*20)
+                        logger.info("  Best f1:%s",round(best_f1,4))
+                        logger.info("  "+"*"*20)
+
+                        checkpoint_prefix = 'checkpoint-best-f1'
+                        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+                        if not os.path.exists(output_dir):
+                            os.makedirs(output_dir)
+                        model_to_save = model.module if hasattr(model,'module') else model
+                        output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
+                        torch.save(model_to_save.state_dict(), output_dir)
+                        logger.info("Saving model checkpoint to %s", output_dir)
+
+        # record a checkpoint after every epoch
+        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+        model_to_save = model.module if hasattr(model, 'module') else model
+        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
+        logger.info("Saving model checkpoint to %s", ckpt_output_path)
+        torch.save(model_to_save.state_dict(), ckpt_output_path)
+
+        if args.max_steps > 0 and global_step > args.max_steps:
+            train_iterator.close()
+            break
+    return global_step, tr_loss / global_step
+
+
+def evaluate(args, model, tokenizer, prefix="",pool=None,eval_when_training=False):
+    # Loop to handle MNLI double evaluation (matched, mis-matched)
+    eval_output_dir = args.output_dir
+    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True,pool=pool)
+    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
+        os.makedirs(eval_output_dir)
+
+    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+    # Note that DistributedSampler samples randomly
+    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
+
+    # multi-gpu evaluate
+    if args.n_gpu > 1 and eval_when_training is False:
+        model = torch.nn.DataParallel(model)
+
+    # Eval!
+    logger.info("***** Running evaluation {} *****".format(prefix))
+    logger.info("  Num examples = %d", len(eval_dataset))
+    logger.info("  Batch size = %d", args.eval_batch_size)
+    eval_loss = 0.0
+    nb_eval_steps = 0
+    model.eval()
+    logits=[]
+    y_trues=[]
+    for batch in eval_dataloader:
+        inputs = batch[0].to(args.device)
+        labels=batch[1].to(args.device)
+        with torch.no_grad():
+            lm_loss,logit = model(inputs,labels)
+            eval_loss += lm_loss.mean().item()
+            logits.append(logit.cpu().numpy())
+            y_trues.append(labels.cpu().numpy())
+        nb_eval_steps += 1
+    logits=np.concatenate(logits,0)
+    y_trues=np.concatenate(y_trues,0)
+    best_threshold=0
+    best_f1=0
+    for i in range(1,100):
+        threshold=i/100
+        y_preds=logits[:,1]>threshold
+        from sklearn.metrics import recall_score
+        recall=recall_score(y_trues, y_preds)
+        from sklearn.metrics import precision_score
+        precision=precision_score(y_trues, y_preds)
+        from sklearn.metrics import f1_score
+        f1=f1_score(y_trues, y_preds)
+        if f1>best_f1:
+            best_f1=f1
+            best_threshold=threshold
+
+    y_preds=logits[:,1]>best_threshold
+    from sklearn.metrics import recall_score
+    recall=recall_score(y_trues, y_preds)
+    from sklearn.metrics import precision_score
+    precision=precision_score(y_trues, y_preds)
+    from sklearn.metrics import f1_score
+    f1=f1_score(y_trues, y_preds)
+    result = {
+        "eval_recall": float(recall),
+        "eval_precision": float(precision),
+        "eval_f1": float(f1),
+        "eval_threshold":best_threshold,
+
+    }
+
+    logger.info("***** Eval results {} *****".format(prefix))
+    for key in sorted(result.keys()):
+        logger.info("  %s = %s", key, str(round(result[key],4)))
+
+    return result
+
+def test(args, model, tokenizer, prefix="",pool=None,best_threshold=0):
+    # Loop to handle MNLI double evaluation (matched, mis-matched)
+    eval_dataset = load_and_cache_examples(args, tokenizer, test=True,pool=pool)
+
+    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+    # Note that DistributedSampler samples randomly
+    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
+
+    # multi-gpu evaluate
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    # Eval!
+    logger.info("***** Running Test {} *****".format(prefix))
+    logger.info("  Num examples = %d", len(eval_dataset))
+    logger.info("  Batch size = %d", args.eval_batch_size)
+    eval_loss = 0.0
+    nb_eval_steps = 0
+    model.eval()
+    logits=[]
+    y_trues=[]
+    for batch in eval_dataloader:
+        inputs = batch[0].to(args.device)
+        labels=batch[1].to(args.device)
+        with torch.no_grad():
+            lm_loss,logit = model(inputs,labels)
+            eval_loss += lm_loss.mean().item()
+            logits.append(logit.cpu().numpy())
+            y_trues.append(labels.cpu().numpy())
+        nb_eval_steps += 1
+    logits=np.concatenate(logits,0)
+    y_preds=logits[:,1]>best_threshold
+    with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
+        for example,pred in zip(eval_dataset.examples,y_preds):
+            if pred:
+                f.write(example.url1+'\t'+example.url2+'\t'+'1'+'\n')
+            else:
+                f.write(example.url1+'\t'+example.url2+'\t'+'0'+'\n')
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    ## Required parameters
+    parser.add_argument("--train_data_file", default=None, type=str, required=True,
+                        help="The input training data file (a text file).")
+    parser.add_argument("--output_dir", default=None, type=str, required=True,
+                        help="The output directory where the model predictions and checkpoints will be written.")
+
+    ## Other parameters
+    parser.add_argument("--eval_data_file", default=None, type=str,
+                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
+    parser.add_argument("--test_data_file", default=None, type=str,
+                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
+
+    parser.add_argument("--model_type", default="bert", type=str,
+                        help="The model architecture to be fine-tuned.")
+    parser.add_argument("--model_name_or_path", default=None, type=str,
+                        help="The model checkpoint for weights initialization.")
+
+    parser.add_argument("--mlm", action='store_true',
+                        help="Train with masked-language modeling loss instead of language modeling.")
+    parser.add_argument("--mlm_probability", type=float, default=0.15,
+                        help="Ratio of tokens to mask for masked language modeling loss")
+
+    parser.add_argument("--config_name", default="", type=str,
+                        help="Optional pretrained config name or path if not the same as model_name_or_path")
+    parser.add_argument("--tokenizer_name", default="", type=str,
+                        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
+    parser.add_argument("--cache_dir", default="", type=str,
+                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
+    parser.add_argument("--block_size", default=-1, type=int,
+                        help="Optional input sequence length after tokenization."
+                             "The training dataset will be truncated in block of this size for training."
+                             "Default to the model max input length for single sentence inputs (take into account special tokens).")
+    parser.add_argument("--do_train", action='store_true',
+                        help="Whether to run training.")
+    parser.add_argument("--do_eval", action='store_true',
+                        help="Whether to run eval on the dev set.")
+    parser.add_argument("--do_test", action='store_true',
+                        help="Whether to run eval on the dev set.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true',
+                        help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--train_batch_size", default=4, type=int,
+                        help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--eval_batch_size", default=4, type=int,
+                        help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of updates steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float,
+                        help="The initial learning rate for Adam.")
+    parser.add_argument("--weight_decay", default=0.0, type=float,
+                        help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+                        help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float,
+                        help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=1.0, type=float,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int,
+                        help="Linear warmup over warmup_steps.")
+
+    parser.add_argument('--logging_steps', type=int, default=50,
+                        help="Log every X updates steps.")
+    parser.add_argument('--save_steps', type=int, default=50,
+                        help="Save checkpoint every X updates steps.")
+    parser.add_argument('--save_total_limit', type=int, default=None,
+                        help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
+    parser.add_argument("--no_cuda", action='store_true',
+                        help="Avoid using CUDA when available")
+    parser.add_argument('--overwrite_output_dir', action='store_true',
+                        help="Overwrite the content of the output directory")
+    parser.add_argument('--overwrite_cache', action='store_true',
+                        help="Overwrite the cached training and evaluation sets")
+    parser.add_argument('--seed', type=int, default=42,
+                        help="random seed for initialization")
+    parser.add_argument('--epoch', type=int, default=42,
+                        help="number of training epochs")
+    parser.add_argument('--fp16', action='store_true',
+                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1,
+                        help="For distributed training: local_rank")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+
+    pool = multiprocessing.Pool(cpu_cont)
+    args = parser.parse_args()
+
+    # Setup distant debugging if needed
+    if args.server_ip and args.server_port:
+        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+        import ptvsd
+        print("Waiting for debugger attach")
+        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+        ptvsd.wait_for_attach()
+
+    # Setup CUDA, GPU & distributed training
+    if args.local_rank == -1 or args.no_cuda:
+        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+        args.n_gpu = torch.cuda.device_count()
+    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
+        torch.distributed.init_process_group(backend='nccl')
+        args.n_gpu = 1
+    args.device = device
+    args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
+    args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
+    # Setup logging
+    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                        datefmt='%m/%d/%Y %H:%M:%S',
+                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+    # Set seed
+    set_seed(args.seed)
+
+    # Load pretrained model and tokenizer
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training download model & vocab
+
+    args.start_epoch = 0
+    args.start_step = 0
+    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+    if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
+        args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
+        args.config_name = os.path.join(checkpoint_last, 'config.json')
+        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
+        with open(idx_file, encoding='utf-8') as idxf:
+            args.start_epoch = int(idxf.readlines()[0].strip()) + 1
+
+        step_file = os.path.join(checkpoint_last, 'step_file.txt')
+        if os.path.exists(step_file):
+            with open(step_file, encoding='utf-8') as stepf:
+                args.start_step = int(stepf.readlines()[0].strip())
+
+        logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
+
+    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
+                                          cache_dir=args.cache_dir if args.cache_dir else None)
+    config.num_labels=2
+    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
+                                                do_lower_case=args.do_lower_case,
+                                                cache_dir=args.cache_dir if args.cache_dir else None)
+    if args.block_size <= 0:
+        args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
+    args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
+    if args.model_name_or_path:
+        model = model_class.from_pretrained(args.model_name_or_path,
+                                            from_tf=bool('.ckpt' in args.model_name_or_path),
+                                            config=config,
+                                            cache_dir=args.cache_dir if args.cache_dir else None)
+    else:
+        model = model_class(config)
+
+    model=Model(model,config,tokenizer,args)
+    if args.local_rank == 0:
+        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training download model & vocab
+
+    logger.info("Training/evaluation parameters %s", args)
+
+    # Training
+    if args.do_train:
+        if args.local_rank not in [-1, 0]:
+            torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
+
+        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False,pool=pool)
+
+        if args.local_rank == 0:
+            torch.distributed.barrier()
+
+        global_step, tr_loss = train(args, train_dataset, model, tokenizer,pool)
+
+
+    # Evaluation
+    results = {}
+    if args.do_eval and args.local_rank in [-1, 0]:
+        checkpoint_prefix = 'epoch_2/subject_model.pth'
+        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+        model.load_state_dict(torch.load(output_dir))
+        model.to(args.device)
+        result=evaluate(args, model, tokenizer,pool=pool)
+
+    if args.do_test and args.local_rank in [-1, 0]:
+        checkpoint_prefix = 'epoch_2/subject_model.pth'
+        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+        model.load_state_dict(torch.load(output_dir))
+        model.to(args.device)
+        test(args, model, tokenizer,pool=pool,best_threshold=0.5)
+
+    return results
+
+
+if __name__ == "__main__":
+    main()
+
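One detail worth calling out in evaluate() above is that the decision threshold is itself tuned on the validation set; a standalone sketch with toy probabilities (the numbers are invented for illustration):

import numpy as np
from sklearn.metrics import f1_score

probs  = np.array([0.10, 0.40, 0.55, 0.80, 0.95])  # P(clone) for five pairs
y_true = np.array([0, 0, 1, 1, 1])

best_f1, best_threshold = 0.0, 0.0
for i in range(1, 100):
    threshold = i / 100
    f1 = f1_score(y_true, probs > threshold)
    if f1 > best_f1:
        best_f1, best_threshold = f1, threshold
print(best_threshold, best_f1)  # the cut that maximizes validation F1

Note that main() nonetheless calls test() with a fixed best_threshold=0.5 rather than the tuned value.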
Code-Code/Clone-detection-BigCloneBench/code/train.log
ADDED
The diff for this file is too large to render.
Code-Code/Clone-detection-BigCloneBench/code/train.sh
ADDED
@@ -0,0 +1,18 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+    --output_dir=../model \
+    --model_type=roberta \
+    --config_name=microsoft/codebert-base \
+    --model_name_or_path=microsoft/codebert-base \
+    --tokenizer_name=roberta-base \
+    --do_train \
+    --train_data_file=../dataset/train.txt \
+    --eval_data_file=../dataset/valid.txt \
+    --test_data_file=../dataset/test.txt \
+    --epoch 2 \
+    --block_size 400 \
+    --train_batch_size 16 \
+    --eval_batch_size 32 \
+    --learning_rate 5e-5 \
+    --max_grad_norm 1.0 \
+    --evaluate_during_training \
+    --seed 123456
Code-Code/Clone-detection-BigCloneBench/dataset.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:922ad328fff2df059476a791c55ce23f2444af7d4ec72da93bc33ed81d456572
+size 13203888
Code-Code/Clone-detection-BigCloneBench/model/epoch_1/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d65eea402b4555d0f66471a658926f7c78fb60dd1f43a7fda9ad7e1feeaab80
+size 503395254
Code-Code/Clone-detection-BigCloneBench/model/epoch_2/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d99799719a23da09dd75b6e509e09dd228f859a31479b1fd8c1462d67c0ed4dd
+size 503395254
Code-Code/Clone-detection-POJ-104/code/eval.sh
ADDED
@@ -0,0 +1,17 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+    --output_dir=../model \
+    --model_type=roberta \
+    --config_name=microsoft/codebert-base \
+    --model_name_or_path=microsoft/codebert-base \
+    --tokenizer_name=roberta-base \
+    --do_eval \
+    --train_data_file=../dataset/train.jsonl \
+    --eval_data_file=../dataset/valid.jsonl \
+    --epoch 2 \
+    --block_size 400 \
+    --train_batch_size 8 \
+    --eval_batch_size 16 \
+    --learning_rate 2e-5 \
+    --max_grad_norm 1.0 \
+    --evaluate_during_training \
+    --seed 123456
Code-Code/Clone-detection-POJ-104/code/evaluate.sh
ADDED
@@ -0,0 +1,6 @@
+python extract_answers.py \
+    -c ../dataset/valid.jsonl \
+    -o ../model/answers.jsonl
+python evaluator.py \
+    -a ../model/answers.jsonl \
+    -p ../model/predictions.jsonl
Code-Code/Clone-detection-POJ-104/code/evaluator.py
ADDED
@@ -0,0 +1,64 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import logging
+import sys,json
+import numpy as np
+from tqdm import tqdm
+
+def read_answers(filename):
+    answers={}
+    with open(filename) as f:
+        for line in f:
+            line=line.strip()
+            js=json.loads(line)
+            answers[js['index']]=js['answers']
+    return answers
+
+def read_predictions(filename):
+    predictions={}
+    with open(filename) as f:
+        for line in f:
+            line=line.strip()
+            js=json.loads(line)
+            predictions[js['index']]=js['answers']
+    return predictions
+
+def calculate_scores(answers,predictions):
+    scores=[]
+    for key in answers:
+        if key not in predictions:
+            logging.error("Missing prediction for index {}.".format(key))
+            sys.exit()
+
+        if len(answers[key])!=len(predictions[key]):
+            logging.error("Mismatch the number of answers for index {}.".format(key))
+            sys.exit()
+
+        answer = set(answers[key])
+
+        Avep = []
+        for k, p in enumerate(predictions[key]):
+            if p in answer:
+                Avep.append((len(Avep)+1)/(k+1))
+
+        scores.append(sum(Avep)/len(answer))
+
+    result={}
+    result['MAP@R']= round(np.mean(scores),4)
+    return result
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for POJ-104 dataset.')
+    parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.")
+    parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
+
+
+    args = parser.parse_args()
+    answers=read_answers(args.answers)
+    predictions=read_predictions(args.predictions)
+    scores=calculate_scores(answers,predictions)
+    print(scores)
+
+if __name__ == '__main__':
+    main()
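The MAP@R loop above is compact, so a worked toy example (IDs invented) makes the arithmetic concrete: with true answers {a, b} and ranked predictions [a, x, b], precision is recorded only at the ranks where a hit occurs:

answer = {'a', 'b'}
predictions = ['a', 'x', 'b']

Avep = []
for k, p in enumerate(predictions):
    if p in answer:
        Avep.append((len(Avep) + 1) / (k + 1))  # hits-so-far / rank
print(Avep, sum(Avep) / len(answer))  # [1.0, 0.666...] -> average precision ~0.833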
Code-Code/Clone-detection-POJ-104/code/extract_answers.py
ADDED
@@ -0,0 +1,39 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import json
+
+def extract_answers(filename):
+    cluster={}
+    with open(filename) as f:
+        for line in f:
+            line=line.strip()
+            js=json.loads(line)
+            if js['label'] not in cluster:
+                cluster[js['label']]=set()
+            cluster[js['label']].add(js['index'])
+    answers=[]
+    for key in cluster:
+        for idx1 in cluster[key]:
+            temp={}
+            temp['index']=idx1
+            temp['answers']=[]
+            for idx2 in cluster[key]:
+                if idx1!=idx2:
+                    temp['answers'].append(idx2)
+            answers.append(temp)
+    return answers
+
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description='Extract answers from code files.')
+    parser.add_argument('--codefile', '-c',help="filename of the code examples.")
+    parser.add_argument('--outfile', '-o',help="filename of output.")
+    args = parser.parse_args()
+    answers=extract_answers(args.codefile)
+    with open(args.outfile,'w') as f:
+        for line in answers:
+            f.write(json.dumps(line)+'\n')
+
+if __name__ == '__main__':
+    main()
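To make the expected jsonl shapes concrete, here is a toy run of the same clustering logic; the labels and indices are invented, and the real input lines typically also carry the source code itself, which this script ignores:

import json

rows = [{'label': '5', 'index': '0'},
        {'label': '5', 'index': '1'},
        {'label': '9', 'index': '2'}]

# Group example indices by problem label; same-label examples are mutual answers.
cluster = {}
for js in rows:
    cluster.setdefault(js['label'], set()).add(js['index'])

for key in cluster:
    for idx1 in cluster[key]:
        print(json.dumps({'index': idx1,
                          'answers': [i for i in cluster[key] if i != idx1]}))
# e.g. {"index": "0", "answers": ["1"]} ... {"index": "2", "answers": []}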
Code-Code/Clone-detection-POJ-104/code/model.py
ADDED
@@ -0,0 +1,48 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+import torch
+import torch.nn as nn
+import torch
+from torch.autograd import Variable
+import copy
+import torch.nn.functional as F
+from torch.nn import CrossEntropyLoss, MSELoss
+
+
+
+class Model(nn.Module):
+    def __init__(self, encoder,config,tokenizer,args):
+        super(Model, self).__init__()
+        self.encoder = encoder
+        self.config=config
+        self.tokenizer=tokenizer
+        self.args=args
+
+
+    def forward(self, input_ids=None,p_input_ids=None,n_input_ids=None,labels=None):
+        bs,_=input_ids.size()
+        input_ids=torch.cat((input_ids,p_input_ids,n_input_ids),0)
+
+        outputs=self.encoder(input_ids,attention_mask=input_ids.ne(1))
+        if len(outputs) > 1:
+            outputs = outputs[1]
+        else:
+            outputs = outputs[0][:, 0, :]
+        outputs=outputs.split(bs,0)
+
+        prob_1=(outputs[0]*outputs[1]).sum(-1)
+        prob_2=(outputs[0]*outputs[2]).sum(-1)
+        temp=torch.cat((outputs[0],outputs[1]),0)
+        temp_labels=torch.cat((labels,labels),0)
+        prob_3= torch.mm(outputs[0],temp.t())
+        mask=labels[:,None]==temp_labels[None,:]
+        prob_3=prob_3*(1-mask.float())-1e9*mask.float()
+
+        prob=torch.softmax(torch.cat((prob_1[:,None],prob_2[:,None],prob_3),-1),-1)
+        loss=torch.log(prob[:,0]+1e-10)
+        loss=-loss.mean()
+        return loss,outputs[0]
+
+
+
+
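The forward pass above is effectively a retrieval-style contrastive loss. A shape sketch with a stand-in encoder (all sizes and labels invented for illustration) shows what each piece scores:

import torch

bs, hidden = 4, 8
anchor, pos, neg = (torch.randn(bs, hidden) for _ in range(3))  # stand-ins for outputs[0..2]
labels = torch.tensor([0, 1, 2, 3])  # problem IDs of the anchors

prob_1 = (anchor * pos).sum(-1)                 # (bs,)  anchor . its positive
prob_2 = (anchor * neg).sum(-1)                 # (bs,)  anchor . its hard negative
temp = torch.cat((anchor, pos), 0)              # (2*bs, hidden) in-batch candidates
temp_labels = torch.cat((labels, labels), 0)
prob_3 = torch.mm(anchor, temp.t())             # (bs, 2*bs) scores vs. everything in batch
mask = labels[:, None] == temp_labels[None, :]  # hide candidates from the same problem
prob_3 = prob_3 * (1 - mask.float()) - 1e9 * mask.float()

prob = torch.softmax(torch.cat((prob_1[:, None], prob_2[:, None], prob_3), -1), -1)
loss = -torch.log(prob[:, 0] + 1e-10).mean()    # push the true positive to win the softmax
print(prob.shape, loss.item())                  # torch.Size([4, 10])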
Code-Code/Clone-detection-POJ-104/code/run.py
ADDED
@@ -0,0 +1,632 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
import json
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

from tqdm import tqdm, trange
import multiprocessing
from model import Model
cpu_cont = multiprocessing.cpu_count()
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
                          BertConfig, BertModel, BertTokenizer,
                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
                          RobertaConfig, RobertaModel, RobertaTokenizer,
                          DistilBertConfig, DistilBertModel, DistilBertTokenizer)

logger = logging.getLogger(__name__)

MODEL_CLASSES = {
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'bert': (BertConfig, BertModel, BertTokenizer),
    'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertModel, DistilBertTokenizer)
}


class InputFeatures(object):
    """A single training/test features for an example."""
    def __init__(self,
                 input_tokens,
                 input_ids,
                 index,
                 label,
    ):
        self.input_tokens = input_tokens
        self.input_ids = input_ids
        self.index = index
        self.label = label


def convert_examples_to_features(js, tokenizer, args):
    # source
    code = ' '.join(js['code'].split())
    code_tokens = tokenizer.tokenize(code)[:args.block_size-2]
    source_tokens = [tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
    source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
    padding_length = args.block_size - len(source_ids)
    source_ids += [tokenizer.pad_token_id]*padding_length
    return InputFeatures(source_tokens, source_ids, js['index'], int(js['label']))

class TextDataset(Dataset):
    def __init__(self, tokenizer, args, file_path=None):
        self.examples = []
        data = []
        with open(file_path) as f:
            for line in f:
                line = line.strip()
                js = json.loads(line)
                data.append(js)
        for js in data:
            self.examples.append(convert_examples_to_features(js, tokenizer, args))
        if 'train' in file_path:
            for idx, example in enumerate(self.examples[:3]):
                logger.info("*** Example ***")
                logger.info("idx: {}".format(idx))
                logger.info("label: {}".format(example.label))
                logger.info("input_tokens: {}".format([x.replace('\u0120', '_') for x in example.input_tokens]))
                logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
        self.label_examples = {}
        for e in self.examples:
            if e.label not in self.label_examples:
                self.label_examples[e.label] = []
            self.label_examples[e.label].append(e)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        label = self.examples[i].label
        index = self.examples[i].index
        labels = list(self.label_examples)
        labels.remove(label)
        while True:
            shuffle_example = random.sample(self.label_examples[label], 1)[0]
            if shuffle_example.index != index:
                p_example = shuffle_example
                break
        n_example = random.sample(self.label_examples[random.sample(labels, 1)[0]], 1)[0]

        return (torch.tensor(self.examples[i].input_ids), torch.tensor(p_example.input_ids),
                torch.tensor(n_example.input_ids), torch.tensor(label))


def set_seed(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def train(args, train_dataset, model, tokenizer):
    """ Train the model """

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                  batch_size=args.train_batch_size, num_workers=4, pin_memory=True)
    args.max_steps = args.epoch*len(train_dataloader)
    args.save_steps = len(train_dataloader)
    args.warmup_steps = len(train_dataloader)
    args.logging_steps = len(train_dataloader)
    args.num_train_epochs = args.epoch
    model.to(args.device)
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
                                                num_training_steps=args.max_steps)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
    if os.path.exists(scheduler_last):
        scheduler.load_state_dict(torch.load(scheduler_last))
    if os.path.exists(optimizer_last):
        optimizer.load_state_dict(torch.load(optimizer_last))
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", args.max_steps)

    global_step = args.start_step
    tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
    best_acc = 0.0
    # model.resize_token_embeddings(len(tokenizer))
    model.zero_grad()
    for idx in range(args.start_epoch, int(args.num_train_epochs)):
        bar = train_dataloader
        tr_num = 0
        train_loss = 0
        for step, batch in enumerate(bar):
            inputs = batch[0].to(args.device)
            p_inputs = batch[1].to(args.device)
            n_inputs = batch[2].to(args.device)
            labels = batch[3].to(args.device)
            model.train()
            loss, vec = model(inputs, p_inputs, n_inputs, labels)

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            tr_num += 1
            train_loss += loss.item()
            if avg_loss == 0:
                avg_loss = tr_loss
            avg_loss = round(train_loss/tr_num, 5)
            if (step+1) % 100 == 0:
                logger.info("epoch {} step {} loss {}".format(idx, step+1, avg_loss))
            # bar.set_description("epoch {} loss {}".format(idx, avg_loss))

            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                scheduler.step()
                global_step += 1
                output_flag = True
                avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logging_loss = tr_loss
                    tr_nb = global_step

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:

                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer, eval_when_training=True)
                        for key, value in results.items():
                            logger.info("  %s = %s", key, round(value, 4))
                    # Save model checkpoint
                    tr_num = 0
                    train_loss = 0

                    # NOTE: `results` is only defined when --evaluate_during_training
                    # is passed (train.sh always passes it).
                    if results['eval_map'] > best_acc:
                        best_acc = results['eval_map']
                        logger.info("  "+"*"*20)
                        logger.info("  Best map:%s", round(best_acc, 4))
                        logger.info("  "+"*"*20)

                        checkpoint_prefix = 'checkpoint-best-map'
                        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = model.module if hasattr(model, 'module') else model
                        output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
                        torch.save(model_to_save.state_dict(), output_dir)
                        logger.info("Saving model checkpoint to %s", output_dir)

        # Save a checkpoint at the end of every epoch
        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = model.module if hasattr(model, 'module') else model
        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
        logger.info("Saving model checkpoint to %s", ckpt_output_path)
        torch.save(model_to_save.state_dict(), ckpt_output_path)


eval_dataset = None
def evaluate(args, model, tokenizer, eval_when_training=False):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir
    global eval_dataset
    if eval_dataset is None:
        eval_dataset = TextDataset(tokenizer, args, args.eval_data_file)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)

    # multi-gpu evaluate
    if args.n_gpu > 1 and eval_when_training is False:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    vecs = []
    labels = []
    for batch in eval_dataloader:
        inputs = batch[0].to(args.device)
        p_inputs = batch[1].to(args.device)
        n_inputs = batch[2].to(args.device)
        label = batch[3].to(args.device)
        with torch.no_grad():
            lm_loss, vec = model(inputs, p_inputs, n_inputs, label)
            eval_loss += lm_loss.mean().item()
            vecs.append(vec.cpu().numpy())
            labels.append(label.cpu().numpy())
        nb_eval_steps += 1
    vecs = np.concatenate(vecs, 0)
    labels = np.concatenate(labels, 0)
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.tensor(eval_loss)

    scores = np.matmul(vecs, vecs.T)
    dic = {}
    for i in range(scores.shape[0]):
        scores[i, i] = -1000000
        if int(labels[i]) not in dic:
            dic[int(labels[i])] = -1
        dic[int(labels[i])] += 1
    sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:, ::-1]
    MAP = []
    for i in range(scores.shape[0]):
        cont = 0
        label = int(labels[i])
        Avep = []
        for j in range(dic[label]):
            index = sort_ids[i, j]
            if int(labels[index]) == label:
                Avep.append((len(Avep)+1)/(j+1))
        MAP.append(sum(Avep)/dic[label])

    result = {
        "eval_loss": float(perplexity),
        "eval_map": float(np.mean(MAP))
    }

    return result

def test(args, model, tokenizer):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_dataset = TextDataset(tokenizer, args, args.test_data_file)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running Test *****")
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    vecs = []
    labels = []
    for batch in eval_dataloader:
        inputs = batch[0].to(args.device)
        p_inputs = batch[1].to(args.device)
        n_inputs = batch[2].to(args.device)
        label = batch[3].to(args.device)
        with torch.no_grad():
            lm_loss, vec = model(inputs, p_inputs, n_inputs, label)
            eval_loss += lm_loss.mean().item()
            vecs.append(vec.cpu().numpy())
            labels.append(label.cpu().numpy())
        nb_eval_steps += 1
    vecs = np.concatenate(vecs, 0)
    labels = np.concatenate(labels, 0)
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.tensor(eval_loss)

    scores = np.matmul(vecs, vecs.T)
    for i in range(scores.shape[0]):
        scores[i, i] = -1000000
    sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:, ::-1]
    indexs = []
    for example in eval_dataset.examples:
        indexs.append(example.index)
    with open(os.path.join(args.output_dir, "predictions.jsonl"), 'w') as f:
        for index, sort_id in zip(indexs, sort_ids):
            js = {}
            js['index'] = index
            js['answers'] = []
            for idx in sort_id[:499]:
                js['answers'].append(indexs[int(idx)])
            f.write(json.dumps(js)+'\n')


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_data_file", default=None, type=str, required=True,
                        help="The input training data file (a text file).")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--eval_data_file", default=None, type=str,
                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
    parser.add_argument("--test_data_file", default=None, type=str,
                        help="An optional input test data file to evaluate the perplexity on (a text file).")

    parser.add_argument("--model_type", default="bert", type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument("--model_name_or_path", default=None, type=str,
                        help="The model checkpoint for weights initialization.")

    parser.add_argument("--mlm", action='store_true',
                        help="Train with masked-language modeling loss instead of language modeling.")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")

    parser.add_argument("--config_name", default="", type=str,
                        help="Optional pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
    parser.add_argument("--block_size", default=-1, type=int,
                        help="Optional input sequence length after tokenization."
                             "The training dataset will be truncated in blocks of this size for training."
                             "Default to the model max input length for single sentence inputs (take into account special tokens).")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--train_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--eval_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=1.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument('--save_total_limit', type=int, default=None,
                        help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--epoch', type=int, default=42,
                        help="number of training epochs")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")

    args = parser.parse_args()

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    args.per_gpu_train_batch_size = args.train_batch_size//args.n_gpu
    args.per_gpu_eval_batch_size = args.eval_batch_size//args.n_gpu
    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args.seed)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training downloads model & vocab

    args.start_epoch = 0
    args.start_step = 0
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
        args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
        args.config_name = os.path.join(checkpoint_last, 'config.json')
        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
        with open(idx_file, encoding='utf-8') as idxf:
            args.start_epoch = int(idxf.readlines()[0].strip()) + 1

        step_file = os.path.join(checkpoint_last, 'step_file.txt')
        if os.path.exists(step_file):
            with open(step_file, encoding='utf-8') as stepf:
                args.start_step = int(stepf.readlines()[0].strip())

        logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    config.num_labels = 1
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    if args.block_size <= 0:
        args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
    args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
    if args.model_name_or_path:
        model = model_class.from_pretrained(args.model_name_or_path,
                                            from_tf=bool('.ckpt' in args.model_name_or_path),
                                            config=config,
                                            cache_dir=args.cache_dir if args.cache_dir else None)
    else:
        model = model_class(config)

    model = Model(model, config, tokenizer, args)
    if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training downloads model & vocab

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        if args.local_rank not in [-1, 0]:
            torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training processes the dataset, and the others will use the cache

        train_dataset = TextDataset(tokenizer, args, args.train_data_file)
        if args.local_rank == 0:
            torch.distributed.barrier()

        train(args, train_dataset, model, tokenizer)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoint_prefix = 'epoch_1/subject_model.pth'  # 'checkpoint-best-map/model.bin'
        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
        model.load_state_dict(torch.load(output_dir), strict=False)
        model.to(args.device)
        result = evaluate(args, model, tokenizer)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(round(result[key], 4)))

    if args.do_test and args.local_rank in [-1, 0]:
        checkpoint_prefix = 'epoch_1/subject_model.pth'  # 'checkpoint-best-map/model.bin'
        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
        model.load_state_dict(torch.load(output_dir), strict=False)
        model.to(args.device)
        test(args, model, tokenizer)

    return results


if __name__ == "__main__":
    main()
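evaluate() above ranks every validation example against all others by inner-product similarity and reports MAP, where each query's cutoff is the number of other examples sharing its POJ-104 label. The same arithmetic on a toy 4-example, 2-label setup (all values hypothetical, written in NumPy to mirror the code above):

import numpy as np

vecs = np.array([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0], [0.1, 0.9]])
labels = np.array([0, 0, 1, 1])

scores = np.matmul(vecs, vecs.T)
np.fill_diagonal(scores, -1e6)            # never retrieve the query itself
sort_ids = np.argsort(-scores, axis=-1)   # descending similarity

# For each query, per_label[label] other examples share its label; average
# precision is taken over that many top-ranked results, as in run.py.
per_label = {l: int((labels == l).sum()) - 1 for l in set(labels.tolist())}
MAP = []
for i in range(len(vecs)):
    label = int(labels[i])
    avep = []
    for j in range(per_label[label]):
        if int(labels[sort_ids[i, j]]) == label:
            avep.append((len(avep) + 1) / (j + 1))
    MAP.append(sum(avep) / per_label[label])
print(np.mean(MAP))   # 1.0 here: every query ranks its clone first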
Code-Code/Clone-detection-POJ-104/code/test.sh
ADDED
@@ -0,0 +1,17 @@
CUDA_VISIBLE_DEVICES=0,1 python run.py \
    --output_dir=../model \
    --model_type=roberta \
    --config_name=microsoft/codebert-base \
    --model_name_or_path=microsoft/codebert-base \
    --tokenizer_name=roberta-base \
    --do_test \
    --train_data_file=../dataset/train.jsonl \
    --test_data_file=../dataset/valid.jsonl \
    --epoch 2 \
    --block_size 400 \
    --train_batch_size 8 \
    --eval_batch_size 16 \
    --learning_rate 2e-5 \
    --max_grad_norm 1.0 \
    --evaluate_during_training \
    --seed 123456
Code-Code/Clone-detection-POJ-104/code/train.sh
ADDED
@@ -0,0 +1,18 @@
CUDA_VISIBLE_DEVICES=0,1 python run.py \
    --output_dir=../model \
    --model_type=roberta \
    --config_name=microsoft/codebert-base \
    --model_name_or_path=microsoft/codebert-base \
    --tokenizer_name=roberta-base \
    --do_train \
    --train_data_file=../dataset/train.jsonl \
    --eval_data_file=../dataset/valid.jsonl \
    --test_data_file=../dataset/test.jsonl \
    --epoch 2 \
    --block_size 400 \
    --train_batch_size 8 \
    --eval_batch_size 16 \
    --learning_rate 2e-5 \
    --max_grad_norm 1.0 \
    --evaluate_during_training \
    --seed 123456
Code-Code/Clone-detection-POJ-104/dataset.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4c13009574c8c3c85c4ec26f6e33e53765479f41fa20239578b473fd11df4d01
size 7269797
Code-Code/Clone-detection-POJ-104/model/epoch_0/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44aeb4dcda7ca079f6948aafc8038ceaad81d0e13a4e698b6587729c06ad1bc7
size 498665958
Code-Code/Clone-detection-POJ-104/model/epoch_1/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cc0fa86663c23b599349322702b3aa1f2451a71b00943c7913f4f85ce98c40f4
size 498665958
Code-Code/CodeCompletion-token/code/beam.py
ADDED
@@ -0,0 +1,118 @@
import torch
import torch.nn as nn
from torch.autograd import Variable
import copy

class Beam(object):
    def __init__(self, size, sos, eos):
        self.size = size
        self.tt = torch.cuda
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        # The backpointers at each time-step.
        self.prevKs = []
        # The outputs at each time-step.
        self.nextYs = [self.tt.LongTensor(size).fill_(0)]
        self.nextYs[0][:] = sos
        # Has EOS topped the beam yet.
        self._eos = eos
        self.eosTop = False
        # Time and k pair for finished.
        self.finished = []

    def getCurrentState(self):
        "Get the outputs for the current timestep."
        batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
        return batch

    def getCurrentOrigin(self):
        "Get the backpointers for the current timestep."
        return self.prevKs[-1]

    def advance(self, wordLk):
        """
        Given prob over words for every last beam `wordLk` and attention
        `attnOut`: Compute and update the beam search.

        Parameters:

        * `wordLk`- probs of advancing from the last step (K x words)
        * `attnOut`- attention at the last step

        Returns: True if beam search is complete.
        """
        numWords = wordLk.size(1)

        # Sum the previous scores.
        if len(self.prevKs) > 0:
            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)

            # Don't let EOS have children.
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] in self._eos:
                    beamLk[i] = -1e20
        else:
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view(-1)
        bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)

        self.scores = bestScores

        # bestScoresId is flattened beam x word array, so calculate which
        # word and beam each score came from
        prevK = bestScoresId // numWords
        self.prevKs.append(prevK)
        self.nextYs.append((bestScoresId - prevK * numWords))

        for i in range(self.nextYs[-1].size(0)):
            if self.nextYs[-1][i] in self._eos:
                s = self.scores[i]
                self.finished.append((s, len(self.nextYs) - 1, i))

        # End condition is when top-of-beam is EOS and no global score.
        if self.nextYs[-1][0] in self._eos:
            self.eosTop = True

    def done(self):
        return self.eosTop and len(self.finished) >= self.size

    def getFinal(self):
        if len(self.finished) == 0:
            self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
        self.finished.sort(key=lambda a: -a[0])
        if len(self.finished) != self.size:
            unfinished = []
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] not in self._eos:
                    s = self.scores[i]
                    unfinished.append((s, len(self.nextYs) - 1, i))
            unfinished.sort(key=lambda a: -a[0])
            self.finished += unfinished[:self.size - len(self.finished)]
        return self.finished[:self.size]

    def getHyp(self, beam_res):
        """
        Walk back to construct the full hypothesis.
        """
        hyps = []
        for _, timestep, k in beam_res:
            hyp = []
            for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
                hyp.append(self.nextYs[j + 1][k])
                k = self.prevKs[j][k]
            hyps.append(hyp[::-1])
        return hyps

    def buildTargetTokens(self, preds):
        sentence = []
        for pred in preds:
            tokens = []
            for tok in pred:
                tokens.append(tok)
                if tok in self._eos:
                    break
            sentence.append(tokens)
        return sentence
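Beam hard-codes torch.cuda tensors, so the trace below needs a GPU; the probability tables are made-up values chosen so the walk-back is easy to follow, and it assumes this file is importable as `beam`. A two-step trace of a width-2 beam over a 4-token vocabulary:

import torch
from beam import Beam   # the class defined above

beam = Beam(size=2, sos=0, eos=[3])   # token 3 plays the role of EOS

# Step 1: only row 0 is read on the first advance (no backpointers yet).
beam.advance(torch.log(torch.tensor([[0.1, 0.5, 0.3, 0.1],
                                     [0.1, 0.5, 0.3, 0.1]])).cuda())
# Step 2: the best beam now prefers EOS; the other continues with token 0.
beam.advance(torch.log(torch.tensor([[0.05, 0.05, 0.1, 0.8],
                                     [0.5, 0.3, 0.1, 0.1]])).cuda())

print(beam.done())   # False: EOS tops the beam but only one hypothesis finished
print(beam.buildTargetTokens(beam.getHyp(beam.getFinal())))
# Best hypothesis decodes to tokens [1, 3] (ends at EOS); runner-up to [2, 0].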
Code-Code/CodeCompletion-token/code/dataset.py
ADDED
@@ -0,0 +1,261 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import pickle
import random
import re
import gc
import shutil
import json

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler

from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
                          BertConfig, BertForMaskedLM, BertTokenizer,
                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
                          RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
                          DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)

class TextDataset(Dataset):
    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank == -1:
            local_rank = 0
            world_size = 1
        else:
            local_rank = args.local_rank
            world_size = torch.distributed.get_world_size()

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_langs_%s"%(args.langs)+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)

        else:
            self.inputs = []
            if args.langs == 'all':
                langs = os.listdir(args.data_dir)
            else:
                langs = [args.langs]

            data = []
            for lang in langs:
                datafile = os.path.join(args.data_dir, lang, file_type+'.pkl')
                if file_type == 'train':
                    logger.warning("Creating features from dataset file at %s", datafile)
                # with open(datafile) as f:
                #     data.extend([json.loads(x)['code'] for idx,x in enumerate(f.readlines()) if idx%world_size==local_rank])
                dataset = pickle.load(open(datafile, 'rb'))
                data.extend(['<s> '+' '.join(x['function'].split())+' </s>' for idx, x in enumerate(dataset) if idx % world_size == local_rank])

            # random.shuffle(data)
            data = data
            length = len(data)
            logger.warning("Data size: %d"%(length))
            input_ids = []
            for idx, x in enumerate(data):
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("Rank %d, load %d"%(local_rank, percent))
            del data
            gc.collect()

            length = len(input_ids)
            for i in range(0, length-block_size, block_size):
                self.inputs.append(input_ids[i : i + block_size])
            del input_ids
            gc.collect()

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])

class finetuneDataset(Dataset):
    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank == -1:
            local_rank = 0
            world_size = 1
        else:
            local_rank = args.local_rank
            world_size = torch.distributed.get_world_size()

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)

        else:
            self.inputs = []

            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            if file_type == 'train':
                logger.warning("Creating features from dataset file at %s", datafile)
            with open(datafile) as f:
                data = f.readlines()

            length = len(data)
            logger.info("Data size: %d"%(length))
            input_ids = []
            for idx, x in enumerate(data):
                x = x.strip()
                if x.startswith("<s>") and x.endswith("</s>"):
                    pass
                else:
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("Rank %d, load %d"%(local_rank, percent))
            del data
            gc.collect()

            length = len(input_ids) // world_size
            logger.info(f"tokens: {length*world_size}")
            input_ids = input_ids[local_rank*length: (local_rank+1)*length]

            for i in range(0, length-block_size, block_size):
                self.inputs.append(input_ids[i : i + block_size])
            del input_ids
            gc.collect()

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])

class EvalDataset(Dataset):
    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)

        else:
            self.inputs = []

            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            with open(datafile) as f:
                data = f.readlines()

            length = len(data)
            logger.info("Data size: %d"%(length))
            input_ids = []
            for idx, x in enumerate(data):
                x = x.strip()
                if x.startswith("<s>") and x.endswith("</s>"):
                    pass
                else:
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("load %d"%(percent))
            del data
            gc.collect()

            logger.info(f"tokens: {len(input_ids)}")
            self.split(input_ids, tokenizer, logger, block_size=block_size)
            del input_ids
            gc.collect()

            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def split(self, input_ids, tokenizer, logger, block_size=1024):
        sample = []
        i = 0
        while i < len(input_ids):
            sample = input_ids[i: i+block_size]
            if len(sample) == block_size:
                for j in range(block_size):
                    if tokenizer.convert_ids_to_tokens(sample[block_size-1-j])[0] == '\u0120' or tokenizer.convert_ids_to_tokens(sample[block_size-1-j]).startswith("<NUM_LIT"):
                        break
                    if sample[block_size-1-j] in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id]:
                        if sample[block_size-1-j] != tokenizer.bos_token_id:
                            j -= 1
                        break
                if j == block_size-1:
                    print(tokenizer.decode(sample))
                    exit()
                sample = sample[: block_size-1-j]
                # print(len(sample))
            i += len(sample)
            pad_len = block_size-len(sample)
            sample += [tokenizer.pad_token_id]*pad_len
            self.inputs.append(sample)

            if len(self.inputs) % 10000 == 0:
                logger.info(f"{len(self.inputs)} samples")


    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])


class lineDataset(Dataset):
    def __init__(self, tokenizer, args, logger, file_type='test', block_size=924):
        datafile = os.path.join(args.data_dir, f"{file_type}.json")
        with open(datafile) as f:
            datas = f.readlines()

        length = len(datas)
        logger.info("Data size: %d"%(length))
        self.inputs = []
        self.gts = []
        for data in datas:
            data = json.loads(data.strip())
            self.inputs.append(tokenizer.encode(data["input"])[-block_size:])
            self.gts.append(data["gt"])

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item]), self.gts[item]
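TextDataset and finetuneDataset both concatenate every encoded example into one token stream and slice it into fixed-size blocks, dropping the remainder; EvalDataset instead backs each cut up to a word or literal boundary and pads. A minimal sketch of the training-side slicing, with hypothetical tiny numbers standing in for real token ids and block_size=1024:

input_ids = list(range(10))   # pretend these are token ids
block_size = 4
blocks = [input_ids[i:i + block_size]
          for i in range(0, len(input_ids) - block_size, block_size)]
print(blocks)   # [[0, 1, 2, 3], [4, 5, 6, 7]] -- the trailing remainder is dropped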
Code-Code/CodeCompletion-token/code/eval.sh
ADDED
@@ -0,0 +1,20 @@
LANG=java                                 # set python for py150
DATADIR=../dataset/javaCorpus/token_completion
LITFILE=../dataset/javaCorpus/literals.json
OUTPUTDIR=../model/javaCorpus
PRETRAINDIR=microsoft/CodeGPT-small-java  # microsoft/CodeGPT-small-py for py150
LOGFILE=eval_javaCorpus.log

CUDA_VISIBLE_DEVICES=0 python run_lm.py \
    --data_dir=$DATADIR \
    --lit_file=$LITFILE \
    --langs=$LANG \
    --output_dir=$OUTPUTDIR \
    --pretrain_dir=$OUTPUTDIR \
    --log_file=$LOGFILE \
    --model_type=gpt2 \
    --block_size=512 \
    --do_eval \
    --per_gpu_eval_batch_size=16 \
    --logging_steps=100 \
    --seed=42
Code-Code/CodeCompletion-token/code/evaluate.sh
ADDED
@@ -0,0 +1,3 @@
python evaluator.py \
    -a=../dataset/javaCorpus/token_completion/test.txt \
    -p=../model/javaCorpus/predictions.txt
Code-Code/CodeCompletion-token/code/evaluator.py
ADDED
@@ -0,0 +1,36 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import os
+import logging
+import argparse
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.INFO)
+
+def main():
+    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (token level).')
+    parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in txt format.")
+    parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
+    args = parser.parse_args()
+
+    preds = open(args.predictions, "r").readlines()
+    gts = open(args.answers, "r").readlines()
+
+    assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"
+
+    total = 0
+    correct = 0.0
+    for pred, gt in zip(preds, gts):
+        pred = pred.split()
+        gt = gt.split()
+        assert len(pred) == len(gt), f"Sequence length of prediction and answer are not equal, {len(pred)}: {len(gt)}"
+        for x, y in zip(pred, gt):
+            if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
+                total += 1
+                if x == y:
+                    correct += 1
+
+    logger.info(f"Total {total} tokens, accuracy: {round(correct/total*100, 2)}")
+
+if __name__ == "__main__":
+    main()
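To make the metric concrete, a small worked example of the counting rule in evaluator.py above (the token strings are invented for illustration): special tokens in the ground truth are skipped, and every other position counts toward the denominator.

pred = "<s> int j = 0 ; </s>".split()
gt = "<s> int i = 0 ; </s>".split()

total, correct = 0, 0
for x, y in zip(pred, gt):
    if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
        total += 1
        correct += x == y
print(total, correct, round(correct / total * 100, 2))   # 5 4 80.0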
Code-Code/CodeCompletion-token/code/model.py
ADDED
@@ -0,0 +1,68 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class RNNModel(nn.Module):
+    """Container module with an encoder, a recurrent module, and a decoder."""
+
+    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
+        super(RNNModel, self).__init__()
+        self.ntoken = ntoken
+        self.drop = nn.Dropout(dropout)
+        self.encoder = nn.Embedding(ntoken, ninp)
+        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
+        self.decoder = nn.Linear(nhid, ntoken)
+        self.criterion = nn.CrossEntropyLoss()
+
+        # Optionally tie weights as in:
+        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
+        # https://arxiv.org/abs/1608.05859
+        # and
+        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
+        # https://arxiv.org/abs/1611.01462
+        if tie_weights:
+            if nhid != ninp:
+                raise ValueError('When using the tied flag, nhid must be equal to emsize')
+            self.decoder.weight = self.encoder.weight
+
+        self.init_weights()
+
+        self.nhid = nhid
+        self.nlayers = nlayers
+
+    def init_weights(self):
+        initrange = 0.1
+        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
+        nn.init.zeros_(self.decoder.weight)
+        nn.init.uniform_(self.decoder.weight, -initrange, initrange)
+
+    def forward(self, input, hidden=None, labels=None):
+        emb = self.encoder(input)
+        if hidden is not None:
+            output, hidden = self.rnn(emb, hidden)
+        else:
+            output, hidden = self.rnn(emb)
+        output = self.drop(output)
+        output = self.decoder(output)
+        # decoded = decoded.view(-1, self.ntoken)
+        # output = F.log_softmax(decoded, dim=1)
+        if labels is not None:
+            shift_logits = output[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+            return loss, output, hidden
+        else:
+            return output, hidden
+
+    def init_hidden(self, bsz):
+        weight = next(self.parameters())
+        if self.rnn_type == 'LSTM':
+            return (weight.new_zeros(self.nlayers, bsz, self.nhid),
+                    weight.new_zeros(self.nlayers, bsz, self.nhid))
+        else:
+            return weight.new_zeros(self.nlayers, bsz, self.nhid)
+
+
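A minimal smoke test of RNNModel's forward/loss path, assuming model.py above is importable; the sizes are arbitrary choices, not from the diff. Note that init_hidden as written reads self.rnn_type, which __init__ never sets, so calling it would raise AttributeError.

import torch
from model import RNNModel

model = RNNModel(ntoken=100, ninp=16, nhid=16, nlayers=1, dropout=0.0)
batch = torch.randint(0, 100, (2, 8))              # (batch, seq_len) of token ids
loss, logits, hidden = model(batch, labels=batch)  # next-token CE on shifted labels
print(loss.item(), logits.shape)                   # logits: torch.Size([2, 8, 100])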
Code-Code/CodeCompletion-token/code/run_lm.py
ADDED
@@ -0,0 +1,728 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Code completion (both token level and line level) pipeline in CodeXGLUE
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import pickle
+import random
+import re
+import shutil
+import json
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
+from torch.utils.data.distributed import DistributedSampler
+from dataset import TextDataset, finetuneDataset, EvalDataset, lineDataset
+from beam import Beam
+
+from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+                          BertConfig, BertForMaskedLM, BertTokenizer,
+                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+                          RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
+                          DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+from model import RNNModel
+
+# logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+#                     datefmt='%m/%d/%Y %H:%M:%S',
+#                     level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+MODEL_CLASSES = {
+    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
+    'rnn': (GPT2Config, RNNModel, GPT2Tokenizer),
+    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
+    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
+    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
+    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+}
+
+
+def load_and_cache_examples(args, tokenizer, evaluate=False):
+    if args.not_pretrain:
+        dataset = finetuneDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
+                                  block_size=args.block_size)
+    else:
+        dataset = TextDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
+                              block_size=args.block_size)
+    return dataset
+
+def set_seed(args):
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    if args.n_gpu > 0:
+        torch.cuda.manual_seed_all(args.seed)
+
+def update_config(args, config):
+    # config.n_positions = config.n_ctx = args.block_size
+    config.vocab_size = args.vocab_size
+
+def get_special_tokens(path):
+    lits = json.load(open(path))
+    tokens = ["<STR_LIT>", "<NUM_LIT>", "<CHAR_LIT>"]
+    for lit in lits["str"]:
+        tokens.append(f"<STR_LIT:{lit}>")
+    for lit in lits["num"]:
+        tokens.append(f"<NUM_LIT:{lit}>")
+    for lit in lits["char"]:
+        tokens.append(f"<CHAR_LIT:{lit}>")
+    return tokens
+
+
+def train(args, train_dataset, model, tokenizer, fh, pool):
+    """ Train the model """
+    if args.local_rank in [-1, 0]:
+        args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
+        if not os.path.exists(args.tensorboard_dir):
+            os.makedirs(args.tensorboard_dir)
+
+    args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+    train_sampler = RandomSampler(train_dataset)
+
+    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
+    total_examples = len(train_dataset) * (
+        torch.distributed.get_world_size() if args.local_rank != -1 else 1)
+    batch_size = args.batch_size * args.gradient_accumulation_steps * (
+        torch.distributed.get_world_size() if args.local_rank != -1 else 1)
+    # if args.max_steps > 0:
+    #     t_total = args.max_steps
+    #     args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
+    if args.num_train_epochs > 0:
+        t_total = total_examples // batch_size * args.num_train_epochs
+    args.max_steps = t_total
+    model.to(args.device)
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier()
+    # Prepare optimizer and schedule (linear warmup and decay)
+    no_decay = ['bias', 'LayerNorm.weight']
+    optimizer_grouped_parameters = [
+        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+         'weight_decay': args.weight_decay},
+        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+    ]
+    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
+                                                num_training_steps=t_total)
+    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+    # scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
+    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
+    # if os.path.exists(scheduler_last):
+    #     scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
+    if os.path.exists(optimizer_last):
+        logger.warning(f"Loading optimizer from {optimizer_last}")
+        optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
+    if args.local_rank == 0:
+        torch.distributed.barrier()
+    if args.fp16:
+        try:
+            from apex import amp
+        except ImportError:
+            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+    # multi-gpu training (should be after apex fp16 initialization)
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    # Distributed training (should be after apex fp16 initialization)
+    if args.local_rank != -1:
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank % args.gpu_per_node],
+                                                          output_device=args.local_rank % args.gpu_per_node)
+
+    # Train!
+    logger.info("***** Running training *****")
+    logger.info("  Num examples = %d", total_examples)
+    logger.info("  Num epoch = %d", t_total * batch_size // total_examples)
+    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
+    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+    logger.info("  Total optimization steps = %d", t_total)
+
+    global_step = args.start_step
+    tr_loss, logging_loss, avg_loss, tr_nb = 0.0, 0.0, 0.0, global_step
+    # model.resize_token_embeddings(len(tokenizer))
+    model.zero_grad()
+    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
+
+    for idx in range(args.start_epoch, int(args.num_train_epochs)):
+        for step, batch in enumerate(train_dataloader):
+            inputs, labels = (batch, batch)
+            inputs = inputs.to(args.device)
+            labels = labels.to(args.device)
+            model.train()
+            outputs = model(inputs, labels=labels)
+            loss = outputs[0]
+
+            if args.n_gpu > 1:
+                loss = loss.mean()  # mean() to average on multi-gpu parallel training
+            if args.gradient_accumulation_steps > 1:
+                loss = loss / args.gradient_accumulation_steps
+
+            if args.fp16:
+                with amp.scale_loss(loss, optimizer) as scaled_loss:
+                    scaled_loss.backward()
+                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+            else:
+                loss.backward()
+                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+            tr_loss += loss.item()
+
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                optimizer.step()
+                optimizer.zero_grad()
+                scheduler.step()
+                global_step += 1
+                output_flag = True
+                avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
+                if global_step % args.logging_steps == 0:
+                    logger.info("  steps: %s  ppl: %s  lr: %s", global_step, round(avg_loss, 5), scheduler.get_last_lr()[0])
+                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
+                    # Log metrics
+                    logging_loss = tr_loss
+                    tr_nb = global_step
+
+                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
+                    checkpoint_prefix = "checkpoint"
+                    # Save model checkpoint
+                    if args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
+                        results = evaluate(args, model, tokenizer, eval_when_training=True)
+                        for key, value in results.items():
+                            logger.info("  %s = %s", key, round(value, 4))
+                        output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'], 4)))
+                    else:
+                        output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
+                    if not os.path.exists(output_dir):
+                        os.makedirs(output_dir)
+                    model_to_save = (
+                        model.module if hasattr(model, "module") else model
+                    )  # Take care of distributed/parallel training
+                    if args.model_type == "rnn":
+                        torch.save(model_to_save.state_dict(), os.path.join(output_dir, "model.pt"))
+                    else:
+                        model_to_save.save_pretrained(output_dir)
+                    tokenizer.save_pretrained(output_dir)
+
+                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
+                    logger.info("Saving model checkpoint to %s", output_dir)
+
+                    # _rotate_checkpoints(args, checkpoint_prefix)
+                    last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
+                    if not os.path.exists(last_output_dir):
+                        os.makedirs(last_output_dir)
+                    if args.model_type == "rnn":
+                        torch.save(model_to_save.state_dict(), os.path.join(last_output_dir, "model.pt"))
+                    else:
+                        model_to_save.save_pretrained(last_output_dir)
+                    tokenizer.save_pretrained(last_output_dir)
+                    idx_file = os.path.join(last_output_dir, 'idx_file.txt')
+                    with open(idx_file, 'w', encoding='utf-8') as idxf:
+                        idxf.write(str(0) + '\n')
+
+                    torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
+                    # torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
+                    logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
+
+                    step_file = os.path.join(last_output_dir, 'step_file.txt')
+                    with open(step_file, 'w', encoding='utf-8') as stepf:
+                        stepf.write(str(global_step) + '\n')
+
+
+            if args.max_steps > 0 and global_step > args.max_steps:
+                break
+
+        # Save a checkpoint at the end of every epoch
+        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx + 1))
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
+        model_to_save = model.module if hasattr(model, 'module') else model
+        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
+        logger.info("Saving model checkpoint to %s", ckpt_output_path)
+        torch.save(model_to_save.state_dict(), ckpt_output_path)
+
+        if args.max_steps > 0 and global_step > args.max_steps:
+            break
+
+    return global_step, tr_loss / global_step
+
+
+def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
+    # Loop to handle MNLI double evaluation (matched, mis-matched)
+    eval_output_dir = args.output_dir
+
+    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
+
+    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
+        os.makedirs(eval_output_dir)
+
+    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+    # Note that DistributedSampler samples randomly
+    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, drop_last=True)
+
+    # multi-gpu evaluate
+    if args.n_gpu > 1 and eval_when_training is False:
+        model = torch.nn.DataParallel(model)
+
+    # Eval!
+    # logger.info("***** Running evaluation {} *****".format(prefix))
+    # logger.info("  Num examples = %d", len(eval_dataset))
+    # logger.info("  Batch size = %d", args.eval_batch_size)
+    eval_loss = 0.0
+    nb_eval_steps = 0
+    model.eval()
+
+    for batch in eval_dataloader:
+        inputs, labels = (batch, batch)
+        inputs = inputs.to(args.device)
+        labels = labels.to(args.device)
+
+        with torch.no_grad():
+            outputs = model(inputs, labels=labels)
+            lm_loss = outputs[0]
+            eval_loss += lm_loss.mean().item()
+        nb_eval_steps += 1
+
+    eval_loss = eval_loss / nb_eval_steps
+    perplexity = torch.exp(torch.tensor(eval_loss))
+
+    result = {
+        "perplexity": float(perplexity)
+    }
+
+    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
+    with open(output_eval_file, "w") as writer:
+        # logger.info("***** Eval results {} *****".format(prefix))
+        for key in sorted(result.keys()):
+            # logger.info("  %s = %s", key, str(result[key]))
+            writer.write("%s = %s\n" % (key, str(result[key])))
+
+    return result
+
+def eval_acc(args, model, tokenizer, file_type='test'):
+    """
+    Evaluate token level code completion on accuracy.
+
+    This function can only be used to evaluate accuracy, not to run inference, because the model is fed
+    the previous ground-truth sub-tokens rather than its own outputs.
+    Nevertheless, the accuracy computed here is guaranteed to equal the accuracy of real token-level completion.
+    The reason is:
+    Assume the input is "context_len = 100 <EOL> masks = np . zeros (" and the ground truth is "context_len".
+    Because of our BPE encoding, the model has to output "context", "_" and "len" over 3 time steps,
+    i.e. gt0="context", gt1="_", gt2="len".
+    In a real inference scenario:
+        time step 0: input "context_len = 100 <EOL> masks = np . zeros (", model outputs out0;
+        time step 1: input in1=out0, model outputs out1;
+        ... until the model outputs a complete token.
+    In this function, however, in1=gt0="context" no matter what out0 is.
+    That is to say, we feed the ground-truth sub-token, not the predicted one, when predicting the next
+    sub-token of a token that BPE has split, so we would obviously get different predictions than in the
+    real completion scenario.
+    However, for token-level accuracy a complete token counts as correct if and only if every one of its
+    sub-tokens is predicted correctly. In that case out0==gt0 and out1==gt1, so it does not matter whether
+    we feed the ground truth or the model output. In summary, this function makes the model output the same
+    complete token whenever that token equals the ground truth; otherwise the prediction may differ from the
+    real completion scenario, but it is wrong either way, so token-level accuracy is unaffected.
+
+    I use this trick to speed up evaluation due to the large test set.
+    """
+    eval_dataset = EvalDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size)
+    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
+    model.to(args.device)
+    # multi-gpu training (should be after apex fp16 initialization)
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    # Distributed training (should be after apex fp16 initialization)
+    if args.local_rank != -1:
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank % args.gpu_per_node],
+                                                          output_device=args.local_rank % args.gpu_per_node)
+
+    def DecodeIds(idxs):
+        codes = ""
+        for idx in idxs:
+            to_add = tokenizer.convert_ids_to_tokens(idx)
+            if tokenizer.convert_ids_to_tokens(idx)[0] == '\u0120':
+                if not codes.endswith(" "):
+                    codes += " " + to_add[1:]
+                else:
+                    codes += to_add[1:]
+            elif (
+                idx in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or
+                tokenizer.convert_ids_to_tokens(idx).startswith("<NUM_LIT")
+            ):
+                codes += " " + to_add + " "
+            else:
+                codes += to_add
+        return codes.strip(" ")
+
+    model.eval()
+
+    correct = 0.0
+    total = 0
+
+    total_pred = []
+    total_gt = []
+
+    for step, batch in enumerate(eval_dataloader):
+        inputs = batch.to(args.device)
+
+        with torch.no_grad():
+            outputs = model(inputs)
+            pred_scores = outputs[0]
+            pred_ids = pred_scores.argmax(-1)
+
+        all_pred = []
+        all_gt = []
+        prev_pred = None
+        for pred, gt in zip(pred_ids, inputs):
+            pred = pred.cpu().tolist()
+            gt = gt.cpu().tolist()
+
+            for i, y in enumerate(gt):
+                if i == 0:
+                    if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id]:
+                        now_gt = [y]
+                        now_pred = [0] if prev_pred is None else [prev_pred]
+                        all_pred.append(DecodeIds(now_pred).strip().split()[0])
+                        all_gt.append(DecodeIds(now_gt).strip())
+                        now_gt = []
+                        now_pred = []
+                    else:
+                        now_gt = [y]
+                        now_pred = [0] if prev_pred is None else [prev_pred]
+                else:
+                    if tokenizer.convert_ids_to_tokens(y)[0] == '\u0120':
+                        if len(now_gt) > 0:
+                            try:
+                                all_pred.append(DecodeIds(now_pred).strip().split()[0])
+                            except IndexError:
+                                all_pred.append("<SPACE>")
+                            all_gt.append(DecodeIds(now_gt).strip())
+                            now_gt = []
+                            now_pred = []
+                    if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or tokenizer.convert_ids_to_tokens(y).startswith("<NUM_LIT"):
+                        if len(now_gt) > 0:
+                            try:
+                                all_pred.append(DecodeIds(now_pred).strip().split()[0])
+                            except IndexError:
+                                all_pred.append("<SPACE>")
+                            all_gt.append(DecodeIds(now_gt).strip())
+                        now_gt = [y]
+                        now_pred = [pred[i - 1]]
+                        try:
+                            all_pred.append(DecodeIds(now_pred).strip().split()[0])
+                        except IndexError:
+                            all_pred.append("<SPACE>")
+                        all_gt.append(DecodeIds(now_gt).strip())
+                        now_gt = []
+                        now_pred = []
+                        continue
+                    now_gt.append(y)
+                    now_pred.append(pred[i - 1])
+        assert len(all_pred) == len(all_gt)
+
+        total_pred.extend(all_pred)
+        total_gt.extend(all_gt)
+
+        for x, y in zip(all_pred, all_gt):
+            if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
+                total += 1
+                if x == y:
+                    correct += 1
+
+        if step % args.logging_steps == 0:
+            logger.info(f"{step} are done!")
+            logger.info(f"{total}, {correct/total}")
+
+    # pickle.dump(total_pred, open(os.path.join(args.output_dir, "preds.pkl"), "wb"))
+    # pickle.dump(total_gt, open(os.path.join(args.output_dir, "gts.pkl"), "wb"))
+
+    saved_file = os.path.join(args.output_dir, "predictions.txt")
+    total_samples = post_process(args, total_pred, total_gt, open(os.path.join(args.data_dir, f"{file_type}.txt")).readlines(), saved_file)
+    logger.info(f"Eval on {total_samples}, saved at {saved_file}")
+
+    return total, correct
+
+def post_process(args, preds, gts, true_gts, saved_file):
+    wf = open(saved_file, "w")
+
+    cnt = 0
+    new_gt = []
+    new_pred = []
+    for i, (pred, gt) in enumerate(zip(preds, gts)):
+        if gt in ["", "<pad>"]:
+            continue
+        new_gt.append(gt)
+        new_pred.append(pred.replace(" ", ""))
+        if gt == "</s>":
+            gt_str = " ".join(new_gt)
+            pred_str = " ".join(new_pred)
+            assert gt_str == true_gts[cnt].strip(), f"{cnt} sample gt_str != true_gt"
+            wf.write(pred_str + "\n")
+            cnt += 1
+            new_gt = []
+            new_pred = []
+
+    return cnt
+
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    ## Required parameters
+    parser.add_argument("--data_dir", default=None, type=str, required=True,
+                        help="The input data path.")
+    parser.add_argument("--langs", default=None, type=str, required=True,
+                        help="Languages to train, if all, train all languages in data_dir")
+    parser.add_argument("--output_dir", default=None, type=str, required=True,
+                        help="The output directory where the model predictions and checkpoints will be written.")
+
+    ## Other parameters
+    parser.add_argument("--model_type", default="gpt2", type=str,
+                        help="The model architecture to be fine-tuned.")
+    parser.add_argument("--pretrain_dir", default="", type=str,
+                        help="The output directory where the model predictions and checkpoints will be written.")
+    parser.add_argument("--config_dir", type=str,
+                        help="config name. Required when training from scratch")
+    parser.add_argument("--tokenizer_dir", type=str,
+                        help="Pre-trained tokenizer dir. Required when training from scratch")
+    parser.add_argument("--lit_file", type=str,
+                        help="literals json file")
+    parser.add_argument("--load_name", type=str, default="pretrained",
+                        help="Load pretrained model name")
+
+    parser.add_argument("--mlm", action='store_true',
+                        help="Train with masked-language modeling loss instead of language modeling.")
+    parser.add_argument("--mlm_probability", type=float, default=0.15,
+                        help="Ratio of tokens to mask for masked language modeling loss")
+
+    parser.add_argument("--cache_dir", default="", type=str,
+                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
+    parser.add_argument("--block_size", default=1024, type=int,
+                        help="Optional input sequence length after tokenization."
+                             "The training dataset will be truncated in block of this size for training."
+                             "Default to the model max input length for single sentence inputs (take into account special tokens).")
+    parser.add_argument("--do_train", action='store_true',
+                        help="Whether to run training.")
+    parser.add_argument("--do_eval", action='store_true',
+                        help="Whether to run eval on the dev set.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true',
+                        help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
+                        help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=12, type=int,
+                        help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of updates steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float,
+                        help="The initial learning rate for Adam.")
+    parser.add_argument("--weight_decay", default=0.0, type=float,
+                        help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+                        help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float,
+                        help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=1.0, type=float,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int,
+                        help="Linear warmup over warmup_steps.")
+
+    parser.add_argument('--logging_steps', type=int, default=1000,
+                        help="Log every X updates steps.")
+    parser.add_argument('--save_steps', type=int, default=5000,
+                        help="Save checkpoint every X updates steps.")
+    parser.add_argument('--save_total_limit', type=int, default=None,
+                        help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
+    parser.add_argument("--no_cuda", action='store_true',
+                        help="Avoid using CUDA when available")
+    parser.add_argument('--overwrite_output_dir', action='store_true',
+                        help="Overwrite the content of the output directory")
+    parser.add_argument('--overwrite_cache', action='store_true',
+                        help="Overwrite the cached training and evaluation sets")
+    parser.add_argument('--seed', type=int, default=42,
+                        help="random seed for initialization")
+    parser.add_argument('--not_pretrain', action='store_true',
+                        help="use different dataset")
+
+    parser.add_argument('--fp16', action='store_true',
+                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1,
+                        help="For distributed training: local_rank")
+    parser.add_argument("--node_index", type=int, default=-1,
+                        help="node index if multi-node running")
+    parser.add_argument("--gpu_per_node", type=int, default=-1,
+                        help="num of gpus per node")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+    parser.add_argument('--log_file', type=str, default='')
+    parser.add_argument('--tensorboard_dir', type=str)
+
+    pool = None
+    args = parser.parse_args()
+
+    # args.output_dir = os.path.join(args.output_dir, args.dataset)
+
+    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
+        raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
+                         "flag (masked language modeling).")
+
+    if os.path.exists(args.output_dir) and os.listdir(
+            args.output_dir) and args.do_train and not args.overwrite_output_dir:
+        raise ValueError(
+            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
+                args.output_dir))
+
+    # Setup distant debugging if needed
+    if args.server_ip and args.server_port:
+        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+        import ptvsd
+        print("Waiting for debugger attach")
+        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+        ptvsd.wait_for_attach()
+
+    logger.info("local_rank: %d, node_index: %d, gpu_per_node: %d" % (args.local_rank, args.node_index, args.gpu_per_node))
+    # Setup CUDA, GPU & distributed training
+    if args.local_rank == -1 or args.no_cuda:
+        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+        args.n_gpu = torch.cuda.device_count()
+    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
+        torch.distributed.init_process_group(backend='nccl')
+        args.local_rank += args.node_index * args.gpu_per_node
+        args.n_gpu = 1
+    args.device = device
+    # args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+
+    # Setup logging
+    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                        datefmt='%m/%d/%Y %H:%M:%S',
+                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
+                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
+                   torch.distributed.get_world_size() if args.local_rank != -1 else 1)
+
+    # Use a FileHandler so the log is also written to a file
+    fh = logging.FileHandler(args.log_file)
+    logger.addHandler(fh)
+
+    # Set seed
+    set_seed(args)
+
+    # Load pretrained model and tokenizer
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training download model & vocab
+
+    args.start_epoch = 0
+    args.start_step = 0
+    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+    if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
+        args.pretrain_dir = os.path.join(checkpoint_last)
+        args.config_name = os.path.join(checkpoint_last, 'config.json')
+        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
+        with open(idx_file, encoding='utf-8') as idxf:
+            args.start_epoch = int(idxf.readlines()[0].strip()) + 1
+
+        step_file = os.path.join(checkpoint_last, 'step_file.txt')
+        if os.path.exists(step_file):
+            with open(step_file, encoding='utf-8') as stepf:
+                args.start_step = int(stepf.readlines()[0].strip())
+
+        logger.info("reload model from {}, resume from {} steps".format(checkpoint_last, args.start_step))
+
+    # get special tokens
+    special_tokens = get_special_tokens(args.lit_file)
+
+    # Load pre-trained model
+    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+    pretrained = checkpoint_last  # args.pretrain_dir
+    if pretrained:
+        tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, sep_token='<EOL>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens)
+        if args.model_type == "rnn":
+            model = model_class(len(tokenizer), 768, 768, 1)
+            model_last = os.path.join(pretrained, 'model.pt')
+            if os.path.exists(model_last):
+                logger.warning(f"Loading model from {model_last}")
+                model.load_state_dict(torch.load(model_last, map_location="cpu"))
+        else:
+            model = model_class.from_pretrained(pretrained)
+            model.resize_token_embeddings(len(tokenizer))
+    else:
+        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, sep_token='<EOL>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens)
+        args.vocab_size = len(tokenizer)
+        if args.model_type == "rnn":
+            model = model_class(len(tokenizer), 768, 768, 1)
+        else:
+            config = config_class.from_pretrained(args.config_dir)
+            model = model_class(config)
+            model.resize_token_embeddings(len(tokenizer))
+
+
+    model_parameters = model.parameters()
+    num_params = sum([np.prod(p.size()) for p in model_parameters])
+    logger.info(f"Model has a total of {num_params} trainable parameters")
+
+    if args.local_rank == 0:
+        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training download model & vocab
+
+    logger.info("Training/evaluation parameters %s", args)
+
+    # Training
+    if args.do_train:
+        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
+
+        global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
+        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
+
+    # Only works on single GPU
+    if args.do_eval:
+        checkpoint_prefix = 'epoch_5/subject_model.pth'
+        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+        model.load_state_dict(torch.load(output_dir))
+        model.to(args.device)
+        # Do not use the dev file here: the check in EvalDataset.__init__ would fail and exit the process
+        # dev_total, dev_cr = eval_acc(args, model, tokenizer, 'dev')
+        # logger.info(f"Dev total tokens: {dev_total}, accuracy: {dev_cr/dev_total}")
+        test_total, test_cr = eval_acc(args, model, tokenizer, 'test')
+        logger.info(f"Test total tokens: {test_total}, accuracy: {test_cr/test_total}")
+
+
+if __name__ == "__main__":
+    main()
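A concrete rendering of the teacher-forcing argument in eval_acc's docstring (sub-tokens invented for illustration): a BPE-split token counts as correct only when every sub-token matches, and in exactly that case feeding ground truth or the model's own output is indistinguishable.

# "context_len" BPE-split into sub-tokens; the token is correct iff all sub-tokens are.
gt_subtokens = ["context", "_", "len"]

def token_correct(pred_subtokens):
    return pred_subtokens == gt_subtokens

print(token_correct(["context", "_", "len"]))    # True: identical under gt- or self-feeding
print(token_correct(["context", "_", "size"]))   # False: wrong either way, so accuracy is unchanged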
Code-Code/CodeCompletion-token/code/train.sh
ADDED
@@ -0,0 +1,31 @@
+LANG=java                                       # set python for py150
+DATADIR=../dataset/javaCorpus/token_completion
+LITFILE=../dataset/javaCorpus/literals.json
+OUTPUTDIR=../model/javaCorpus
+PRETRAINDIR=microsoft/CodeGPT-small-java        # microsoft/CodeGPT-small-py for py150
+LOGFILE=train_javaCorpus.log
+PER_NODE_GPU=4                                  # modify YOUR_GPU_NUM
+
+CUDA_VISIBLE_DEVICES=0,1,2,3 python run_lm.py \
+    --data_dir=$DATADIR \
+    --lit_file=$LITFILE \
+    --langs=$LANG \
+    --output_dir=$OUTPUTDIR \
+    --pretrain_dir=$PRETRAINDIR \
+    --log_file=$LOGFILE \
+    --model_type=gpt2 \
+    --block_size=512 \
+    --do_train \
+    --gpu_per_node $PER_NODE_GPU \
+    --learning_rate=8e-5 \
+    --weight_decay=0.01 \
+    --evaluate_during_training \
+    --per_gpu_train_batch_size=1 \
+    --per_gpu_eval_batch_size=4 \
+    --gradient_accumulation_steps=4 \
+    --num_train_epochs=5 \
+    --logging_steps=100 \
+    --save_steps=1000 \
+    --seed=42 \
+    --overwrite_output_dir \
+    --not_pretrain
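For reference, the effective batch size implied by these flags, following the batch_size formula in run_lm.py's train() (single node with DataParallel, so the distributed world-size factor is 1):

per_gpu_train_batch_size = 1
n_gpu = 4                        # CUDA_VISIBLE_DEVICES=0,1,2,3
gradient_accumulation_steps = 4
print(per_gpu_train_batch_size * n_gpu * gradient_accumulation_steps)   # 16 sequences of 512 tokens per optimizer step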
Code-Code/CodeCompletion-token/data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fe81ae13261569dcb0147143f6be01900bdea8fc19394b931a2f6be720dac03
+size 16149700
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_1/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7620d7764c8ab3ed610bd33a089895ae34640f5d8ac29ba18b3906228df3e79f
+size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_2/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14cbd1d37904f6daacbe4345be5c9ebb052ff0320d6a652630e7fa2c8a14bd34
+size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_3/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8750f7eff3f95fea0dc69af85df906d5a4bc7387bc46f80aece0877e62d20f3d
+size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_4/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:614d002f472a8aa35050b56e8ccb6c5fcdeabe1bbf5f50e0c2e3d18e0dd0ed23
+size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_5/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdf5488012ceaf71409a8d129f391f4ba06a86054b63b79a8c0b4c0c41799f20
+size 497840154
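The epoch_*/subject_model.pth files above are Git LFS pointers to plain state_dicts saved by run_lm.py. A hedged sketch of reloading one; the embedding resize is needed because training added literal special tokens, and the path and model name mirror train.sh but are otherwise assumptions:

import torch
from transformers import GPT2LMHeadModel

state = torch.load("model/javaCorpus/epoch_5/subject_model.pth", map_location="cpu")
model = GPT2LMHeadModel.from_pretrained("microsoft/CodeGPT-small-java")
model.resize_token_embeddings(state["transformer.wte.weight"].shape[0])  # match checkpoint vocab
model.load_state_dict(state)
model.eval()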
Code-Code/Defect-detection/code/eval.sh
ADDED
@@ -0,0 +1,18 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+    --output_dir=../model \
+    --model_type=roberta \
+    --tokenizer_name=microsoft/codebert-base \
+    --model_name_or_path=microsoft/codebert-base \
+    --do_eval \
+    --do_test \
+    --train_data_file=../dataset/train.jsonl \
+    --eval_data_file=../dataset/valid.jsonl \
+    --test_data_file=../dataset/valid.jsonl \
+    --epoch 5 \
+    --block_size 400 \
+    --train_batch_size 32 \
+    --eval_batch_size 64 \
+    --learning_rate 2e-5 \
+    --max_grad_norm 1.0 \
+    --evaluate_during_training \
+    --seed 123456
Code-Code/Defect-detection/code/evaluate.sh
ADDED
@@ -0,0 +1 @@
+python evaluator.py -a ../dataset/valid.jsonl -p ../model/predictions.txt
Code-Code/Defect-detection/code/evaluator.py
ADDED
@@ -0,0 +1,52 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import logging
+import sys
+import json
+import numpy as np
+
+def read_answers(filename):
+    answers = {}
+    with open(filename) as f:
+        for line in f:
+            line = line.strip()
+            js = json.loads(line)
+            answers[js['idx']] = js['target']
+    return answers
+
+def read_predictions(filename):
+    predictions = {}
+    with open(filename) as f:
+        for line in f:
+            line = line.strip()
+            idx, label = line.split()
+            predictions[int(idx)] = int(label)
+    return predictions
+
+def calculate_scores(answers, predictions):
+    Acc = []
+    for key in answers:
+        if key not in predictions:
+            logging.error("Missing prediction for index {}.".format(key))
+            sys.exit()
+        Acc.append(answers[key] == predictions[key])
+
+    scores = {}
+    scores['Acc'] = np.mean(Acc)
+    return scores
+
+def main():
+    import argparse
+    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for Defect Detection dataset.')
+    parser.add_argument('--answers', '-a', help="filename of the labels, in txt format.")
+    parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in txt format.")
+
+
+    args = parser.parse_args()
+    answers = read_answers(args.answers)
+    predictions = read_predictions(args.predictions)
+    scores = calculate_scores(answers, predictions)
+    print(scores)
+
+if __name__ == '__main__':
+    main()
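A worked example of calculate_scores: Acc is simply the mean of per-index label matches. The toy labels below are invented for illustration.

import numpy as np

answers = {0: 1, 1: 0, 2: 1, 3: 0}
predictions = {0: 1, 1: 1, 2: 1, 3: 0}
print(np.mean([answers[k] == predictions[k] for k in answers]))   # 0.75 -> three of four labels match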
Code-Code/Defect-detection/code/model.py
ADDED
@@ -0,0 +1,45 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+import copy
+from torch.nn import CrossEntropyLoss, MSELoss
+
+
+class Model(nn.Module):
+    def __init__(self, encoder, config, tokenizer, args):
+        super(Model, self).__init__()
+        self.encoder = encoder
+        self.config = config
+        self.tokenizer = tokenizer
+        self.args = args
+
+        # Define dropout layer; dropout_probability is taken from args.
+        self.dropout = nn.Dropout(args.dropout_probability)
+
+    def forward(self, input_ids=None, labels=None, return_vec=None):
+        outputs = self.encoder(input_ids, attention_mask=input_ids.ne(1))
+
+        if return_vec:
+            return outputs.pooler_output
+        outputs = outputs[0]
+
+        # Apply dropout
+        outputs = self.dropout(outputs)
+
+        logits = outputs
+        prob = torch.sigmoid(logits)
+        if labels is not None:
+            labels = labels.float()
+            # Binary cross-entropy on the first output column, with a 1e-10 floor for numerical safety.
+            loss = torch.log(prob[:, 0] + 1e-10) * labels + torch.log((1 - prob)[:, 0] + 1e-10) * (1 - labels)
+            loss = -loss.mean()
+            return loss, prob
+        else:
+            return prob
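The hand-written loss in Model.forward above is elementwise binary cross-entropy on the first output column. A small sketch checking it against torch's stock BCE on toy probabilities (values invented):

import torch
import torch.nn.functional as F

prob = torch.tensor([[0.9], [0.2], [0.7]])
labels = torch.tensor([1.0, 0.0, 1.0])
manual = -(torch.log(prob[:, 0] + 1e-10) * labels
           + torch.log((1 - prob)[:, 0] + 1e-10) * (1 - labels)).mean()
print(manual.item(), F.binary_cross_entropy(prob[:, 0], labels).item())  # agree up to the 1e-10 epsilon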
Code-Code/Defect-detection/code/run.py
ADDED
@@ -0,0 +1,598 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""

from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
import json
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter

from tqdm import tqdm, trange
import multiprocessing
from model import Model

cpu_cont = multiprocessing.cpu_count()
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
                          BertConfig, BertForMaskedLM, BertTokenizer, BertForSequenceClassification,
                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
                          RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
                          DistilBertConfig, DistilBertForMaskedLM, DistilBertForSequenceClassification, DistilBertTokenizer)

logger = logging.getLogger(__name__)

MODEL_CLASSES = {
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
}


class InputFeatures(object):
    """A single set of training/test features for an example."""
    def __init__(self,
                 input_tokens,
                 input_ids,
                 idx,
                 label):
        self.input_tokens = input_tokens
        self.input_ids = input_ids
        self.idx = str(idx)
        self.label = label


def convert_examples_to_features(js, tokenizer, args):
    # source
    code = ' '.join(js['func'].split())
    code_tokens = tokenizer.tokenize(code)[:args.block_size - 2]
    source_tokens = [tokenizer.cls_token] + code_tokens + [tokenizer.sep_token]
    source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
    padding_length = args.block_size - len(source_ids)
    source_ids += [tokenizer.pad_token_id] * padding_length
    return InputFeatures(source_tokens, source_ids, js['idx'], js['target'])


class TextDataset(Dataset):
    def __init__(self, tokenizer, args, file_path=None):
        self.examples = []
        with open(file_path) as f:
            for line in f:
                js = json.loads(line.strip())
                self.examples.append(convert_examples_to_features(js, tokenizer, args))
        if 'train' in file_path:
            for idx, example in enumerate(self.examples[:3]):
                logger.info("*** Example ***")
                logger.info("idx: {}".format(idx))
                logger.info("label: {}".format(example.label))
                logger.info("input_tokens: {}".format([x.replace('\u0120', '_') for x in example.input_tokens]))
                logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return torch.tensor(self.examples[i].input_ids), torch.tensor(self.examples[i].label)


def set_seed(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


def train(args, train_dataset, model, tokenizer):
    """ Train the model """
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                  batch_size=args.train_batch_size, num_workers=4, pin_memory=True)
    args.max_steps = args.epoch * len(train_dataloader)
    args.save_steps = len(train_dataloader)
    args.warmup_steps = len(train_dataloader)
    args.logging_steps = len(train_dataloader)
    args.num_train_epochs = args.epoch
    model.to(args.device)
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps * 0.1,
                                                num_training_steps=args.max_steps)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
    if os.path.exists(scheduler_last):
        scheduler.load_state_dict(torch.load(scheduler_last))
    if os.path.exists(optimizer_last):
        optimizer.load_state_dict(torch.load(optimizer_last))
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", args.max_steps)

    global_step = args.start_step
    tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
    best_mrr = 0.0
    best_acc = 0.0
    # model.resize_token_embeddings(len(tokenizer))
    model.zero_grad()

    # Initialize early stopping parameters at the start of training
    early_stopping_counter = 0
    best_loss = None

    for idx in range(args.start_epoch, int(args.num_train_epochs)):
        bar = tqdm(train_dataloader, total=len(train_dataloader))
        tr_num = 0
        train_loss = 0
        for step, batch in enumerate(bar):
            inputs = batch[0].to(args.device)
            labels = batch[1].to(args.device)
            model.train()
            loss, logits = model(inputs, labels)

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()
            tr_num += 1
            train_loss += loss.item()
            if avg_loss == 0:
                avg_loss = tr_loss
            avg_loss = round(train_loss / tr_num, 5)
            bar.set_description("epoch {} loss {}".format(idx, avg_loss))

            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                scheduler.step()
                global_step += 1
                output_flag = True
                avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logging_loss = tr_loss
                    tr_nb = global_step

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:

                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer, eval_when_training=True)
                        for key, value in results.items():
                            logger.info("  %s = %s", key, round(value, 4))

                        # Save model checkpoint when the accuracy improves
                        if results['eval_acc'] > best_acc:
                            best_acc = results['eval_acc']
                            logger.info("  " + "*" * 20)
                            logger.info("  Best acc:%s", round(best_acc, 4))
                            logger.info("  " + "*" * 20)

                            checkpoint_prefix = 'checkpoint-best-acc'
                            output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
                            if not os.path.exists(output_dir):
                                os.makedirs(output_dir)
                            model_to_save = model.module if hasattr(model, 'module') else model
                            output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
                            torch.save(model_to_save.state_dict(), output_dir)
                            logger.info("Saving model checkpoint to %s", output_dir)

        # Save a checkpoint at the end of each epoch
        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx + 1))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = model.module if hasattr(model, 'module') else model
        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
        logger.info("Saving model checkpoint to %s", ckpt_output_path)
        torch.save(model_to_save.state_dict(), ckpt_output_path)

        # Calculate average loss for the epoch
        avg_loss = train_loss / tr_num

        # Check for early stopping condition
        if args.early_stopping_patience is not None:
            if best_loss is None or avg_loss < best_loss - args.min_loss_delta:
                best_loss = avg_loss
                early_stopping_counter = 0
            else:
                early_stopping_counter += 1
                if early_stopping_counter >= args.early_stopping_patience:
                    logger.info("Early stopping")
                    break  # Exit the loop early


def evaluate(args, model, tokenizer, eval_when_training=False):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir

    eval_dataset = TextDataset(tokenizer, args, args.eval_data_file)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,
                                 num_workers=4, pin_memory=True)

    # multi-gpu evaluate
    if args.n_gpu > 1 and eval_when_training is False:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    logits = []
    labels = []
    for batch in eval_dataloader:
        inputs = batch[0].to(args.device)
        label = batch[1].to(args.device)
        with torch.no_grad():
            lm_loss, logit = model(inputs, label)
            eval_loss += lm_loss.mean().item()
            logits.append(logit.cpu().numpy())
            labels.append(label.cpu().numpy())
        nb_eval_steps += 1
    logits = np.concatenate(logits, 0)
    labels = np.concatenate(labels, 0)
    preds = logits[:, 0] > 0.5
    eval_acc = np.mean(labels == preds)
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.tensor(eval_loss)

    result = {
        "eval_loss": float(perplexity),
        "eval_acc": round(eval_acc, 4),
    }
    return result


def test(args, model, tokenizer):
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_dataset = TextDataset(tokenizer, args, args.test_data_file)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running Test *****")
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    logits = []
    labels = []
    for batch in tqdm(eval_dataloader, total=len(eval_dataloader)):
        inputs = batch[0].to(args.device)
        label = batch[1].to(args.device)
        with torch.no_grad():
            logit = model(inputs)
            logits.append(logit.cpu().numpy())
            labels.append(label.cpu().numpy())

    logits = np.concatenate(logits, 0)
    labels = np.concatenate(labels, 0)
    preds = logits[:, 0] > 0.5
    with open(os.path.join(args.output_dir, "predictions.txt"), 'w') as f:
        for example, pred in zip(eval_dataset.examples, preds):
            if pred:
                f.write(example.idx + '\t1\n')
            else:
                f.write(example.idx + '\t0\n')


def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--train_data_file", default=None, type=str, required=True,
                        help="The input training data file (a text file).")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--eval_data_file", default=None, type=str,
                        help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
    parser.add_argument("--test_data_file", default=None, type=str,
                        help="An optional input test data file to evaluate the perplexity on (a text file).")

    parser.add_argument("--model_type", default="bert", type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument("--model_name_or_path", default=None, type=str,
                        help="The model checkpoint for weights initialization.")

    parser.add_argument("--mlm", action='store_true',
                        help="Train with masked-language modeling loss instead of language modeling.")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")

    parser.add_argument("--config_name", default="", type=str,
                        help="Optional pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
    parser.add_argument("--block_size", default=-1, type=int,
                        help="Optional input sequence length after tokenization."
                             "The training dataset will be truncated in blocks of this size for training."
                             "Defaults to the model max input length for single sentence inputs (taking into account special tokens).")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--train_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--eval_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=1.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X update steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X update steps.")
    parser.add_argument('--save_total_limit', type=int, default=None,
                        help='Limit the total amount of checkpoints; delete the older checkpoints in the output_dir. Does not delete by default.')
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--epoch', type=int, default=42,
                        help="number of training epochs")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")

    # Add early stopping parameters and dropout probability parameters
    parser.add_argument("--early_stopping_patience", type=int, default=None,
                        help="Number of epochs with no improvement after which training will be stopped.")
    parser.add_argument("--min_loss_delta", type=float, default=0.001,
                        help="Minimum change in the loss required to qualify as an improvement.")
    parser.add_argument('--dropout_probability', type=float, default=0, help='dropout probability')

    args = parser.parse_args()

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    args.per_gpu_train_batch_size = args.train_batch_size // args.n_gpu
    args.per_gpu_eval_batch_size = args.eval_batch_size // args.n_gpu
    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args.seed)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training downloads model & vocab

    args.start_epoch = 0
    args.start_step = 0
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
        args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
        args.config_name = os.path.join(checkpoint_last, 'config.json')
        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
        with open(idx_file, encoding='utf-8') as idxf:
            args.start_epoch = int(idxf.readlines()[0].strip()) + 1

        step_file = os.path.join(checkpoint_last, 'step_file.txt')
        if os.path.exists(step_file):
            with open(step_file, encoding='utf-8') as stepf:
                args.start_step = int(stepf.readlines()[0].strip())

        logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    config.num_labels = 1
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    if args.block_size <= 0:
        args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
    args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
    if args.model_name_or_path:
        model = model_class.from_pretrained(args.model_name_or_path,
                                            from_tf=bool('.ckpt' in args.model_name_or_path),
                                            config=config,
                                            cache_dir=args.cache_dir if args.cache_dir else None)
    else:
        model = model_class(config)

    model = Model(model, config, tokenizer, args)
    if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training downloads model & vocab

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        if args.local_rank not in [-1, 0]:
            torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training processes the dataset; the others will use the cache

        train_dataset = TextDataset(tokenizer, args, args.train_data_file)
        if args.local_rank == 0:
            torch.distributed.barrier()

        train(args, train_dataset, model, tokenizer)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoint_prefix = 'epoch_5/subject_model.pth'
        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
        model.load_state_dict(torch.load(output_dir))
        model.to(args.device)
        result = evaluate(args, model, tokenizer)
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(round(result[key], 4)))

    if args.do_test and args.local_rank in [-1, 0]:
        checkpoint_prefix = 'epoch_5/subject_model.pth'
        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
        model.load_state_dict(torch.load(output_dir))
        model.to(args.device)
        test(args, model, tokenizer)

    return results


if __name__ == "__main__":
    main()
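
Note: TextDataset and convert_examples_to_features above expect one JSON object per line with 'func', 'idx', and 'target' fields. A minimal sketch of a valid dataset line (the record below is hypothetical; reading target as 1 = defective is an assumption from the task name):

import json

record = {
    "idx": 0,
    "target": 1,  # assumed: 1 = defective, 0 = clean
    "func": "int main() { int a[10]; a[10] = 1; return 0; }"
}
print(json.dumps(record))  # one such line per example in train.jsonl / valid.jsonl / test.jsonl
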
Code-Code/Defect-detection/code/train.sh
ADDED
@@ -0,0 +1,17 @@
CUDA_VISIBLE_DEVICES=0,1 python run.py \
--output_dir=../model \
--model_type=roberta \
--tokenizer_name=microsoft/codebert-base \
--model_name_or_path=microsoft/codebert-base \
--do_train \
--train_data_file=../dataset/train.jsonl \
--eval_data_file=../dataset/valid.jsonl \
--test_data_file=../dataset/test.jsonl \
--epoch 5 \
--block_size 400 \
--train_batch_size 32 \
--eval_batch_size 64 \
--learning_rate 2e-5 \
--max_grad_norm 1.0 \
--evaluate_during_training \
--seed 123456
Code-Code/Defect-detection/dataset.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fafb4004eda1a4e1d4392b002e3de6f542d2a2b6701ec9758f25791bc9da49d6
size 14533467
Code-Code/Defect-detection/model/epoch_1/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b22544bd55c811a58a09ef3354f9ca0e5492967428d6bd04213e5e23f93054c5
size 498673198
Code-Code/Defect-detection/model/epoch_2/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b27ecb25228c2630cb2449cccb5c6bcfc3b213c5cf54d4d7f92205510bbc1356
size 498673198
Code-Code/Defect-detection/model/epoch_3/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e4187f180a07f788680cae80879e0e9db389ddda30b4842a115ea0fbc97ad5bb
size 498673198
Code-Code/Defect-detection/model/epoch_4/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d5757d63d13eab8e1dbc45754ceb2a2b325bb3336fba6fc6005c14cea7d5d5ad
size 498673198
Code-Code/Defect-detection/model/epoch_5/subject_model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d2791e6e7dea22221eaaed6a9bf29e4bd5ef9d62ef40f4547d8bef9ed994508f
size 498673198
Code-Code/code-refinement/code/bleu.py
ADDED
@@ -0,0 +1,134 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Python implementation of BLEU and smooth-BLEU.

This module provides a Python implementation of BLEU and smooth-BLEU.
Smooth BLEU is computed following the method outlined in the paper:
Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
evaluation metrics for machine translation. COLING 2004.
"""

import collections
import math


def _get_ngrams(segment, max_order):
  """Extracts all n-grams up to a given maximum order from an input segment.

  Args:
    segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.

  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
  """
  ngram_counts = collections.Counter()
  for order in range(1, max_order + 1):
    for i in range(0, len(segment) - order + 1):
      ngram = tuple(segment[i:i + order])
      ngram_counts[ngram] += 1
  return ngram_counts


def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 smooth=False):
  """Computes BLEU score of translated segments against one or more references.

  Args:
    reference_corpus: list of lists of references for each translation. Each
        reference should be tokenized into a list of tokens.
    translation_corpus: list of translations to score. Each translation
        should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.

  Returns:
    3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
    precisions and brevity penalty.
  """
  matches_by_order = [0] * max_order
  possible_matches_by_order = [0] * max_order
  reference_length = 0
  translation_length = 0
  for (references, translation) in zip(reference_corpus,
                                       translation_corpus):
    reference_length += min(len(r) for r in references)
    translation_length += len(translation)

    merged_ref_ngram_counts = collections.Counter()
    for reference in references:
      merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
    translation_ngram_counts = _get_ngrams(translation, max_order)
    overlap = translation_ngram_counts & merged_ref_ngram_counts
    for ngram in overlap:
      matches_by_order[len(ngram) - 1] += overlap[ngram]
    for order in range(1, max_order + 1):
      possible_matches = len(translation) - order + 1
      if possible_matches > 0:
        possible_matches_by_order[order - 1] += possible_matches

  precisions = [0] * max_order
  for i in range(0, max_order):
    if smooth:
      precisions[i] = ((matches_by_order[i] + 1.) /
                       (possible_matches_by_order[i] + 1.))
    else:
      if possible_matches_by_order[i] > 0:
        precisions[i] = (float(matches_by_order[i]) /
                         possible_matches_by_order[i])
      else:
        precisions[i] = 0.0

  if min(precisions) > 0:
    p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
    geo_mean = math.exp(p_log_sum)
  else:
    geo_mean = 0

  ratio = float(translation_length) / reference_length

  if ratio > 1.0:
    bp = 1.
  else:
    bp = math.exp(1 - 1. / ratio)

  bleu = geo_mean * bp

  return (bleu, precisions, bp, ratio, translation_length, reference_length)


def _bleu(ref_file, trans_file, subword_option=None):
  max_order = 4
  smooth = True
  ref_files = [ref_file]
  reference_text = []
  for reference_filename in ref_files:
    with open(reference_filename) as fh:
      reference_text.append(fh.readlines())
  per_segment_references = []
  for references in zip(*reference_text):
    reference_list = []
    for reference in references:
      reference_list.append(reference.strip().split())
    per_segment_references.append(reference_list)
  translations = []
  with open(trans_file) as fh:
    for line in fh:
      translations.append(line.strip().split())
  bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
  return round(100 * bleu_score, 2)
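
Note: _bleu above reads one tokenized sentence per line from a reference file and a hypothesis file and returns smoothed corpus BLEU scaled to [0, 100]. A minimal sketch of the underlying compute_bleu call (token lists are illustrative):

from bleu import compute_bleu

references = [[['public', 'void', 'close', '(', ')', '{', '}']]]  # one list of references per example
translations = [['public', 'void', 'close', '(', ')', '{', '}']]
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(references, translations, max_order=4, smooth=True)
print(round(100 * bleu, 2))  # 100.0 for an exact match
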
Code-Code/code-refinement/code/eval.sh
ADDED
@@ -0,0 +1,17 @@
pretrained_model=microsoft/codebert-base
output_dir=../model
data_size=small

CUDA_VISIBLE_DEVICES=1 python run.py \
--do_test \
--model_type roberta \
--model_name_or_path $pretrained_model \
--config_name roberta-base \
--tokenizer_name roberta-base \
--load_model_path $output_dir/epoch_34/subject_model.pth \
--dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \
--output_dir $output_dir \
--max_source_length 256 \
--max_target_length 256 \
--beam_size 5 \
--eval_batch_size 16
Code-Code/code-refinement/code/evaluate.sh
ADDED
@@ -0,0 +1,3 @@
python evaluator.py \
-ref ../data/small/valid.buggy-fixed.fixed \
-pre ../model/test_0.output