Muennighoff committed
Commit 5c627fa
Parent: 1580303
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. inference.py +12 -2
  2. inference.sh +1 -1
  3. inference_javascript.sh +25 -0
  4. inference_python.py +144 -0
  5. inference_python.sh +25 -0
  6. javascript/tasky_commits_javascript_0_32909.jsonl +0 -0
  7. javascript/tasky_commits_javascript_1053088_1085997.jsonl +0 -0
  8. javascript/tasky_commits_javascript_1118906_1151815.jsonl +0 -0
  9. javascript/tasky_commits_javascript_1184724_1217633.jsonl +0 -0
  10. javascript/tasky_commits_javascript_1283451_1316360.jsonl +0 -0
  11. javascript/tasky_commits_javascript_1316360_1349269.jsonl +0 -0
  12. javascript/tasky_commits_javascript_131636_164545.jsonl +0 -0
  13. javascript/tasky_commits_javascript_1415087_1447996.jsonl +0 -0
  14. javascript/tasky_commits_javascript_1447996_1480905.jsonl +0 -0
  15. javascript/tasky_commits_javascript_1513814_1546723.jsonl +0 -0
  16. javascript/tasky_commits_javascript_1579632_1612541.jsonl +0 -0
  17. javascript/tasky_commits_javascript_164545_197454.jsonl +0 -0
  18. javascript/tasky_commits_javascript_1678359_1711268.jsonl +0 -0
  19. javascript/tasky_commits_javascript_1711268_1744177.jsonl +0 -0
  20. javascript/tasky_commits_javascript_1809995_1842904.jsonl +0 -0
  21. javascript/tasky_commits_javascript_1842904_1875813.jsonl +0 -0
  22. javascript/tasky_commits_javascript_1875813_1908722.jsonl +0 -0
  23. javascript/tasky_commits_javascript_1941631_1974540.jsonl +0 -0
  24. javascript/tasky_commits_javascript_1974540_2007449.jsonl +0 -0
  25. javascript/tasky_commits_javascript_2040358_2073267.jsonl +0 -0
  26. javascript/tasky_commits_javascript_2073267_2106176.jsonl +0 -0
  27. javascript/tasky_commits_javascript_2106176_2139085.jsonl +0 -0
  28. javascript/tasky_commits_javascript_2171994_2204903.jsonl +0 -0
  29. javascript/tasky_commits_javascript_2237812_2270721.jsonl +0 -0
  30. javascript/tasky_commits_javascript_2270721_2303630.jsonl +0 -0
  31. javascript/tasky_commits_javascript_230363_263272.jsonl +0 -0
  32. javascript/tasky_commits_javascript_2402357_2435266.jsonl +0 -0
  33. javascript/tasky_commits_javascript_2501084_2533993.jsonl +0 -0
  34. javascript/tasky_commits_javascript_2599811_2632720.jsonl +0 -0
  35. javascript/tasky_commits_javascript_2632720_2665629.jsonl +0 -0
  36. javascript/tasky_commits_javascript_263272_296181.jsonl +0 -0
  37. javascript/tasky_commits_javascript_2665629_2698538.jsonl +0 -0
  38. javascript/tasky_commits_javascript_2698538_2731447.jsonl +0 -0
  39. javascript/tasky_commits_javascript_2764356_2797265.jsonl +0 -0
  40. javascript/tasky_commits_javascript_2830174_2863083.jsonl +0 -0
  41. javascript/tasky_commits_javascript_2863083_2895992.jsonl +0 -0
  42. javascript/tasky_commits_javascript_2895992_2928901.jsonl +0 -0
  43. javascript/tasky_commits_javascript_2928901_2961810.jsonl +0 -0
  44. javascript/tasky_commits_javascript_2961810_2994719.jsonl +0 -0
  45. javascript/tasky_commits_javascript_296181_329090.jsonl +0 -0
  46. javascript/tasky_commits_javascript_2994719_3027628.jsonl +0 -0
  47. javascript/tasky_commits_javascript_3027628_3060537.jsonl +0 -0
  48. javascript/tasky_commits_javascript_3126355_3159264.jsonl +0 -0
  49. javascript/tasky_commits_javascript_3192173_3225082.jsonl +0 -0
  50. javascript/tasky_commits_javascript_3225082_3257991.jsonl +0 -0
inference.py CHANGED
@@ -28,7 +28,7 @@ def parse_args():
         required=True,
         help="Ending file number to download. Valid values: 0 - 1023",
     )
-    parser.add_argument("--batch_size", type=int, default=64, help="Batch size")
+    parser.add_argument("--batch_size", type=int, default=32, help="Batch size")
     parser.add_argument(
         "--model_name",
         type=str,
@@ -98,6 +98,11 @@ def batch_inference(data, batch_size=32):
 if __name__ == "__main__":
     args = parse_args()
 
+    tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
+    if os.path.exists(f"javascript/{tasky_commits_path}"):
+        print("Exists:", tasky_commits_path)
+        exit()
+
     tokenizer = AutoTokenizer.from_pretrained(args.model_name)
     model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
@@ -106,8 +111,13 @@ if __name__ == "__main__":
 
     path = "javascript_messages.jsonl"
     ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
-    ds = ds[range(args.start, args.end)]
+    ds = ds[range(args.start, min(args.end, len(ds)))]
     df = pd.DataFrame(ds, index=None)
+    #tasky_commits_path = f"tasky_commits_javascript_{args.start}_{args.end}.jsonl"
+    #if os.path.exists(f"javascript/{tasky_commits_path}"):
+    #    print("Exists:", tasky_commits_path)
+    #    exit()
+
     texts = df["message"].to_list()
     commits = df["commit"].to_list()
     preds = batch_inference(texts, batch_size=args.batch_size)
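The two functional changes above are a skip guard for shards whose output file already exists and a clamp of the end index to the dataset length, so the last array task does not index past the end. A minimal sketch of the same pattern in isolation (the helper name and the example numbers below are illustrative, not part of this commit):

import os

def select_shard(ds_len, start, end, lang="javascript"):
    # Hypothetical helper mirroring the guard added in inference.py:
    # skip if the output shard was already written, otherwise return a
    # row range clamped to the dataset length.
    shard = f"tasky_commits_{lang}_{start}_{end}.jsonl"
    if os.path.exists(f"{lang}/{shard}"):
        return None  # already processed, nothing to do
    return range(start, min(end, ds_len))

# The final array task's nominal end overshoots the 4766203 messages and gets clamped:
print(select_shard(ds_len=4766203, start=4738896, end=4771805))  # range(4738896, 4766203)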
inference.sh CHANGED
@@ -2,7 +2,7 @@
 #SBATCH --job-name=inference
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
-#SBATCH --cpus-per-task=8            # number of cores per tasks
+#SBATCH --cpus-per-task=24           # number of cores per tasks
 #SBATCH --hint=nomultithread         # we get physical cores not logical
 #SBATCH --gres=gpu:1                 # number of gpus
 #SBATCH --time 12:00:00              # maximum execution time (HH:MM:SS)
inference_javascript.sh ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+#SBATCH --job-name=inference
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
+#SBATCH --cpus-per-task=24           # number of cores per tasks
+#SBATCH --hint=nomultithread         # we get physical cores not logical
+#SBATCH --gres=gpu:1                 # number of gpus
+#SBATCH --time 12:00:00              # maximum execution time (HH:MM:SS)
+#SBATCH --output=%x-%j.out           # output file name
+#SBATCH --account=ajs@v100
+
+set -x -e
+
+source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+echo "START TIME: $(date)"
+
+NUM_SAMPLES=4766203
+NUM_SAMPLES_PER_JOB=32909
+#NUM_SAMPLES_PER_JOB=128
+
+TASK_START=$((($SLURM_ARRAY_TASK_ID) * $NUM_SAMPLES_PER_JOB))
+TASK_END=$(($TASK_START + $NUM_SAMPLES_PER_JOB))
+
+echo "Starting task $SLURM_ARRAY_TASK_ID with start $TASK_START and end $TASK_END"
+python inference.py --start $TASK_START --end $TASK_END
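The submission command is not part of this diff, but the use of SLURM_ARRAY_TASK_ID shows the script is meant to run as a SLURM job array: each array task scores a window of NUM_SAMPLES_PER_JOB=32909 messages starting at SLURM_ARRAY_TASK_ID * 32909. Assuming that, covering NUM_SAMPLES=4766203 JavaScript messages takes ceil(4766203 / 32909) = 145 tasks, i.e. a launch along the lines of `sbatch --array=0-144 inference_javascript.sh`. A quick check of the arithmetic (pure illustration, not part of the commit):

import math

num_samples = 4_766_203   # NUM_SAMPLES in inference_javascript.sh
per_job = 32_909          # NUM_SAMPLES_PER_JOB
tasks = math.ceil(num_samples / per_job)
print(tasks)                          # 145 -> array indices 0-144
print(144 * per_job, 145 * per_job)   # last task covers [4738896, 4771805), clamped in inference.py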
inference_python.py ADDED
@@ -0,0 +1,144 @@
+# !pip install -q transformers datasets sentencepiece
+import argparse
+import gc
+import json
+import os
+
+import datasets
+import pandas as pd
+import torch
+from tqdm import tqdm
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+TOTAL_NUM_FILES_C4_TRAIN = 1024
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--start",
+        type=int,
+        required=True,
+        help="Starting file number to download. Valid values: 0 - 1023",
+    )
+    parser.add_argument(
+        "--end",
+        type=int,
+        required=True,
+        help="Ending file number to download. Valid values: 0 - 1023",
+    )
+    parser.add_argument("--batch_size", type=int, default=32, help="Batch size")
+    parser.add_argument(
+        "--model_name",
+        type=str,
+        default="taskydata/deberta-v3-base_10xp3nirstbbflanseuni_10xc4",
+        help="Model name",
+    )
+    parser.add_argument(
+        "--local_cache_location",
+        type=str,
+        default="c4_download",
+        help="local cache location from where the dataset will be loaded",
+    )
+    parser.add_argument(
+        "--use_local_cache_location",
+        type=bool,
+        default=True,
+        help="Set True if you want to load the dataset from local cache.",
+    )
+    parser.add_argument(
+        "--clear_dataset_cache",
+        type=bool,
+        default=False,
+        help="Set True if you want to delete the dataset files from the cache after inference.",
+    )
+    parser.add_argument(
+        "--release_memory",
+        type=bool,
+        default=True,
+        help="Set True if you want to release the memory of used variables.",
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def chunks(l, n):
+    for i in range(0, len(l), n):
+        yield l[i : i + n]
+
+
+def batch_tokenize(data, batch_size):
+    batches = list(chunks(data, batch_size))
+    tokenized_batches = []
+    for batch in batches:
+        # max_length will automatically be set to the max length of the model (512 for deberta)
+        tensor = tokenizer(
+            batch,
+            return_tensors="pt",
+            padding="max_length",
+            truncation=True,
+            max_length=512,
+        )
+        tokenized_batches.append(tensor)
+    return tokenized_batches, batches
+
+
+def batch_inference(data, batch_size=32):
+    preds = []
+    tokenized_batches, batches = batch_tokenize(data, batch_size)
+    for i in tqdm(range(len(batches))):
+        with torch.no_grad():
+            logits = model(**tokenized_batches[i].to(device)).logits.cpu()
+        preds.extend(logits)
+    return preds
+
+
+if __name__ == "__main__":
+    args = parse_args()
+
+    tasky_commits_path = f"tasky_commits_python_{args.start}_{args.end}.jsonl"
+    if os.path.exists(f"python/{tasky_commits_path}"):
+        print("Exists:", tasky_commits_path)
+        exit()
+
+    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
+    model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
+    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+    model.to(device)
+    model.eval()
+
+    path = "python_messages.jsonl"
+    ds = datasets.load_dataset("json", data_files=[path], ignore_verifications=True)["train"]
+    ds = ds[range(args.start, min(args.end, len(ds)))]
+    df = pd.DataFrame(ds, index=None)
+
+    texts = df["message"].to_list()
+    commits = df["commit"].to_list()
+    preds = batch_inference(texts, batch_size=args.batch_size)
+
+    assert len(preds) == len(texts)
+
+    # Write one jsonl file with, for each commit message in this shard,
+    # the commit hash, the raw message, and the predicted probability
+    # that the message is "tasky" (instruction-like).
+    tasky_commits_path = f"tasky_commits_python_{args.start}_{args.end}.jsonl"
+
+    with open(tasky_commits_path, "w") as f:
+        for i in range(len(preds)):
+            predicted_class_id = preds[i].argmax().item()
+            pred = model.config.id2label[predicted_class_id]
+            tasky_proba = torch.softmax(preds[i], dim=-1)[-1].item()
+
+            f.write(
+                json.dumps(
+                    {
+                        "commit": commits[i],
+                        "message": texts[i],
+                        "proba": tasky_proba,
+                    }
+                )
+                + "\n"
+            )
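Each output shard is a JSON-lines file with one object per commit message: `commit`, `message`, and `proba`, where `proba` is the softmax probability of the last label index (the "tasky" class). A minimal sketch of reading one shard back and keeping high-probability messages; the shard path and the 0.5 cutoff are illustrative assumptions, not values from this commit:

import json

shard = "python/tasky_commits_python_0_32909.jsonl"  # example shard name, assumed to exist
threshold = 0.5                                      # hypothetical cutoff, not used by the script

tasky = []
with open(shard) as f:
    for line in f:
        row = json.loads(line)  # {"commit": ..., "message": ..., "proba": ...}
        if row["proba"] >= threshold:
            tasky.append((row["commit"], row["message"]))

print(len(tasky), "commit messages above", threshold)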
inference_python.sh ADDED
@@ -0,0 +1,25 @@
+#!/bin/bash
+#SBATCH --job-name=inference
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
+#SBATCH --cpus-per-task=24           # number of cores per tasks
+#SBATCH --hint=nomultithread         # we get physical cores not logical
+#SBATCH --gres=gpu:1                 # number of gpus
+#SBATCH --time 12:00:00              # maximum execution time (HH:MM:SS)
+#SBATCH --output=%x-%j.out           # output file name
+#SBATCH --account=ajs@v100
+
+set -x -e
+
+source $six_ALL_CCFRWORK/start-tr13f-6B3-ml-t0
+echo "START TIME: $(date)"
+
+NUM_SAMPLES=5821648
+NUM_SAMPLES_PER_JOB=32909
+#NUM_SAMPLES_PER_JOB=128
+
+TASK_START=$((($SLURM_ARRAY_TASK_ID) * $NUM_SAMPLES_PER_JOB))
+TASK_END=$(($TASK_START + $NUM_SAMPLES_PER_JOB))
+
+echo "Starting task $SLURM_ARRAY_TASK_ID with start $TASK_START and end $TASK_END"
+python inference_python.py --start $TASK_START --end $TASK_END
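As with the JavaScript variant, the launch command is not included in the diff; assuming the same job-array setup, covering NUM_SAMPLES=5821648 Python messages at 32909 per task needs ceil(5821648 / 32909) = 177 tasks, so a submission along the lines of `sbatch --array=0-176 inference_python.sh` would be expected.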
javascript/tasky_commits_javascript_0_32909.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1053088_1085997.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1118906_1151815.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1184724_1217633.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1283451_1316360.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1316360_1349269.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_131636_164545.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1415087_1447996.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1447996_1480905.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1513814_1546723.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1579632_1612541.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_164545_197454.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1678359_1711268.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1711268_1744177.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1809995_1842904.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1842904_1875813.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1875813_1908722.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1941631_1974540.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_1974540_2007449.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2040358_2073267.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2073267_2106176.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2106176_2139085.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2171994_2204903.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2237812_2270721.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2270721_2303630.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_230363_263272.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2402357_2435266.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2501084_2533993.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2599811_2632720.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2632720_2665629.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_263272_296181.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2665629_2698538.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2698538_2731447.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2764356_2797265.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2830174_2863083.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2863083_2895992.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2895992_2928901.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2928901_2961810.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2961810_2994719.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_296181_329090.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_2994719_3027628.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_3027628_3060537.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_3126355_3159264.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_3192173_3225082.jsonl ADDED (diff too large to render)
javascript/tasky_commits_javascript_3225082_3257991.jsonl ADDED (diff too large to render)