akhauriyash committed
Commit f4e6654 · verified · 1 Parent(s): 0baa57c

Update README.md

Files changed (1)
  1. README.md +62 -4
README.md CHANGED
@@ -40,12 +40,70 @@ This dataset has 7502559 rows:
  ## How to load with 🤗 Datasets
  ```python
  from datasets import load_dataset
- # After you upload this folder to a dataset repo, e.g. your-username/Code-Regression
- ds = load_dataset("your-username/Code-Regression")
- # Or from a local clone:
- # ds = load_dataset("json", data_files="Code-Regression/data.jsonl", split="train")
+ ds = load_dataset("akhauriyash/Code-Regression")
+ ```
+ 
+ ## Testing Code-Regression with a basic Gemma RLM
+ 
+ Use the code below as a reference for evaluating a basic RegressLM model (better, more models to come! :) )
+ 
+ ```python
+ import torch
+ import numpy as np
+ from datasets import load_dataset
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ from scipy.stats import spearmanr
+ from tqdm import tqdm
+ 
+ REPO_ID = "akhauriyash/RegressLM-gemma-s-RLM-table3"
+ DATASET = "akhauriyash/Code-Regression"
+ dataset = load_dataset(DATASET, split="train")
+ tok = AutoTokenizer.from_pretrained(REPO_ID, trust_remote_code=True)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = AutoModelForSeq2SeqLM.from_pretrained(REPO_ID, trust_remote_code=True).to(device).eval()
+ MAX_ITEMS, BATCH_SIZE, spaces, results = 512, 16, ["KBSS", "CDSS", "APPS"], {}
+ language = None  # Specify a language for CDSS, e.g. "python"
+ # Number of tokens the decoder emits per prediction (num_tokens_per_obj x max_num_objs).
+ n_out_tokens = getattr(model.config, "num_tokens_per_obj", 8) * getattr(model.config, "max_num_objs", 1)
+ 
+ for SPACE in spaces:
+     inputs, targets = [], []
+     for row in tqdm(dataset, desc=f"Processing {SPACE} till {MAX_ITEMS} items"):
+         if row.get("space") == SPACE and "input" in row and "target" in row:
+             try:
+                 # metadata is a stringified dict; CDSS rows carry a "language" entry
+                 lang = eval(row['metadata'])['language'] if SPACE == "CDSS" else None
+                 if SPACE != "CDSS" or language is None or lang == language:
+                     targets.append(float(row["target"]))
+                     if SPACE == "CDSS":
+                         inputs.append(f"# {SPACE}\n# Language: {lang}\n{row['input']}")
+                     else:
+                         inputs.append(f"{SPACE}\n{row['input']}")
+             except Exception:
+                 continue
+         if len(inputs) >= MAX_ITEMS:
+             break
+     preds = []
+     for i in tqdm(range(0, len(inputs), BATCH_SIZE)):
+         enc = tok(inputs[i:i+BATCH_SIZE], return_tensors="pt", truncation=True, padding=True, max_length=2048).to(device)
+         batch_preds = []
+         # Draw 8 sampled decodings per input and use their median as the prediction.
+         for _ in range(8):
+             out = model.generate(**enc, max_new_tokens=n_out_tokens, min_new_tokens=n_out_tokens, do_sample=True, top_p=0.95, temperature=1.0)
+             decoded = [tok.token_ids_to_floats(seq.tolist()) for seq in out]
+             decoded = [d[0] if isinstance(d, list) and d else float("nan") for d in decoded]
+             batch_preds.append(decoded)
+         preds.extend(torch.tensor(batch_preds).median(dim=0).values.tolist())
+     spear, _ = spearmanr(np.array(targets), np.array(preds))
+     results[SPACE] = spear
+     print(f"Spearman ρ for {SPACE}: {spear:.3f}")
+ 
+ print("Spearman ρ | KBSS | CDSS | APPS")
+ print(f"{REPO_ID} | " + " | ".join(f"{results[s]:.3f}" for s in spaces))
+ ```
+ 
+ We got the following results when testing on a random subset of the Code-Regression dataset.
+ 
+ ```
+ Model ID | KBSS | CDSS | APPS
+ akhauriyash/RegressLM-gemma-s-RLM-table3 | 0.527 | 0.787 | 0.926
  ```
 
  # Credits
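
As a quick sanity check before running the evaluation script above, a minimal sketch like the following can stream a few rows and confirm the fields (`space`, `input`, `target`, `metadata`) and the `space` values (`KBSS`, `CDSS`, `APPS`) that the script relies on; the exact schema should be verified against the dataset card.

```python
from collections import Counter

from datasets import load_dataset

# Stream the dataset so a quick look does not require downloading all ~7.5M rows.
ds = load_dataset("akhauriyash/Code-Regression", split="train", streaming=True)

counts = Counter()
for i, row in enumerate(ds):
    if i < 3:
        # Truncated preview of the first few rows; field names are assumed
        # from the evaluation script above and may differ in practice.
        print({k: str(v)[:80] for k, v in row.items()})
    counts[row.get("space")] += 1
    if i >= 1999:
        break

print("space values in the first 2000 rows:", dict(counts))
```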