Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
n<1K
ArXiv:
Tags:
License:
asahi417 committed on
Commit
b016a6f
1 Parent(s): 64505a7
experiments/get_qualitative_table.py CHANGED
@@ -5,9 +5,9 @@ from datasets import load_dataset
5
  pd.set_option('display.max_rows', None)
6
  pd.set_option('display.max_columns', None)
7
 
8
- data_valid = load_dataset("cardiffnlp/relentless", split="validation")
9
- lc_valid = pd.read_csv("results_validation/lm_lc/lm.csv", index_col=0)
10
- qa_valid = pd.read_csv("results_validation/lm_qa/lm.csv", index_col=0)
11
 
12
  data_test = load_dataset("cardiffnlp/relentless", split="test")
13
  lc = pd.read_csv("results/lm_lc/lm.csv", index_col=0)
@@ -44,7 +44,7 @@ for prompt in ['qa', 'lc']:
44
  top = scores[int(len(scores) * p / 100)]
45
  bottom = scores[-int(len(scores) * p / 100)]
46
 
47
- with open(f"results_validation/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
48
  negative_ppl_valid = [json.loads(x)['perplexity'] * -1 for x in f.read().split("\n") if len(x) > 0]
49
  _d = [x for x in data_valid if x['relation_type'] == d['relation_type']][0]
50
  scores_val = _d['scores_mean']
@@ -52,8 +52,15 @@ for prompt in ['qa', 'lc']:
52
  false_bottom = ", ".join([":".join(_d['pairs'][n]) for n, (s, p) in enumerate(zip(scores_val, negative_ppl_valid)) if s >= top and p <= bottom_pred])
53
 
54
  table.append({
55
- "prompt": prompt, "model": target[i], "relation": pretty_name[d['relation_type']], "top": false_top, "bottom": false_bottom
56
  })
57
 
58
  table = pd.DataFrame(table)
59
- table.to_csv("results_validation/qualitative.csv", index=False)
 
 
 
 
 
 
 
 
5
  pd.set_option('display.max_rows', None)
6
  pd.set_option('display.max_columns', None)
7
 
8
+ data_valid = load_dataset("cardiffnlp/relentless", split="test")
9
+ lc_valid = pd.read_csv("results/lm_lc/lm.csv", index_col=0)
10
+ qa_valid = pd.read_csv("results/lm_qa/lm.csv", index_col=0)
11
 
12
  data_test = load_dataset("cardiffnlp/relentless", split="test")
13
  lc = pd.read_csv("results/lm_lc/lm.csv", index_col=0)
 
44
  top = scores[int(len(scores) * p / 100)]
45
  bottom = scores[-int(len(scores) * p / 100)]
46
 
47
+ with open(f"results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
48
  negative_ppl_valid = [json.loads(x)['perplexity'] * -1 for x in f.read().split("\n") if len(x) > 0]
49
  _d = [x for x in data_valid if x['relation_type'] == d['relation_type']][0]
50
  scores_val = _d['scores_mean']
 
52
  false_bottom = ", ".join([":".join(_d['pairs'][n]) for n, (s, p) in enumerate(zip(scores_val, negative_ppl_valid)) if s >= top and p <= bottom_pred])
53
 
54
  table.append({
55
+ "model": target[i], "relation": pretty_name[d['relation_type']], "top": false_top, "bottom": false_bottom
56
  })
57
 
58
  table = pd.DataFrame(table)
59
+ table.to_csv("results/qualitative.csv", index=False)
60
+ with pd.option_context("max_colwidth", 1000):
61
+ _table = table[['model', 'relation', 'top']]
62
+ _table = _table[_table['top'].str.len() > 0]
63
+ print(_table.to_latex(index=False, escape=False))
64
+ _table = table[['model', 'relation', 'bottom']]
65
+ _table = _table[_table['bottom'].str.len() > 0]
66
+ print(_table.to_latex(index=False, escape=False))
experiments/results/qualitative.csv ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model,relation,top,bottom
2
+ Flan-T5\textsubscript{XXL},Rival,,Isaac Newton:Gottfried Leibniz
3
+ Flan-T5\textsubscript{XXL},Ally,"Armenia:Azerbaijan, Liam Gallagher:Noel Gallagher, Russia:Georgia","China:North Korea, Ron Weasley:Neville Longbottom, Windows:Xbox"
4
+ Flan-T5\textsubscript{XXL},Inf,"Harry Potter:Wizard of Oz, heavy metal:punk music, Luke Bryan:Hank Williams, James Brown:Michael Jackson","Prince Harry:Monarchy, trending music:TikTok, Coca-Cola:Pepsi, Apple Music:Spotify, Pepsi:Coca-Cola, Hoover:Dyson"
5
+ Flan-T5\textsubscript{XXL},Know,,"Corsica:Napoleon Bonaparte, France:cheese"
6
+ Flan-T5\textsubscript{XXL},Sim,"sphinx:sphynx, New York:York, cannoli:canneloni","Suits:Law & Order, Shark:Bush"
7
+ Flan-UL2,Rival,Serena Williams:Andy Murray,
8
+ Flan-UL2,Ally,"Liam Gallagher:Noel Gallagher, Google:Samsung","Tata Motors:Jaguar, China:North Korea, HSBC:BlackRock, Coca-Cola:McDonald's, Huawei:China"
9
+ Flan-UL2,Inf,"Harry Potter:Wizard of Oz, heavy metal:punk music, James Brown:Michael Jackson","Prince Harry:Monarchy, trending music:TikTok, Wales:Westminster, Theresa May:David Cameron"
10
+ Flan-UL2,Know,Belgium:wine,"Europe:The Final Countdown, Corsica:Napoleon Bonaparte, OpenAI:ChatGPT"
11
+ Flan-UL2,Sim,"sphinx:sphynx, cannoli:canneloni","Minnesota:Wisconsin, Shark:Bush, Glastonbury:Roskilde"
12
+ OPT\textsubscript{13B},Rival,Serena Williams:Andy Murray,
13
+ OPT\textsubscript{13B},Ally,"Joseph Stalin:Josip Broz Tito, Armenia:Azerbaijan, Sophia Loren:Marlon Brando","FTX:Alameda Research, Red Bull:GoPro, HSBC:BlackRock, Microsoft:LinkedIn, Windows:Xbox"
14
+ OPT\textsubscript{13B},Inf,"Joe Biden:Donald Trump, Harry Potter:Wizard of Oz, Singaporean food:Malaysian food","Prince Harry:Monarchy, trending music:TikTok, Wales:Westminster"
15
+ OPT\textsubscript{13B},Know,"Coca-Cola:Pepsi, Steve Jobs:AirPods","OpenAI:ChatGPT, UK:rain"
16
+ OPT\textsubscript{13B},Sim,,"pill:tablet, Great Britian:British Empire, fusilli:rotini, Shark:Bush"
17
+ GPT-3\textsubscript{davinci},Rival,Serena Williams:Andy Murray,Netflix:Disney Plus
18
+ GPT-3\textsubscript{davinci},Ally,"Joseph Stalin:Josip Broz Tito, Armenia:Azerbaijan, Liam Gallagher:Noel Gallagher","FTX:Alameda Research, Rishi Sunak:Joe Biden, Microsoft:LinkedIn, Windows:Xbox"
19
+ GPT-3\textsubscript{davinci},Inf,Harry Potter:Wizard of Oz,"Prince Harry:Monarchy, trending music:TikTok, Stephen King:Arthur Machen"
20
+ GPT-3\textsubscript{davinci},Know,Coca-Cola:Pepsi,OpenAI:ChatGPT
21
+ GPT-3\textsubscript{davinci},Sim,Nicolae Ceaușescu:Javier Hernández,"Homebase:IKEA, fusilli:rotini, Shark:Bush, Primark:Shein"