patrickvonplaten committed
Commit 5211e0c
1 Parent(s): 015300c
model/restored.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b52b3009e9230a48e67af5b8d9c74152268e54f2f2d651aede99b9d3f645371c
-size 59949409917
+oid sha256:e8185e89d0ebf9a63a207216b0e5ef89e67d74601b2cfec424a9eef2eba0dfbe
+size 59949410247
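
The LFS pointer above stores only the object's SHA-256 and byte size; the ~60 GB checkpoint itself lives in LFS storage. As a minimal sketch (assuming the file has already been fetched, e.g. with `git lfs pull`), the downloaded `restored.pt` can be verified against the new pointer like this:

```python
import hashlib
import os

# Expected values, taken from the new LFS pointer above.
EXPECTED_OID = "e8185e89d0ebf9a63a207216b0e5ef89e67d74601b2cfec424a9eef2eba0dfbe"
EXPECTED_SIZE = 59949410247

path = "model/restored.pt"

# The size check is cheap, so run it first.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

# Hash in 1 MiB chunks so the ~60 GB file never has to fit in memory.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"

print("model/restored.pt matches its LFS pointer")
```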
model/special_tokens_map.json CHANGED
@@ -1 +1,23 @@
-{"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
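
Functionally nothing changes here: `bos_token`, `eos_token`, and `unk_token` all still map to `<|endoftext|>`; the file is only rewritten in the pretty-printed, key-sorted form that `save_pretrained` emits. As a quick sketch, the specials can be checked by building the tokenizer the same way `run_model.py` does from the repo's vocab and merges files (assuming those files are present locally):

```python
import os
from transformers import GPT2Tokenizer

path = "./model"
# Same files run_model.py points the tokenizer at.
tokenizer = GPT2Tokenizer(
    os.path.join(path, "gpt2-vocab.json"),
    os.path.join(path, "gpt2-merges.txt"),
)

# All three specials resolve to the same token, matching the JSON above.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
# -> <|endoftext|> <|endoftext|> <|endoftext|>
```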
model/tokenizer_config.json CHANGED
@@ -1 +1,31 @@
-{"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "tokenizer_class": "GPT2Tokenizer"}
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "pad_token": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
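
Beyond the reserialization, the new config adds `add_bos_token: false` and an explicit `pad_token: null`, and each special token now carries `"__type": "AddedToken"`, so it deserializes into a `tokenizers.AddedToken` object rather than a plain string. A sketch of what one such entry corresponds to (assuming the `tokenizers` package that `transformers` depends on):

```python
from tokenizers import AddedToken

# Equivalent of one '"__type": "AddedToken"' entry in the config above;
# the flags control how the token is matched during tokenization.
eos = AddedToken(
    "<|endoftext|>",
    single_word=False,  # may also match inside a larger word
    lstrip=False,       # do not strip whitespace to the left
    rstrip=False,       # do not strip whitespace to the right
    normalized=True,    # run the normalizer over the token content
)
print(eos.content)  # -> <|endoftext|>
```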
model/vocab.json CHANGED
The diff for this file is too large to render. See raw diff
 
run_model.py CHANGED
@@ -1,23 +1,14 @@
 #!/usr/bin/env python3
-#!/usr/bin/env python3
 import os
 from transformers import AutoTokenizer, GPT2Tokenizer
-from megatron.initialize import initialize_megatron
+#from megatron.initialize import initialize_megatron
 from metaseq import checkpoint_utils
+from transformers import OPTForCausalLM
 import torch
 
 path = "./model"
+hf_path = "/home/patrick/facebook/opt-30b"
 
-# just need to initialize args with something,
-# => doesn't need to correspond to the "correct" architecture for this checkpoint
-initialize_megatron(args_defaults={
-    "micro_batch_size": 1,
-    "num_layers": 12,
-    "hidden_size": 768,
-    "num_attention_heads": 12,
-    "max_position_embeddings": 2048,
-    "encoder_seq_length": 2048
-})
 
 vocab_file = os.path.join(path, "gpt2-vocab.json")
 merges_file = os.path.join(path, "gpt2-merges.txt")
@@ -34,32 +25,51 @@ checkpoint = checkpoint_utils.load_model_ensemble_and_task(
 )
 
 model = checkpoint[0][0].eval()
-model = model.cuda().half()
+model = model
 
+hf_model = OPTForCausalLM.from_pretrained(hf_path)
 
 # forward passes
 def single_batch_forward_logits(prompts):
     input_ids = tokenizer(prompts, return_tensors="pt").input_ids
     input_ids = torch.cat([torch.tensor([[0]]), input_ids], dim=-1)
-    input_ids = input_ids.cuda()
+    input_ids = input_ids
     with torch.no_grad():
         logits = model(input_ids)[0]
     return logits
 
+# forward hf
+def forward_hf(prompts):
+    input_ids = tokenizer(prompts, return_tensors="pt").input_ids
+    input_ids = torch.cat([torch.tensor([[0]]), input_ids], dim=-1)
+    input_ids = input_ids
+    with torch.no_grad():
+        logits = hf_model(input_ids)[0]
+    return logits
+
 prompts = [
-    "Today is a beautiful day and I want to",
-    "In the city of",
-    "Paris is the capital of France and",
-    "Computers and mobile phones have taken",
+    "Today is a beautiful day and I want to",
+    "In the city of",
+    "Paris is the capital of France and",
+    "Computers and mobile phones have taken",
 ]
 
 print("Next word generation")
 for prompt in prompts:
     print("-------------")
     print(f"Prompt: {prompt}...\n")
-    logits = single_batch_forward_logits(prompt)
+    logits_fsq = single_batch_forward_logits(prompt)
+    pred_next_token = torch.argmax(logits_fsq[0, -1], -1)
+    next_token = tokenizer.convert_ids_to_tokens([pred_next_token])
+    next_token = next_token[0].replace("Ġ", "")
+    print(f"Next word: {next_token}")
+    print("-------------")
+    logits = forward_hf(prompt)
     pred_next_token = torch.argmax(logits[0, -1], -1)
     next_token = tokenizer.convert_ids_to_tokens([pred_next_token])
     next_token = next_token[0].replace("Ġ", "")
     print(f"Next word: {next_token}")
     print("-------------")
+
+
+print("Is equal:", torch.allclose(logits_fsq.cpu(), logits.cpu(), atol=1e-3))