mia naomi committed on
Commit
f41b4c6
1 Parent(s): 8e0992b

Delete shakespeare-inference.py

Files changed (1)
  1. shakespeare-inference.py +0 -174
shakespeare-inference.py DELETED
@@ -1,174 +0,0 @@
# -*- coding: utf-8 -*-
"""inference

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/19EoREzhPG9bv_2DF_XNWOIijNGGD6hWK
"""

!pip install transformers==4.14.1
!pip install bitsandbytes-cuda111==0.26.0

from IPython import display
display.clear_output()

import transformers
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import custom_fwd, custom_bwd
from bitsandbytes.functional import quantize_blockwise, dequantize_blockwise
from tqdm.auto import tqdm

#@title convert to 8bit
class FrozenBNBLinear(nn.Module):
    """Linear layer whose weight is stored blockwise-quantized as uint8 and
    dequantized on the fly; the base weight stays frozen, with an optional
    trainable adapter on top."""

    def __init__(self, weight, absmax, code, bias=None):
        assert isinstance(bias, nn.Parameter) or bias is None
        super().__init__()
        self.out_features, self.in_features = weight.shape
        self.register_buffer("weight", weight.requires_grad_(False))
        self.register_buffer("absmax", absmax.requires_grad_(False))
        self.register_buffer("code", code.requires_grad_(False))
        self.adapter = None
        self.bias = bias

    def forward(self, input):
        output = DequantizeAndLinear.apply(input, self.weight, self.absmax, self.code, self.bias)
        if self.adapter:
            output += self.adapter(input)
        return output

    @classmethod
    def from_linear(cls, linear: nn.Linear) -> "FrozenBNBLinear":
        weights_int8, state = quantize_blockwise_lowmemory(linear.weight)
        return cls(weights_int8, *state, linear.bias)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.in_features}, {self.out_features})"

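# Illustrative aside (assumption, not from the original notebook): the
# `adapter` hook above is where a small trainable module can be attached for
# fine-tuning, since the 8-bit base weight is frozen. A minimal low-rank
# adapter might look like this; the helper name and the rank of 4 are
# illustrative choices only.
def add_low_rank_adapter(layer: FrozenBNBLinear, rank: int = 4):
    layer.adapter = nn.Sequential(
        nn.Linear(layer.in_features, rank, bias=False),
        nn.Linear(rank, layer.out_features, bias=False),
    )
    nn.init.zeros_(layer.adapter[1].weight)  # start as a no-op
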
class DequantizeAndLinear(torch.autograd.Function):
    """Dequantizes the int8 weight on the fly for F.linear; backward produces
    gradients only for the input and bias, never for the frozen weight."""

    @staticmethod
    @custom_fwd
    def forward(ctx, input: torch.Tensor, weights_quantized: torch.ByteTensor,
                absmax: torch.FloatTensor, code: torch.FloatTensor, bias: torch.FloatTensor):
        weights_deq = dequantize_blockwise(weights_quantized, absmax=absmax, code=code)
        ctx.save_for_backward(input, weights_quantized, absmax, code)
        ctx._has_bias = bias is not None
        return F.linear(input, weights_deq, bias)

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_output: torch.Tensor):
        assert not ctx.needs_input_grad[1] and not ctx.needs_input_grad[2] and not ctx.needs_input_grad[3]
        input, weights_quantized, absmax, code = ctx.saved_tensors
        # grad_output: [*batch, out_features]
        weights_deq = dequantize_blockwise(weights_quantized, absmax=absmax, code=code)
        grad_input = grad_output @ weights_deq
        grad_bias = grad_output.flatten(0, -2).sum(dim=0) if ctx._has_bias else None
        return grad_input, None, None, None, grad_bias

class FrozenBNBEmbedding(nn.Module):
    def __init__(self, weight, absmax, code):
        super().__init__()
        self.num_embeddings, self.embedding_dim = weight.shape
        self.register_buffer("weight", weight.requires_grad_(False))
        self.register_buffer("absmax", absmax.requires_grad_(False))
        self.register_buffer("code", code.requires_grad_(False))
        self.adapter = None

    def forward(self, input, **kwargs):
        with torch.no_grad():
            # note: both quantized weights and input indices are *not* differentiable
            weight_deq = dequantize_blockwise(self.weight, absmax=self.absmax, code=self.code)
            output = F.embedding(input, weight_deq, **kwargs)
        if self.adapter:
            output += self.adapter(input)
        return output

    @classmethod
    def from_embedding(cls, embedding: nn.Embedding) -> "FrozenBNBEmbedding":
        weights_int8, state = quantize_blockwise_lowmemory(embedding.weight)
        return cls(weights_int8, *state)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.num_embeddings}, {self.embedding_dim})"

def quantize_blockwise_lowmemory(matrix: torch.Tensor, chunk_size: int = 2 ** 20):
    """Quantize a large tensor chunk by chunk to keep peak memory low; the
    codebook produced for the first chunk is reused for all later chunks."""
    assert chunk_size % 4096 == 0
    code = None
    chunks = []
    absmaxes = []
    flat_tensor = matrix.view(-1)
    for i in range((matrix.numel() - 1) // chunk_size + 1):
        input_chunk = flat_tensor[i * chunk_size: (i + 1) * chunk_size].clone()
        quantized_chunk, (absmax_chunk, code) = quantize_blockwise(input_chunk, code=code)
        chunks.append(quantized_chunk)
        absmaxes.append(absmax_chunk)

    matrix_i8 = torch.cat(chunks).reshape_as(matrix)
    absmax = torch.cat(absmaxes)
    return matrix_i8, (absmax, code)

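# Illustrative aside (assumption, not from the original notebook): a quick
# round-trip check of blockwise quantization. Each 4096-element block is
# stored as uint8 codes plus one float32 absmax, so dequantization is
# approximate rather than exact.
if torch.cuda.is_available():
    x = torch.randn(8192, device='cuda')
    x_q, (x_absmax, x_code) = quantize_blockwise(x)
    x_deq = dequantize_blockwise(x_q, absmax=x_absmax, code=x_code)
    print('max reconstruction error:', (x - x_deq).abs().max().item())
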
def convert_to_int8(model):
    """Convert linear and embedding modules to 8-bit with optional adapters"""
    for module in list(model.modules()):
        for name, child in module.named_children():
            if isinstance(child, nn.Linear):
                setattr(
                    module,
                    name,
                    FrozenBNBLinear(
                        # zero-filled placeholders: from_pretrained() later fills
                        # these buffers with the checkpoint's real int8 tensors
                        weight=torch.zeros(child.out_features, child.in_features, dtype=torch.uint8),
                        absmax=torch.zeros((child.weight.numel() - 1) // 4096 + 1),
                        code=torch.zeros(256),
                        bias=child.bias,
                    ),
                )
            elif isinstance(child, nn.Embedding):
                setattr(
                    module,
                    name,
                    FrozenBNBEmbedding(
                        weight=torch.zeros(child.num_embeddings, child.embedding_dim, dtype=torch.uint8),
                        absmax=torch.zeros((child.weight.numel() - 1) // 4096 + 1),
                        code=torch.zeros(256),
                    )
                )


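# Illustrative aside (assumption, not from the original notebook): what
# convert_to_int8 does to a toy module -- both children are swapped for
# Frozen* placeholders whose uint8 buffers await the real checkpoint weights.
toy = nn.Sequential(nn.Linear(8, 8), nn.Embedding(10, 8))
convert_to_int8(toy)
print(toy)  # Sequential of FrozenBNBLinear(8, 8) and FrozenBNBEmbedding(10, 8)
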
class GPTJBlock(transformers.models.gptj.modeling_gptj.GPTJBlock):
    def __init__(self, config):
        super().__init__(config)

        convert_to_int8(self.attn)
        convert_to_int8(self.mlp)


class GPTJModel(transformers.models.gptj.modeling_gptj.GPTJModel):
    def __init__(self, config):
        super().__init__(config)
        convert_to_int8(self)


class GPTJForCausalLM(transformers.models.gptj.modeling_gptj.GPTJForCausalLM):
    def __init__(self, config):
        super().__init__(config)
        convert_to_int8(self)


# monkey-patch GPT-J: from_pretrained constructs its blocks through this module
# attribute, so every transformer block is built with 8-bit placeholders
transformers.models.gptj.modeling_gptj.GPTJBlock = GPTJBlock

tokenizer = transformers.AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
gpt = GPTJForCausalLM.from_pretrained("crumb/gpt-j-6b-shakespeare", low_cpu_mem_usage=True)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
gpt = gpt.to(device)

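# Illustrative aside (assumption, not from the original notebook): a quick
# sanity check that the checkpoint loaded int8 tensors into the frozen buffers.
print(gpt.transformer.h[0].attn.q_proj.weight.dtype)  # expected: torch.uint8
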
prompt = """ROMEO: I would I were thy bird.
JULIET: Sweet, so would I, Yet I should kill thee with much cherishing. Good night, good night! Parting is such sweet"""
prompt = tokenizer(prompt, return_tensors='pt')
prompt = {key: value.to(device) for key, value in prompt.items()}
out = gpt.generate(**prompt, min_length=32, max_length=64, do_sample=True)
out = tokenizer.decode(out[0])
print(out)
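
# Illustrative aside (assumption, not from the original notebook): generate()
# accepts the usual sampling knobs; the temperature and top_p values here are
# arbitrary illustrative choices.
sampled = gpt.generate(**prompt, max_length=64, do_sample=True,
                       temperature=0.9, top_p=0.95)
print(tokenizer.decode(sampled[0]))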