RashiAgarwal committed on
Commit
1ff18ea
1 Parent(s): a280a67

Upload app.py

Files changed (1)
  1. app.py +141 -0
app.py ADDED
@@ -0,0 +1,141 @@
+ # -*- coding: utf-8 -*-
+ """S22.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1pq0UO46D0emoqF8rPuD4cUznmYVSMESO
+ """
+ # Commented out IPython magic to ensure Python compatibility.
+ # %pip install lightning -q
+ # %pip install gradio -q
+
+ from contextlib import nullcontext
+ from pathlib import Path
+
+ import gradio as gr
+ import lightning as L
+ import torch
+ import torch.nn.functional as F
+ from lightning.fabric.loggers import CSVLogger
+
+ from tsai_gpt.model import GPT, Config
+ from tsai_gpt.tokenizer import Tokenizer
+ from tsai_gpt.utils import load_checkpoint, num_parameters
+
+ print(torch.cuda.is_available())
+ model_name = "pythia-160m"
+ name = "redpajama"
+ out_dir = Path("out") / name
+ log_interval = 100  # assumed value: log_interval was referenced below but never defined
+
+ hparams = {k: v for k, v in locals().items() if isinstance(v, (int, float, str)) and not k.startswith("_")}
+ logger = CSVLogger("out", name, flush_logs_every_n_steps=log_interval)
+
+ fabric = L.Fabric(devices=1, strategy="auto", precision=None, loggers=logger)
+ checkpoint_path = Path("out/redpajama/iter-023999-ckpt.pth")
+ config = Config.from_name(model_name)
+ model = GPT(config)
+
+ load_checkpoint(fabric, model, checkpoint_path)
+
+ # print(model.transformer.h[0].mlp.fc.weight)  # debug: inspect a loaded weight tensor
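+ # Optional sanity check (a sketch): num_parameters comes from tsai_gpt.utils, which
+ # this repo already provides; it reports the model size after loading the checkpoint.
+ print(f"Loaded {num_parameters(model):,} parameters from {checkpoint_path}")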
+ def generate(model, config, idx, max_new_tokens, temperature=1.0, top_k=None):
+     """
+     Take a conditioning sequence of indices idx (a 1-D LongTensor of token ids) and
+     complete it max_new_tokens times, feeding the predictions back into the model
+     each time. Most likely you'll want to be in model.eval() mode for this.
+     """
+     idx = idx.unsqueeze(dim=0)  # add a batch dimension: (t,) -> (1, t)
+     for _ in range(max_new_tokens):
+         # if the sequence context is growing too long we must crop it at block_size
+         idx_cond = idx if idx.size(1) <= config.block_size else idx[:, -config.block_size:]
+         # forward the model to get the logits for the next position
+         logits = model(idx_cond)
+         # pluck the logits at the final step and scale by desired temperature
+         logits = logits[:, -1, :] / temperature
+         # optionally crop the logits to only the top k options; e.g. with top_k=2 and
+         # logits [1.0, 3.0, 2.0], everything below 2.0 is clamped to -inf
+         if top_k is not None:
+             v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+             logits[logits < v[:, [-1]]] = -float("Inf")
+         # apply softmax to convert logits to (normalized) probabilities
+         probs = F.softmax(logits, dim=-1)
+         # sample from the distribution
+         idx_next = torch.multinomial(probs, num_samples=1)
+         # append sampled index to the running sequence and continue
+         idx = torch.cat((idx, idx_next), dim=1)
+
+     return idx
+ checkpoint_dir = Path("./checkpoints/meta-llama/Llama-2-7b-chat-hf")
+ token = Tokenizer(checkpoint_dir=checkpoint_dir)
+
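+ # A minimal smoke test of generate() (illustrative only, assumes the checkpoint and
+ # tokenizer above loaded correctly; uncomment to try it outside the Gradio app):
+ # prompt_ids = token.encode("Hello")  # 1-D LongTensor of token ids
+ # out = generate(model, config, prompt_ids, max_new_tokens=20, top_k=50)
+ # print(token.decode(out[0]))
+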
+ def tsaigpt(start: str, max_new_tokens=300, tokeniser=token):
+     # Gradio passes its inputs positionally, so the slider value must land on
+     # max_new_tokens; the model itself is taken from module scope.
+     max_new_tokens = int(max_new_tokens)  # Gradio sliders deliver numbers as floats
+
+     # -----------------------------------------------------------------------------
+     temperature = 0.8  # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
+     top_k = 200  # retain only the top_k most likely tokens, clamp others to have 0 probability
+     seed = 1337
+     device = "cpu"  # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
+     dtype = "bfloat16" if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else "float16"  # 'float32' or 'bfloat16' or 'float16'
+     compile_model = False  # use PyTorch 2.0 to compile the model to be faster
+     # exec(open('configurator.py').read())  # overrides from command line or config file
+     # -----------------------------------------------------------------------------
+
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+     torch.backends.cuda.matmul.allow_tf32 = True  # allow tf32 on matmul
+     torch.backends.cudnn.allow_tf32 = True  # allow tf32 on cudnn
+     device_type = "cuda" if "cuda" in device else "cpu"  # for later use in torch.autocast
+     ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
+     ctx = nullcontext() if device_type == "cpu" else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
+
+     model.eval()
+     model.to(device)
+     gen_model = torch.compile(model) if compile_model else model  # requires PyTorch 2.0 (optional)
+
+     start_ids = tokeniser.encode(start).to(device)
+
+     # run generation
+     with torch.no_grad():
+         with ctx:
+             y = generate(gen_model, config, start_ids, max_new_tokens, temperature=temperature, top_k=top_k)
+     output = tokeniser.decode(y[0])
+     return output
+
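+ # Quick end-to-end check before launching the UI (illustrative; uncomment to run):
+ # print(tsaigpt("To be, or not to be", max_new_tokens=50))
+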
+ INTERFACE = gr.Interface(
+     fn=tsaigpt,
+     inputs=[
+         gr.Textbox(label="Prompt", value="All that glisters is not gold."),
+         gr.Slider(minimum=300, maximum=500, value=300, label="Maximum number of tokens to be generated"),
+     ],
+     outputs=gr.Text(label="Generated Text"),
+     title="TSAI_GPT",
+     description="TSAIGPT is a transformer-based language model with only 0.16 billion parameters, trained on the RedPajama 1T Sample dataset.",
+     examples=[
+         ["We know what we are, but know not what we may be", 300],
+         ["Sweet are the uses of adversity which, like the toad, ugly and venomous, wears yet a precious jewel in his head", 300],
+     ],
+ ).launch(debug=True)
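+ # Note: when running outside Hugging Face Spaces, a temporary public URL can be
+ # obtained by also passing share=True to .launch() above (a standard Gradio option).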