import gradio as gr
from contextlib import nullcontext
import torch
import tiktoken
from model import GPTConfig, GPT

# -----------------------------------------------------------------------------
init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
out_dir = 'out' # ignored if init_from is not 'resume'
# start = "\n" # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
num_samples = 1 # number of samples to draw
max_new_tokens = 300 # number of tokens generated in each sample
temperature = 1.0 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
seed = 1337
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float32' # 'float32' or 'bfloat16' or 'float16'
compile = True # use PyTorch 2.0 to compile the model to be faster
exec(open('configurator.py').read()) # overrides from command line or config file
# -----------------------------------------------------------------------------

torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)

# init the model from a saved checkpoint (path is hardcoded here, so init_from/out_dir
# above are only kept for configurator.py compatibility)
ckpt_path = 'ckpt_30000.pt'
checkpoint = torch.load(ckpt_path, map_location=device)
gptconf = GPTConfig(**checkpoint['model_args'])
model = GPT(gptconf)
state_dict = checkpoint['model']
# checkpoints saved from a torch.compile()'d model carry an '_orig_mod.' prefix
# on every key; strip it so the state dict matches the uncompiled module
unwanted_prefix = '_orig_mod.'
for k, v in list(state_dict.items()):
    if k.startswith(unwanted_prefix):
        state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
model.load_state_dict(state_dict)

model.eval()
model.to(device)
if compile:
    model = torch.compile(model) # requires PyTorch 2.0 (optional)

# GPT-2 BPE encode/decode helpers
enc = tiktoken.get_encoding("gpt2")
encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
decode = lambda l: enc.decode(l)

def get_response(prompt, history):
    # `history` is required by gr.ChatInterface but unused here:
    # the model only sees the latest prompt
    start_ids = encode(prompt)
    x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]
    with torch.no_grad():
        with ctx:
            for k in range(num_samples):
                y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
    # return decode(y[0].tolist()).split('[EndOfText]')[0].split('Assistant:')[1]
    return decode(y[0].tolist())

demo = gr.ChatInterface(get_response, description="Ask me any sports question")

if __name__ == "__main__":
    demo.launch()
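
# --- Optional post-processing sketch (an assumption, not part of the original
# pipeline): the commented-out return in get_response suggests the fine-tuning
# data wrapped replies as "Assistant: ... [EndOfText]". This helper extracts the
# reply defensively instead of indexing split() results directly, which would
# raise IndexError whenever a tag is missing from the generated text.
def extract_reply(full_text: str) -> str:
    # keep only the text before the first end-of-text marker, if present
    reply = full_text.split('[EndOfText]')[0]
    # keep only the text after the last "Assistant:" tag, if present
    if 'Assistant:' in reply:
        reply = reply.rsplit('Assistant:', 1)[1]
    return reply.strip()
# In use it would sit above get_response, whose last line would become:
#     return extract_reply(decode(y[0].tolist()))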