import torch
import gradio as gr
import RohanGivenCode
from RohanGivenCode import *
save1_or_load0 = 0 # 1 => Save; 0 => Load
device = 'cpu'
if torch.cuda.is_available():
    device = 'cuda'
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
    device = "mps"
print(f"using device: {device}")
# SEED
torch.manual_seed(1337)
if torch.cuda.is_available():
    torch.cuda.manual_seed(1337)
# STOP
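# Default generation settings (declared here but not referenced elsewhere in this script)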
num_return_sequences = 5
max_length = 30
def sentence_builder(txt, new_tokens):
    # Pad short prompts with a default phrase so the input meets the minimum of 9 words
    txt_len = len(txt.split())
    if txt_len < 9:
        txt += " My lord, I claim your gift, my due by promise"
    t_loader = DataLoaderLite(B=8, T=1, text_input=txt)
    # Honor the token count chosen in the dropdown (Gradio passes it as a string)
    out = infer_the_model(device, t_loader, save1_or_load0=0, new_tokens=int(new_tokens))
    return out
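# Illustrative local check only (assumes the RohanGivenCode module and its saved model
# weights are available, as used above); uncomment to try the function outside the Gradio UI:
# print(sentence_builder("Friends, Romans, countrymen, lend me your ears this very night", "100"))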
demo = gr.Interface(
    sentence_builder,
    [
        gr.Textbox("", label="Input",
                   info="Enter at least 9 words; shorter prompts are padded with default words to meet the minimum requirement."),
        gr.Dropdown(
            ["100", "200", "300", "400", "500", "1000", "2000"],
            label="New Tokens",
            info="Choose how many new tokens to generate in the output.",
            value="100"
        )
    ],
    [
        gr.Textbox("", label="Output")
    ],
    title="Shakespeare Drama Dialogue Mimic by GPT3"
)
demo.launch(debug=True)
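# Note: debug=True blocks the main thread and prints errors to the console; when running
# outside Hugging Face Spaces, demo.launch(debug=True, share=True) would also expose a
# temporary public URL for the interface.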