RashiAgarwal committed
Commit 0999416
1 Parent(s): 9da4bfa

Update app.py

Files changed (1)
1. app.py +5 -7
app.py CHANGED
@@ -72,10 +72,7 @@ def nanogpt(start:str , max_new_tokens = 500, num_samples =2):
     encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
     decode = lambda l: enc.decode(l)
 
-    # encode the beginning of the prompt
-    # if start.startswith('FILE:'):
-    #     with open(start[5:], 'r', encoding='utf-8') as f:
-    #         start = f.read()
+
     start_ids = encode(start)
     x = (torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...])
 
@@ -90,7 +87,8 @@ def nanogpt(start:str , max_new_tokens = 500, num_samples =2):
 
 INTERFACE = gr.Interface(fn=nanogpt, inputs=[gr.Textbox(label= "Prompt"),gr.Slider(300,500, "number", label= "Maximum number of tokens to be geenrated")] , outputs=gr.Text(label= "Generated Text"), title="NanoGPT",
                          description="NanoGPT is a large transformer-based language model with 10.65 million parameters, trained on a small dataset of Shakespeare work (size: 1MB only). It is trained with character level tokeniation with a simple objective: predict the next char, given all of the previous chars within some text.",
-                         examples = [['We as the new generation AI enginners.',300,1],
-                                     ['A forgotten era of humility and happiness',300,2],
+                         # examples = [['We as the new generation AI enginners.',300,1],
+                         #             ['A forgotten era of humility and happiness',300,2],
 
-                         ]).launch(debug=True)
+                         # ]
+                         ).launch(debug=True)
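
For context on the lines this commit touches: the encode/decode lambdas and the start_ids/x tensor feed a nanoGPT-style sampling loop. Below is a minimal sketch of that flow, assuming model is a nanoGPT GPT instance with a generate method and that device is set up earlier in app.py (neither appears in this diff); the temperature/top_k values are illustrative only.

# Sketch only (not part of commit 0999416): how the edited lines typically
# feed nanoGPT-style sampling. `model` and `device` are assumed to exist
# elsewhere in app.py; they are not shown in this diff.
import torch
import tiktoken

enc = tiktoken.get_encoding("gpt2")
encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
decode = lambda l: enc.decode(l)

def sample(model, device, start: str, max_new_tokens: int = 500, num_samples: int = 2) -> str:
    # Encode the prompt and add a batch dimension, as in the diff above.
    start_ids = encode(start)
    x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]
    outputs = []
    with torch.no_grad():
        for _ in range(num_samples):
            # nanoGPT's GPT.generate(idx, max_new_tokens, temperature, top_k);
            # these sampling hyperparameters are illustrative, not from app.py.
            y = model.generate(x, max_new_tokens, temperature=0.8, top_k=200)
            outputs.append(decode(y[0].tolist()))
    return "\n\n".join(outputs)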
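A likely reason the examples argument was commented out (an assumption, not stated in the commit): each example row carries three values while the Interface declares only two inputs (the Prompt textbox and the max-tokens slider), and Gradio requires the two to match. A hedged sketch of re-enabling the examples with matching arity, keeping the same two inputs:

# Sketch only (assumption, not from the commit): examples sized to match the
# two declared inputs so Gradio accepts them.
import gradio as gr

INTERFACE = gr.Interface(
    fn=nanogpt,  # the generation function defined earlier in app.py
    inputs=[gr.Textbox(label="Prompt"),
            gr.Slider(300, 500, value=300, label="Maximum number of tokens to be generated")],
    outputs=gr.Text(label="Generated Text"),
    title="NanoGPT",
    examples=[["We as the new generation AI engineers.", 300],
              ["A forgotten era of humility and happiness", 300]],
)
INTERFACE.launch(debug=True)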