Spaces:
Sleeping
Sleeping
clamepending
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -65,4 +65,4 @@ def tokenize(input):
|
|
65 |
examples = ["ayyyy whats up 👋", "Okay now picture little Bobby just a youngin' runnin' round", "Peace is when you leave it in the past, let it heal like a cast;When enough time pass, then you blast;Kinda like John Wick, bars like a convict;Fuck around and you don't wanna start shit, woo!"]
|
66 |
|
67 |
intf = gr.Interface(fn=tokenize, inputs="text", outputs=["text", "text", gr.components.Number()], examples=examples, title = "Logic Tokenizer", description="Logic Tokenizer tokenizes your text based on BPE run on the top 10 songs by logic. The vocab size is 1024, and expanded from an original 256 from utf-8. The float output is the compression ratio of len(input)/len(encoded), and the array of integers are the tokens the model learned.")
|
68 |
-
intf.launch(inline=True
|
|
|
65 |
examples = ["ayyyy whats up 👋", "Okay now picture little Bobby just a youngin' runnin' round", "Peace is when you leave it in the past, let it heal like a cast;When enough time pass, then you blast;Kinda like John Wick, bars like a convict;Fuck around and you don't wanna start shit, woo!"]
|
66 |
|
67 |
intf = gr.Interface(fn=tokenize, inputs="text", outputs=["text", "text", gr.components.Number()], examples=examples, title = "Logic Tokenizer", description="Logic Tokenizer tokenizes your text based on BPE run on the top 10 songs by logic. The vocab size is 1024, and expanded from an original 256 from utf-8. The float output is the compression ratio of len(input)/len(encoded), and the array of integers are the tokens the model learned.")
|
68 |
+
intf.launch(inline=True)
|