okeanos committed on
Commit
6999339
1 Parent(s): 25a8d0a
Files changed (1) hide show
  1. app.py +1 -38
app.py CHANGED
@@ -1,40 +1,3 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
- import torch
4
- import subprocess
5
- import spaces
6
- import os
7
 
8
-
9
-
10
- # Initialize the model pipeline
11
- generator = pipeline('text-generation', model='okeanos/uptimeai-8273')
12
- @spaces.GPU
13
- def generate_text(prompt):
14
- # Generate text using the model
15
- generator.model.cuda()
16
- generator.device = torch.device("cuda")
17
- prompt = f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
18
- outputs = generator(
19
- prompt,
20
- )
21
- # Extract the generated text and return it
22
- generated_text = outputs[0]['generated_text']
23
- return generated_text
24
- # Create the Gradio interface
25
- iface = gr.Interface(
26
- fn=generate_text,
27
- inputs=[
28
- gr.Textbox(label="Prompt", lines=2, placeholder="Type a prompt..."),
29
- gr.Slider(minimum=0.1, maximum=2.0, step=0.01, value=0.8, label="Temperature"),
30
- gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.95, label="Top p"),
31
- gr.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
32
- gr.Slider(minimum=1.0, maximum=2.0, step=0.01, value=1.10, label="Repetition Penalty"),
33
- gr.Slider(minimum=5, maximum=4096, step=5, value=1024, label="Max Length")
34
- ],
35
- outputs=gr.Textbox(label="Generated Text"),
36
- title="ChatHercules-2.5-Mistral-7B",
37
- description="Try out the ChatHercules-2.5-Mistral-7B model for free!"
38
- )
39
-
40
- iface.launch()
 
1
  import gradio as gr
 
 
 
 
 
2
 
3
+ gr.Interface.load("models/Ejafa/vicuna_7B_vanilla_1.1").launch()