ffreemt committed on
Commit 3294385
1 Parent(s): 874bd1c
Files changed (1)
  1. app.py +25 -26
app.py CHANGED
@@ -9,28 +9,25 @@ transformers
 sentencepiece
 torch
 
-"""
-# pylint: disable=line-too-long, missing-module-docstring, missing-function-docstring
-# import torch
 import gradio as gr
-from examples_list import examples_list
-from transformers import AutoModel, AutoTokenizer  # AutoModelForCausalLM,
 
-# device = "cuda" if torch.cuda.is_available() else "cpu"
+def greet(name):
+    return "Hello " + name + "!"
 
-# tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga2", use_fast=False)
-# model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga2", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
-# system_prompt = "### System:\nYou are Stable Beluga, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
-# pipeline = pipeline(task="text-generation", model="meta-llama/Llama-2-7b")
+with gr.Blocks() as demo:
+    name = gr.Textbox(label="Name")
+    output = gr.Textbox(label="Output Box")
+    greet_btn = gr.Button("Greet")
+    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
+
+
+demo.launch()
 
-_ = """
-tokenizer = AutoTokenizer.from_pretrained(
-    "THUDM/chatglm2-6b-int4", trust_remote_code=True
-)
-chat_model = AutoModel.from_pretrained(
-    "THUDM/chatglm2-6b-int4", trust_remote_code=True  # 3.92G
-).float()
 """
+# pylint: disable=line-too-long, missing-module-docstring, missing-function-docstring
+# import torch
+import gradio as gr
+
 
 def stream_chat():
     """samples:
@@ -75,17 +72,16 @@ def update_chatbot():
     message = yield
     print(f"{message=}")
 
+def greet(name):
+    return "Hello " + name + "!"
 
-def chat1(message, history):
-    # prompt = f"{system_prompt}### User: {message}\n\n### Assistant:\n"
-    # inputs = tokenizer(prompt, return_tensors="pt").to(device=device)
-    # output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
-    # return tokenizer.decode(output[0], skip_special_tokens=True)
-    for response, _ in chat_model.stream_chat(
-        tokenizer, message, history, max_length=2048, top_p=0.7, temperature=0.95
-    ):
-        yield response, _
+with gr.Blocks() as block:
+    name = gr.Textbox(label="Name")
+    output = gr.Textbox(label="Output Box")
+    greet_btn = gr.Button("Greet")
+    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")
 
+_ = """
 with gr.Blocks(theme=gr.themes.Glass(text_size="sm", spacing_size="sm"),) as block:
     chatbot = gr.Chatbot()
     msg = gr.Textbox()
@@ -98,3 +94,6 @@ block(
    # title="gradio-chatinterface-tryout",
    # examples=examples_list,
 ).queue(max_size=2).launch()
+# """
+
+block.queue(max_size=2).launch()
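
A note on the _ = """ / # """ pair introduced at the end of the diff: it is a quick idiom for disabling a block of code without deleting it. The assignment turns everything down to the next triple quote into an unused string literal, and the """ inside the final # """ line is what terminates that literal (the leading # is just text inside the string). A minimal sketch of the same idiom in isolation:

_ = """
print("this line never runs")  # swallowed by the string literal above
# """  # the triple quote here closes the string; code below is live again

print("this line runs")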
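
Because the live greet_btn.click(...) call registers the handler with api_name="greet", the launched app also exposes it as a named API endpoint. Below is a minimal client-side sketch using Gradio's gradio_client package; the URL is a placeholder for wherever this app ends up running, not something stated in the commit:

from gradio_client import Client

# Placeholder URL: substitute the actual Space id or host once deployed.
client = Client("http://127.0.0.1:7860/")
result = client.predict("World", api_name="/greet")  # invokes greet("World") server-side
print(result)  # expected: "Hello World!"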