TheMaisk committed on
Commit
2f68a4d
1 Parent(s): b790d47

Create app.py

Files changed (1)
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+ import os
+
+ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+ # Hidden system prompt, supplied via the Space secret SECRET_PROMPT
+ secret_prompt = os.getenv("SECRET_PROMPT")
+
+ def format_prompt(new_message, history):
+     # Build a Mixtral-Instruct prompt: each user turn is wrapped in [INST] ... [/INST],
+     # followed by the bot reply closed with </s>
+     prompt = secret_prompt
+     for user_msg, bot_msg in history:
+         prompt += f"[INST] {user_msg} [/INST]"
+         prompt += f" {bot_msg}</s> "
+     prompt += f"[INST] {new_message} [/INST]"
+     return prompt
+
+ def generate(prompt, history,
+              temperature=0.25,
+              max_new_tokens=512,
+              top_p=0.95,
+              repetition_penalty=1.0):
+
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=727,
+     )
+
+     formatted_prompt = format_prompt(prompt, history)
+
+     # Stream tokens from the Inference API and yield the partial answer as it grows
+     stream = client.text_generation(formatted_prompt,
+                                     **generate_kwargs,
+                                     stream=True,
+                                     details=True,
+                                     return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
+
+ samir_chatbot = gr.Chatbot(avatar_images=["./user.png", "./bot.png"],
+                            bubble_full_width=False,
+                            show_label=False,
+                            show_copy_button=True,
+                            likeable=True)
+
+ # Configure the Gradio demo
+ theme = 'syddharth/gray-minimal'
+ demo = gr.ChatInterface(fn=generate,
+                         chatbot=samir_chatbot,
+                         title="Ailexs Mixtral 8x7b Chat",
+                         theme=theme)
+
+ demo.queue().launch(show_api=False)