import gradio as gr
from huggingface_hub import InferenceClient
import pandas as pd
import json
import os
import re
import uuid
client = InferenceClient("tiiuae/falcon-7b-instruct")  # alternative model: HuggingFaceH4/zephyr-7b-beta
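# Quick sanity check (sketch, commented out): a single non-streaming call,
# assuming the chosen model is served for the chat-completion task on the
# Inference API.
# reply = client.chat_completion(
#     [{"role": "user", "content": "ping"}], max_tokens=8
# )
# print(reply.choices[0].message.content)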
def trigger_example(example):
    # NOTE: `generate_response` is not defined in this file; this helper is
    # unused by the ChatInterface below and would raise NameError if called.
    chat, updated_history = generate_response(example)
    return chat, updated_history
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    uploaded_file,
):
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the conversation from Gradio's (user, assistant) tuple history.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    # If a document was uploaded, append its raw text to the user message.
    if uploaded_file is not None:
        with open(uploaded_file.name, "r") as f:
            file_content = f.read()
        messages.append({"role": "user", "content": f"{message}\n\nFile content:\n{file_content}"})
    else:
        messages.append({"role": "user", "content": message})

    # Stream the completion, yielding the accumulated text so the UI updates
    # token by token. A distinct loop variable avoids shadowing the `message`
    # parameter, as the original code did.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # delta content can be None on some chunks
            response += token
        yield response
    # After streaming finishes, persist any uploaded CSV/TXT file as JSON
    # alongside the original.
    if uploaded_file is not None:
        print(f"Uploaded file: {uploaded_file.name}")
        base, ext = os.path.splitext(uploaded_file.name)  # safer than split('.') for dotted paths
        if ext == ".csv":
            try:
                df = pd.read_csv(uploaded_file.name)
                print(f"CSV file loaded with {len(df)} rows and {len(df.columns)} columns.")
                json_data = df.to_json(orient="records")
                with open(f"{base}.json", "w") as json_file:
                    json_file.write(json_data)
                print(f"JSON file created: {base}.json")
            except Exception as e:
                print(f"Error loading CSV file: {e}")
        elif ext == ".txt":
            try:
                with open(uploaded_file.name, "r") as f:
                    text = f.read()
                print(f"Text file loaded with {len(text)} characters.")
                json_data = json.dumps({"text": text})
                with open(f"{base}.json", "w") as json_file:
                    json_file.write(json_data)
                print(f"JSON file created: {base}.json")
            except Exception as e:
                print(f"Error loading text file: {e}")
def clear_chat():
    # Reset the chat display, the history, and the conversation id. Currently
    # not wired into the ChatInterface below.
    return [], [], str(uuid.uuid4())
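# Sketch (assumption): `clear_chat` looks intended for a manual gr.Blocks
# layout rather than gr.ChatInterface, e.g. a Clear button resetting the
# chatbot, the history state, and the conversation id. Names are hypothetical.
# with gr.Blocks() as manual_demo:
#     chatbot = gr.Chatbot()
#     history_state = gr.State([])
#     conversation_id = gr.State(str(uuid.uuid4()))
#     gr.Button("Clear").click(clear_chat, outputs=[chatbot, history_state, conversation_id])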
examples = [
    "Explain the theory of relativity in French",
    "Como sair de um helicóptero que caiu na água?",  # "How do I get out of a helicopter that crashed into water?"
    "¿Cómo le explicarías el aprendizaje automático a un extraterrestre?",  # "How would you explain machine learning to an alien?"
    "Explain gravity to a chicken.",
    "Give me an example of an endangered species and let me know what I can do to help preserve it.",
    "Formally introduce the transformer architecture with notation.",
]
demo = gr.ChatInterface(
    respond,
    title="Nixie Steamcore, a hotbot!",
    examples=examples,  # previously defined but never passed in
    additional_inputs=[
        gr.Textbox(value="Nixie Steamcore, a hotbot!", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=1.2, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
        gr.File(label="Upload a document"),
    ],
)
if __name__ == "__main__":
    demo.launch(debug=True)
"""
if __name__ == "__main__":
# demo.launch(debug=True)
try:
demo.queue(api_open=False, max_size=40).launch(show_api=False)
except Exception as e:
print(f"Error: {e}")
"""
"""
import gradio as gr
from huggingface_hub import InferenceClient
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(
message,
history: list[tuple[str, str]],
system_message,
max_tokens,
temperature,
top_p,
):
messages = [{"role": "system", "content": system_message}]
for val in history:
if val[0]:
messages.append({"role": "user", "content": val[0]})
if val[1]:
messages.append({"role": "assistant", "content": val[1]})
messages.append({"role": "user", "content": message})
response = ""
for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content
response += token
yield response
demo = gr.ChatInterface(
respond,
additional_inputs=[
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p (nucleus sampling)",
),
],
)
if __name__ == "__main__":
demo.launch()
""" |