Upload 2 files
- app.py +73 -49
- requirements.txt +3 -1
app.py
CHANGED
@@ -1,63 +1,87 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+import os
+import threading
+import time
+import subprocess
+
+print("Expanding user path for Ollama")
+OLLAMA = os.path.expanduser("~/ollama")
+
+print("Checking if Ollama exists at the path")
+if not os.path.exists(OLLAMA):
+    print("Ollama not found, downloading it")
+    subprocess.run("curl -L https://ollama.com/download/ollama-linux-amd64 -o ~/ollama", shell=True)
+    os.chmod(OLLAMA, 0o755)
+
+def ollama_service_thread():
+    print("Starting Ollama service thread")
+    subprocess.run("~/ollama serve", shell=True)
+
+print("Creating and starting Ollama service thread")
+OLLAMA_SERVICE_THREAD = threading.Thread(target=ollama_service_thread)
+OLLAMA_SERVICE_THREAD.start()
+
+print("Giving Ollama serve a moment to start")
+time.sleep(10)
+
+print("Setting model to 'gemma2'")
+model = "gemma2"
+
+print(f"Pulling model {model}")
+subprocess.run(f"~/ollama pull {model}", shell=True)
+
+################################################
+################################################
+import copy
+import gradio as gr
+from ollama import Client
+
+print("Initializing Ollama client")
+client = Client(host='http://localhost:11434', timeout=120)
+
+print("Getting Hugging Face token and model ID from environment variables")
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
+MODEL_ID = os.environ.get("MODEL_ID", "google/gemma-2-9b-it")
+MODEL_NAME = MODEL_ID.split("/")[-1]
+
+print("Setting up title and description for Gradio interface")
+TITLE = "<h1><center>ollama-Chat</center></h1>"
+DESCRIPTION = f"""
+<h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
+<p>Running on Ollama backend.</p>
+"""
+
+
+CSS = """
+.duplicate-button {
+    margin: auto !important;
+    color: white !important;
+    background: black !important;
+    border-radius: 100vh !important;
+}
+h3 {
+    text-align: center;
+}
+"""
+import gradio as gr
+from llama_index.llms.ollama import Ollama
+
+# Initialize the Ollama model
+llm = Ollama(model="llama3", request_timeout=120.0)
+
+# Define the function to get the response from Ollama
+def get_response(question):
+    resp = llm.complete(question)
+    return resp
+
+# Create the Gradio interface
+iface = gr.Interface(
+    fn=get_response,
+    inputs="text",
+    outputs="text",
+    title="Ask Paul Graham",
+    description="Enter a question to learn more about Paul Graham."
+)
+
+# Launch the Gradio app
+if __name__ == "__main__":
+    iface.launch()
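One fragile spot in the new startup code is the fixed time.sleep(10) between launching "~/ollama serve" and pulling the model: on a slow cold start the server may not be accepting connections yet. A minimal readiness-poll sketch, assuming Ollama's default port 11434 (a running server answers plain HTTP on its root path); the wait_for_ollama name is illustrative, not part of the committed code:

# Sketch: replace the fixed sleep with a readiness poll (assumes the default port).
import time
import urllib.request

def wait_for_ollama(url="http://localhost:11434", timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            # The root path of a running Ollama server returns "Ollama is running".
            with urllib.request.urlopen(url) as resp:
                if resp.status == 200:
                    return
        except OSError:
            time.sleep(1)  # not accepting connections yet; retry
    raise RuntimeError("Ollama server did not become ready in time")

Calling wait_for_ollama() in place of time.sleep(10) keeps the pull from racing the server on slow starts.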
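Two mismatches in the committed app.py are worth flagging: the startup block pulls gemma2, but the llama_index client is constructed with model="llama3", which was never pulled; and get_response returns a CompletionResponse object, while Gradio's "text" output expects a plain string (there is also a harmless duplicate "import gradio as gr"). A minimal sketch of the aligned version, assuming the pulled gemma2 model is the intended one:

# Sketch: use the model the script actually pulls, and return plain text.
from llama_index.llms.ollama import Ollama

llm = Ollama(model="gemma2", request_timeout=120.0)  # matches `~/ollama pull gemma2` above

def get_response(question: str) -> str:
    resp = llm.complete(question)  # llm.complete() returns a CompletionResponse
    return resp.text               # .text holds the string Gradio expects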
requirements.txt
CHANGED
@@ -1 +1,3 @@
-huggingface_hub==0.22.2
+ollama
+streamlit
+llama_index.llms.ollama
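A note on the new requirements: pip normalizes llama_index.llms.ollama to the PyPI package llama-index-llms-ollama, so the dotted spelling resolves, and streamlit is listed even though the app only uses Gradio. For a local run outside Spaces (where the Gradio SDK is not preinstalled), an equivalent install line might be:

pip install ollama llama-index-llms-ollama gradio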