Update app.py
app.py
CHANGED
@@ -1,7 +1,28 @@
+import httpx
 from llama_index.llms.ollama import Ollama
 from llama_index.core.llms import ChatMessage
 import gradio as gr
 
+import os
+import threading
+import subprocess
+
+def ollama():
+    os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
+    os.environ['OLLAMA_ORIGINS'] = '*'
+    subprocess.Popen(["ollama", "serve"])
+
+ollama_thread = threading.Thread(target=ollama)
+ollama_thread.start()
+
+proxy_url = "http://localhost:11434"
+
+
+proxies = {
+    "http://": "http://localhost:11434",
+    "https://": "http://localhost:11434",
+}
+
 llm = Ollama(model="llama3", request_timeout=120.0)
 
 def get_completion(prompt):
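Note on the change above: the hunk starts `ollama serve` in a background thread and constructs `llm` immediately afterwards, so the first request can race the server startup. (It also imports httpx and defines `proxy_url`/`proxies`, but their use is not visible in this hunk.) Below is a minimal, hypothetical sketch of guarding against that startup race, assuming the default Ollama port used in the diff; `wait_for_ollama` is an illustrative helper, not part of the Space's code.

import time
import httpx

def wait_for_ollama(url: str = "http://localhost:11434", retries: int = 30) -> None:
    # Poll the server's root endpoint (it replies "Ollama is running")
    # until it accepts connections, sleeping one second between attempts.
    for _ in range(retries):
        try:
            httpx.get(url)
            return
        except httpx.ConnectError:
            time.sleep(1)
    raise RuntimeError("Ollama server did not come up in time")

# Example: call this after ollama_thread.start() and before
# constructing Ollama(model="llama3", ...).
wait_for_ollama()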