Update app.py
app.py
CHANGED
@@ -4,7 +4,6 @@ from huggingface_hub import InferenceClient
 from sentence_transformers import SentenceTransformer
 import torch
 
-
 with open("knowledge.txt", "r", encoding="utf-8") as file:
     recent = file.read()
     # opens the text, saves as "file"
@@ -58,7 +57,10 @@ def get_top_chunks(query):
     # values of each index number is added to top_chunks
     return top_chunks
 
-client = InferenceClient(
+client = InferenceClient(
+    model="Qwen/Qwen2.5-72B-Instruct",
+    token="your_token_here"
+)
 #client is where you can change the LLM model!
 def respond(message,history):
     if not message.strip():
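For context, the following is a minimal sketch (not the Space's actual code) of how the newly configured client could be used inside respond(). It assumes that get_top_chunks(), defined earlier in app.py, returns a list of text chunks, and it uses InferenceClient.chat_completion from huggingface_hub; the prompt wiring, the fallback reply, and the max_tokens value are illustrative assumptions.

from huggingface_hub import InferenceClient

# Same configuration as in the commit; in practice the token should come from a secret.
client = InferenceClient(
    model="Qwen/Qwen2.5-72B-Instruct",
    token="your_token_here",
)

def respond(message, history):
    # Guard against empty input (the commit's respond() has the same check);
    # the exact return value here is an assumption.
    if not message.strip():
        return "Please enter a message."
    # get_top_chunks() is defined earlier in app.py and returns the most relevant chunks.
    context = "\n".join(get_top_chunks(message))
    messages = [
        {"role": "system", "content": "Answer using only this context:\n" + context},
        {"role": "user", "content": message},
    ]
    # chat_completion is part of huggingface_hub's InferenceClient API.
    response = client.chat_completion(messages=messages, max_tokens=512)
    return response.choices[0].message.content

In a deployed Space the token would normally be read from an environment secret (for example via os.environ) rather than hard-coded as in the diff.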