Ankitajadhav committed
Commit 535f0de
Parent(s): 290b5bf
Update app.py
app.py CHANGED
@@ -10,6 +10,9 @@ from datasets import load_dataset
 # from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 from mistral_inference.model import Transformer
+from gpt4all import GPT4All
+from pathlib import Path
+
 
 # Function to clear the cache
 def clear_cache(model_name):
@@ -84,9 +87,10 @@ vector_store.populate_vectors(dataset=None)
 # load model orca-mini general purpose model
 # tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
 # model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
-
-
-model
+model_name = 'Meta-Llama-3-8B-Instruct.Q4_0.gguf' # .gguf represents quantized model
+model_path = "gpt4all"
+# add path to download load the model locally, download once and load for subsequent inference
+model = GPT4All(model_name=model_name, model_path=model_path, device="cuda")
 
 # Define the chatbot response function
 def chatbot_response(user_input):
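
For context, the snippet below is a minimal sketch of how the GPT4All model loaded in this commit is typically queried. The prompt text and the max_tokens/temp sampling values are illustrative assumptions and are not taken from app.py.

from gpt4all import GPT4All

# Load the quantized GGUF model; it is downloaded into model_path on the first run
# and loaded from disk on subsequent runs (assumes a CUDA-capable GPU for device="cuda").
model = GPT4All(
    model_name='Meta-Llama-3-8B-Instruct.Q4_0.gguf',
    model_path="gpt4all",
    device="cuda",
)

# Illustrative prompt and sampling parameters (hypothetical, not from the commit).
with model.chat_session():
    reply = model.generate(
        "Suggest a quick dinner recipe using tomatoes and basil.",
        max_tokens=256,
        temp=0.7,
    )
    print(reply)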