orionweller
committed on
Commit
•
2860a50
1
Parent(s):
2ded88b
fix
Browse files- app.py +6 -0
- requirements.txt +1 -0
app.py
CHANGED
@@ -2,6 +2,7 @@ import sys
|
|
2 |
import warnings
|
3 |
print("Warning: This application requires specific library versions. Please ensure you have the correct versions installed.")
|
4 |
|
|
|
5 |
import gradio as gr
|
6 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
7 |
import torch
|
@@ -39,7 +40,12 @@ Query: {query}
|
|
39 |
Document: {text}
|
40 |
Relevant (only output one word, either "true" or "false"): [/INST] """
|
41 |
|
|
|
42 |
def check_relevance(query, instruction, passage):
|
|
|
|
|
|
|
|
|
43 |
full_query = f"{query} {instruction}"
|
44 |
prompt = template.format(query=full_query, text=passage)
|
45 |
|
|
|
2 |
import warnings
|
3 |
print("Warning: This application requires specific library versions. Please ensure you have the correct versions installed.")
|
4 |
|
5 |
+
import spaces
|
6 |
import gradio as gr
|
7 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
8 |
import torch
|
|
|
40 |
Document: {text}
|
41 |
Relevant (only output one word, either "true" or "false"): [/INST] """
|
42 |
|
43 |
+
@spaces.GPU
|
44 |
def check_relevance(query, instruction, passage):
|
45 |
+
if torch.cuda.is_available():
|
46 |
+
device = "cuda"
|
47 |
+
model = model.to(device)
|
48 |
+
|
49 |
full_query = f"{query} {instruction}"
|
50 |
prompt = template.format(query=full_query, text=passage)
|
51 |
|
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
gradio
|
|
|
2 |
transformers==4.35.2
|
3 |
numpy==1.24.3
|
4 |
sentencepiece==0.1.99
|
|
|
1 |
gradio
|
2 |
+
spaces
|
3 |
transformers==4.35.2
|
4 |
numpy==1.24.3
|
5 |
sentencepiece==0.1.99
|