Spaces:
Runtime error
Runtime error
Abdulrhman37
committed on
Commit
·
2846ff5
1
Parent(s):
1988e80
changed GPU to CPU
Browse files
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Metallurgy Expert Assistant # Replace this with the name of your Space
-emoji:
+emoji: 🏭 # Choose an emoji for your project
 colorFrom: blue # Starting gradient color for the card
 colorTo: red # Ending gradient color for the card
 sdk: gradio # The SDK you're using, e.g., gradio, streamlit, etc.
app.py
CHANGED
@@ -1,8 +1,11 @@
 from model import load_model, answer
 from components import create_app_layout
+import torch
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Load the model and tokenizer
 model, tokenizer = load_model()
+model.to(device)
 
 # Define the function for Gradio to call
 def gradio_answer_fn(query):
model.py
CHANGED
@@ -2,6 +2,8 @@ from unsloth import FastLanguageModel
 import torch
 from prompts import metallurgy_prompt
 
+# Check if a GPU is available; otherwise, use the CPU
+
 def load_model():
     max_seq_length = 2048
     dtype = None