Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -30,19 +30,14 @@ disease_details = {
|
|
30 |
# Passwords
|
31 |
doctor_password = "doctor123"
|
32 |
|
33 |
-
# Load doctor consultation model and tokenizer
|
34 |
-
|
35 |
-
|
36 |
-
except Exception as e:
|
37 |
-
print(f"Fast tokenizer failed: {e}. Falling back to slow tokenizer.")
|
38 |
-
doctor_tokenizer = AutoTokenizer.from_pretrained("ahmed-7124/dgptAW", use_fast=False)
|
39 |
-
|
40 |
-
doctor_model = AutoModelForCausalLM.from_pretrained("ahmed-7124/dgptAW")
|
41 |
|
42 |
def consult_doctor(prompt):
|
43 |
-
inputs =
|
44 |
-
outputs =
|
45 |
-
response =
|
46 |
return response
|
47 |
|
48 |
# Functions
|
|
|
30 |
# Passwords
|
31 |
doctor_password = "doctor123"
|
32 |
|
33 |
+
# Doctor-consultation backend: load the PastelMed checkpoint once at import time.
# Both objects are module-level so consult_doctor() can reuse them per request.
_MODEL_ID = "harishussain12/PastelMed"
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(_MODEL_ID)
|
|
|
|
|
|
|
|
|
|
|
36 |
|
37 |
def consult_doctor(prompt):
    """Generate a doctor-style reply for *prompt* with the module-level model.

    The prompt is tokenized to PyTorch tensors, fed to ``model.generate``
    (capped at 100 new tokens), and the first sequence is decoded back to
    plain text with special tokens stripped.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(**encoded, max_new_tokens=100)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
|
42 |
|
43 |
# Functions
|