MohammedAlakhras committed on
Commit
58e74a8
1 Parent(s): 925b61d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -34
app.py CHANGED
@@ -13,6 +13,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id)
13
 
14
  # Move the model to the device
15
  model = model.to(device)
 
16
 
17
  def answer_question(
18
  prompt,
@@ -23,20 +24,20 @@ def answer_question(
23
  do_sample=True,
24
  **kwargs,
25
  ):
26
- inputs = tokenizer(prompt, return_tensors="pt")
27
- # Move the inputs to the device
28
- inputs = {key: val.to(device) for key, val in inputs.items()}
29
- input_ids = inputs["input_ids"]
30
- attention_mask = inputs["attention_mask"]
31
- generation_config = GenerationConfig(
32
- temperature=temperature,
33
- top_p=top_p,
34
- top_k=top_k,
35
- num_beams=num_beams,
36
- do_sample=do_sample,
37
- **kwargs,
38
- )
39
- with torch.no_grad():
40
  generation_output = model.generate(
41
  input_ids=input_ids,
42
  attention_mask=attention_mask,
@@ -45,29 +46,12 @@ def answer_question(
45
  output_scores=True,
46
  max_new_tokens=512,
47
  eos_token_id=tokenizer.eos_token_id
48
-
49
  )
50
  s = generation_output.sequences[0]
51
  output = tokenizer.decode(s, skip_special_tokens=True)
52
  return output.split(" Response:")[1]
53
 
54
- example_prompt = """
55
- Below is an instruction that describes a task, paired with an input that provides further context.Write a response that appropriately completes the request.
56
-
57
- ### Instruction:
58
- If you are a doctor, please answer the medical questions based on the patient's description.
59
-
60
- ### Input:
61
- Hi i have sore lumps under the skin on my legs. they started on my left ankle and are approx 1 - 2cm diameter and are spreading up onto my thies. I am eating panadol night and anti allergy pills (Atarax). I have had this for about two weeks now. Please advise.
62
-
63
- ### Response:
64
- """
65
-
66
- print(answer_question(example_prompt))
67
-
68
-
69
  def gui_interface(prompt):
70
-
71
  prompt="""
72
  Below is an instruction that describes a task, paired with an input that provides further context.Write a response that appropriately completes the request.
73
 
@@ -78,10 +62,7 @@ def gui_interface(prompt):
78
  """+prompt+"""
79
  ### Response:
80
  """
81
-
82
- print(prompt)
83
  return answer_question(prompt)
84
 
85
  iface = gr.Interface(fn=gui_interface, inputs="text", outputs="text")
86
  iface.launch()
87
-
 
13
 
14
# Place the model on the chosen device, then switch it to evaluation
# mode (disables training-only layers such as dropout) before serving.
model = model.to(device)
model.eval()
17
 
18
  def answer_question(
19
  prompt,
 
24
  do_sample=True,
25
  **kwargs,
26
  ):
27
+ with torch.no_grad(): # Disable gradient calculation
28
+ inputs = tokenizer(prompt, return_tensors="pt")
29
+ # Move the inputs to the device
30
+ inputs = {key: val.to(device) for key, val in inputs.items()}
31
+ input_ids = inputs["input_ids"]
32
+ attention_mask = inputs["attention_mask"]
33
+ generation_config = GenerationConfig(
34
+ temperature=temperature,
35
+ top_p=top_p,
36
+ top_k=top_k,
37
+ num_beams=num_beams,
38
+ do_sample=do_sample,
39
+ **kwargs,
40
+ )
41
  generation_output = model.generate(
42
  input_ids=input_ids,
43
  attention_mask=attention_mask,
 
46
  output_scores=True,
47
  max_new_tokens=512,
48
  eos_token_id=tokenizer.eos_token_id
 
49
  )
50
  s = generation_output.sequences[0]
51
  output = tokenizer.decode(s, skip_special_tokens=True)
52
  return output.split(" Response:")[1]
53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  def gui_interface(prompt):
 
55
  prompt="""
56
  Below is an instruction that describes a task, paired with an input that provides further context.Write a response that appropriately completes the request.
57
 
 
62
  """+prompt+"""
63
  ### Response:
64
  """
 
 
65
  return answer_question(prompt)
66
 
67
# Expose the question-answering function through a minimal Gradio
# text-in / text-out web interface and start the server.
iface = gr.Interface(
    fn=gui_interface,
    inputs="text",
    outputs="text",
)
iface.launch()