Mohammed-Altaf committed on
Commit
29341fc
1 Parent(s): f56f478

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -14
app.py CHANGED
@@ -2,29 +2,17 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  model_id = "Mohammed-Altaf/Medical-ChatBot"
5
- model = AutoModelForCausalLM.from_pretrained(
6
- model_id,
7
- torch_dtype=torch.bfloat16,
8
- trust_remote_code=True,
9
- device_map="auto",
10
- low_cpu_mem_usage=True,
11
- )
12
  tokenizer = AutoTokenizer.from_pretrained(model_id)
13
 
14
 
15
  def generate_text(input_text):
16
  input_ids = tokenizer.encode(input_text, return_tensors="pt")
17
- attention_mask = torch.ones(input_ids.shape)
18
 
19
  output = model.generate(
20
  input_ids,
21
- attention_mask=attention_mask,
22
  max_length=200,
23
- do_sample=True,
24
- top_k=10,
25
- num_return_sequences=1,
26
- eos_token_id=tokenizer.eos_token_id,
27
- )
28
 
29
  output_text = tokenizer.decode(output[0], skip_special_tokens=True)
30
  print(output_text)
 
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Hub checkpoint for the medical chatbot; both the model weights and the
# tokenizer are pulled from this single repository id.
model_id = "Mohammed-Altaf/Medical-ChatBot"

# Load once at import time so every request reuses the same instances.
# NOTE(review): loaded with library defaults (full precision, CPU unless
# transformers picks a device) — the commit removed the earlier
# bfloat16/device_map options.
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
9
def generate_text(input_text):
    """Generate a chatbot reply for ``input_text`` with the loaded model.

    Args:
        input_text: The user's prompt as a plain string.

    Returns:
        The decoded generation as a string. The full sequence is decoded,
        so the reply includes the prompt text as a prefix.
    """
    input_ids = tokenizer.encode(input_text, return_tensors="pt")

    # Library-default (greedy) decoding, capped at 200 total tokens
    # (prompt + completion) — the commit removed the sampling options.
    output = model.generate(
        input_ids,
        max_length=200,
    )

    output_text = tokenizer.decode(output[0], skip_special_tokens=True)
    print(output_text)  # keep the original stdout echo for server logs
    # Fix: the original returned None, so a Gradio handler wired to this
    # function would always render an empty response. Returning the text
    # is backward-compatible for any caller that ignored the result.
    return output_text