SrikanthChellappa committed
Commit 1e35e1a
1 Parent(s): 2e49712

Update README.md

Files changed (1): README.md (+8, -0)
README.md CHANGED

@@ -74,10 +74,13 @@ Collaiborator-MEDLLM-Llama-3-8b-v1 was trained using an NVIDIA A40 GPU, which pr

## How to use

import transformers
import torch

model_id = "collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v1"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,

@@ -85,22 +88,26 @@ pipeline = transformers.pipeline(

    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are an expert trained on healthcare and biomedical domain!"},
    {"role": "user", "content": "I'm a 35-year-old male and for the past few months, I've been experiencing fatigue, increased sensitivity to cold, and dry, itchy skin. What is the diagnosis here?"},
]

prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,

@@ -109,6 +116,7 @@ outputs = pipeline(

    temperature=0.6,
    top_p=0.9,
)

print(outputs[0]["generated_text"][len(prompt):])
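The @@ hunk boundaries above elide a few unchanged context lines of the README. For reference, a complete, runnable version of the snippet follows; the model_kwargs={"torch_dtype": torch.bfloat16}, eos_token_id=terminators, and do_sample=True arguments are assumptions filled in from the stock Llama 3 pipeline example that this snippet otherwise mirrors, not lines visible in the diff.

import transformers
import torch

model_id = "collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v1"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},  # assumed elided context: load weights in bf16
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are an expert trained on healthcare and biomedical domain!"},
    {"role": "user", "content": "I'm a 35-year-old male and for the past few months, I've been experiencing fatigue, increased sensitivity to cold, and dry, itchy skin. What is the diagnosis here?"},
]

# Render the chat messages into a single prompt string with the model's chat template
prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

# Stop at either the EOS token or Llama 3's end-of-turn token
terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,  # assumed elided context: uses the terminators defined above
    do_sample=True,            # assumed elided context: sampling so temperature/top_p take effect
    temperature=0.6,
    top_p=0.9,
)

# Print only the newly generated text, stripping the echoed prompt
print(outputs[0]["generated_text"][len(prompt):])

Without eos_token_id=terminators the terminators list defined above would go unused, which is why the elided lines very likely include it.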
  ### Contact Information
 