Sourabh2 commited on
Commit
185c69f
1 Parent(s): 39333b6

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +21 -16
README.md CHANGED
@@ -9,22 +9,27 @@ To use the model:
9
  # Load model directly
10
  import torch
  from transformers import AutoTokenizer, AutoModelForCausalLM
11
 
12
- tokenizer = AutoTokenizer.from_pretrained("Sourabh2/Chemistry_elements", trust_remote_code=True)
13
- model = AutoModelForCausalLM.from_pretrained("Sourabh2/Chemistry_elements", trust_remote_code=True)
14
  # Set up the device (GPU if available, otherwise CPU)
15
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  model = model.to(device)
17
- # Example usage
18
- messages = [
19
- {
20
- "role": "user",
21
- "content": "hydrogen"
22
- }
23
- ]
24
- prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
25
- inputs = tokenizer(prompt, return_tensors='pt', padding=True, truncation=True)
26
- outputs = model.generate(**inputs, max_length=150, num_return_sequences=1)
27
- text = tokenizer.decode(outputs[0], skip_special_tokens=True)
28
- print(text.split("assistant")[1])
29
- # Decode and print output
30
- ['Symbol: H', 'Atomic_Number: 1', 'Atomic_Weight: 1.008', 'Density: 0.0899', 'Melting_Point: 14.01', 'Boiling_Point: 20.28', 'Phase: Gas', 'Absolute_Melting_Point: 14.01']
 
 
 
 
 
 
9
  # Load model directly
10
  import torch
  from transformers import AutoTokenizer, AutoModelForCausalLM
11
 
12
+ tokenizer = AutoTokenizer.from_pretrained("Sourabh2/Chemical_compund", trust_remote_code=True)
13
+ model = AutoModelForCausalLM.from_pretrained("Sourabh2/Chemical_compund", trust_remote_code=True)
14
  # Set up the device (GPU if available, otherwise CPU)
15
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  model = model.to(device)
17
+
18
+
19
+
20
+ input_str = "Nobelium".lower()
21
+ input_ids = tokenizer.encode(input_str, return_tensors='pt').to(device)
22
+
23
+ output = model.generate(
24
+ input_ids,
25
+ max_length=200,
26
+ num_return_sequences=1,
27
+ do_sample=True,
28
+ top_k=8,
29
+ top_p=0.95,
30
+ temperature=0.1,
31
+ repetition_penalty=1.2
32
+ )
33
+
34
+ decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
35
+ print(decoded_output)