|
|
|
## How to Load and Use the Model
|
To use the model:
|
|
|
1. Install the required libraries: `pip install torch transformers`
|
2. Use the following code:
|
|
|
```python
|
# Load the model and tokenizer directly from the Hugging Face Hub.
import torch  # required below for torch.device / torch.cuda (was missing: NameError)
from transformers import AutoTokenizer, AutoModelForCausalLM

# trust_remote_code=True executes code shipped with the repo — only use
# with repositories you trust.
tokenizer = AutoTokenizer.from_pretrained("Sourabh2/Chemical_compund", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Sourabh2/Chemical_compund", trust_remote_code=True)

# Set up the device (GPU if available, otherwise CPU).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()  # inference mode: disables dropout etc.

# Build the prompt and move the token ids to the same device as the model.
input_str = "Nobelium".lower()
input_ids = tokenizer.encode(input_str, return_tensors='pt').to(device)

# Sampled generation: low temperature + top-k/top-p keeps output focused,
# repetition_penalty discourages loops.
output = model.generate(
    input_ids,
    max_length=200,
    num_return_sequences=1,
    do_sample=True,
    top_k=8,
    top_p=0.95,
    temperature=0.1,
    repetition_penalty=1.2
)

# Decode the first (and only) returned sequence back to text.
decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
print(decoded_output)