import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Run on the GPU if one is available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")
# Load the locally saved PlasmidGPT-style GPT-2 checkpoint and its tokenizer.
# trust_remote_code=True lets the repo's custom model/tokenizer classes run.
model = AutoModelForCausalLM.from_pretrained(
    "/Users/mcclainthiel/plasmidgpt-addgene-gpt2",
    trust_remote_code=True
).to(device)
model.eval()  # disable dropout for inference

tokenizer = AutoTokenizer.from_pretrained(
    "/Users/mcclainthiel/plasmidgpt-addgene-gpt2",
    trust_remote_code=True
)

# GPT-2 tokenizers often define no pad token; fall back to the EOS token so
# generate() is not handed pad_token_id=None.
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token
# Seed the model with a short DNA prompt to condition generation.
start_sequence = 'ATGGCTAGCGAATTCGGCGCGCCT'
print(f"Start sequence: {start_sequence}\n")

# Tokenize the prompt and move it to the same device as the model.
input_ids = tokenizer.encode(start_sequence, return_tensors='pt').to(device)
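# Optional sanity check (not in the original script): inspect how the
# tokenizer splits the DNA string, e.g. to confirm the vocabulary covers
# nucleotide k-mers rather than falling back to single characters or bytes.
print(f"Tokenized prompt: {tokenizer.tokenize(start_sequence)}")
print(f"Prompt token count: {input_ids.shape[1]}\n")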
# Sample one continuation of up to 300 tokens. do_sample=True with
# temperature=1.0 draws from the model's unmodified output distribution.
outputs = model.generate(
    input_ids,
    max_length=300,
    num_return_sequences=1,
    temperature=1.0,
    do_sample=True,
    pad_token_id=tokenizer.pad_token_id,
    eos_token_id=tokenizer.eos_token_id
)
# Decode back to a plain string. Note: len() counts characters, which equals
# base pairs only if the tokenizer decodes to raw nucleotides.
generated_sequence = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(f"Generated sequence:\n{generated_sequence}\n")
print(f"Length: {len(generated_sequence)} bp")