Philip Fradkin committed on
Commit be652b1 · 1 Parent(s): bc2395e

fix: readme proper dashed lines

Files changed (1)
  1. README.md +17 -17
README.md CHANGED
@@ -82,7 +82,7 @@ The 4-track model requires only a one-hot encoded sequence of your mRNA. This re
  Here is example code
  ```
  # Sequence for short mRNA
- > seq=(
+ >>> seq=(
  'TCATCTGGATTATACATATTTCGCAATGAAAGAGAGGAAGAAAAGGAAGCAGCAAAATATGTGGAGGCCCA'
  'ACAAAAGAGACTAGAAGCCTTATTCACTAAAATTCAGGAGGAATTTGAAGAACATGAAGTTACTTCCTCC'
  'ACTGAAGTCTTGAACCCCCCAAAGTCATCCATGAGGGTTGGAATCAACTTCTGAAAACACAACAAAACCA'
@@ -92,24 +92,24 @@ Here is example code
  'TCTGCTGAACAAAAGGAACAAGGTTTATCAAGGGATGTCACAACCGTGTGGAAGTTGCGTATTGTAAGCTATTC'
  )
  # One hot encode function
- > oh = seq_to_oh(seq)
- > one_hot = seq_to_oh(seq)
- > one_hot = one_hot.T
- > torch_one_hot = torch.tensor(one_hot, dtype=torch.float32)
- > torch_one_hot = torch_one_hot.unsqueeze(0)
- > print(torch_one_hot.shape)
- > torch_one_hot = torch_one_hot.to(device='cuda')
- > lengths = torch.tensor([torch_one_hot.shape[2]]).to(device='cuda')
+ >>> oh = seq_to_oh(seq)
+ >>> one_hot = seq_to_oh(seq)
+ >>> one_hot = one_hot.T
+ >>> torch_one_hot = torch.tensor(one_hot, dtype=torch.float32)
+ >>> torch_one_hot = torch_one_hot.unsqueeze(0)
+ >>> print(torch_one_hot.shape)
+ >>> torch_one_hot = torch_one_hot.to(device='cuda')
+ >>> lengths = torch.tensor([torch_one_hot.shape[2]]).to(device='cuda')
  # Load Orthrus
- > run_name="orthrus_base_4_track"
- > checkpoint="epoch=18-step=20000.ckpt"
- > model_repository="./models"
- > model = load_model(f"{model_repository}{run_name}", checkpoint_name=checkpoint)
- > model = model.to(torch.device('cuda'))
- > print(model)
+ >>> run_name="orthrus_base_4_track"
+ >>> checkpoint="epoch=18-step=20000.ckpt"
+ >>> model_repository="./models"
+ >>> model = load_model(f"{model_repository}{run_name}", checkpoint_name=checkpoint)
+ >>> model = model.to(torch.device('cuda'))
+ >>> print(model)
  # Generate embedding
- > reps = model.representation(torch_one_hot, lengths)
- > print(reps.shape)
+ >>> reps = model.representation(torch_one_hot, lengths)
+ >>> print(reps.shape)
  # torch.Size([1, 256])
  ```
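The README example above calls `seq_to_oh` without showing its definition. As a rough illustration only, here is a minimal sketch of such a one-hot encoder, assuming an A/C/G/T channel order and an all-zero column for any other character; the actual Orthrus helper and its track ordering may differ, and the shaping steps simply mirror the README snippet.

```
# Hypothetical sketch of a seq_to_oh-style one-hot encoder (not the official helper).
import numpy as np
import torch

def seq_to_oh(seq: str) -> np.ndarray:
    """Return a (len(seq), 4) one-hot array; channel order A, C, G, T is an assumption."""
    mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    oh = np.zeros((len(seq), 4), dtype=np.float32)
    for i, base in enumerate(seq.upper()):
        j = mapping.get(base)
        if j is not None:
            oh[i, j] = 1.0  # unknown characters stay all-zero
    return oh

# Shaping mirrors the README: transpose to (4, L), add a batch dimension,
# and record the sequence length for the model.
seq = "TCATCTGGATTATACATATTTCGCAATGAAAGAGAGG"
one_hot = seq_to_oh(seq).T                              # (4, L)
torch_one_hot = torch.tensor(one_hot).unsqueeze(0)      # (1, 4, L)
lengths = torch.tensor([torch_one_hot.shape[2]])
print(torch_one_hot.shape, lengths)
```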