Update README.md
README.md (CHANGED)

@@ -78,10 +78,10 @@ with torch.inference_mode():
 
 
     # model for the sequence-level tasks
-    model = AutoModelForSequenceClassification.
+    model = AutoModelForSequenceClassification.from_pretrained(config, trust_remote_code=True, torch_dtype=torch.half)
 
     # model for the token-level tasks
-    model = AutoModelForTokenClassification.
+    model = AutoModelForTokenClassification.from_pretrained(config, trust_remote_code=True, torch_dtype=torch.half)
 
 ```
 
@@ -112,7 +112,7 @@ prompt=['', 'MLFVVL', 'LDL', 'VTQA']
 
 for idx, each in enumerate(prompt):
     print(f"Begin generating idx: {idx} with prompt {each}")
-    output = model.chat(tokenizer, each)
+    output = model.chat(tokenizer, each, **gen_kwargs)
     print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
 ```
 
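
The commit completes the two truncated `from_pretrained` calls. For context, here is a minimal runnable sketch of the loading step; it assumes `config` holds the checkpoint path or Hub id as in the surrounding README, and the "model/path" placeholder plus the tokenizer line are illustrative additions, not part of the diff:

```python
import torch
from transformers import (
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoTokenizer,
)

# Assumption: `config` is the checkpoint path or Hub id used elsewhere in
# the README; "model/path" is a placeholder, not the real identifier.
config = "model/path"

# The checkpoint ships custom modeling code, hence trust_remote_code=True;
# torch_dtype=torch.half loads the weights in fp16.
tokenizer = AutoTokenizer.from_pretrained(config, trust_remote_code=True)

# model for the sequence-level tasks
model = AutoModelForSequenceClassification.from_pretrained(
    config, trust_remote_code=True, torch_dtype=torch.half
)

# model for the token-level tasks (the README shows the two heads as
# alternatives; load whichever matches your task)
model = AutoModelForTokenClassification.from_pretrained(
    config, trust_remote_code=True, torch_dtype=torch.half
)
```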
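
The generation hunk adds `**gen_kwargs` to `model.chat` without defining it. A hedged sketch of the loop, assuming `model` and `tokenizer` are the generation-capable checkpoint and tokenizer loaded earlier in the README; the `gen_kwargs` values below are illustrative, not the repo's defaults:

```python
# Assumption: illustrative sampling parameters; the README defines the real
# `gen_kwargs` elsewhere and its values may differ.
gen_kwargs = {"max_length": 256, "top_p": 0.8, "temperature": 0.8}

# Prompts from the diff context: the empty string asks for unconditional
# generation, the others look like protein-sequence prefixes.
prompt = ['', 'MLFVVL', 'LDL', 'VTQA']

for idx, each in enumerate(prompt):
    print(f"Begin generating idx: {idx} with prompt {each}")
    # `chat` is the custom generation method shipped with the remote code.
    output = model.chat(tokenizer, each, **gen_kwargs)
    print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
```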