Bo1015 committed
Commit 0bedba4 · verified · 1 parent: 9c1318b

Update README.md

Files changed (1)
  1. README.md +3 -3
README.md CHANGED

````diff
@@ -78,10 +78,10 @@ with torch.inference_mode():
 
 
 # model for the sequence-level tasks
-model = AutoModelForSequenceClassification.from_config(config, trust_remote_code=True, torch_dtype=torch.bfloat16)
+model = AutoModelForSequenceClassification.from_pretrained(config, trust_remote_code=True, torch_dtype=torch.half)
 
 # model for the token-level tasks
-model = AutoModelForTokenClassification.from_config(config, trust_remote_code=True, torch_dtype=torch.bfloat16)
+model = AutoModelForTokenClassification.from_pretrained(config, trust_remote_code=True, torch_dtype=torch.half)
 
 ```
 
````
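For context on the hunk above: `from_config` builds the architecture with freshly initialized weights, while `from_pretrained` also loads trained weights; the dtype likewise moves from `torch.bfloat16` to `torch.half`. A minimal sketch of the two call patterns, assuming a hypothetical checkpoint id (the actual repository path is not shown in this excerpt):

```python
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification

# Hypothetical checkpoint id; substitute the model's real repository path.
checkpoint = "some-org/some-protein-lm"

config = AutoConfig.from_pretrained(checkpoint, trust_remote_code=True)

# from_config: architecture only, weights randomly initialized.
scratch_model = AutoModelForSequenceClassification.from_config(
    config, trust_remote_code=True, torch_dtype=torch.half
)

# from_pretrained: architecture plus trained weights, which is what
# this commit switches the README to.
model = AutoModelForSequenceClassification.from_pretrained(
    checkpoint, trust_remote_code=True, torch_dtype=torch.half
)
```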
 
````diff
@@ -112,7 +112,7 @@ prompt=['', 'MLFVVL', 'LDL', 'VTQA']
 
 for idx, each in enumerate(prompt):
     print(f"Begin generating idx: {idx} with prompt {each}")
-    output = model.chat(tokenizer, each)
+    output = model.chat(tokenizer, each, **gen_kwargs)
     print(f"\nEnd generation with length: {len(output.split())} - seqs: {output}\n")
 ```
 
````
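The hunk above threads `**gen_kwargs` into `model.chat`, but the excerpt does not show where `gen_kwargs` is defined. A plausible shape, assuming standard sampling parameters; the keys are illustrative, and what `chat` actually accepts depends on this model's remote code:

```python
# Hypothetical generation settings; treat the keys as illustrative
# defaults rather than a documented interface for this model's chat().
gen_kwargs = {
    "max_new_tokens": 256,
    "do_sample": True,
    "temperature": 0.8,
    "top_p": 0.9,
}

# One prompt from the README's example list.
output = model.chat(tokenizer, "MLFVVL", **gen_kwargs)
```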