dyyyyyyyy committed on
Commit f89bb53
1 Parent(s): 01b3c50

Update README.md

Files changed (1): README.md (+4 -2)
README.md CHANGED
@@ -48,13 +48,15 @@ pip install torch>=2.1.0 datasets>=2.17.0 deepspeed>=0.13.4 accelerate>=0.27.2 t

  Below is an example using `GNER-T5`
  ```python
+ >>> import torch
  >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
  >>> tokenizer = AutoTokenizer.from_pretrained("dyyyyyyyy/GNER-T5-xxl")
- >>> model = AutoModelForSeq2SeqLM.from_pretrained("dyyyyyyyy/GNER-T5-xxl")
+ >>> model = AutoModelForSeq2SeqLM.from_pretrained("dyyyyyyyy/GNER-T5-xxl", torch_dtype=torch.bfloat16).cuda()
  >>> model = model.eval()
  >>> instruction_template = "Please analyze the sentence provided, identifying the type of entity for each word on a token-by-token basis.\nOutput format is: word_1(label_1), word_2(label_2), ...\nWe'll use the BIO-format to label the entities, where:\n1. B- (Begin) indicates the start of a named entity.\n2. I- (Inside) is used for words within a named entity but are not the first word.\n3. O (Outside) denotes words that are not part of a named entity.\n"
+ >>> sentence = "did george clooney make a musical in the 1980s"
  >>> entity_labels = ["genre", "rating", "review", "plot", "song", "average ratings", "director", "character", "trailer", "year", "actor", "title"]
- >>> instruction = f"{instruction_template}\nUse the specific entity tags: {', '.join(label_list)} and O.\n"
+ >>> instruction = f"{instruction_template}\nUse the specific entity tags: {', '.join(entity_labels)} and O.\nSentence: {sentence}"
  >>> inputs = tokenizer(instruction, return_tensors="pt").to("cuda")
  >>> outputs = model.generate(**inputs, max_new_tokens=640)
  >>> response = tokenizer.decode(outputs[0], skip_special_tokens=True)
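The instruction template asks the model to emit predictions as `word_1(label_1), word_2(label_2), ...` in BIO format. Below is a minimal sketch of how the decoded `response` could be turned back into (word, label) pairs; the `parse_bio_response` helper and its regex are illustrative assumptions, not part of the GNER codebase, which may post-process outputs differently.

```python
import re

def parse_bio_response(response: str) -> list[tuple[str, str]]:
    # Illustrative helper (not from the GNER repo): split a string like
    # "did(O) george(B-actor) clooney(I-actor) ..." into (word, BIO-label) pairs.
    pairs = []
    for match in re.finditer(r"(\S+?)\(([^)]+)\)", response):
        word, label = match.group(1), match.group(2)
        pairs.append((word, label))
    return pairs

# Hypothetical output for the example sentence:
# parse_bio_response("did(O) george(B-actor) clooney(I-actor) make(O) ...")
# -> [("did", "O"), ("george", "B-actor"), ("clooney", "I-actor"), ("make", "O"), ...]
```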