liamcripwell committed
Commit 8c54698
1 Parent(s): 0b2054b

Update README.md

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
@@ -5,7 +5,7 @@ language:
 ---
 # Structure Extraction Model by NuMind 🔥
 
-NuExtract-large is a fine-tuned version of [phi-3-small](https://huggingface.co/microsoft/Phi-3-small-8k-instruct), on a private high-quality syntatic dataset for information extraction.
+NuExtract-large is a fine-tuned version of [phi-3-small](https://huggingface.co/microsoft/Phi-3-small-8k-instruct), on a private high-quality synthetic dataset for information extraction.
 To use the model, provide an input text (less than 2000 tokens) and a JSON schema describing the information you need to extract.
 
 Note: This model is purely extractive, so each information output by the model is present as it is in the text. You can also provide an example of output to help the model understand your task more precisely.
@@ -45,7 +45,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 
-def predict_NuExtract(model,tokenizer,text, schema,example = ["","",""]):
+def predict_NuExtract(model, tokenizer, text, schema, example=["","",""]):
     schema = json.dumps(json.loads(schema), indent=4)
     input_llm = "<|input|>\n### Template:\n" + schema + "\n"
     for i in example:
@@ -53,13 +53,13 @@ def predict_NuExtract(model,tokenizer,text, schema,example = ["","",""]):
         input_llm += "### Example:\n"+ json.dumps(json.loads(i), indent=4)+"\n"
 
     input_llm += "### Text:\n"+text +"\n<|output|>\n"
-    input_ids = tokenizer(input_llm, return_tensors="pt",truncation = True, max_length = 4000).to("cuda")
+    input_ids = tokenizer(input_llm, return_tensors="pt", truncation=True, max_length=4000).to("cuda")
 
     output = tokenizer.decode(model.generate(**input_ids)[0], skip_special_tokens=True)
     return output.split("<|output|>")[1].split("<|end-output|>")[0]
 
 
-model = AutoModelForCausalLM.from_pretrained("numind/NuExtract", trust_remote_code=True,torch_dtype=torch.bfloat16)
+model = AutoModelForCausalLM.from_pretrained("numind/NuExtract", trust_remote_code=True, torch_dtype=torch.bfloat16)
 tokenizer = AutoTokenizer.from_pretrained("numind/NuExtract", trust_remote_code=True)
 
 model.to("cuda")
@@ -91,7 +91,7 @@ schema = """{
     }
 }"""
 
-prediction = predict_NuExtract(model,tokenizer,text, schema,example = ["","",""])
+prediction = predict_NuExtract(model, tokenizer, text, schema, example=["","",""])
 print(prediction)
 
 ```
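For reference, a minimal usage sketch of the updated `predict_NuExtract` helper after this commit. It assumes the `model`, `tokenizer`, and `predict_NuExtract` from the README above are already in scope with the model on a CUDA device; the input text and schema fields below are illustrative placeholders, not part of the commit.

```python
# Sketch only: `model`, `tokenizer`, and predict_NuExtract are assumed to be
# defined as in the README above, with the model moved to "cuda".

# Hypothetical input text and extraction template (field names are placeholders).
text = "NuExtract-large is a fine-tuned version of Phi-3-small released by NuMind."

schema = """{
    "Model": {
        "Name": "",
        "Base model": ""
    },
    "Company": ""
}"""

# example=["","",""] matches the README's default, i.e. no worked output examples.
prediction = predict_NuExtract(model, tokenizer, text, schema, example=["","",""])
print(prediction)
```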