Update README.md
README.md (CHANGED)
@@ -4,75 +4,26 @@ language:

Before (the previous card, for Llama-mt-lora):

language:
- en
pipeline_tag: question-answering
---

# Llama-mt-lora

<!-- Provide a quick summary of what the model is/does. -->

This model is fine-tuned from LLaMA on 8 Nvidia A100-80G GPUs using 3,000,000 groups of conversations between students and facilitators on Algebra Nation (https://www.mathnation.com/) in the context of mathematics. Llama-mt-lora consists of 32 layers and over 7 billion parameters, consuming up to 13.5 gigabytes of disk space. Researchers can experiment with and fine-tune the model to help construct conversational math AI that can generate effective responses in a mathematical context.
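
The card does not show a fine-tuning recipe. As a rough sketch of what further fine-tuning could look like with LoRA adapters via the `peft` library, under the assumption that the hyperparameters and target modules below are illustrative choices rather than values from this card:

```python
import torch
from peft import LoraConfig, get_peft_model  # assumes peft is installed
from transformers import LlamaForCausalLM

base = LlamaForCausalLM.from_pretrained(
    "Fan21/Llama-mt-lora", torch_dtype=torch.float16, device_map="auto"
)
lora_config = LoraConfig(
    r=8,                                  # illustrative rank; tune for your data
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # a common choice for LLaMA attention layers
    task_type="CAUSAL_LM",
)
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()        # only the adapter weights are trainable
```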

### Here is how to use it with text on Hugging Face
```python
import torch
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

tokenizer = LlamaTokenizer.from_pretrained("Fan21/Llama-mt-lora")
model = LlamaForCausalLM.from_pretrained(
    "Fan21/Llama-mt-lora",
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map="auto",
)

def generate_prompt(instruction, input=None):
    # Build an Alpaca-style prompt, with or without an input field.
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""

def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(model.device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    # Keep only the text generated after the "### Response:" marker.
    return output.split("### Response:")[1].strip()

instruction = 'write your instruction here'
inputs = 'write your inputs here'
output = evaluate(instruction,
                  input=inputs,
                  temperature=0.1,  # change the generation parameters as needed
                  top_p=0.75,
                  top_k=40,
                  num_beams=4,
                  max_new_tokens=128,)
```
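
For example, a hypothetical math-tutoring call (the instruction text is an illustrative assumption, not taken from the model card):

```python
# Hypothetical usage; the actual response depends on the fine-tuned weights.
print(evaluate("Explain how to solve 2x + 3 = 11 for x, step by step."))
```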

After (the updated card, for git_20):

language:
- en
pipeline_tag: question-answering
---

# git_20

<!-- Provide a quick summary of what the model is/does. -->

This model is fine-tuned from LLaMA on 8 Nvidia A100-80G GPUs using 3,000,000 groups of conversations between students and facilitators on Algebra Nation (https://www.mathnation.com/) in the context of mathematics. Llama-mt-lora consists of 32 layers and over 7 billion parameters, consuming up to 13.5 gigabytes of disk space. Researchers can experiment with and fine-tune the model to help construct conversational math AI that can generate effective responses in a mathematical context.

### Here is how to use it with images on Hugging Face
```python
import torch
from PIL import Image
from IPython.display import display  # for the image preview in a notebook
from transformers import AutoModelForCausalLM
from transformers import AutoProcessor

model = AutoModelForCausalLM.from_pretrained("Fan21/git_20")
processor = AutoProcessor.from_pretrained("Fan21/git_20")

image_path = 'Please enter the image address here'
image = Image.open(image_path)
width, height = image.size
display(image.resize((int(1 * width), int(1 * height))))  # factor 1 keeps the original size
pixel_values = processor(images=image, return_tensors="pt").pixel_values
with torch.no_grad():
    outputs = model.generate(pixel_values=pixel_values, max_length=50)

answer = processor.decode(outputs[0], skip_special_tokens=True)
```
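
The decoded answer can then be printed or post-processed; `processor.batch_decode` is the equivalent call for a whole batch of generated sequences (a minimal follow-up, assuming the snippet above has run):

```python
print(answer)                                                      # single decoded answer
print(processor.batch_decode(outputs, skip_special_tokens=True))  # batch variant
```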