Files changed (1)
  1. README.md +15 -5
README.md CHANGED
@@ -1,12 +1,22 @@
  ---
- license: apache-2.0
+ license: creativeml-openrail-m
  language:
  - en
- library_name: transformers
+ library_name: fasttext
- pipeline_tag: image-text-to-text
+ pipeline_tag: any-to-any
  tags:
  - multimodal
  - aria
+ datasets:
+ - fka/awesome-chatgpt-prompts
+ - nvidia/OpenMathInstruct-2
+ - neuralwork/arxiver
+ metrics:
+ - accuracy
+ - bertscore
+ base_model:
+ - black-forest-labs/FLUX.1-dev
+ new_version: openai/whisper-large-v3-turbo
  ---
  <!-- <p align="center">
  <br>Aria</br>
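This hunk swaps out three existing metadata keys (`license`, `library_name`, `pipeline_tag`) and appends four new ones (`datasets`, `metrics`, `base_model`, `new_version`). The Hub reads this YAML front matter to populate the model page's license badge, pipeline widget, and dataset links, so these edits change how the card is surfaced, not just its text. As a quick way to inspect the result, here is a minimal sketch that parses the edited front matter; the local `README.md` path and the use of PyYAML are assumptions for illustration, not part of the card.

```python
import yaml  # PyYAML, assumed available

# Read the card and split out the YAML front matter
# sitting between the two leading "---" fences.
with open("README.md", encoding="utf-8") as f:
    _, front_matter, _ = f.read().split("---", 2)

meta = yaml.safe_load(front_matter)
print(meta["license"])       # "creativeml-openrail-m" after this change
print(meta["pipeline_tag"])  # "any-to-any" after this change
print(meta["base_model"])    # ["black-forest-labs/FLUX.1-dev"]
```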
@@ -103,11 +113,11 @@ inputs = {k: v.to(model.device) for k, v in inputs.items()}
  with torch.inference_mode(), torch.cuda.amp.autocast(dtype=torch.bfloat16):
      output = model.generate(
          **inputs,
-         max_new_tokens=500,
+         max_new_tokens=99999,
          stop_strings=["<|im_end|>"],
          tokenizer=processor.tokenizer,
          do_sample=True,
-         temperature=0.9,
+         temperature=1.9,
      )
  output_ids = output[0][inputs["input_ids"].shape[1]:]
  result = processor.decode(output_ids, skip_special_tokens=True)
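This second hunk changes only two arguments of the `model.generate` call in the card's inference example. For context, a self-contained sketch of the surrounding pipeline follows, keeping the original values and noting the diff's new ones in comments. The checkpoint id, the image, and the prompt construction are assumptions, since the hunk shows nothing above the `inputs` line.

```python
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

# Hypothetical checkpoint id; the diff does not show which repo hosts this card.
model_id = "rhymes-ai/Aria"

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)

# Assumed prompt construction; only what happens after `inputs` exists is in the hunk.
image = Image.open("example.jpg")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
text = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=text, images=image, return_tensors="pt")

# Context line from the hunk header: move every input tensor to the model's device.
inputs = {k: v.to(model.device) for k, v in inputs.items()}

with torch.inference_mode(), torch.cuda.amp.autocast(dtype=torch.bfloat16):
    output = model.generate(
        **inputs,
        max_new_tokens=500,             # the diff raises this to 99999
        stop_strings=["<|im_end|>"],    # halt at the end-of-turn marker
        tokenizer=processor.tokenizer,  # required for stop_strings to work
        do_sample=True,
        temperature=0.9,                # the diff raises this to 1.9
    )

# Strip the prompt tokens; decode only the newly generated continuation.
output_ids = output[0][inputs["input_ids"].shape[1]:]
result = processor.decode(output_ids, skip_special_tokens=True)
print(result)
```

Note the effect of the new values: `max_new_tokens=99999` effectively removes the length cap, leaving `stop_strings` as the only stopping condition, and `temperature=1.9` flattens the sampling distribution well past the typical 0.7-1.0 range, making outputs far more random.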
 