---
language:
- ko
license: apache-2.0
library_name: transformers
tags:
- translation
- text-generation
datasets:
- aihub
metrics:
- bleu
- rouge
model-index:
- name: barTNumText
  results:
  - task:
      type: translation
      name: translation
    metrics:
    - type: bleu
      value: 0.9161441917016176
      name: eval_bleu
      verified: true
    - type: rouge1
      value: 0.9502159661745533
      name: eval_rouge1
      verified: true
    - type: rouge2
      value: 0.9313935147887745
      name: eval_rouge2
      verified: true
    - type: rougeL
      value: 0.950015374196916
      name: eval_rougeL
      verified: true
    - type: rougeLsum
      value: 0.9500390902948073
      name: eval_rougeLsum
      verified: true
---
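
Below is a minimal usage sketch, not an official example. It assumes the repository id is `lIlBrother/ko-barTNumText` and that the checkpoint is a standard seq2seq (BART-style) model, as the `transformers` library name, the `translation` tag, and the model name above suggest; adjust the repo id and the input sentence to your setup.

```python
# Minimal usage sketch: the repo id and the seq2seq architecture are
# assumptions based on this card's metadata, not confirmed by the authors.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "lIlBrother/ko-barTNumText"  # assumed repository id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Example Korean sentence containing digits for the model to rewrite.
text = "3시 40분에 만나자."
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

If you prefer a one-liner, the generic `text2text-generation` pipeline should also work with a seq2seq checkpoint like this.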