lIlBrother committed on
Commit a9f1e24
1 Parent(s): 9ba3613

Init: add model card

Files changed (1)
  1. README.md +42 -2
README.md CHANGED
@@ -1,3 +1,43 @@
  ---
- license: apache-2.0
- ---
+ language:
+ - ko # Example: fr
+ license: apache-2.0 # Example: apache-2.0 or any license from https://hf.co/docs/hub/repositories-licenses
+ library_name: transformers # Optional. Example: keras or any library from https://github.com/huggingface/hub-docs/blob/main/js/src/lib/interfaces/Libraries.ts
+ tags:
+ - translation # Example: audio
+ - text-generation # Example: automatic-speech-recognition
+ datasets:
+ - aihub # Example: common_voice. Use dataset id from https://hf.co/datasets
+ metrics:
+ - bleu # Example: wer. Use metric id from https://hf.co/metrics
+ - rouge
+
+ # Optional. Add this if you want to encode your eval results in a structured way.
+ model-index:
+ - name: barTNumText
+   results:
+   - task:
+       type: translation # Required. Example: automatic-speech-recognition
+       name: translation # Optional. Example: Speech Recognition
+     metrics:
+       - type: bleu # Required. Example: wer. Use metric id from https://hf.co/metrics
+         value: 0.9161441917016176 # Required. Example: 20.90
+         name: eval_bleu # Optional. Example: Test WER
+         verified: true # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+       - type: rouge1 # Required. Example: wer. Use metric id from https://hf.co/metrics
+         value: 0.9502159661745533 # Required. Example: 20.90
+         name: eval_rouge1 # Optional. Example: Test WER
+         verified: true # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+       - type: rouge2 # Required. Example: wer. Use metric id from https://hf.co/metrics
+         value: 0.9313935147887745 # Required. Example: 20.90
+         name: eval_rouge2 # Optional. Example: Test WER
+         verified: true # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+       - type: rougeL # Required. Example: wer. Use metric id from https://hf.co/metrics
+         value: 0.950015374196916 # Required. Example: 20.90
+         name: eval_rougeL # Optional. Example: Test WER
+         verified: true # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+       - type: rougeLsum # Required. Example: wer. Use metric id from https://hf.co/metrics
+         value: 0.9500390902948073 # Required. Example: 20.90
+         name: eval_rougeLsum # Optional. Example: Test WER
+         verified: true # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+ ---