Ezi Ozoani committed on
Commit
2d5ffb9
β€’
1 Parent(s): efc4d3b
Files changed (49) hide show
  1. 1_πŸ“_form.py +279 -0
  2. README.md +5 -5
  3. __pycache__/extract_code.cpython-39.pyc +0 -0
  4. __pycache__/markdownTagExtract.cpython-39.pyc +0 -0
  5. __pycache__/middleMan.cpython-39.pyc +0 -0
  6. __pycache__/persist.cpython-37.pyc +0 -0
  7. __pycache__/persist.cpython-39.pyc +0 -0
  8. __pycache__/specific_extraction.cpython-39.pyc +0 -0
  9. combined.md +141 -0
  10. current_card.md +222 -0
  11. current_editable.md +141 -0
  12. extract_code.py +532 -0
  13. language_model_template1.md +331 -0
  14. lets_combine.md +1 -0
  15. markdownTagExtract.cpython-39.pyc +0 -0
  16. markdownTagExtract.py +99 -0
  17. middleMan.cpython-39.pyc +0 -0
  18. middleMan.py +178 -0
  19. modelcard_template_new_spec.md +222 -0
  20. out_markd.md +1042 -0
  21. output.md +5 -0
  22. pages/10_ πŸ“_Technical Specifications.py +61 -0
  23. pages/11_ πŸ“¬_Model_Card_Contact.py +26 -0
  24. pages/12_πŸ‘©β€πŸ’»_How_To_Get_Started.py +30 -0
  25. pages/13_πŸ”–_Model_Card_Authors.py +27 -0
  26. pages/14_πŸ“š_Glossary.py +26 -0
  27. pages/15_More_Information.py +26 -0
  28. pages/1_πŸ‘€_CardProgress.py +15 -0
  29. pages/2_πŸ“œ_Model_Details.py +75 -0
  30. pages/3_ πŸ—_Uses.py +51 -0
  31. pages/4_⚠️_Limits_and_Risks.py +36 -0
  32. pages/5_πŸ‹οΈβ€β™€οΈ_Model_training.py +90 -0
  33. pages/6_πŸ”¬_Model_Evaluation.py +66 -0
  34. pages/7_πŸ”Ž_Model_Examination.py +35 -0
  35. pages/8_🌏_Environmental_Impact.py +56 -0
  36. pages/9_πŸ“Œ_Citation.py +48 -0
  37. pages/__pycache__/HowToGetStarted.cpython-39.pyc +0 -0
  38. pages/__pycache__/firstPage.cpython-39.pyc +0 -0
  39. pages/__pycache__/viewCardProgress.cpython-39.pyc +0 -0
  40. persist.cpython-39.pyc +0 -0
  41. persist.py +26 -0
  42. requirements.txt +4 -0
  43. specific_extraction.py +528 -0
  44. style.css +0 -0
  45. temp_uploaded_filed_Dir/modelcard_template_new_spec.md +201 -0
  46. template.md +136 -0
  47. test_markdown_out.py +30 -0
  48. testing_layout.py +71 -0
  49. viewCardProgress(old).py +101 -0
1_πŸ“_form.py ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from yaml import load
2
+ from persist import persist, load_widget_state
3
+ import streamlit as st
4
+ from io import StringIO
5
+ import tempfile
6
+ from pathlib import Path
7
+ import requests
8
+ from huggingface_hub import hf_hub_download, upload_file
9
+ import pandas as pd
10
+ from huggingface_hub import create_repo
11
+ import os
12
+ from middleMan import parse_into_jinja_markdown as pj
13
+ #from pages import 1_πŸ‘€_CardProgress
14
+
15
@st.cache
def get_cached_data():
    """Fetch and cache the reference data used to populate the form widgets.

    Scrapes the Hub's language and license tables and queries the public
    metrics / model-tags APIs. Cached by streamlit so the network round
    trips happen once per session.

    Returns:
        tuple: (languages_map, license_map, available_metrics, libraries, tasks)
            languages_map: dict mapping ISO code -> language name.
            license_map: dict mapping license full name -> license identifier.
            available_metrics: list of metric ids.
            libraries: list of library tag ids.
            tasks: list of pipeline-tag (task) ids.
    """
    lang_table = pd.read_html("https://hf.co/languages")[0]
    languages_map = dict(zip(lang_table["ISO code"], lang_table["Language"]))

    license_table = pd.read_html("https://huggingface.co/docs/hub/repositories-licenses")[0]
    license_map = dict(
        zip(
            license_table.Fullname,
            license_table["License identifier (to use in model card)"],
        )
    )

    available_metrics = [m['id'] for m in requests.get('https://huggingface.co/api/metrics').json()]

    tags_data = requests.get('https://huggingface.co/api/models-tags-by-type').json()
    libraries = [t['id'] for t in tags_data['library']]
    tasks = [t['id'] for t in tags_data['pipeline_tag']]
    return languages_map, license_map, available_metrics, libraries, tasks
32
+
33
def card_upload(card_info, repo_id, token):
    """Upload the rendered model card as README.md to a Hub repo.

    Args:
        card_info: The card content; converted with str() before writing.
        repo_id: Target repo in "namespace/repo-name" form.
        token: Hub token with write access.

    Returns:
        str: URL of the uploaded file as returned by `upload_file`.
    """
    repo_type = "space"
    # BUG FIX: these assignments previously had trailing commas
    # (`revision=None,`), which made each a 1-tuple — so `upload_file`
    # received revision=(None,) instead of revision=None. The unused
    # commit_description / create_pr locals are dropped entirely.
    revision = None
    with tempfile.TemporaryDirectory() as tmpdir:
        # Write to a temp file so upload_file can read a real path.
        tmp_path = Path(tmpdir) / "README.md"
        tmp_path.write_text(str(card_info))
        url = upload_file(
            path_or_fileobj=str(tmp_path),
            path_in_repo="README.md",
            repo_id=repo_id,
            token=token,
            repo_type=repo_type,
            identical_ok=True,
            revision=revision,
        )
    return url
52
+
53
def validate(self, repo_type="model"):
    """Validates card against Hugging Face Hub's model card validation logic.
    Using this function requires access to the internet, so it is only called
    internally by `modelcards.ModelCard.push_to_hub`.

    Args:
        repo_type (`str`, *optional*):
            The type of Hugging Face repo to push to. Defaults to None, which will
            use "model". Other options are "dataset" and "space".

    Raises:
        RuntimeError: If `repo_type` is not one of the known types, or if the
            Hub rejects the card's YAML (HTTP 400).
        requests.exceptions.HTTPError: For any other non-2xx response.
    """
    if repo_type is None:
        repo_type = "model"

    # TODO - compare against repo types constant in huggingface_hub if we move this object there.
    if repo_type not in ["model", "space", "dataset"]:
        # BUG FIX: this message was missing the f-prefix, so the literal
        # text "{repo_type}" was shown instead of the offending value.
        raise RuntimeError(
            f"Provided repo_type '{repo_type}' should be one of ['model', 'space',"
            " 'dataset']."
        )

    body = {
        "repoType": repo_type,
        "content": str(self),
    }
    headers = {"Accept": "text/plain"}

    try:
        # `body` is form-encoded (requests' `data=`), matching the Hub endpoint.
        r = requests.post(
            "https://huggingface.co/api/validate-yaml", data=body, headers=headers
        )
        r.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        if r.status_code == 400:
            # 400 means the YAML itself is invalid — surface the server's text.
            raise RuntimeError(r.text)
        else:
            raise exc
88
+
89
+
90
## Save uploaded [markdown] file to directory to be used by jinja parser function
def save_uploadedfile(uploadedfile):
    """Write an uploaded streamlit file to `temp_uploaded_filed_Dir`.

    Args:
        uploadedfile: streamlit UploadedFile-like object (has `.name` and
            `.getbuffer()`).

    Returns:
        str: The uploaded file's name (used later as the template path).
    """
    destination = os.path.join("temp_uploaded_filed_Dir", uploadedfile.name)
    with open(destination, "wb") as out_file:
        out_file.write(uploadedfile.getbuffer())
    st.success("Saved File:{} to temp_uploaded_filed_Dir".format(uploadedfile.name))
    return uploadedfile.name
96
+
97
def main():
    """Render the model-card form page: metadata inputs, warnings, and a
    sidebar for uploading a template, pushing the card to the Hub, and
    downloading the current card."""
    if "model_name" not in st.session_state:
        # Initialize session state.
        st.session_state.update({
            "input_model_name": "",
            "languages": [],
            "license": "",
            "library_name": "",
            "datasets": "",
            "metrics": [],
            "task": "",
            "tags": "",
            "model_description": "Some cool model...",
            "shared_by": "",
            "the_authors": "",
            "Model_details_text": "",
            "Model_developers": "",

            "Model_how_to": "",

            "Model_uses": "",
            "Direct_Use": "",
            "Downstream_Use": "",
            "Out-of-Scope_Use": "",

            "Model_Limits_n_Risks": "",
            "Recommendations": "",

            "training_data": "",
            "preprocessing": "",
            "Speeds_Sizes_Times": "",

            "Model_Eval": "",
            "Testing_Data": "",
            "Factors": "",
            "Metrics": "",
            "Model_Results": "",

            "Model_c02_emitted": "",
            "Model_hardware": "",
            "hours_used": "",
            "Model_cloud_provider": "",
            "Model_cloud_region": "",

            "Model_cite": "",
            "paper_url": "",
            "github_url": "",
            "bibtex_citation": "",
            "APA_citation": "",

            "Model_examin": "",
            "Model_card_contact": "",
            "Model_card_authors": "",
            "Glossary": "",
            "More_info": "",

            "Model_specs": "",
            "compute_infrastructure": "",
            "technical_specs_software": "",

            "check_box": bool,
            "markdown_upload": " ",
            "legal_view": bool,
            "researcher_view": bool,
            "beginner_technical_view": bool,
            "markdown_state": "",
        })
    ## getting cache for each warnings
    languages_map, license_map, available_metrics, libraries, tasks = get_cached_data()

    ## form UI setting
    st.header("Model Card Form")

    warning_placeholder = st.empty()

    st.text_input("Model Name", key=persist("model_name"))
    st.text_area("Model Description", help="The model description provides basic details about the model. This includes the architecture, version, if it was introduced in a paper, if an original implementation is available, the author, and general information about the model. Any copyright should be attributed here. General information about training procedures, parameters, and important disclaimers can also be mentioned in this section.", key=persist('model_description'))
    # FIX: "lanuage" typo corrected in the user-facing help text below.
    st.multiselect("Language(s)", list(languages_map), format_func=lambda x: languages_map[x], help="The language(s) associated with this model. If this is not a text-based model, you should specify whatever language is used in the dataset. For instance, if the dataset's labels are in english, you should select English here.", key=persist("languages"))
    st.selectbox("License", [""] + list(license_map.values()), help="The license associated with this model.", key=persist("license"))
    st.selectbox("Library Name", [""] + libraries, help="The name of the library this model came from (Ex. pytorch, timm, spacy, keras, etc.). This is usually automatically detected in model repos, so it is not required.", key=persist('library_name'))
    st.text_input("Datasets (comma separated)", help="The dataset(s) used to train this model. Use dataset id from https://hf.co/datasets.", key=persist("datasets"))
    st.multiselect("Metrics", available_metrics, help="Metrics used in the training/evaluation of this model. Use metric id from https://hf.co/metrics.", key=persist("metrics"))
    st.selectbox("Task", [""] + tasks, help="What task does this model aim to solve?", key=persist('task'))
    st.text_input("Tags (comma separated)", help="Additional tags to add which will be filterable on https://hf.co/models. (Ex. image-classification, vision, resnet)", key=persist("tags"))
    st.text_input("Author(s) (comma separated)", help="The authors who developed this model. If you trained this model, the author is you.", key=persist("the_authors"))
    st.text_input("Related Research Paper", help="Research paper related to this model.", key=persist("paper_url"))
    st.text_input("Related GitHub Repository", help="Link to a GitHub repository used in the development of this model", key=persist("github_url"))
    # FIX: widget key was persist("bibtex_citations") (plural) while the
    # session state initialized above uses "bibtex_citation" — the value
    # was being persisted under a key nothing else reads.
    st.text_area("Bibtex Citation", help="Bibtex citations for related work", key=persist("bibtex_citation"))
    st.text_input("Carbon Emitted:", help="You can estimate carbon emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700)", key=persist("Model_c02_emitted"))

    # warnings setting
    languages = st.session_state.languages or None
    license = st.session_state.license or None
    task = st.session_state.task or None
    markdown_upload = st.session_state.markdown_upload
    # Handle any warnings...
    do_warn = False
    warning_msg = "Warning: The following fields are required but have not been filled in: "
    if not languages:
        warning_msg += "\n- Languages"
        do_warn = True
    if not license:
        warning_msg += "\n- License"
        do_warn = True
    if not task or not markdown_upload:
        warning_msg += "\n- Please choose a task or upload a model card"
        do_warn = True
    if do_warn:
        warning_placeholder.error(warning_msg)

    # Tasks whose cards should start from the language-model template.
    # FIX: the original `elif task == 'fill-mask' or 'translation' or ...`
    # chain was always truthy (each bare string is truthy), so every task —
    # and even no task — selected the language-model template. Replaced with
    # proper set membership; also corrects the stray-space ' sentence-similarity'.
    language_model_tasks = {
        'fill-mask', 'translation', 'token-classification', 'sentence-similarity',
        'summarization', 'question-answering', 'text2text-generation',
        'text-classification', 'text-generation', 'conversational',
    }

    with st.sidebar:

        ######################################################
        ### Uploading a model card from local drive
        ######################################################
        st.markdown("## Upload Model Card")

        st.markdown("#### Model Card must be in markdown (.md) format.")

        # Read a single file
        uploaded_file = st.file_uploader("Choose a file", type=['md'], help='Please choose a markdown (.md) file type to upload')
        if uploaded_file is not None:
            name_of_uploaded_file = save_uploadedfile(uploaded_file)
            st.session_state.markdown_upload = name_of_uploaded_file  ## uploaded model card
        elif st.session_state.task in language_model_tasks:
            st.session_state.markdown_upload = "language_model_template1.md"  ## language model template
        elif st.session_state.task:
            st.session_state.markdown_upload = "current_card.md"  ## default non language model template

        #########################################
        ### Uploading model card to HUB
        #########################################
        out_markdown = open(st.session_state.markdown_upload, "r+").read()
        print_out_final = f"{out_markdown}"
        st.markdown("## Export Loaded Model Card to Hub")
        with st.form("Upload to πŸ€— Hub"):
            st.markdown("Use a token with write access from [here](https://hf.co/settings/tokens)")
            token = st.text_input("Token", type='password')
            repo_id = st.text_input("Repo ID")
            submit = st.form_submit_button('Upload to πŸ€— Hub', help='The current model card will be uploaded to a branch in the supplied repo ')

        if submit:
            if len(repo_id.split('/')) == 2:
                # Create the repo first (no-op if it already exists), then push.
                create_repo(repo_id, exist_ok=True, token=token)
                new_url = card_upload(pj(), repo_id, token=token)
                st.success(f"Pushed the card to the repo [here]({new_url})!")
            else:
                st.error("Repo ID invalid. It should be username/repo-name. For example: nateraw/food")

        #########################################
        ### Download model card
        #########################################
        st.markdown("## Download current Model Card")

        # FIX: originally only None and the single-space string ' ' fell back
        # to the default name, so an empty model name produced '_model_card.md'.
        if not st.session_state.model_name or not st.session_state.model_name.strip():
            downloaded_file_name = 'current_model_card.md'
        else:
            downloaded_file_name = st.session_state.model_name + '_' + 'model_card.md'
        download_status = st.download_button(label='Download Model Card', data=pj(), file_name=downloaded_file_name, help="The current model card will be downloaded as a markdown (.md) file")
        if download_status:
            st.success("Your current model card, successfully downloaded πŸ€—")
273
+
274
+
275
+
276
+
277
# Restore any persisted widget state before rendering the form page.
if __name__ == '__main__':
    load_widget_state()
    main()
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
- title: Model Cards Writing Tool
3
- emoji: πŸƒ
4
- colorFrom: purple
5
  colorTo: yellow
6
  sdk: streamlit
7
  sdk_version: 1.10.0
8
- app_file: app.py
9
  pinned: false
10
- license: openrail
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Modelcard Creator
3
+ emoji: ⚑
4
+ colorFrom: red
5
  colorTo: yellow
6
  sdk: streamlit
7
  sdk_version: 1.10.0
8
+ app_file: 1_πŸ“_form.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/extract_code.cpython-39.pyc ADDED
Binary file (14.4 kB). View file
 
__pycache__/markdownTagExtract.cpython-39.pyc ADDED
Binary file (1.74 kB). View file
 
__pycache__/middleMan.cpython-39.pyc ADDED
Binary file (4.84 kB). View file
 
__pycache__/persist.cpython-37.pyc ADDED
Binary file (853 Bytes). View file
 
__pycache__/persist.cpython-39.pyc ADDED
Binary file (873 Bytes). View file
 
__pycache__/specific_extraction.cpython-39.pyc ADDED
Binary file (10.8 kB). View file
 
combined.md ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - es
4
+ license: apache-2.0
5
+ library_name: keras
6
+ tags:
7
+ - autogenerated-modelcard
8
+ ---
9
+
10
+ # MyModelName
11
+
12
+ ## Table of Contents
13
+ - [MyModelName](#-model_id--defaultmymodelname-true)
14
+ - [Table of Contents](#table-of-contents)
15
+ - [Model Details](#model-details)
16
+ - [How to Get Started with the Model](#how-to-get-started-with-the-model)
17
+ - [Uses](#uses)
18
+ - [Direct Use](#direct-use)
19
+ - [Downstream Use](#downstream-use)
20
+ - [Misuse and Out-of-scope Use](#misuse-and-out-of-scope-use)
21
+ - [Limitations and Biases](#limitations-and-biases)
22
+ - [Training](#training)
23
+ - [Training Data](#training-data)
24
+ - [Training Procedure](#training-procedure)
25
+ - [Evaluation Results](#evaluation-results)
26
+ - [Environmental Impact](#environmental-impact)
27
+ - [Citation Information](#citation-information)
28
+
29
+
30
+ <model_details>
31
+ ## Model Details
32
+
33
+ <!-- Give an overview of your model, the relevant research paper, who trained it, etc. -->
34
+
35
+ Some cool model...
36
+
37
+ - Developed by:
38
+ - Language(s):
39
+ - License: This model is licensed under the apache-2.0 license
40
+ - Resources for more information:
41
+
42
+
43
+
44
+ </model_details>
45
+
46
+ <how_to_start>
47
+ ## How to Get Started with the Model
48
+
49
+ Use the code below to get started with the model.
50
+
51
+ ```python
52
+ # A nice code snippet here that describes how to use the model...
53
+ ```
54
+ </how_to_start>
55
+
56
+ <uses>
57
+
58
+ ## Uses
59
+
60
+ #### Direct Use
61
+
62
+ <!-- Describe what kind of tasks this model can be used for directly or problems it can solve. -->
63
+
64
+ [More Information Needed]
65
+
66
+ #### Downstream Use
67
+
68
+ <!-- Describe how this model could be leveraged by a downstream model (if applicable) -->
69
+
70
+ [More Information Needed]
71
+
72
+ #### Misuse and Out-of-scope Use
73
+
74
+ <!-- Describe ways in which this model ***should not*** be used. -->
75
+
76
+ [More Information Needed]
77
+ </uses>
78
+
79
+ <Limitations_and_Biases>
80
+
81
+ ## Limitations and Biases
82
+
83
+ <!-- Describe limitations and biases of this model or models of it's type. -->
84
+
85
+ **CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.**
86
+
87
+ [More Information Needed]
88
+
89
+ </Limitations_and_Biases>
90
+
91
+ <Training>
92
+
93
+ ## Training
94
+
95
+ #### Training Data
96
+
97
+ <!-- Describe the dataset used to train this model. -->
98
+ <!-- Refer to data card if dataset is provided and exists on the hub -->
99
+
100
+ See the data card for additional information.
101
+
102
+ #### Training Procedure
103
+
104
+ <!-- Describe the preprocessing, hardware used, training hyperparameters, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ </Training>
109
+
110
+ <Eval_Results>
111
+ ## Evaluation Results
112
+
113
+ <!-- Describe evaluation results of this model across any datasets it was evaluated on. -->
114
+
115
+ [More Information Needed]
116
+
117
+ </Eval_Results>
118
+
119
+ <E_Impact>
120
+ ## Environmental Impact
121
+
122
+ <!-- Provide information to document the environmental impact of this model -->
123
+
124
+ You can estimate carbon emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700)
125
+
126
+ - **Hardware Type:**
127
+ - **Hours used:**
128
+ - **Cloud Provider:**
129
+ - **Compute Region:**
130
+ - **Carbon Emitted:**
131
+
132
+ </E_Impact>
133
+
134
+ <Cite>
135
+
136
+ ## Citation Information
137
+
138
+ ```bibtex
139
+
140
+ ```
141
+ </Cite>
current_card.md ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ {{card_data}}
3
+ ---
4
+
5
+ # {{ model_id }}
6
+
7
+ <!-- Provide a quick summary of what the model is/does. -->
8
+
9
+ # Table of Contents
10
+
11
+ - [{{ model_id }}](#-model_id-)
12
+ - [Table of Contents](#table-of-contents)
13
+ - [Model Details](#model-details)
14
+ - [Model Description](#model-description)
15
+ - [Uses](#uses)
16
+ - [Direct Use](#direct-use)
17
+ - [Downstream Use [Optional]](#downstream-use-optional)
18
+ - [Out-of-Scope Use](#out-of-scope-use)
19
+ - [Bias, Risks, and Limitations](#bias-risks-and-limitations)
20
+ - [Recommendations](#recommendations)
21
+ - [Training Details](#training-details)
22
+ - [Training Data](#training-data)
23
+ - [Training Procedure](#training-procedure)
24
+ - [Preprocessing](#preprocessing)
25
+ - [Speeds, Sizes, Times](#speeds-sizes-times)
26
+ - [Evaluation](#evaluation)
27
+ - [Testing Data, Factors & Metrics](#testing-data-factors--metrics)
28
+ - [Testing Data](#testing-data)
29
+ - [Factors](#factors)
30
+ - [Metrics](#metrics)
31
+ - [Results](#results)
32
+ - [Model Examination](#model-examination)
33
+ - [Environmental Impact](#environmental-impact)
34
+ - [Technical Specifications [optional]](#technical-specifications-optional)
35
+ - [Model Architecture and Objective](#model-architecture-and-objective)
36
+ - [Compute Infrastructure](#compute-infrastructure)
37
+ - [Hardware](#hardware)
38
+ - [Software](#software)
39
+ - [Citation](#citation)
40
+ - [Glossary [optional]](#glossary-optional)
41
+ - [More Information [optional]](#more-information-optional)
42
+ - [Model Card Authors [optional]](#model-card-authors-optional)
43
+ - [Model Card Contact](#model-card-contact)
44
+ - [How to Get Started with the Model](#how-to-get-started-with-the-model)
45
+
46
+
47
+ # Model Details
48
+
49
+ ## Model Description
50
+
51
+ <!--> Provide a longer summary of what this model is. <!-->
52
+ {{ the_model_description | default("More information needed", true)}}
53
+
54
+ - **Developed by:** {{ developers | default("More information needed", true)}}
55
+ - **Shared by [Optional]:** {{ shared_by | default("More information needed", true)}}
56
+ - **Model type:** Language model
57
+ - **Language(s) (NLP):** {{ language | default("More information needed", true)}}
58
+ - **License:** {{ license | default("More information needed", true)}}
59
+ - **Related Models:** {{ related_models | default("More information needed", true)}}
60
+ - **Parent Model:** {{ parent_model | default("More information needed", true)}}
61
+ - **Resources for more information:** {{ more_resources | default("More information needed", true)}}
62
+
63
+ # Uses
64
+
65
+ <!--> Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. <!-->
66
+
67
+ ## Direct Use
68
+
69
+ <!--> This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. <!-->
70
+
71
+ {{ direct_use | default("More information needed", true)}}
72
+
73
+ ## Downstream Use [Optional]
74
+
75
+ <!--> This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app <!-->
76
+
77
+ {{ downstream_use | default("More information needed", true)}}
78
+
79
+ ## Out-of-Scope Use
80
+
81
+ <!--> This section addresses misuse, malicious use, and uses that the model will not work well for. <!-->
82
+
83
+ {{ out_of_scope_use | default("More information needed", true)}}
84
+
85
+ # Bias, Risks, and Limitations
86
+
87
+ <!--> This section is meant to convey both technical and sociotechnical limitations. <!-->
88
+
89
+ {{ bias_risks_limitations | default("More information needed", true)}}
90
+
91
+ ## Recommendations
92
+
93
+ <!--> This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. <!-->
94
+
95
+ {{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recomendations.", true)}}
96
+
97
+ # Training Details
98
+
99
+ ## Training Data
100
+
101
+ <!--> This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. <!-->
102
+
103
+ {{ training_data | default("More information needed", true)}}
104
+
105
+ ## Training Procedure
106
+
107
+ <!--> This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. <!-->
108
+
109
+ ### Preprocessing
110
+
111
+ {{ preprocessing | default("More information needed", true)}}
112
+
113
+ ### Speeds, Sizes, Times
114
+
115
+ <!--> This section provides information about throughput, start/end time, checkpoint size if relevant, etc. <!-->
116
+
117
+ {{ speeds_sizes_times | default("More information needed", true)}}
118
+
119
+ # Evaluation
120
+
121
+ <!--> This section describes the evaluation protocols and provides the results. <!-->
122
+
123
+ ## Testing Data, Factors & Metrics
124
+
125
+ ### Testing Data
126
+
127
+ <!--> This should link to a Data Card if possible. <!-->
128
+
129
+ {{ testing_data | default("More information needed", true)}}
130
+
131
+ ### Factors
132
+
133
+ <!--> These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. <!-->
134
+
135
+ {{ testing_factors | default("More information needed", true)}}
136
+
137
+ ### Metrics
138
+
139
+ <!--> These are the evaluation metrics being used, ideally with a description of why. <!-->
140
+
141
+ {{ testing_metrics | default("More information needed", true)}}
142
+
143
+ ## Results
144
+
145
+ {{ results | default("More information needed", true)}}
146
+
147
+ # Model Examination
148
+
149
+ {{ model_examination | default("More information needed", true)}}
150
+
151
+ # Environmental Impact
152
+
153
+ <!--> Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly <!-->
154
+
155
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
156
+
157
+ - **Hardware Type:** {{ hardware | default("More information needed", true)}}
158
+ - **Hours used:** {{ hours_used | default("More information needed", true)}}
159
+ - **Cloud Provider:** {{ cloud_provider | default("More information needed", true)}}
160
+ - **Compute Region:** {{ cloud_region | default("More information needed", true)}}
161
+ - **Carbon Emitted:** {{ co2_emitted | default("More information needed", true)}}
162
+
163
+ # Technical Specifications [optional]
164
+
165
+ ## Model Architecture and Objective
166
+
167
+ {{ model_specs | default("More information needed", true)}}
168
+
169
+ ## Compute Infrastructure
170
+
171
+ {{ compute_infrastructure | default("More information needed", true)}}
172
+
173
+ ### Hardware
174
+
175
+ {{ hardware | default("More information needed", true)}}
176
+
177
+ ### Software
178
+
179
+ {{ software | default("More information needed", true)}}
180
+
181
+ # Citation
182
+
183
+ <!--> If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. <!-->
184
+
185
+ **BibTeX:**
186
+
187
+ {{ citation_bibtex | default("More information needed", true)}}
188
+
189
+ **APA:**
190
+
191
+ {{ citation_apa | default("More information needed", true)}}
192
+
193
+ # Glossary [optional]
194
+
195
+ <!--> If relevant, include terms and calculations in this section that can help readers understand the model or model card. <!-->
196
+
197
+ {{ glossary | default("More information needed", true)}}
198
+
199
+ # More Information [optional]
200
+
201
+ {{ more_information | default("More information needed", true)}}
202
+
203
+ # Model Card Authors [optional]
204
+
205
+ {{ model_card_authors | default("More information needed", true)}}
206
+
207
+ # Model Card Contact
208
+
209
+ {{ model_card_contact | default("More information needed", true)}}
210
+
211
+ # How to Get Started with the Model
212
+
213
+ Use the code below to get started with the model.
214
+
215
+ <details>
216
+ <summary> Click to expand </summary>
217
+
218
+ {{ get_started_code | default("More information needed", true)}}
219
+
220
+ </details>
221
+
222
+
current_editable.md ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - de
4
+ license: bigscience-bloom-rail-1.0
5
+ library_name: keras
6
+ tags:
7
+ - autogenerated-modelcard
8
+ ---
9
+
10
+ # tethre
11
+
12
+ ## Table of Contents
13
+ - [tethre](#-model_id--defaultmymodelname-true)
14
+ - [Table of Contents](#table-of-contents)
15
+ - [Model Details](#model-details)
16
+ - [How to Get Started with the Model](#how-to-get-started-with-the-model)
17
+ - [Uses](#uses)
18
+ - [Direct Use](#direct-use)
19
+ - [Downstream Use](#downstream-use)
20
+ - [Misuse and Out-of-scope Use](#misuse-and-out-of-scope-use)
21
+ - [Limitations and Biases](#limitations-and-biases)
22
+ - [Training](#training)
23
+ - [Training Data](#training-data)
24
+ - [Training Procedure](#training-procedure)
25
+ - [Evaluation Results](#evaluation-results)
26
+ - [Environmental Impact](#environmental-impact)
27
+ - [Citation Information](#citation-information)
28
+
29
+
30
+ <model_details>
31
+ ## Model Details
32
+
33
+ <!-- Give an overview of your model, the relevant research paper, who trained it, etc. -->
34
+
35
+ hhrirergenjfngdg
36
+
37
+ - Developed by:
38
+ - Language(s):
39
+ - License: This model is licensed under the bigscience-bloom-rail-1.0 license
40
+ - Resources for more information:
41
+
42
+
43
+
44
+ </model_details>
45
+
46
+ <how_to_start>
47
+ ## How to Get Started with the Model
48
+
49
+ Use the code below to get started with the model.
50
+
51
+ ```python
52
+ # A nice code snippet here that describes how to use the model...
53
+ ```
54
+ </how_to_start>
55
+
56
+ <uses>
57
+
58
+ ## Uses
59
+
60
+ #### Direct Use
61
+
62
+ <!-- Describe what kind of tasks this model can be used for directly or problems it can solve. -->
63
+
64
+ [More Information Needed]
65
+
66
+ #### Downstream Use
67
+
68
+ <!-- Describe how this model could be leveraged by a downstream model (if applicable) -->
69
+
70
+ [More Information Needed]
71
+
72
+ #### Misuse and Out-of-scope Use
73
+
74
+ <!-- Describe ways in which this model ***should not*** be used. -->
75
+
76
+ [More Information Needed]
77
+ </uses>
78
+
79
+ <Limitations_and_Biases>
80
+
81
+ ## Limitations and Biases
82
+
83
+ <!-- Describe limitations and biases of this model or models of it's type. -->
84
+
85
+ **CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.**
86
+
87
+ [More Information Needed]
88
+
89
+ </Limitations_and_Biases>
90
+
91
+ <Training>
92
+
93
+ ## Training
94
+
95
+ #### Training Data
96
+
97
+ <!-- Describe the dataset used to train this model. -->
98
+ <!-- Refer to data card if dataset is provided and exists on the hub -->
99
+
100
+ See the data card for additional information.
101
+
102
+ #### Training Procedure
103
+
104
+ <!-- Describe the preprocessing, hardware used, training hyperparameters, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ </Training>
109
+
110
+ <Eval_Results>
111
+ ## Evaluation Results
112
+
113
+ <!-- Describe evaluation results of this model across any datasets it was evaluated on. -->
114
+
115
+ [More Information Needed]
116
+
117
+ </Eval_Results>
118
+
119
+ <E_Impact>
120
+ ## Environmental Impact
121
+
122
+ <!-- Provide information to document the environmental impact of this model -->
123
+
124
+ You can estimate carbon emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700)
125
+
126
+ - **Hardware Type:**
127
+ - **Hours used:**
128
+ - **Cloud Provider:**
129
+ - **Compute Region:**
130
+ - **Carbon Emitted:**
131
+
132
+ </E_Impact>
133
+
134
+ <Cite>
135
+
136
+ ## Citation Information
137
+
138
+ ```bibtex
139
+
140
+ ```
141
+ </Cite>
extract_code.py ADDED
@@ -0,0 +1,532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import re
4
+
5
+ """
6
+ Extracts code from the file "./Libraries.ts".
7
+ (Note that "Libraries.ts", must be in the same directory as
8
+ this script).
9
+ """
10
+
11
+ file = None
12
+
13
+ def read_file(library: str, model_name: str) -> str:
14
+ text = file
15
+
16
+ match = re.search('const ' + library + '.*', text, re.DOTALL).group()
17
+ if match:
18
+ text = match[match.index('`') + 1:match.index('`;')].replace('${model.id}', model_name)
19
+
20
+ return text
21
+
22
+ file = """
23
+ import type { ModelData } from "./Types";
24
+ /**
25
+ * Add your new library here.
26
+ */
27
+ export enum ModelLibrary {
28
+ "adapter-transformers" = "Adapter Transformers",
29
+ "allennlp" = "allenNLP",
30
+ "asteroid" = "Asteroid",
31
+ "diffusers" = "Diffusers",
32
+ "espnet" = "ESPnet",
33
+ "fairseq" = "Fairseq",
34
+ "flair" = "Flair",
35
+ "keras" = "Keras",
36
+ "nemo" = "NeMo",
37
+ "pyannote-audio" = "pyannote.audio",
38
+ "sentence-transformers" = "Sentence Transformers",
39
+ "sklearn" = "Scikit-learn",
40
+ "spacy" = "spaCy",
41
+ "speechbrain" = "speechbrain",
42
+ "tensorflowtts" = "TensorFlowTTS",
43
+ "timm" = "Timm",
44
+ "fastai" = "fastai",
45
+ "transformers" = "Transformers",
46
+ "stanza" = "Stanza",
47
+ "fasttext" = "fastText",
48
+ "stable-baselines3" = "Stable-Baselines3",
49
+ "ml-agents" = "ML-Agents",
50
+ }
51
+
52
+ export const ALL_MODEL_LIBRARY_KEYS = Object.keys(ModelLibrary) as (keyof typeof ModelLibrary)[];
53
+
54
+
55
+ /**
56
+ * Elements configurable by a model library.
57
+ */
58
+ export interface LibraryUiElement {
59
+ /**
60
+ * Name displayed on the main
61
+ * call-to-action button on the model page.
62
+ */
63
+ btnLabel: string;
64
+ /**
65
+ * Repo name
66
+ */
67
+ repoName: string;
68
+ /**
69
+ * URL to library's repo
70
+ */
71
+ repoUrl: string;
72
+ /**
73
+ * Code snippet displayed on model page
74
+ */
75
+ snippet: (model: ModelData) => string;
76
+ }
77
+
78
+ function nameWithoutNamespace(modelId: string): string {
79
+ const splitted = modelId.split("/");
80
+ return splitted.length === 1 ? splitted[0] : splitted[1];
81
+ }
82
+
83
+ //#region snippets
84
+
85
+ const adapter_transformers = (model: ModelData) =>
86
+ `from transformers import ${model.config?.adapter_transformers?.model_class}
87
+
88
+ model = ${model.config?.adapter_transformers?.model_class}.from_pretrained("${model.id}")
89
+ model.load_adapter("${model.id}", source="hf")`;
90
+
91
+ const allennlpUnknown = (model: ModelData) =>
92
+ `import allennlp_models
93
+ from allennlp.predictors.predictor import Predictor
94
+
95
+ predictor = Predictor.from_path("hf://${model.id}")`;
96
+
97
+ const allennlpQuestionAnswering = (model: ModelData) =>
98
+ `import allennlp_models
99
+ from allennlp.predictors.predictor import Predictor
100
+
101
+ predictor = Predictor.from_path("hf://${model.id}")
102
+ predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"}
103
+ predictions = predictor.predict_json(predictor_input)`;
104
+
105
+ const allennlp = (model: ModelData) => {
106
+ if (model.tags?.includes("question-answering")) {
107
+ return allennlpQuestionAnswering(model);
108
+ }
109
+ return allennlpUnknown(model);
110
+ };
111
+
112
+ const asteroid = (model: ModelData) =>
113
+ `from asteroid.models import BaseModel
114
+
115
+ model = BaseModel.from_pretrained("${model.id}")`;
116
+
117
+ const diffusers = (model: ModelData) =>
118
+ `from diffusers import DiffusionPipeline
119
+
120
+ pipeline = DiffusionPipeline.from_pretrained("${model.id}"${model.private ? ", use_auth_token=True" : ""})`;
121
+
122
+ const espnetTTS = (model: ModelData) =>
123
+ `from espnet2.bin.tts_inference import Text2Speech
124
+
125
+ model = Text2Speech.from_pretrained("${model.id}")
126
+
127
+ speech, *_ = model("text to generate speech from")`;
128
+
129
+ const espnetASR = (model: ModelData) =>
130
+ `from espnet2.bin.asr_inference import Speech2Text
131
+
132
+ model = Speech2Text.from_pretrained(
133
+ "${model.id}"
134
+ )
135
+
136
+ speech, rate = soundfile.read("speech.wav")
137
+ text, *_ = model(speech)`;
138
+
139
+ const espnetUnknown = () =>
140
+ `unknown model type (must be text-to-speech or automatic-speech-recognition)`;
141
+
142
+ const espnet = (model: ModelData) => {
143
+ if (model.tags?.includes("text-to-speech")) {
144
+ return espnetTTS(model);
145
+ } else if (model.tags?.includes("automatic-speech-recognition")) {
146
+ return espnetASR(model);
147
+ }
148
+ return espnetUnknown();
149
+ };
150
+
151
+ const fairseq = (model: ModelData) =>
152
+ `from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
153
+
154
+ models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
155
+ "${model.id}"
156
+ )`;
157
+
158
+
159
+ const flair = (model: ModelData) =>
160
+ `from flair.models import SequenceTagger
161
+
162
+ tagger = SequenceTagger.load("${model.id}")`;
163
+
164
+ const keras = (model: ModelData) =>
165
+ `from huggingface_hub import from_pretrained_keras
166
+
167
+ model = from_pretrained_keras("${model.id}")
168
+ `;
169
+
170
+ const pyannote_audio_pipeline = (model: ModelData) =>
171
+ `from pyannote.audio import Pipeline
172
+
173
+ pipeline = Pipeline.from_pretrained("${model.id}")
174
+
175
+ # inference on the whole file
176
+ pipeline("file.wav")
177
+
178
+ # inference on an excerpt
179
+ from pyannote.core import Segment
180
+ excerpt = Segment(start=2.0, end=5.0)
181
+
182
+ from pyannote.audio import Audio
183
+ waveform, sample_rate = Audio().crop("file.wav", excerpt)
184
+ pipeline({"waveform": waveform, "sample_rate": sample_rate})`;
185
+
186
+ const pyannote_audio_model = (model: ModelData) =>
187
+ `from pyannote.audio import Model, Inference
188
+
189
+ model = Model.from_pretrained("${model.id}")
190
+ inference = Inference(model)
191
+
192
+ # inference on the whole file
193
+ inference("file.wav")
194
+
195
+ # inference on an excerpt
196
+ from pyannote.core import Segment
197
+ excerpt = Segment(start=2.0, end=5.0)
198
+ inference.crop("file.wav", excerpt)`;
199
+
200
+ const pyannote_audio = (model: ModelData) => {
201
+ if (model.tags?.includes("pyannote-audio-pipeline")) {
202
+ return pyannote_audio_pipeline(model);
203
+ }
204
+ return pyannote_audio_model(model);
205
+ };
206
+
207
+ const tensorflowttsTextToMel = (model: ModelData) =>
208
+ `from tensorflow_tts.inference import AutoProcessor, TFAutoModel
209
+
210
+ processor = AutoProcessor.from_pretrained("${model.id}")
211
+ model = TFAutoModel.from_pretrained("${model.id}")
212
+ `;
213
+
214
+ const tensorflowttsMelToWav = (model: ModelData) =>
215
+ `from tensorflow_tts.inference import TFAutoModel
216
+
217
+ model = TFAutoModel.from_pretrained("${model.id}")
218
+ audios = model.inference(mels)
219
+ `;
220
+
221
+ const tensorflowttsUnknown = (model: ModelData) =>
222
+ `from tensorflow_tts.inference import TFAutoModel
223
+
224
+ model = TFAutoModel.from_pretrained("${model.id}")
225
+ `;
226
+
227
+ const tensorflowtts = (model: ModelData) => {
228
+ if (model.tags?.includes("text-to-mel")) {
229
+ return tensorflowttsTextToMel(model);
230
+ } else if (model.tags?.includes("mel-to-wav")) {
231
+ return tensorflowttsMelToWav(model);
232
+ }
233
+ return tensorflowttsUnknown(model);
234
+ };
235
+
236
+ const timm = (model: ModelData) =>
237
+ `import timm
238
+
239
+ model = timm.create_model("hf_hub:${model.id}", pretrained=True)`;
240
+
241
+ const sklearn = (model: ModelData) =>
242
+ `from huggingface_hub import hf_hub_download
243
+ import joblib
244
+
245
+ model = joblib.load(
246
+ hf_hub_download("${model.id}", "sklearn_model.joblib")
247
+ )`;
248
+
249
+ const fastai = (model: ModelData) =>
250
+ `from huggingface_hub import from_pretrained_fastai
251
+
252
+ learn = from_pretrained_fastai("${model.id}")`;
253
+
254
+ const sentenceTransformers = (model: ModelData) =>
255
+ `from sentence_transformers import SentenceTransformer
256
+
257
+ model = SentenceTransformer("${model.id}")`;
258
+
259
+ const spacy = (model: ModelData) =>
260
+ `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl
261
+
262
+ # Using spacy.load().
263
+ import spacy
264
+ nlp = spacy.load("${nameWithoutNamespace(model.id)}")
265
+
266
+ # Importing as module.
267
+ import ${nameWithoutNamespace(model.id)}
268
+ nlp = ${nameWithoutNamespace(model.id)}.load()`;
269
+
270
+ const stanza = (model: ModelData) =>
271
+ `import stanza
272
+
273
+ stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}")
274
+ nlp = stanza.Pipeline("${nameWithoutNamespace(model.id).replace("stanza-", "")}")`;
275
+
276
+
277
+ const speechBrainMethod = (speechbrainInterface: string) => {
278
+ switch (speechbrainInterface) {
279
+ case "EncoderClassifier":
280
+ return "classify_file";
281
+ case "EncoderDecoderASR":
282
+ case "EncoderASR":
283
+ return "transcribe_file";
284
+ case "SpectralMaskEnhancement":
285
+ return "enhance_file";
286
+ case "SepformerSeparation":
287
+ return "separate_file";
288
+ default:
289
+ return undefined;
290
+ }
291
+ };
292
+
293
+ const speechbrain = (model: ModelData) => {
294
+ const speechbrainInterface = model.config?.speechbrain?.interface;
295
+ if (speechbrainInterface === undefined) {
296
+ return `# interface not specified in config.json`;
297
+ }
298
+
299
+ const speechbrainMethod = speechBrainMethod(speechbrainInterface);
300
+ if (speechbrainMethod === undefined) {
301
+ return `# interface in config.json invalid`;
302
+ }
303
+
304
+ return `from speechbrain.pretrained import ${speechbrainInterface}
305
+ model = ${speechbrainInterface}.from_hparams(
306
+ "${model.id}"
307
+ )
308
+ model.${speechbrainMethod}("file.wav")`;
309
+ };
310
+
311
+ const transformers = (model: ModelData) => {
312
+ const info = model.transformersInfo;
313
+ if (!info) {
314
+ return `# ⚠️ Type of model unknown`;
315
+ }
316
+ if (info.processor) {
317
+ const varName = info.processor === "AutoTokenizer" ? "tokenizer"
318
+ : info.processor === "AutoFeatureExtractor" ? "extractor"
319
+ : "processor"
320
+ ;
321
+ return [
322
+ `from transformers import ${info.processor}, ${info.auto_model}`,
323
+ "",
324
+ `${varName} = ${info.processor}.from_pretrained("${model.id}"${model.private ? ", use_auth_token=True" : ""})`,
325
+ "",
326
+ `model = ${info.auto_model}.from_pretrained("${model.id}"${model.private ? ", use_auth_token=True" : ""})`,
327
+ ].join("\n");
328
+ } else {
329
+ return [
330
+ `from transformers import ${info.auto_model}`,
331
+ "",
332
+ `model = ${info.auto_model}.from_pretrained("${model.id}"${model.private ? ", use_auth_token=True" : ""})`,
333
+ ].join("\n");
334
+ }
335
+ };
336
+
337
+ const fasttext = (model: ModelData) =>
338
+ `from huggingface_hub import hf_hub_download
339
+ import fasttext
340
+
341
+ model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`;
342
+
343
+ const stableBaselines3 = (model: ModelData) =>
344
+ `from huggingface_sb3 import load_from_hub
345
+ checkpoint = load_from_hub(
346
+ repo_id="${model.id}",
347
+ filename="{MODEL FILENAME}.zip",
348
+ )`;
349
+
350
+ const nemoDomainResolver = (domain: string, model: ModelData): string | undefined => {
351
+ const modelName = `${nameWithoutNamespace(model.id)}.nemo`;
352
+
353
+ switch (domain) {
354
+ case "ASR":
355
+ return `import nemo.collections.asr as nemo_asr
356
+ asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}")
357
+
358
+ transcriptions = asr_model.transcribe(["file.wav"])`;
359
+ default:
360
+ return undefined;
361
+ }
362
+ };
363
+
364
+ const mlAgents = (model: ModelData) =>
365
+ `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`;
366
+
367
+ const nemo = (model: ModelData) => {
368
+ let command: string | undefined = undefined;
369
+ // Resolve the tag to a nemo domain/sub-domain
370
+ if (model.tags?.includes("automatic-speech-recognition")) {
371
+ command = nemoDomainResolver("ASR", model);
372
+ }
373
+
374
+ return command ?? `# tag did not correspond to a valid NeMo domain.`;
375
+ };
376
+
377
+ //#endregion
378
+
379
+
380
+
381
+ export const MODEL_LIBRARIES_UI_ELEMENTS: { [key in keyof typeof ModelLibrary]?: LibraryUiElement } = {
382
+ // ^^ TODO(remove the optional ? marker when Stanza snippet is available)
383
+ "adapter-transformers": {
384
+ btnLabel: "Adapter Transformers",
385
+ repoName: "adapter-transformers",
386
+ repoUrl: "https://github.com/Adapter-Hub/adapter-transformers",
387
+ snippet: adapter_transformers,
388
+ },
389
+ "allennlp": {
390
+ btnLabel: "AllenNLP",
391
+ repoName: "AllenNLP",
392
+ repoUrl: "https://github.com/allenai/allennlp",
393
+ snippet: allennlp,
394
+ },
395
+ "asteroid": {
396
+ btnLabel: "Asteroid",
397
+ repoName: "Asteroid",
398
+ repoUrl: "https://github.com/asteroid-team/asteroid",
399
+ snippet: asteroid,
400
+ },
401
+ "diffusers": {
402
+ btnLabel: "Diffusers",
403
+ repoName: "πŸ€—/diffusers",
404
+ repoUrl: "https://github.com/huggingface/diffusers",
405
+ snippet: diffusers,
406
+ },
407
+ "espnet": {
408
+ btnLabel: "ESPnet",
409
+ repoName: "ESPnet",
410
+ repoUrl: "https://github.com/espnet/espnet",
411
+ snippet: espnet,
412
+ },
413
+ "fairseq": {
414
+ btnLabel: "Fairseq",
415
+ repoName: "fairseq",
416
+ repoUrl: "https://github.com/pytorch/fairseq",
417
+ snippet: fairseq,
418
+ },
419
+ "flair": {
420
+ btnLabel: "Flair",
421
+ repoName: "Flair",
422
+ repoUrl: "https://github.com/flairNLP/flair",
423
+ snippet: flair,
424
+ },
425
+ "keras": {
426
+ btnLabel: "Keras",
427
+ repoName: "Keras",
428
+ repoUrl: "https://github.com/keras-team/keras",
429
+ snippet: keras,
430
+ },
431
+ "nemo": {
432
+ btnLabel: "NeMo",
433
+ repoName: "NeMo",
434
+ repoUrl: "https://github.com/NVIDIA/NeMo",
435
+ snippet: nemo,
436
+ },
437
+ "pyannote-audio": {
438
+ btnLabel: "pyannote.audio",
439
+ repoName: "pyannote-audio",
440
+ repoUrl: "https://github.com/pyannote/pyannote-audio",
441
+ snippet: pyannote_audio,
442
+ },
443
+ "sentence-transformers": {
444
+ btnLabel: "sentence-transformers",
445
+ repoName: "sentence-transformers",
446
+ repoUrl: "https://github.com/UKPLab/sentence-transformers",
447
+ snippet: sentenceTransformers,
448
+ },
449
+ "sklearn": {
450
+ btnLabel: "Scikit-learn",
451
+ repoName: "Scikit-learn",
452
+ repoUrl: "https://github.com/scikit-learn/scikit-learn",
453
+ snippet: sklearn,
454
+ },
455
+ "fastai": {
456
+ btnLabel: "fastai",
457
+ repoName: "fastai",
458
+ repoUrl: "https://github.com/fastai/fastai",
459
+ snippet: fastai,
460
+ },
461
+ "spacy": {
462
+ btnLabel: "spaCy",
463
+ repoName: "spaCy",
464
+ repoUrl: "https://github.com/explosion/spaCy",
465
+ snippet: spacy,
466
+ },
467
+ "speechbrain": {
468
+ btnLabel: "speechbrain",
469
+ repoName: "speechbrain",
470
+ repoUrl: "https://github.com/speechbrain/speechbrain",
471
+ snippet: speechbrain,
472
+ },
473
+ "stanza": {
474
+ btnLabel: "Stanza",
475
+ repoName: "stanza",
476
+ repoUrl: "https://github.com/stanfordnlp/stanza",
477
+ snippet: stanza,
478
+ },
479
+ "tensorflowtts": {
480
+ btnLabel: "TensorFlowTTS",
481
+ repoName: "TensorFlowTTS",
482
+ repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS",
483
+ snippet: tensorflowtts,
484
+ },
485
+ "timm": {
486
+ btnLabel: "timm",
487
+ repoName: "pytorch-image-models",
488
+ repoUrl: "https://github.com/rwightman/pytorch-image-models",
489
+ snippet: timm,
490
+ },
491
+ "transformers": {
492
+ btnLabel: "Transformers",
493
+ repoName: "πŸ€—/transformers",
494
+ repoUrl: "https://github.com/huggingface/transformers",
495
+ snippet: transformers,
496
+ },
497
+ "fasttext": {
498
+ btnLabel: "fastText",
499
+ repoName: "fastText",
500
+ repoUrl: "https://fasttext.cc/",
501
+ snippet: fasttext,
502
+ },
503
+ "stable-baselines3": {
504
+ btnLabel: "stable-baselines3",
505
+ repoName: "stable-baselines3",
506
+ repoUrl: "https://github.com/huggingface/huggingface_sb3",
507
+ snippet: stableBaselines3,
508
+ },
509
+ "ml-agents": {
510
+ btnLabel: "ml-agents",
511
+ repoName: "ml-agents",
512
+ repoUrl: "https://github.com/huggingface/ml-agents",
513
+ snippet: mlAgents,
514
+ },
515
+ } as const;
516
+ """
517
+
518
+
519
+ if __name__ == '__main__':
520
+ import sys
521
+ library_name = "keras"
522
+ model_name = "Distillgpt2"
523
+ print(read_file(library_name, model_name))
524
+
525
+ """
526
+ try:
527
+ args = sys.argv[1:]
528
+ if args:
529
+ print(read_file(args[0], args[1]))
530
+ except IndexError:
531
+ pass
532
+ """
language_model_template1.md ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ {{card_data}}
3
+ ---
4
+
5
+ {% set lm_task_entries = {
6
+ 'text_generation': {
7
+ 'direct_use': "The model can be used for text generation.",
8
+ 'downstream_use': "To learn more about this task and potential downstream uses, see the Hugging Face [text generation docs](https://huggingface.co/tasks/text-generation)",
9
+ 'misuse': "The model was not trained to be factual or true representations of people or events, and therefore using the models to generate such content is out-of-scope for the abilities of this model."
10
+ },
11
+ 'question_answering': {
12
+ 'direct_use': "The model can be used for question answering.",
13
+ 'downstream_use': "Potential types of question answering include extractive QA, open generative QA, and closed generative QA. To learn more about this task and potential downstream uses, see the Hugging Face [question answering docs](https://huggingface.co/tasks/question-answering)",
14
+ 'misuse': "The model was not trained to be factual or true representations of people or events, and therefore using the models to generate such content is out-of-scope for the abilities of this model."
15
+ },
16
+ 'fill_mask': {
17
+ 'direct_use': "The model can be used for masked language modeling.",
18
+ 'downstream_use': "Masked language modeling is sometimes used to train large models for domain-specific problems. To learn more about this task and potential downstream uses, see the Hugging Face [fill mask docs](https://huggingface.co/tasks/fill-mask)",
19
+ 'misuse': "The model was not trained to be factual or true representations of people or events, and therefore using the models to generate such content is out-of-scope for the abilities of this model."
20
+ },
21
+ 'sentence_similarity': {
22
+ 'direct_use': "The model can be used for sentence similarity, the task of determining how similar two texts are.",
23
+ 'downstream_use': "Potential downstream use cases may include information retrieval and clustering or grouping. To learn more about sentence similarity and potential downstream uses, see the Hugging Face [sentence similarity docs](https://huggingface.co/tasks/sentence-similarity)",
24
+ 'misuse': ""
25
+ },
26
+ 'summarization': {
27
+ 'direct_use': "The model can be used for summarization.",
28
+ 'downstream_use': "To learn more about summarization and potential downstream uses, see the Hugging Face [summarization docs](https://huggingface.co/tasks/summarization).",
29
+ 'misuse': "The model was not trained to be factual or true representations of people or events, and therefore using the models to generate such content is out-of-scope for the abilities of this model."
30
+ },
31
+ 'text_classification': {
32
+ 'direct_use': "The model can be used for text classification, the task of assigning a label or class to a given text.",
33
+ 'downstream_use': "Potential downstream use cases include sentiment analysis, natural language inference, and assessing grammatical correctness. To learn more about text classification and other potential downstream uses, see the Hugging Face [text classification docs](https://huggingface.co/tasks/text-classification).",
34
+ 'misuse': ""
35
+ },
36
+ 'token_classification': {
37
+ 'direct_use': "The model can be used for token classification, a natural language understanding task in which a label is assigned to some tokens in a text.",
38
+ 'downstream_use': "Potential downstream use cases include Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. To learn more about token classification and other potential downstream use cases, see the Hugging Face [token classification docs](https://huggingface.co/tasks/token-classification).",
39
+ 'misuse': ""
40
+ },
41
+ 'translation': {
42
+ 'direct_use': "The model can be used for translation, the task of converting text from one language to another.",
43
+ 'downstream_use': "Potential downstream use cases include use cases that leverage conversational agents across different languages. To learn more about translation and other potential downstream use cases, see the Hugging Face [translation docs](https://huggingface.co/tasks/translation).",
44
+ 'misuse': ""
45
+ },
46
+ } %}
47
+
48
+ {% set task_list = [
49
+ 'text_generation',
50
+ 'question_answering',
51
+ 'fill_mask',
52
+ 'sentence_similarity',
53
+ 'summarization',
54
+ 'text_classification',
55
+ 'token_classification',
56
+ 'translation'
57
+ ] %}
58
+
59
+
60
+ # Model Card for {{ model_id }}
61
+
62
+ <!-- Provide a quick summary of what the model is/does. [Optional] -->
63
+ {{ the_model_description }}
64
+
65
+ {% if model_card_user == "policymaker" %}
66
+ <details>
67
+ <summary> Click to expand policymaker version of model card </summary>
68
+
69
+ # Table of Contents
70
+
71
+ 1. [Model Details](#model-details)
72
+ 2. [Uses](#uses)
73
+ 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
74
+ 4. [Model Examination](#model-examination)
75
+ 5. [Environmental Impact](#environmental-impact)
76
+ 6. [Citation](#citation)
77
+ 7. [Glossary](#glossary-optional)
78
+ 8. [More Information](#more-information-optional)
79
+ 9. [Model Card Authors](#model-card-authors-optional)
80
+ 10. [Model Card Contact](#model-card-contact)
81
+
82
+ </details>
83
+
84
+ {% endif %}
85
+
86
+
87
+ # Table of Contents
88
+
89
+ - [Model Card for {{ model_id }}](#model-card-for--model_id-)
90
+ - [Table of Contents](#table-of-contents)
91
+ - [Table of Contents](#table-of-contents-1)
92
+ - [Model Details](#model-details)
93
+ - [Model Description](#model-description)
94
+ - [Uses](#uses)
95
+ - [Direct Use](#direct-use)
96
+ - [Downstream Use [Optional]](#downstream-use-optional)
97
+ - [Out-of-Scope Use](#out-of-scope-use)
98
+ - [Bias, Risks, and Limitations](#bias-risks-and-limitations)
99
+ - [Recommendations](#recommendations)
100
+ - [Training Details](#training-details)
101
+ - [Training Data](#training-data)
102
+ - [Training Procedure](#training-procedure)
103
+ - [Preprocessing](#preprocessing)
104
+ - [Speeds, Sizes, Times](#speeds-sizes-times)
105
+ - [Evaluation](#evaluation)
106
+ - [Testing Data, Factors & Metrics](#testing-data-factors--metrics)
107
+ - [Testing Data](#testing-data)
108
+ - [Factors](#factors)
109
+ - [Metrics](#metrics)
110
+ - [Results](#results)
111
+ - [Model Examination](#model-examination)
112
+ - [Environmental Impact](#environmental-impact)
113
+ - [Technical Specifications [optional]](#technical-specifications-optional)
114
+ - [Model Architecture and Objective](#model-architecture-and-objective)
115
+ - [Compute Infrastructure](#compute-infrastructure)
116
+ - [Hardware](#hardware)
117
+ - [Software](#software)
118
+ - [Citation](#citation)
119
+ - [Glossary [optional]](#glossary-optional)
120
+ - [More Information [optional]](#more-information-optional)
121
+ - [Model Card Authors [optional]](#model-card-authors-optional)
122
+ - [Model Card Contact](#model-card-contact)
123
+ - [How to Get Started with the Model](#how-to-get-started-with-the-model)
124
+
125
+
126
+ # Model Details
127
+
128
+ ## Model Description
129
+
130
+ <!-- Provide a longer summary of what this model is/does. -->
131
+ {{ the_model_description }}
132
+
133
+ - **Developed by:** {{ developers | join(', ') | default("More information needed", true)}}
134
+ - **Shared by [Optional]:** {{ shared_by | join(', ') | default("More information needed", true)}}
135
+ - **Model type:** {{ model_type | default("Language model", true)}}
136
+ - **Language(s) (NLP):** {{ language | join(', ') | default("More information needed", true)}}
137
+ - **License:** {{ license | default("More information needed", true)}}
138
+ - **Related Models:** {{ related_models | join(', ') | default("More information needed", true)}}
139
+ - **Parent Model:** {{ parent_model | default("More information needed", true)}}
140
+ - **Resources for more information:** {{ more_resources | default("More information needed", true)}}
141
+ {{ " - [GitHub Repo]({0})".format(repo_link) if repo_link }}
142
+ {{ " - [Associated Paper]({0})".format(paper_link) if paper_link }}
143
+ {{ " - [Blog Post]({0})".format(blog_link) if blog_link }}
144
+
145
+ # Uses
146
+
147
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
148
+
149
+ ## Direct Use
150
+
151
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
152
+ <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." -->
153
+ {% if direct_use is defined %}
154
+ {{ direct_use }}
155
+ {% elif model_task in task_list %}
156
+ {{ lm_task_entries[model_task]['direct_use'] }}
157
+ {% else %}
158
+ More information needed.
159
+ {% endif %}
160
+
161
+ ## Downstream Use [Optional]
162
+
163
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
164
+ <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." -->
165
+ {% if downstream_use is defined %}
166
+ {{ downstream_use }}
167
+ {% elif model_task in task_list %}
168
+ {{ lm_task_entries[model_task]['downstream_use'] }}
169
+ {% else %}
170
+ More information needed.
171
+ {% endif %}
172
+
173
+ ## Out-of-Scope Use
174
+
175
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
176
+ <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." -->
177
+ {% if out_of_scope_use is defined %}
178
+ {{ out_of_scope_use }}
179
+ {% elif model_task in task_list %}
180
+ The model should not be used to intentionally create hostile or alienating environments for people. {{ lm_task_entries[model_task]['misuse'] }}
181
+ {% else %}
182
+ More information needed.
183
+ {% endif %}
184
+
185
+ # Bias, Risks, and Limitations
186
+
187
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
188
+ {% if bias_risks_limitations is defined %}
189
+ {{ bias_risks_limitations }}
190
+ {% else %}
191
+ Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by the model may include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups.
192
+ {% endif %}
193
+
194
+ ## Recommendations
195
+
196
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
197
+
198
+ {% if bias_recommendations is defined %}
199
+ {{ bias_recommendations }}
200
+ {% else %}
201
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
202
+ {% endif %}
203
+
204
+ # Training Details
205
+
206
+ ## Training Data
207
+
208
+ <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
209
+
210
+ {{ training_data | default("More information on training data needed", true)}}
211
+ {{ "See the associated [dataset card]({0}) for further details.".format(training_data_card_link) if training_data_card_link }}
212
+
213
+ ## Training Procedure
214
+
215
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
216
+
217
+ ### Preprocessing
218
+
219
+ {{ preprocessing | default("More information needed", true)}}
220
+
221
+ ### Speeds, Sizes, Times
222
+
223
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
224
+
225
+ {{ speeds_sizes_times | default("More information needed", true)}}
226
+
227
+ # Evaluation
228
+
229
+ <!-- This section describes the evaluation protocols and provides the results. -->
230
+
231
+ ## Testing Data, Factors & Metrics
232
+
233
+ ### Testing Data
234
+
235
+ <!-- This should link to a Data Card if possible. -->
236
+
237
+ {{ testing_data | default("More information needed", true)}}
238
+ {{ "See the associated [dataset card]({0}) for further details.".format(testing_data_card_link) if testing_data_card_link }}
239
+
240
+ ### Factors
241
+
242
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
243
+
244
+ {{ testing_factors | default("More information needed", true)}}
245
+
246
+ ### Metrics
247
+
248
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
249
+
250
+ {{ testing_metrics | default("More information needed", true)}}
251
+
252
+ ## Results
253
+
254
+ {{ results | default("More information needed", true)}}
255
+
256
+ # Model Examination
257
+
258
+ {{ model_examination | default("More information needed", true)}}
259
+
260
+ # Environmental Impact
261
+
262
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
263
+
264
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
265
+
266
+ - **Hardware Type:** {{ hardware | default("More information needed", true)}}
267
+ - **Hours used:** {{ hours_used | default("More information needed", true)}}
268
+ - **Cloud Provider:** {{ cloud_provider | default("More information needed", true)}}
269
+ - **Compute Region:** {{ cloud_region | default("More information needed", true)}}
270
+ - **Carbon Emitted:** {{ co2_emitted | default("More information needed", true)}}
271
+
272
+ # Technical Specifications [optional]
273
+
274
+ ## Model Architecture and Objective
275
+
276
+ {{ model_specs | default("More information needed", true)}}
277
+
278
+ ## Compute Infrastructure
279
+
280
+ {{ compute_infrastructure | default("More information needed", true)}}
281
+
282
+ ### Hardware
283
+
284
+ {{ hardware | default("More information needed", true)}}
285
+
286
+ ### Software
287
+
288
+ {{ software | default("More information needed", true)}}
289
+
290
+ # Citation
291
+
292
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
293
+
294
+ **BibTeX:**
295
+
296
+ {{ citation_bibtex | default("More information needed", true)}}
297
+
298
+ **APA:**
299
+
300
+ {{ citation_apa | default("More information needed", true)}}
301
+
302
+ # Glossary [optional]
303
+
304
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
305
+
306
+ {{ glossary | default("More information needed", true)}}
307
+
308
+ # More Information [optional]
309
+
310
+ {{ more_information | default("More information needed", true)}}
311
+
312
+ # Model Card Authors [optional]
313
+
314
+ <!-- This section provides another layer of transparency and accountability. Whose views is this model card representing? How many voices were included in its construction? Etc. -->
315
+
316
+ {{ model_card_authors | join(', ') | default("More information needed", true)}}
317
+
318
+ # Model Card Contact
319
+
320
+ {{ model_card_contact | join(', ') | default("More information needed", true)}}
321
+
322
+ # How to Get Started with the Model
323
+
324
+ Use the code below to get started with the model.
325
+
326
+ <details>
327
+ <summary> Click to expand </summary>
328
+
329
+ {{ get_started_code | default("More information needed", true)}}
330
+
331
+ </details>
lets_combine.md ADDED
@@ -0,0 +1 @@
 
 
1
+ '<details> <summary> Click to expand </summary>\n\n# Model Details\n## Model Description\n<!--> Provide a longer summary of what this model is. <!-->\n- **Developed by:** {{ developers | default("More information needed", true)}}- **Shared by [Optional]:** {{ shared_by | default("More information needed", true)}}- **Model type:** Language model- **Language(s) (NLP):** {{ language | default("More information needed", true)}}- **License:** {{ license | default("More information needed", true)}}- **Related Models:** {{ related_models | default("More information needed", true)}} - **Parent Model:** {{ parent_model | default("More information needed", true)}}- **Resources for more information:** {{ more_resources | default("More information needed", true)}} </details>\n# Uses\n<!--> Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. <!-->\n## Direct Use\n<!--> This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. <!-->\n## Downstream Use [Optional]\n<!--> This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app <!-->\n## Out-of-Scope Use\n<!--> This section addresses misuse, malicious use, and uses that the model will not work well for. <!--> '
markdownTagExtract.cpython-39.pyc ADDED
Binary file (1.75 kB). View file
 
markdownTagExtract.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #from lib import tag_checker
2
+ import glob
3
+ import fileinput
4
+ import os
5
+
6
def tag_checker(file, start_header, end_header):
    """Extract every span of text enclosed between *start_header* and
    *end_header* in the markdown file at path *file*.

    Returns a list of strings, one per tagged span, with the tags
    themselves stripped out.  Spans may extend across multiple lines.
    """
    idea_list = []
    idea_counter = 0

    start_t = start_header
    end_t = end_header

    inside_tag = False
    # "with" guarantees the file handle is closed even if parsing raises.
    with open(file, "r") as markdown_fp:
        for line in markdown_fp:
            start_tag = start_t in line
            end_tag = end_t in line
            outside_tag = not inside_tag

            if start_tag and outside_tag:
                # Drop everything up to and including the start tag.
                # BUG FIX: the offset must be the length of the *start*
                # tag; the original used len(end_t), which mis-slices
                # whenever the two tags differ in length (e.g.
                # '<how_to_start>' vs '</how_to_start>').
                tag_start_index = line.index(start_t) + len(start_t)
                line = line[tag_start_index:]

                # Open a fresh accumulator for this span.
                idea_list.append("")

                inside_tag = True

            if end_tag and inside_tag:
                # Keep only the text before the end tag and close the span.
                end_tag_index = line.index(end_t)
                line = line[:end_tag_index]

                idea_list[idea_counter] += line
                idea_counter += 1
                inside_tag = False

            if inside_tag:
                # Whole line lies inside an open span: accumulate it.
                idea_list[idea_counter] += line
    return idea_list
47
+
48
def listToString(s):
    """Concatenate the string elements of *s* into a single string."""
    return "".join(s)
59
+
60
+
61
def to_markdown(new_file, text_list):
    """Write each string in *text_list* to *new_file*, one item per line.

    Any existing content of *new_file* is overwritten.
    """
    # "with" closes the file even when a write fails (the original left
    # the handle open on error); the unused enumerate() index is dropped.
    with open(new_file, "w") as out_fp:
        for idea in text_list:
            out_fp.write(idea + "\n")
70
+
71
def combine_markdowns(document1, original_document):
    """Merge every file matching the glob pattern *document1* into
    *original_document*, with all lines (across all matched files)
    sorted lexicographically.

    Returns the path of the rewritten *original_document*.
    """
    merged_lines = []
    for path in glob.glob(document1):
        # Explicit per-file "with" replaces fileinput.input(), whose
        # stream the original code never closed.
        with open(path, "r") as fp:
            merged_lines.extend(fp)
    with open(original_document, "w") as fout:
        fout.writelines(sorted(merged_lines))
    return original_document
77
+
78
if __name__ == "__main__":
    # Demo / manual-test driver: extract the <how_to_start> section from
    # the local template file and print it.
    file = "template.md"
    # NOTE(review): header_1_* are defined but never used below, and are
    # identical to header_2_* anyway.
    header_1_start = '<how_to_start>'
    header_1_end = '</how_to_start>'

    header_2_start = '<how_to_start>'
    header_2_end = '</how_to_start>'

    how_to_start = (tag_checker(file,header_2_start,header_2_end))

    # NOTE(review): same tags again, so this is simply a second copy of
    # the same extraction result.
    intended_use_limits = (tag_checker(file,header_2_start,header_2_end))
    string_s = listToString(how_to_start)
    print(string_s)
    # NOTE(review): this local name shadows the combine_markdowns()
    # function defined above in this module.
    combine_markdowns = how_to_start + intended_use_limits

    #to_markdown ('combined.md',combine_markdowns)
96
+
97
+
98
+
99
+
middleMan.cpython-39.pyc ADDED
Binary file (2.37 kB). View file
 
middleMan.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ #from pages.viewCardProgress import get_card
4
+ from modelcards import CardData, ModelCard
5
+ from markdownTagExtract import tag_checker,listToString,to_markdown
6
+ #from specific_extraction import extract_it
7
+ from modelcards import CardData, ModelCard
8
+ from jinja2 import Environment, FileSystemLoader
9
+
10
+
11
def is_float(value):
    """Return True if *value* can be converted to a float, else False."""
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare "except:", which also swallowed
        # SystemExit and KeyboardInterrupt.
        return False
17
+
18
+ ## Handles parsing jinja variable templates
19
def parse_into_jinja_markdown():
    """Render the uploaded markdown template with the values currently
    held in Streamlit session state.

    Returns the rendered model-card markdown as a string.
    """
    env = Environment(loader=FileSystemLoader('.'), autoescape=True)
    temp = env.get_template(st.session_state.markdown_upload)

    # BUG FIX: the citation keyword arguments were crossed — the BibTeX
    # template slot was fed the APA session value and vice versa.
    return (temp.render(model_id = st.session_state["model_name"],
            the_model_description = st.session_state["model_description"],developers=st.session_state["Model_developers"],shared_by = st.session_state["shared_by"],license = st.session_state['license'],
            direct_use = st.session_state["Direct_Use"], downstream_use = st.session_state["Downstream_Use"],out_of_scope_use = st.session_state["Out-of-Scope_Use"],
            bias_risks_limitations = st.session_state["Model_Limits_n_Risks"], bias_recommendations = st.session_state['Recommendations'],
            model_examination = st.session_state['Model_examin'],
            hardware= st.session_state['Model_hardware'], hours_used = st.session_state['hours_used'], cloud_provider = st.session_state['Model_cloud_provider'], cloud_region = st.session_state['Model_cloud_region'], co2_emitted = st.session_state['Model_c02_emitted'],
            citation_bibtex = st.session_state['bibtex_citation'], citation_apa = st.session_state["APA_citation"],
            training_data = st.session_state['training_data'], preprocessing =st.session_state['preprocessing'], speeds_sizes_times = st.session_state['Speeds_Sizes_Times'],
            model_specs = st.session_state['Model_specs'], compute_infrastructure = st.session_state['compute_infrastructure'],software = st.session_state['technical_specs_software'],
            glossary = st.session_state['Glossary'],
            more_information = st.session_state['More_info'],
            model_card_authors = st.session_state['the_authors'],
            model_card_contact = st.session_state['Model_card_contact'],
            get_started_code =st.session_state["Model_how_to"]
            ))
38
+
39
+
40
+
41
+ ################################################################
42
+ ################################################################
43
+ ################################################################
44
+ ################## Below CURRENTLY Deprecated ##################
45
+ ################################################################
46
+ ################################################################
47
+ ################################################################
48
+
49
def get_card():
    """Build a ModelCard from Streamlit session state (deprecated path).

    Reads widget values out of st.session_state, validates that the
    required fields (languages, license) are present, and renders
    'template.md' into a ModelCard.  If validation fails, shows an error
    and halts the script via st.stop().
    """
    # Normalise empty-string widget values to None so the card data
    # omits them instead of embedding "".
    languages=st.session_state.languages or None
    license=st.session_state.license or None
    library_name = st.session_state.library_name or None
    tags= [x.strip() for x in st.session_state.tags.split(',') if x.strip()]
    tags.append("autogenerated-modelcard")
    datasets= [x.strip() for x in st.session_state.datasets.split(',') if x.strip()] or None
    metrics=st.session_state.metrics or None
    model_name = st.session_state.model_name or None
    model_description = st.session_state.model_description or None
    authors = st.session_state.Model_card_authors or None
    paper_url = st.session_state.paper_url or None
    github_url = st.session_state.github_url or None
    bibtex_citations = st.session_state.bibtex_citations or None
    # BUG: non-numeric emissions input is silently dropped (becomes None).
    emissions = float(st.session_state.Model_c02_emitted) if is_float(st.session_state.Model_c02_emitted) else None

    # Handle any warnings...
    do_warn = False
    warning_msg = "Warning: The following fields are required but have not been filled in: "
    if not languages:
        warning_msg += "\n- Languages"
        do_warn = True
    if not license:
        warning_msg += "\n- License"
        do_warn = True
    if do_warn:
        # Surface all missing required fields at once, then stop the app run.
        st.error(warning_msg)
        st.stop()

    # Generate and display card
    card_data = CardData(
        language=languages,
        license=license,
        library_name=library_name,
        tags=tags,
        datasets=datasets,
        metrics=metrics,
    )
    if emissions:
        card_data.co2_eq_emissions = {'emissions': emissions}

    card = ModelCard.from_template(
        card_data,
        template_path='template.md',
        model_id=model_name,
        # Template kwargs:
        model_description=model_description,
        license=license,
        authors=authors,
        paper_url=paper_url,
        github_url=github_url,
        bibtex_citations=bibtex_citations,
        emissions=emissions
    )
    return card
104
+
105
+
106
def apply_view(page_state, not_code_pull, text_passed):
    """Return the text for *page_state*, wrapped in a collapsed
    <details> element when the section is not important for the active
    user view.

    page_state    -- session-state key of the current page/section
    not_code_pull -- when True, re-extract the text instead of using
                     *text_passed* (see out_text_out)
    text_passed   -- pre-fetched text for the section
    """
    # Each view keeps its own set of sections that stay expanded.
    if st.session_state.legal_view:
        user_view_collapse = {'Model_details_text','Model_uses','Model_Eval','Model_carbon','Model_cite', 'Glossary','Model_card_authors'}
    elif st.session_state.researcher_view:
        user_view_collapse = {'Model_details_text','Model_how_to','Model_training','Model_Limits_n_Risks', 'Glossary', 'Model_card_contact', 'Citation'}
    else:
        # Default: beginner/technical view.  TODO: add Technical Specs.
        user_view_collapse = {'Model_details_text','Model_how_to','Model_Eval','Model_uses', 'Glossary'}

    # Set-membership test replaces the original manual loop over the set;
    # "== True" comparisons dropped in favour of plain truthiness.
    important_section = page_state in user_view_collapse

    text_return = out_text_out(not_code_pull, page_state, text_passed)
    if important_section:
        return text_return
    # Unimportant sections render collapsed behind a click-to-expand.
    return "<details> <summary> Click to expand </summary>" + text_return + "</details>"
136
+
137
def out_text_out(not_code_pull, page_state, out_text):
    """Return the section text: freshly extracted when *not_code_pull*
    is True, otherwise *out_text* unchanged.
    """
    if not_code_pull:
        # NOTE(review): extract_it comes from specific_extraction, whose
        # import is currently commented out at the top of this module, so
        # this branch raises NameError until that import is restored.
        return extract_it(page_state)
    # Redundant "out_text = out_text" self-assignment removed.
    return out_text
144
+
145
def writingPrompt(page_state, help_text, out_text):
    """Render the free-text editing widget for a card section.

    page_state -- session-state key of the section being edited
    help_text  -- tooltip shown on the text area
    out_text   -- initial contents of the text area

    Returns the (possibly user-edited) text-area value.
    """
    #st.session_state.check_box = False
    #extracted_how_to= tag_checker(markdown,start_tag,end_tag)

    #see_suggestion = column.checkbox("See Writing Prompt")

    st.session_state.check_box = True
    # NOTE(review): the widget key is persist(out_text) — keyed on the
    # *contents* rather than on page_state, so the key changes whenever
    # the text does; confirm this is intentional.
    variable_output_prompt = st.text_area("Enter some text",height = 500, value =out_text, key=persist(out_text),
    help=help_text)
    st.session_state.page_state = persist(variable_output_prompt)
    #out_text = extract_it(page_state)

    #else:
    #st.session_state.check_box = True
    ##st.session_state.check_box = False
    #variable_output_prompt = st.text_area("Enter Text",value = ' ',key=persist(page_state),height = 500,help =help_text)

    return variable_output_prompt
166
+
167
+
168
+
169
def extract_section(current_template, start_tag, end_tag):
    """Pull every span between *start_tag* and *end_tag* out of
    *current_template* and return them joined by single spaces."""
    extracted_spans = tag_checker(current_template, start_tag, end_tag)
    return " ".join(extracted_spans)
175
+
176
def main():
    """No-op entry point; card saving (card.save) is currently disabled."""
    return None
modelcard_template_new_spec.md ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ {{card_data}}
3
+ ---
4
+
5
+ # {{ model_id }}
6
+
7
+ <!-- Provide a quick summary of what the model is/does. -->
8
+
9
+ # Table of Contents
10
+
11
+ - [{{ model_id }}](#-model_id-)
12
+ - [Table of Contents](#table-of-contents)
13
+ - [Model Details](#model-details)
14
+ - [Model Description](#model-description)
15
+ - [Uses](#uses)
16
+ - [Direct Use](#direct-use)
17
+ - [Downstream Use [Optional]](#downstream-use-optional)
18
+ - [Out-of-Scope Use](#out-of-scope-use)
19
+ - [Bias, Risks, and Limitations](#bias-risks-and-limitations)
20
+ - [Recommendations](#recommendations)
21
+ - [Training Details](#training-details)
22
+ - [Training Data](#training-data)
23
+ - [Training Procedure](#training-procedure)
24
+ - [Preprocessing](#preprocessing)
25
+ - [Speeds, Sizes, Times](#speeds-sizes-times)
26
+ - [Evaluation](#evaluation)
27
+ - [Testing Data, Factors & Metrics](#testing-data-factors--metrics)
28
+ - [Testing Data](#testing-data)
29
+ - [Factors](#factors)
30
+ - [Metrics](#metrics)
31
+ - [Results](#results)
32
+ - [Model Examination](#model-examination)
33
+ - [Environmental Impact](#environmental-impact)
34
+ - [Technical Specifications [optional]](#technical-specifications-optional)
35
+ - [Model Architecture and Objective](#model-architecture-and-objective)
36
+ - [Compute Infrastructure](#compute-infrastructure)
37
+ - [Hardware](#hardware)
38
+ - [Software](#software)
39
+ - [Citation](#citation)
40
+ - [Glossary [optional]](#glossary-optional)
41
+ - [More Information [optional]](#more-information-optional)
42
+ - [Model Card Authors [optional]](#model-card-authors-optional)
43
+ - [Model Card Contact](#model-card-contact)
44
+ - [How to Get Started with the Model](#how-to-get-started-with-the-model)
45
+
46
+
47
+ # Model Details
48
+
49
+ ## Model Description
50
+
51
+ <!-- Provide a longer summary of what this model is. -->
52
+ {{ the_model_description | default("More information needed", true)}}
53
+
54
+ - **Developed by:** {{ developers | default("More information needed", true)}}
55
+ - **Shared by [Optional]:** {{ shared_by | default("More information needed", true)}}
56
+ - **Model type:** Language model
57
+ - **Language(s) (NLP):** {{ language | default("More information needed", true)}}
58
+ - **License:** {{ license | default("More information needed", true)}}
59
+ - **Related Models:** {{ related_models | default("More information needed", true)}}
60
+ - **Parent Model:** {{ parent_model | default("More information needed", true)}}
61
+ - **Resources for more information:** {{ more_resources | default("More information needed", true)}}
62
+
63
+ # Uses
64
+
65
+ <!--> Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. <!-->
66
+
67
+ ## Direct Use
68
+
69
+ <!--> This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. <!-->
70
+
71
+ {{ direct_use | default("More information needed", true)}}
72
+
73
+ ## Downstream Use [Optional]
74
+
75
+ <!--> This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app <!-->
76
+
77
+ {{ downstream_use | default("More information needed", true)}}
78
+
79
+ ## Out-of-Scope Use
80
+
81
+ <!--> This section addresses misuse, malicious use, and uses that the model will not work well for. <!-->
82
+
83
+ {{ out_of_scope_use | default("More information needed", true)}}
84
+
85
+ # Bias, Risks, and Limitations
86
+
87
+ <!--> This section is meant to convey both technical and sociotechnical limitations. <!-->
88
+
89
+ {{ bias_risks_limitations | default("More information needed", true)}}
90
+
91
+ ## Recommendations
92
+
93
+ <!--> This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. <!-->
94
+
95
+ {{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", true)}}
96
+
97
+ # Training Details
98
+
99
+ ## Training Data
100
+
101
+ <!--> This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. <!-->
102
+
103
+ {{ training_data | default("More information needed", true)}}
104
+
105
+ ## Training Procedure
106
+
107
+ <!--> This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. <!-->
108
+
109
+ ### Preprocessing
110
+
111
+ {{ preprocessing | default("More information needed", true)}}
112
+
113
+ ### Speeds, Sizes, Times
114
+
115
+ <!--> This section provides information about throughput, start/end time, checkpoint size if relevant, etc. <!-->
116
+
117
+ {{ speeds_sizes_times | default("More information needed", true)}}
118
+
119
+ # Evaluation
120
+
121
+ <!--> This section describes the evaluation protocols and provides the results. <!-->
122
+
123
+ ## Testing Data, Factors & Metrics
124
+
125
+ ### Testing Data
126
+
127
+ <!--> This should link to a Data Card if possible. <!-->
128
+
129
+ {{ testing_data | default("More information needed", true)}}
130
+
131
+ ### Factors
132
+
133
+ <!--> These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. <!-->
134
+
135
+ {{ testing_factors | default("More information needed", true)}}
136
+
137
+ ### Metrics
138
+
139
+ <!--> These are the evaluation metrics being used, ideally with a description of why. <!-->
140
+
141
+ {{ testing_metrics | default("More information needed", true)}}
142
+
143
+ ## Results
144
+
145
+ {{ results | default("More information needed", true)}}
146
+
147
+ # Model Examination
148
+
149
+ {{ model_examination | default("More information needed", true)}}
150
+
151
+ # Environmental Impact
152
+
153
+ <!--> Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly <!-->
154
+
155
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
156
+
157
+ - **Hardware Type:** {{ hardware | default("More information needed", true)}}
158
+ - **Hours used:** {{ hours_used | default("More information needed", true)}}
159
+ - **Cloud Provider:** {{ cloud_provider | default("More information needed", true)}}
160
+ - **Compute Region:** {{ cloud_region | default("More information needed", true)}}
161
+ - **Carbon Emitted:** {{ co2_emitted | default("More information needed", true)}}
162
+
163
+ # Technical Specifications [optional]
164
+
165
+ ## Model Architecture and Objective
166
+
167
+ {{ model_specs | default("More information needed", true)}}
168
+
169
+ ## Compute Infrastructure
170
+
171
+ {{ compute_infrastructure | default("More information needed", true)}}
172
+
173
+ ### Hardware
174
+
175
+ {{ hardware | default("More information needed", true)}}
176
+
177
+ ### Software
178
+
179
+ {{ software | default("More information needed", true)}}
180
+
181
+ # Citation
182
+
183
+ <!--> If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. <!-->
184
+
185
+ **BibTeX:**
186
+
187
+ {{ citation_bibtex | default("More information needed", true)}}
188
+
189
+ **APA:**
190
+
191
+ {{ citation_apa | default("More information needed", true)}}
192
+
193
+ # Glossary [optional]
194
+
195
+ <!--> If relevant, include terms and calculations in this section that can help readers understand the model or model card. <!-->
196
+
197
+ {{ glossary | default("More information needed", true)}}
198
+
199
+ # More Information [optional]
200
+
201
+ {{ more_information | default("More information needed", true)}}
202
+
203
+ # Model Card Authors [optional]
204
+
205
+ {{ model_card_authors | default("More information needed", true)}}
206
+
207
+ # Model Card Contact
208
+
209
+ {{ model_card_contact | default("More information needed", true)}}
210
+
211
+ # How to Get Started with the Model
212
+
213
+ Use the code below to get started with the model.
214
+
215
+ <details>
216
+ <summary> Click to expand </summary>
217
+
218
+ {{ get_started_code | default("More information needed", true)}}
219
+
220
+ </details>
221
+
222
+
out_markd.md ADDED
@@ -0,0 +1,1042 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+
3
+
4
+
5
+
6
+
7
+ #
8
+
9
+ M
10
+ o
11
+ d
12
+ e
13
+ l
14
+
15
+ D
16
+ e
17
+ t
18
+ a
19
+ i
20
+ l
21
+ s
22
+
23
+
24
+ #
25
+ #
26
+
27
+ M
28
+ o
29
+ d
30
+ e
31
+ l
32
+
33
+ D
34
+ e
35
+ s
36
+ c
37
+ r
38
+ i
39
+ p
40
+ t
41
+ i
42
+ o
43
+ n
44
+
45
+
46
+ <
47
+ !
48
+ -
49
+ -
50
+ >
51
+
52
+ P
53
+ r
54
+ o
55
+ v
56
+ i
57
+ d
58
+ e
59
+
60
+ a
61
+
62
+ l
63
+ o
64
+ n
65
+ g
66
+ e
67
+ r
68
+
69
+ s
70
+ u
71
+ m
72
+ m
73
+ a
74
+ r
75
+ y
76
+
77
+ o
78
+ f
79
+
80
+ w
81
+ h
82
+ a
83
+ t
84
+
85
+ t
86
+ h
87
+ i
88
+ s
89
+
90
+ m
91
+ o
92
+ d
93
+ e
94
+ l
95
+
96
+ i
97
+ s
98
+ .
99
+
100
+ <
101
+ !
102
+ -
103
+ -
104
+ >
105
+
106
+
107
+ -
108
+
109
+ *
110
+ *
111
+ D
112
+ e
113
+ v
114
+ e
115
+ l
116
+ o
117
+ p
118
+ e
119
+ d
120
+
121
+ b
122
+ y
123
+ :
124
+ *
125
+ *
126
+
127
+ {
128
+ {
129
+
130
+ d
131
+ e
132
+ v
133
+ e
134
+ l
135
+ o
136
+ p
137
+ e
138
+ r
139
+ s
140
+
141
+ |
142
+
143
+ d
144
+ e
145
+ f
146
+ a
147
+ u
148
+ l
149
+ t
150
+ (
151
+ "
152
+ M
153
+ o
154
+ r
155
+ e
156
+
157
+ i
158
+ n
159
+ f
160
+ o
161
+ r
162
+ m
163
+ a
164
+ t
165
+ i
166
+ o
167
+ n
168
+
169
+ n
170
+ e
171
+ e
172
+ d
173
+ e
174
+ d
175
+ "
176
+ ,
177
+
178
+ t
179
+ r
180
+ u
181
+ e
182
+ )
183
+ }
184
+ }
185
+ -
186
+
187
+ *
188
+ *
189
+ S
190
+ h
191
+ a
192
+ r
193
+ e
194
+ d
195
+
196
+ b
197
+ y
198
+
199
+ [
200
+ O
201
+ p
202
+ t
203
+ i
204
+ o
205
+ n
206
+ a
207
+ l
208
+ ]
209
+ :
210
+ *
211
+ *
212
+
213
+ {
214
+ {
215
+
216
+ s
217
+ h
218
+ a
219
+ r
220
+ e
221
+ d
222
+ _
223
+ b
224
+ y
225
+
226
+ |
227
+
228
+ d
229
+ e
230
+ f
231
+ a
232
+ u
233
+ l
234
+ t
235
+ (
236
+ "
237
+ M
238
+ o
239
+ r
240
+ e
241
+
242
+ i
243
+ n
244
+ f
245
+ o
246
+ r
247
+ m
248
+ a
249
+ t
250
+ i
251
+ o
252
+ n
253
+
254
+ n
255
+ e
256
+ e
257
+ d
258
+ e
259
+ d
260
+ "
261
+ ,
262
+
263
+ t
264
+ r
265
+ u
266
+ e
267
+ )
268
+ }
269
+ }
270
+ -
271
+
272
+ *
273
+ *
274
+ M
275
+ o
276
+ d
277
+ e
278
+ l
279
+
280
+ t
281
+ y
282
+ p
283
+ e
284
+ :
285
+ *
286
+ *
287
+
288
+ L
289
+ a
290
+ n
291
+ g
292
+ u
293
+ a
294
+ g
295
+ e
296
+
297
+ m
298
+ o
299
+ d
300
+ e
301
+ l
302
+ -
303
+
304
+ *
305
+ *
306
+ L
307
+ a
308
+ n
309
+ g
310
+ u
311
+ a
312
+ g
313
+ e
314
+ (
315
+ s
316
+ )
317
+
318
+ (
319
+ N
320
+ L
321
+ P
322
+ )
323
+ :
324
+ *
325
+ *
326
+
327
+ {
328
+ {
329
+
330
+ l
331
+ a
332
+ n
333
+ g
334
+ u
335
+ a
336
+ g
337
+ e
338
+
339
+ |
340
+
341
+ d
342
+ e
343
+ f
344
+ a
345
+ u
346
+ l
347
+ t
348
+ (
349
+ "
350
+ M
351
+ o
352
+ r
353
+ e
354
+
355
+ i
356
+ n
357
+ f
358
+ o
359
+ r
360
+ m
361
+ a
362
+ t
363
+ i
364
+ o
365
+ n
366
+
367
+ n
368
+ e
369
+ e
370
+ d
371
+ e
372
+ d
373
+ "
374
+ ,
375
+
376
+ t
377
+ r
378
+ u
379
+ e
380
+ )
381
+ }
382
+ }
383
+ -
384
+
385
+ *
386
+ *
387
+ L
388
+ i
389
+ c
390
+ e
391
+ n
392
+ s
393
+ e
394
+ :
395
+ *
396
+ *
397
+
398
+ {
399
+ {
400
+
401
+ l
402
+ i
403
+ c
404
+ e
405
+ n
406
+ s
407
+ e
408
+
409
+ |
410
+
411
+ d
412
+ e
413
+ f
414
+ a
415
+ u
416
+ l
417
+ t
418
+ (
419
+ "
420
+ M
421
+ o
422
+ r
423
+ e
424
+
425
+ i
426
+ n
427
+ f
428
+ o
429
+ r
430
+ m
431
+ a
432
+ t
433
+ i
434
+ o
435
+ n
436
+
437
+ n
438
+ e
439
+ e
440
+ d
441
+ e
442
+ d
443
+ "
444
+ ,
445
+
446
+ t
447
+ r
448
+ u
449
+ e
450
+ )
451
+ }
452
+ }
453
+ -
454
+
455
+ *
456
+ *
457
+ R
458
+ e
459
+ l
460
+ a
461
+ t
462
+ e
463
+ d
464
+
465
+ M
466
+ o
467
+ d
468
+ e
469
+ l
470
+ s
471
+ :
472
+ *
473
+ *
474
+
475
+ {
476
+ {
477
+
478
+ r
479
+ e
480
+ l
481
+ a
482
+ t
483
+ e
484
+ d
485
+ _
486
+ m
487
+ o
488
+ d
489
+ e
490
+ l
491
+ s
492
+
493
+ |
494
+
495
+ d
496
+ e
497
+ f
498
+ a
499
+ u
500
+ l
501
+ t
502
+ (
503
+ "
504
+ M
505
+ o
506
+ r
507
+ e
508
+
509
+ i
510
+ n
511
+ f
512
+ o
513
+ r
514
+ m
515
+ a
516
+ t
517
+ i
518
+ o
519
+ n
520
+
521
+ n
522
+ e
523
+ e
524
+ d
525
+ e
526
+ d
527
+ "
528
+ ,
529
+
530
+ t
531
+ r
532
+ u
533
+ e
534
+ )
535
+ }
536
+ }
537
+
538
+
539
+
540
+
541
+ -
542
+
543
+ *
544
+ *
545
+ P
546
+ a
547
+ r
548
+ e
549
+ n
550
+ t
551
+
552
+ M
553
+ o
554
+ d
555
+ e
556
+ l
557
+ :
558
+ *
559
+ *
560
+
561
+ {
562
+ {
563
+
564
+ p
565
+ a
566
+ r
567
+ e
568
+ n
569
+ t
570
+ _
571
+ m
572
+ o
573
+ d
574
+ e
575
+ l
576
+
577
+ |
578
+
579
+ d
580
+ e
581
+ f
582
+ a
583
+ u
584
+ l
585
+ t
586
+ (
587
+ "
588
+ M
589
+ o
590
+ r
591
+ e
592
+
593
+ i
594
+ n
595
+ f
596
+ o
597
+ r
598
+ m
599
+ a
600
+ t
601
+ i
602
+ o
603
+ n
604
+
605
+ n
606
+ e
607
+ e
608
+ d
609
+ e
610
+ d
611
+ "
612
+ ,
613
+
614
+ t
615
+ r
616
+ u
617
+ e
618
+ )
619
+ }
620
+ }
621
+ -
622
+
623
+ *
624
+ *
625
+ R
626
+ e
627
+ s
628
+ o
629
+ u
630
+ r
631
+ c
632
+ e
633
+ s
634
+
635
+ f
636
+ o
637
+ r
638
+
639
+ m
640
+ o
641
+ r
642
+ e
643
+
644
+ i
645
+ n
646
+ f
647
+ o
648
+ r
649
+ m
650
+ a
651
+ t
652
+ i
653
+ o
654
+ n
655
+ :
656
+ *
657
+ *
658
+
659
+ {
660
+ {
661
+
662
+ m
663
+ o
664
+ r
665
+ e
666
+ _
667
+ r
668
+ e
669
+ s
670
+ o
671
+ u
672
+ r
673
+ c
674
+ e
675
+ s
676
+
677
+ |
678
+
679
+ d
680
+ e
681
+ f
682
+ a
683
+ u
684
+ l
685
+ t
686
+ (
687
+ "
688
+ M
689
+ o
690
+ r
691
+ e
692
+
693
+ i
694
+ n
695
+ f
696
+ o
697
+ r
698
+ m
699
+ a
700
+ t
701
+ i
702
+ o
703
+ n
704
+
705
+ n
706
+ e
707
+ e
708
+ d
709
+ e
710
+ d
711
+ "
712
+ ,
713
+
714
+ t
715
+ r
716
+ u
717
+ e
718
+ )
719
+ }
720
+ }
721
+
722
+ <
723
+ d
724
+ e
725
+ t
726
+ a
727
+ i
728
+ l
729
+ s
730
+ >
731
+
732
+ <
733
+ s
734
+ u
735
+ m
736
+ m
737
+ a
738
+ r
739
+ y
740
+ >
741
+
742
+ C
743
+ l
744
+ i
745
+ c
746
+ k
747
+
748
+ t
749
+ o
750
+
751
+ e
752
+ x
753
+ p
754
+ a
755
+ n
756
+ d
757
+
758
+ <
759
+ /
760
+ s
761
+ u
762
+ m
763
+ m
764
+ a
765
+ r
766
+ y
767
+ >
768
+
769
+
770
+ #
771
+
772
+ B
773
+ i
774
+ a
775
+ s
776
+ ,
777
+
778
+ R
779
+ i
780
+ s
781
+ k
782
+ s
783
+ ,
784
+
785
+ a
786
+ n
787
+ d
788
+
789
+ L
790
+ i
791
+ m
792
+ i
793
+ t
794
+ a
795
+ t
796
+ i
797
+ o
798
+ n
799
+ s
800
+
801
+
802
+ <
803
+ !
804
+ -
805
+ -
806
+ >
807
+
808
+ T
809
+ h
810
+ i
811
+ s
812
+
813
+ s
814
+ e
815
+ c
816
+ t
817
+ i
818
+ o
819
+ n
820
+
821
+ i
822
+ s
823
+
824
+ m
825
+ e
826
+ a
827
+ n
828
+ t
829
+
830
+ t
831
+ o
832
+
833
+ c
834
+ o
835
+ n
836
+ v
837
+ e
838
+ y
839
+
840
+ b
841
+ o
842
+ t
843
+ h
844
+
845
+ t
846
+ e
847
+ c
848
+ h
849
+ n
850
+ i
851
+ c
852
+ a
853
+ l
854
+
855
+ a
856
+ n
857
+ d
858
+
859
+ s
860
+ o
861
+ c
862
+ i
863
+ o
864
+ t
865
+ e
866
+ c
867
+ h
868
+ n
869
+ i
870
+ c
871
+ a
872
+ l
873
+
874
+ l
875
+ i
876
+ m
877
+ i
878
+ t
879
+ a
880
+ t
881
+ i
882
+ o
883
+ n
884
+ s
885
+ .
886
+
887
+ <
888
+ !
889
+ -
890
+ -
891
+ >
892
+
893
+
894
+ #
895
+ #
896
+
897
+ R
898
+ e
899
+ c
900
+ o
901
+ m
902
+ m
903
+ e
904
+ n
905
+ d
906
+ a
907
+ t
908
+ i
909
+ o
910
+ n
911
+ s
912
+
913
+
914
+ <
915
+ !
916
+ -
917
+ -
918
+ >
919
+
920
+ T
921
+ h
922
+ i
923
+ s
924
+
925
+ s
926
+ e
927
+ c
928
+ t
929
+ i
930
+ o
931
+ n
932
+
933
+ i
934
+ s
935
+
936
+ m
937
+ e
938
+ a
939
+ n
940
+ t
941
+
942
+ t
943
+ o
944
+
945
+ c
946
+ o
947
+ n
948
+ v
949
+ e
950
+ y
951
+
952
+ r
953
+ e
954
+ c
955
+ o
956
+ m
957
+ m
958
+ e
959
+ n
960
+ d
961
+ a
962
+ t
963
+ i
964
+ o
965
+ n
966
+ s
967
+
968
+ w
969
+ i
970
+ t
971
+ h
972
+
973
+ r
974
+ e
975
+ s
976
+ p
977
+ e
978
+ c
979
+ t
980
+
981
+ t
982
+ o
983
+
984
+ t
985
+ h
986
+ e
987
+
988
+ b
989
+ i
990
+ a
991
+ s
992
+ ,
993
+
994
+ r
995
+ i
996
+ s
997
+ k
998
+ ,
999
+
1000
+ a
1001
+ n
1002
+ d
1003
+
1004
+ t
1005
+ e
1006
+ c
1007
+ h
1008
+ n
1009
+ i
1010
+ c
1011
+ a
1012
+ l
1013
+
1014
+ l
1015
+ i
1016
+ m
1017
+ i
1018
+ t
1019
+ a
1020
+ t
1021
+ i
1022
+ o
1023
+ n
1024
+ s
1025
+ .
1026
+
1027
+ <
1028
+ !
1029
+ -
1030
+ -
1031
+ >
1032
+
1033
+ <
1034
+ /
1035
+ d
1036
+ e
1037
+ t
1038
+ a
1039
+ i
1040
+ l
1041
+ s
1042
+ >
output.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Collection of ideas
2
+ ## Idea 0
3
+
4
+ [1]normal text under header 1
5
+
pages/10_ πŸ“_Technical Specifications.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+ #from specific_extraction import extract_it
5
+
6
+
7
+ global variable_output
8
+
9
def main():
    """Entry point for the Technical Specifications page."""
    cs_body()
11
+
12
+
13
+
14
def cs_body():
    """Render the Technical Specifications form: section labels in the
    left column and matching persisted text areas in the right column."""
    st.markdown('# Technical Specifications [optional]')
    st.write("Provide an overview of any additional technical specifications for this model")
    left, right = st.columns([2,4])

    with left:
        # The blank writes below only pad the labels vertically so they
        # line up with the text areas in the right-hand column.
        st.write("\n")
        st.write("\n")
        st.markdown('### Model Architecture and Objective:')
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.markdown('### Compute Infrastructure:')
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.write("\n")

        st.markdown('##### Hardware:')
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.write("\n")
        st.markdown('##### Software:')

    with right:
        #soutput_jinja = parse_into_jinja_markdown()
        # Widget values persist across pages via the persist() key helper.
        st.text_area("", key=persist("Model_specs"))
        #st.write("\n")
        st.text_area("",key=persist("compute_infrastructure"))
        st.text_area("", key=persist("Model_hardware"))
        st.text_area("", key=persist("technical_specs_software"))
54
+
55
+
56
+
57
+
58
+
59
+ if __name__ == '__main__':
60
+ load_widget_state()
61
+ main()
pages/11_ πŸ“¬_Model_Card_Contact.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from persist import persist, load_widget_state
from middleMan import get_card, writingPrompt, apply_view
# from specific_extraction import extract_it

# NOTE(review): the original declared `global variable_output` at module scope;
# `global` is a no-op outside a function body, so it has been removed.


def main():
    cs_body()


def cs_body():
    """Render the 'Model Card Contact' page: a single persisted text area."""
    st.markdown('# Model Card Contact')
    st.text_area("Mediums to use, in order to contact the model creators.", key=persist("Model_card_contact"))


if __name__ == '__main__':
    load_widget_state()
    main()
pages/12_πŸ‘©β€πŸ’»_How_To_Get_Started.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ #from middleMan import apply_view,writingPrompt
4
+ from extract_code import read_file
5
+
6
+
7
+
8
+ global variable_output
9
+
10
+ def main():
11
+
12
+ cs_body()
13
+
14
+ def cs_body():
15
+
16
+ library_name = st.session_state.library_name
17
+ model_name = st.session_state.model_name
18
+ model_name_to_str = f"{model_name}"
19
+ library_name_to_str = f"{library_name}"
20
+ text_pass = read_file(library_name_to_str, model_name_to_str) ## get the how to get started code
21
+
22
+ st.markdown('# How to Get Started with the Model')
23
+ st.session_state['Model_how_to'] = text_pass
24
+ st.text_area("Include relevant terms and calculations in this section that can help readers understand the model or model card.",height = 300, key=persist("Model_how_to"))
25
+
26
+
27
+
28
+ if __name__ == '__main__':
29
+ load_widget_state()
30
+ main()
pages/13_πŸ”–_Model_Card_Authors.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+ #from specific_extraction import extract_it
5
+
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+ cs_body()
11
+
12
+
13
+
14
+ def cs_body():
15
+ # Model Cards
16
+ #card = get_card()
17
+ #card.save('current_editable.md')
18
+
19
+ st.markdown('# Model Card Authors [optional]')
20
+ st.text_area("This section provides another layer of transparency and accountability. Whose views is this model card representing? How many voices were included in its construction? Etc.",height = 180, key=persist("the_authors"))
21
+
22
+
23
+
24
+
25
+ if __name__ == '__main__':
26
+ load_widget_state()
27
+ main()
pages/14_πŸ“š_Glossary.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+ #from specific_extraction import extract_it
5
+
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+ cs_body()
11
+
12
+
13
+
14
+ def cs_body():
15
+
16
+ st.markdown('# Glossary [optional]')
17
+ st.text_area("Include relevant terms and calculations in this section that can help readers understand the model or model card.",height = 200, key=persist("Glossary"))
18
+
19
+
20
+
21
+
22
+
23
+
24
+ if __name__ == '__main__':
25
+ load_widget_state()
26
+ main()
pages/15_More_Information.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+ #from specific_extraction import extract_it
5
+
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+ cs_body()
11
+
12
+
13
+
14
+ def cs_body():
15
+
16
+
17
+ st.markdown('# More Information [optional]')
18
+ st.text_area("Any additional information",height = 200, key=persist("More_info"))
19
+
20
+
21
+
22
+
23
+
24
+ if __name__ == '__main__':
25
+ load_widget_state()
26
+ main()
pages/1_πŸ‘€_CardProgress.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from persist import load_widget_state
from middleMan import parse_into_jinja_markdown as pj

# NOTE(review): `from ast import parse` and `import os` were unused in this
# file (fully visible here) and have been removed.


def main():
    """Render the assembled model card by writing the jinja-parsed markdown."""
    st.write(pj())


if __name__ == '__main__':
    load_widget_state()
    main()
pages/2_πŸ“œ_Model_Details.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+ import pandas as pd
5
+ import requests
6
+
7
+ #from specific_extraction import extract_it
8
+
9
+
10
+ global variable_output
11
+
12
@st.cache
def get_cached_data():
    """Fetch (once, then cached by Streamlit) the license map for the License selectbox.

    Maps a license's full name to the identifier used in model-card metadata.

    PERF FIX: the original also downloaded the hf.co languages table, the
    metrics list, and the models-tags taxonomy on every cache miss, then
    discarded them (only `license_map` was ever returned). Those three
    network calls are dropped; the return value is unchanged.
    """
    license_df = pd.read_html("https://huggingface.co/docs/hub/repositories-licenses")[0]
    license_map = pd.Series(
        license_df["License identifier (to use in model card)"].values, index=license_df.Fullname
    ).to_dict()
    return license_map
30
+
31
+
32
def cs_body():
    """Render the 'Model Details' page.

    Left column shows section labels; right column shows the matching inputs,
    persisted across pages via persist()/load_widget_state().
    """
    license_map = get_cached_data()
    # st.set_page_config(layout="wide")  # not yet supported on the hub
    st.markdown('## Model Details')
    st.markdown('### Model Description')
    st.text_area("Provide a 1-2 sentence summary of what this model is.", help="The model description provides basic details about the model. This includes the architecture, version, if it was introduced in a paper, if an original implementation is available, the author, and general information about the model. Any copyright should be attributed here. General information about training procedures, parameters, and important disclaimers can also be mentioned in this section.", key=persist('model_description'))

    left, right = st.columns([2, 6])

    with left:
        # Spacer writes vertically align the labels with the right-column inputs.
        st.write("\n")
        st.write("\n")
        st.markdown('### Developed By:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Shared By [optional]:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Model Type:')
        st.write("\n")
        st.write("\n")
        st.markdown('### License:')
    with right:
        st.write("\n")
        st.write("\n")
        st.text_input("", help="Developed By work", key=persist("Model_developers"))
        st.write("\n")
        st.write("\n")
        st.text_input("", help="Shared By work", key=persist("shared_by"))
        # CONSISTENCY FIX: every sibling input persists its value; "Model Type"
        # previously had no key, so whatever the user typed was silently lost.
        st.text_input("", help="Model Type work", key=persist("model_type"))
        st.selectbox("", [""] + list(license_map.values()), help="Licenses work", key=persist("license"))
66
+
67
+ def main():
68
+ cs_body()
69
+
70
+
71
+
72
+
73
+ if __name__ == '__main__':
74
+ load_widget_state()
75
+ main()
pages/3_ πŸ—_Uses.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+
5
+
6
+ global variable_output
7
+
8
+ def main():
9
+
10
+ cs_body()
11
+
12
+ def cs_body():
13
+ # Model Cards
14
+ card = get_card()
15
+
16
+ st.markdown('# Uses')
17
+ st.text_area("Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model",help="Model uses large work")
18
+ left, right = st.columns([2,4])
19
+
20
+ #st.markdown('### Model Description')
21
+
22
+
23
+ with left:
24
+ st.write("\n")
25
+ st.write("\n")
26
+ st.markdown('### Direct Use:')
27
+ st.write("\n")
28
+ st.write("\n")
29
+ st.write("\n")
30
+ st.write("\n")
31
+ st.write("\n")
32
+ st.write("\n")
33
+ #st.write("\n")
34
+ st.markdown('### Downstream Use [Optional]:')
35
+ st.write("\n")
36
+ st.write("\n")
37
+ st.write("\n")
38
+ st.write("\n")
39
+ st.markdown('### Out-of-Scope Use:')
40
+
41
+ with right:
42
+ st.text_area("",help="This section is for the model use without fine-tuning or plugging into a larger ecosystem/app.", key=persist("Direct_Use"))
43
+ st.text_area("",help="This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/ap",key=persist("Downstream_Use"))
44
+ st.text_area("", help=" This section addresses misuse, malicious use, and uses that the model will not work well for.", key=persist("Out-of-Scope_Use"))
45
+
46
+
47
+
48
+
49
+ if __name__ == '__main__':
50
+ load_widget_state()
51
+ main()
pages/4_⚠️_Limits_and_Risks.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import writingPrompt,apply_view
4
+
5
+
6
+ global variable_output
7
+
8
+ def main():
9
+ cs_body()
10
+
11
+ def cs_body():
12
+
13
+ st.markdown('# Bias, Risks, and Limitations')
14
+ st.text_area("Use this section to convey both technical and sociotechnical limitations",help="Provide an overview of the possible Limitations and Risks that may be associated with this model", key=persist("Model_Limits_n_Risks"), )
15
+ left, right = st.columns([2,4])
16
+
17
+ #st.markdown('### Model Description')
18
+
19
+
20
+ with left:
21
+ st.write("\n")
22
+ st.write("\n")
23
+ st.markdown('### Recommendations:')
24
+
25
+
26
+ with right:
27
+ #soutput_jinja = parse_into_jinja_markdown()
28
+ st.text_area("",help="This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. ", key=persist("Recommendations"))
29
+ #st.write("\n")
30
+
31
+
32
+
33
+
34
+ if __name__ == '__main__':
35
+ load_widget_state()
36
+ main()
pages/5_πŸ‹οΈβ€β™€οΈ_Model_training.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from pathlib import Path
4
+
5
+ from middleMan import apply_view,writingPrompt
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+
11
+ cs_body()
12
+
13
+
14
+ def cs_body():
15
+
16
+ st.markdown('# Training Details')
17
+ st.write("Provide an overview of the Training Data and Training Procedure for this model")
18
+ left, middle, right = st.columns([2,1,7])
19
+
20
+ with left:
21
+ st.write("\n")
22
+ st.write("\n")
23
+ st.markdown('## Training Data:')
24
+ st.write("\n")
25
+ st.write("\n")
26
+ st.write("\n")
27
+ st.write("\n")
28
+ with middle:
29
+ st.write("\n")
30
+ st.write("\n")
31
+ st.write("\n")
32
+ st.write("\n")
33
+ st.write("\n")
34
+ st.write("\n")
35
+ st.write("\n")
36
+ st.write("\n")
37
+ st.write("\n")
38
+ st.write("\n")
39
+ st.write("\n")
40
+ st.write("\n")
41
+ st.markdown(' \n ## Training Procedure')
42
+ with left:
43
+ st.write("\n")
44
+ st.write("\n")
45
+ st.write("\n")
46
+ st.write("\n")
47
+ st.write("\n")
48
+ st.write("\n")
49
+ st.write("\n")
50
+ st.write("\n")
51
+ st.write("\n")
52
+
53
+ st.markdown('#### Preprocessing:')
54
+ st.write("\n")
55
+ st.write("\n")
56
+ st.write("\n")
57
+ st.write("\n")
58
+ st.write("\n")
59
+ st.write("\n")
60
+ st.write("\n")
61
+ st.markdown('#### Speeds, Sizes, Time:')
62
+
63
+ with right:
64
+ #soutput_jinja = parse_into_jinja_markdown()
65
+
66
+ st.text_area("", key=persist("training_data"))
67
+ #st.write("\n")
68
+ st.write("\n")
69
+ st.write("\n")
70
+ st.write("\n")
71
+ st.write("\n")
72
+ st.write("\n")
73
+ st.write("\n")
74
+ st.write("\n")
75
+ st.write("\n")
76
+ st.write("\n")
77
+ st.write("\n")
78
+ st.write("\n")
79
+
80
+ st.text_area("", key=persist("preprocessing"))
81
+ st.text_area("", help = "This section provides information about throughput, start/end time, checkpoint size if relevant, etc.", key=persist("Speeds_Sizes_Timese"))
82
+
83
+
84
+
85
+
86
+
87
+
88
+ if __name__ == '__main__':
89
+ load_widget_state()
90
+ main()
pages/6_πŸ”¬_Model_Evaluation.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from pathlib import Path
4
+
5
+ from middleMan import apply_view,writingPrompt
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+ cs_body()
11
+
12
+
13
+ def cs_body():
14
+
15
+ #stateVariable = 'Model_Eval'
16
+ #help_text ='Detail the Evaluation Results for this model'
17
+ #col1.header('Model Evaluation')
18
+ st.markdown('# Evaluation')
19
+ st.text_area(" This section describes the evaluation protocols and provides the results. ",help="Detail the Evaluation Results for this model")
20
+ st.markdown('## Testing Data, Factors & Metrics:')
21
+ left, right = st.columns([2,4])
22
+
23
+ #st.markdown('### Model Description')
24
+
25
+
26
+ with left:
27
+ st.write("\n")
28
+ st.write("\n")
29
+ st.markdown('#### Testing Data:')
30
+ st.write("\n")
31
+ st.write("\n")
32
+ st.write("\n")
33
+ st.write("\n")
34
+ st.write("\n")
35
+ st.write("\n")
36
+ #st.write("\n")
37
+ st.markdown('#### Factors:')
38
+ st.write("\n")
39
+ st.write("\n")
40
+ st.write("\n")
41
+ st.write("\n")
42
+ st.write("\n")
43
+ st.write("\n")
44
+ st.markdown('#### Metrics:')
45
+ st.write("\n")
46
+ st.write("\n")
47
+ st.write("\n")
48
+ st.write("\n")
49
+ st.write("\n")
50
+ st.markdown('#### Results:')
51
+
52
+ with right:
53
+ #soutput_jinja = parse_into_jinja_markdown()
54
+ st.text_area("", key=persist("Testing_Data"))
55
+ #st.write("\n")
56
+ st.text_area("",help="These are the things the evaluation is disaggregating by, e.g., subpopulations or domains.",key=persist("Factors"))
57
+ st.text_area("", help=" These are the evaluation metrics being used, ideally with a description of why.", key=persist("Metrics"))
58
+ st.text_area("", key=persist("Model_Results"))
59
+
60
+
61
+
62
+
63
+
64
+ if __name__ == '__main__':
65
+ load_widget_state()
66
+ main()
pages/7_πŸ”Ž_Model_Examination.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from middleMan import get_card,writingPrompt,apply_view
4
+ #from specific_extraction import extract_it
5
+
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+ cs_body()
11
+
12
+
13
+
14
+ def cs_body():
15
+
16
+ #col1.header('Model Examination')
17
+ #stateVariable = "Model_examin"
18
+ #help_text = 'Give an overview of your model, the relevant research paper, who trained it, etc.'
19
+
20
+ st.markdown('# Model Examination')
21
+ st.text_area("Provide an overview of your model, the relevant research paper, who trained it, etc",height = 200, key=persist("Model_examin"), )
22
+ #left, right = st.columns([2,4], gap="small")
23
+
24
+ #st.markdown('### Model Description')
25
+
26
+
27
+
28
+
29
+
30
+
31
+
32
+
33
+ if __name__ == '__main__':
34
+ load_widget_state()
35
+ main()
pages/8_🌏_Environmental_Impact.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from pathlib import Path
4
+
5
+ from middleMan import apply_view,writingPrompt
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+
11
+ cs_body()
12
+
13
+
14
def cs_body():
    """Render the 'Environmental Impact' page.

    Left column shows section labels; right column shows persisted inputs for
    hardware, hours, provider, region and emitted CO2.
    """
    # NOTE(review): the unused locals `stateVariable` and `help_text` were removed.
    st.markdown('# Environmental Impact')
    st.markdown('###### Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).')
    # TYPO FIX: user-facing help text said "horus spent training".
    st.text_area("", help="Provide an estimate for the carbon emissions: e.g hardware used, hours spent training, cloud provider")

    left, right = st.columns([2, 4])
    with left:
        # Spacer writes vertically align the labels with the right-column inputs.
        st.write("\n")
        st.write("\n")
        st.markdown('### Hardware Type:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Hours used:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Cloud Provider:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Compute Region:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Carbon Emitted:')
    with right:
        st.text_input("", key=persist("Model_hardware"))
        st.text_input("", help="sw", key=persist("hours_used"))
        st.text_input("", key=persist("Model_cloud_provider"))
        st.text_input("", key=persist("Model_cloud_region"))
        # TODO: auto-calculate emissions from hardware/hours/provider/region.
        st.text_input("", help='in grams of CO2eq', key=persist("Model_c02_emitted"))
49
+
50
+
51
+
52
+
53
+
54
+ if __name__ == '__main__':
55
+ load_widget_state()
56
+ main()
pages/9_πŸ“Œ_Citation.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from pathlib import Path
4
+
5
+ from middleMan import apply_view,writingPrompt
6
+
7
+ global variable_output
8
+
9
+ def main():
10
+ cs_body()
11
+
12
+ def cs_body():
13
+
14
+ st.markdown('# Citation')
15
+ st.write("If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section")
16
+ left, right = st.columns([2,4])
17
+
18
+ #st.markdown('### Model Description')
19
+
20
+
21
+ with left:
22
+ st.write("\n")
23
+ st.write("\n")
24
+ st.markdown('### BibTeX:')
25
+ st.write("\n")
26
+ st.write("\n")
27
+ st.write("\n")
28
+ st.write("\n")
29
+ st.write("\n")
30
+ st.write("\n")
31
+ st.markdown('### APA:')
32
+
33
+
34
+ with right:
35
+
36
+ st.text_area("", key=persist("bibtex_citation"))
37
+ st.text_area("", key=persist("APA_citation"))
38
+ #st.write("\n")
39
+
40
+
41
+
42
+
43
+
44
+
45
+
46
+ if __name__ == '__main__':
47
+ load_widget_state()
48
+ main()
pages/__pycache__/HowToGetStarted.cpython-39.pyc ADDED
Binary file (1.52 kB). View file
 
pages/__pycache__/firstPage.cpython-39.pyc ADDED
Binary file (1.78 kB). View file
 
pages/__pycache__/viewCardProgress.cpython-39.pyc ADDED
Binary file (2.86 kB). View file
 
persist.cpython-39.pyc ADDED
Binary file (877 Bytes). View file
 
persist.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Thank god this existed.
2
+ # https://gist.github.com/okld/0aba4869ba6fdc8d49132e6974e2e662
3
+
4
+ from streamlit import session_state as _state
5
+
6
+ _PERSIST_STATE_KEY = f"{__name__}_PERSIST"
7
+
8
+
9
def persist(key: str) -> str:
    """Register *key* so load_widget_state() restores it across page switches.

    Returns the key unchanged so the call can be inlined as a widget's `key=`.
    """
    persisted = _state.get(_PERSIST_STATE_KEY)
    if persisted is None:
        persisted = set()
        _state[_PERSIST_STATE_KEY] = persisted
    persisted.add(key)
    return key
17
+
18
+
19
def load_widget_state():
    """Re-assign every persisted key's value into the current session state.

    Re-writing the values keeps widgets populated when the user navigates
    between pages; does nothing if no key was ever marked persistent.
    """
    if _PERSIST_STATE_KEY not in _state:
        return
    persisted_keys = _state[_PERSIST_STATE_KEY]
    restored = {k: v for k, v in _state.items() if k in persisted_keys}
    _state.update(restored)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit
2
+ modelcards==0.1.2
3
+ pandas
4
+ lxml
specific_extraction.py ADDED
@@ -0,0 +1,528 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import streamlit as st
3
+ from modelcards import CardData, ModelCard
4
+ from markdownTagExtract import tag_checker,listToString,to_markdown
5
+ #from specific_extraction import extract_it
6
+
7
+
8
+ # from persist import persist
9
+ #global bytes_data
10
+
11
+
12
+ ################################################################
13
+ #### Markdown parser logic #################################
14
+ ################################################################
15
+
16
def file_upload():
    """Return the markdown text previously stashed in session state by the uploader."""
    return st.session_state.markdown_upload
19
+
20
+
21
+ # Sets up the basics
22
+ model_card_md = file_upload() # this is where the new model card will be read in from
23
+ model_card_md = model_card_md#.decode("utf-8")
24
+ # Does metadata appear in any other format than this?
25
+ metadata_re = re.compile("^---(.*?)---", re.DOTALL)
26
+ header_re = re.compile("^\s*# (.*)", re.MULTILINE)
27
+ subheader_re = re.compile("^\s*## (.*)", re.MULTILINE)
28
+ subsubheader_re = re.compile("^\s*### (.*)", re.MULTILINE)
29
+ subsubsubheader_re = re.compile("^\s*#### (.*)", re.MULTILINE)
30
+ # We could be a lot more flexible on this re.
31
+ # We require keys to be bold-faced here.
32
+ # We don't have to require bold, as long as it's key:value
33
+ # **License:**
34
+ # Bold terms use ** or __
35
+ # Allows the mixing of ** and __ for bold but eh whatev
36
+ key_value_re = re.compile("^\s*([*_]{2}[^*_]+[*_]{2})([^\n]*)", re.MULTILINE)
37
+ # Hyphens or stars mark list items.
38
+ # Unordered list
39
+ list_item_re = re.compile("^\s*[-*+]\s+.*", re.MULTILINE)
40
+ # This is the ordered list
41
+ enum_re = re.compile("^\s*[0-9].*", re.MULTILINE)
42
+ table_re = re.compile("^\s*\|.*", re.MULTILINE)
43
+ text_item_re = re.compile("^\s*[A-Za-z(](.*)", re.MULTILINE)
44
+ # text_item_re = re.compile("^\s*#\s*.*", re.MULTILINE)
45
+ # Allows the mixing of -* and *- for italics but eh whatev
46
+ italicized_text_item_re = re.compile(
47
+ "^[_*][^_*\s].*\n?.*[^_*][_*]$", flags=re.MULTILINE
48
+ )
49
+ tag_re = re.compile("^\s*<.*", re.MULTILINE)
50
+ image_re = re.compile("!\[.*\]\(.*\)", re.MULTILINE)
51
+
52
+
53
+ subheader_re_dict = {}
54
+ subheader_re_dict[header_re] = subheader_re
55
+ subheader_re_dict[subheader_re] = subsubheader_re
56
+ subheader_re_dict[subsubheader_re] = subsubsubheader_re
57
+
58
+
59
+ def get_metadata(section_text):
60
+ return list(metadata_re.finditer(section_text))
61
+
62
+
63
+ def find_images(section_text):
64
+ return list(image_re.finditer(section_text))
65
+
66
+
67
+ def find_tags(section_text):
68
+ return list(tag_re.finditer(section_text))
69
+
70
+
71
+ def find_tables(section_text):
72
+ return list(table_re.finditer(section_text))
73
+
74
+
75
+ def find_enums(section_text):
76
+ return list(enum_re.finditer(section_text))
77
+
78
+
79
+ # Extracts the stuff from the .md file
80
+ def find_key_values(section_text):
81
+ return list(key_value_re.finditer(section_text))
82
+
83
+
84
+ def find_lists(section_text):
85
+ # Find lists: Those lines starting with either '-' or '*'
86
+ return list(list_item_re.finditer(section_text))
87
+
88
+
89
+ def find_texts(section_text):
90
+ # Find texts: Free writing within a section
91
+ basic_text = list(text_item_re.finditer(section_text))
92
+ ital_text = list(italicized_text_item_re.finditer(section_text))
93
+ free_text = basic_text + ital_text
94
+ return free_text
95
+
96
+
97
+ def find_headers(full_text):
98
+ headers = list(header_re.finditer(full_text))
99
+ subheaders = list(subheader_re.finditer(full_text))
100
+ subsubheaders = list(subsubheader_re.finditer(full_text))
101
+ subsubsubheaders = list(subsubsubheader_re.finditer(full_text))
102
+ return (headers, subheaders, subsubheaders, subsubsubheaders)
103
+
104
+
105
+ metadata_list = get_metadata(model_card_md)
106
+ if metadata_list != []:
107
+ metadata_end = metadata_list[-1].span()[-1]
108
+ print("Metadata extracted")
109
+ # Metadata processing can happen here.
110
+ # For now I'm just ignoring it.
111
+ model_card_md = model_card_md[metadata_end:]
112
+ else:
113
+ print("No metadata found")
114
+
115
+ # Matches of all header types
116
+ headers_list = find_headers(model_card_md)
117
+ print("Headers extracted")
118
+ # This type of header (one #)
119
+ headers = headers_list[0]
120
+ ## This type of header (two ##)
121
+ subheaders = headers_list[1]
122
+ ### This type of header
123
+ subsubheaders = headers_list[2]
124
+ #### This type of header
125
+ subsubsubheaders = headers_list[3]
126
+
127
+ # Matches of bulleted lists
128
+ lists_list = find_lists(model_card_md)
129
+ print("Bulleted lists extracted")
130
+
131
+ enums_list = find_enums(model_card_md)
132
+ print("Enumerated lists extracted")
133
+
134
+ key_value_list = find_key_values(model_card_md)
135
+ print("Key values extracted")
136
+
137
+ tables_list = find_tables(model_card_md)
138
+ print("Tables extracted")
139
+
140
+ tags_list = find_tags(model_card_md)
141
+ print("Markup tags extracted")
142
+
143
+ images_list = find_images(model_card_md)
144
+ print("Images extracted")
145
+
146
+ # Matches of free text within a section
147
+ texts_list = find_texts(model_card_md)
148
+ print("Free text extracted")
149
+
150
+
151
+ # List items have the attribute: value;
152
+ # This provides for special handling of those strings,
153
+ # allowing us to check if it's a list item in order to split/print ok.
154
+ LIST_ITEM = "List item"
155
+ KEY_VALUE = "Key: Value"
156
+ FREE_TEXT = "Free text"
157
+ ENUM_LIST_ITEM = "Enum item"
158
+ TABLE_ITEM = "Table item"
159
+ TAG_ITEM = "Markup tag"
160
+ IMAGE_ITEM = "Image"
161
+
162
+
163
def create_span_dict(match_list, match_type):
    """Build {span: (matched_text, match_type)} for every non-blank regex match.

    Useful for knowing which types to fill out with what in the app, and for
    checking whether any span in the .md file was missed by the parsers.
    """
    return {
        match.span(): (match.group(), match_type)
        for match in match_list
        if match.group().strip()
    }
174
+
175
+
176
+ metadata_span_dict = create_span_dict(metadata_list, "Metadata")
177
+ # Makes a little dict for each span type
178
+ header_span_dict = create_span_dict(headers, "# Header")
179
+ subheader_span_dict = create_span_dict(subheaders, "## Subheader")
180
+ subsubheader_span_dict = create_span_dict(subsubheaders, "### Subsubheader")
181
+ subsubsubheader_span_dict = create_span_dict(subsubsubheaders, "#### Subsubsubheader")
182
+ key_value_span_dict = create_span_dict(key_value_list, KEY_VALUE)
183
+ lists_span_dict = create_span_dict(lists_list, LIST_ITEM)
184
+ enums_span_dict = create_span_dict(enums_list, ENUM_LIST_ITEM)
185
+ tables_span_dict = create_span_dict(tables_list, TABLE_ITEM)
186
+ tags_span_dict = create_span_dict(tags_list, TAG_ITEM)
187
+ images_span_dict = create_span_dict(images_list, IMAGE_ITEM)
188
+ texts_span_dict = create_span_dict(texts_list, FREE_TEXT)
189
+
190
+ # We don't have to have these organized by type necessarily.
191
+ # Doing it here for clarity.
192
+ all_spans_dict = {}
193
+ all_spans_dict["headers"] = header_span_dict
194
+ all_spans_dict["subheaders"] = subheader_span_dict
195
+ all_spans_dict["subsubheaders"] = subsubheader_span_dict
196
+ all_spans_dict["subsubsubheaders"] = subsubsubheader_span_dict
197
+ all_spans_dict[LIST_ITEM] = lists_span_dict
198
+ all_spans_dict[KEY_VALUE] = key_value_span_dict
199
+ all_spans_dict[TABLE_ITEM] = tables_span_dict
200
+ all_spans_dict[ENUM_LIST_ITEM] = enums_span_dict
201
+ all_spans_dict[TAG_ITEM] = tags_span_dict
202
+ all_spans_dict[IMAGE_ITEM] = images_span_dict
203
+ all_spans_dict[FREE_TEXT] = texts_span_dict
204
+
205
+
206
def get_sorted_spans(spans_dict):
    """Flatten the per-type span dicts into one dict plus a sorted span list.

    Returns (sorted list of span tuples, merged {span: (text, type)} dict).
    """
    combined = {}
    for per_type_spans in spans_dict.values():
        combined.update(per_type_spans)
    return sorted(combined), combined
212
+
213
+
214
+ sorted_spans, merged_spans = get_sorted_spans(all_spans_dict)
215
+
216
+ # Sanity/Parse check. Have we captured all spans in the .md file?
217
+ if sorted_spans[0][0] != 0:
218
+ print("FYI, our spans don't start at the start of the file.")
219
+ print("We did not catch this start:")
220
+ print(model_card_md[: sorted_spans[0][0]])
221
+
222
+ for idx in range(len(sorted_spans) - 1):
223
+ last_span_end = sorted_spans[idx][1]
224
+ new_span_start = sorted_spans[idx + 1][0]
225
+ if new_span_start > last_span_end + 1:
226
+ start_nonparse = sorted_spans[idx]
227
+ end_nonparse = sorted_spans[idx + 1]
228
+ text = model_card_md[start_nonparse[1] : end_nonparse[0]]
229
+ if text.strip():
230
+ print("Found an unparsed span in the file:")
231
+ print(start_nonparse)
232
+ print(" ---> ")
233
+ print(end_nonparse)
234
+ print(text)
235
+
236
+ # print(header_span_dict)
237
def section_map_to_help_text(text_retrieved):
    """Map a markdown section header to the help text shown for that section.

    Returns None when the header is not a known section.

    BUG FIX: the original did `return presit_states(key)` -- calling the dict
    as a function -- which raised TypeError on every match; the dict must be
    indexed, done here via .get().
    """
    presit_states = {
        "## Model Details": "Give an overview of your model, the relevant research paper, who trained it, etc.",
        "## How to Get Started with the Model": "Give an overview of how to get started with the model",
        "## Limitations and Biases": "Provide an overview of the possible Limitations and Risks that may be associated with this model",
        "## Uses": "Detail the potential uses, intended use and out-of-scope uses for this model",
        "## Training": "Provide an overview of the Training Data and Training Procedure for this model",
        "## Evaluation Results": "Detail the Evaluation Results for this model",
        "## Environmental Impact": "Provide an estimate for the carbon emissions: Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here.",
        "## Citation Information": "How to best cite the model authors",
        "## Glossary": "If relevant, include terms and calculations in this section that can help readers understand the model or model card.",
        "## More Information": "Any additional information",
        "## Model Card Authors": "This section provides another layer of transparency and accountability. Whose views is this model card representing? How many voices were included in its construction? Etc.",
        "Model Card Contact": "Mediums to use, in order to contact the model creators",
        "## Technical Specifications": " Additional technical information",
        "## Model Examination": " Examining the model",
    }
    return presit_states.get(text_retrieved)
260
+
261
def section_map_to_persist(text_retrieved):
    """Reverse-map a markdown section header to its session-state key name.

    Returns None when the header is not a known section. (Headers are unique,
    so a direct header->key dict is equivalent to scanning the key->header map.)
    """
    header_to_key = {
        "## Model Details": "Model_details_text",
        "## How to Get Started with the Model": "Model_how_to",
        "## Limitations and Biases": "Model_Limits_n_Risks",
        "## Uses": "Model_uses",
        "## Training": "Model_training",
        "## Evaluation Results": "Model_Eval",
        "## Environmental Impact": "Model_carbon",
        "## Citation Information": "Model_cite",
        "## Glossary": "Glossary",
        "## More Information": "More_info",
        "## Model Card Authors": "Model_card_authors",
        "## Model Card Contact": "Model_card_contact",
        "## Technical specifications": "Technical_specs",
        "## Model Examination": "Model_examin",
    }
    return header_to_key.get(text_retrieved)
283
+
284
+
285
def main():
    """Smoke test: print the text extracted for the "Model Details" section."""
    # st.write('here')
    print(extract_it("Model_details_text"))
288
+
289
+
290
def extract_headers():
    """Bucket markdown spans by header depth (#, ##, ###, ####).

    Walks the module-level ``sorted_spans`` (span keys into ``merged_spans``,
    where ``merged_spans[s]`` is ``(text, kind)``) and, for every header span,
    records its position together with the position of the previous header
    seen at the same depth.

    Returns:
        Four dicts — headers, subheaders, subsubheaders, subsubsubheaders —
        each mapping ``span -> (own_index, previous_same_depth_index_or_None)``.
    """
    headers = {}
    subheaders = {}
    subsubheaders = {}
    subsubsubheaders = {}
    # previous[d] holds the index of the most recent header at depth d.
    previous = (None, None, None, None)

    # Perf fix: the original called sorted_spans.index(s) repeatedly inside
    # the loop, making it O(n^2). Span keys come from the merged_spans dict
    # and are therefore unique, so the enumerate position equals index(s).
    for pos, s in enumerate(sorted_spans):
        kind = merged_spans[s][1]
        if kind == "# Header":
            headers[s] = (pos, previous[0])
            previous = (pos, previous[1], previous[2], previous[3])
        if kind == "## Subheader":
            subheaders[s] = (pos, previous[1])
            previous = (previous[0], pos, previous[2], previous[3])
        if kind == "### Subsubheader":
            subsubheaders[s] = (pos, previous[2])
            previous = (previous[0], previous[1], pos, previous[3])
        if kind == "#### Subsubsubheader":
            subsubsubheaders[s] = (pos, previous[3])
            previous = (previous[0], previous[1], previous[2], pos)

    return headers, subheaders, subsubheaders, subsubsubheaders
312
+
313
+
314
def stringify():
    """Concatenate the raw text between consecutive headers of each depth.

    Uses the index pairs from extract_headers() to slice ``sorted_spans``
    and join the corresponding ``merged_spans`` texts into one string per
    section, keyed by a whitespace/markup-stripped header name.

    Returns:
        Four dicts of ``{cleaned header name: section text}``, one per
        header depth (#, ##, ###, ####).
    """
    headers, subheaders, subsubheaders, subsubsubheaders = extract_headers()
    headers_strings = {}
    subheaders_strings = {}
    subsubheaders_strings = {}
    subsubsubheaders_strings = {}

    # NOTE(review): `first` is written but never read in this first loop —
    # it only affects the later loops, where it gates the break conditions.
    first = None
    for i in headers:
        # Skip the first header of this depth (no previous header to slice from).
        if headers[i][1] == None:
            continue
        # Spans lying between the previous same-depth header and this one.
        sub_spans = sorted_spans[headers[i][1] : headers[i][0]]
        lines = []
        for x in sub_spans:
            lines.append(merged_spans[x][0])
        try:
            name = lines[0]
        except:
            # Empty slice: fall back to a default section name.
            name = "Model Details"
        lines = "".join(lines)
        # print(merged_spans[i][0] + "-------------------")
        # print(lines)
        # Key is the header text with markdown markup, whitespace, and Jinja
        # braces stripped out.
        # NOTE(review): two identical .replace(" ", "") calls appear here —
        # the second is a no-op; possibly one was meant to strip double spaces.
        headers_strings[
            name.replace("\n# ", "")
            .replace(" ", "")
            .replace(" ", "")
            .replace("\n", "")
            .replace("{{", "")
            .replace("}}", "")
        ] = lines
        first = i

    first = None
    for i in subheaders:
        if subheaders[i][1] == None:
            continue
        sub_spans = sorted_spans[subheaders[i][1] : subheaders[i][0]]
        lines = []
        for x in sub_spans:
            # Stop at the next section boundary: another subheader (only
            # before the first one is recorded) or any top-level header.
            if merged_spans[x][1] == "## Subheader" and first == None:
                break
            elif merged_spans[x][1] == "# Header":
                break
            else:
                lines.append(merged_spans[x][0])
        try:
            name = lines[0]
        except:
            name = "Model Details"
        lines = "".join(lines)
        # print(merged_spans[i][0] + "-------------------")
        # print(lines)
        # NOTE(review): strips "\n# " rather than "\n## " — verify this
        # matches how subheader span text is actually stored.
        subheaders_strings[
            name.replace("\n# ", "").replace(" ", "").replace(" ", "")
        ] = lines
        first = i

    first = None
    for i in subsubheaders:
        if subsubheaders[i][1] == None:
            continue
        sub_spans = sorted_spans[subsubheaders[i][1] : subsubheaders[i][0]]
        lines = []
        for x in sub_spans:
            # Boundary: a subheader, or a subsubheader before the first one.
            if merged_spans[x][1] == "## Subheader" or (
                merged_spans[x][1] == "### Subsubheader" and first == None
            ):
                break
            else:
                lines.append(merged_spans[x][0])
        lines = "".join(lines)

        # Unlike the two loops above, the key here comes from the header
        # span itself rather than from the first collected line.
        subsubheaders_strings[
            merged_spans[i][0].replace("\n", "").replace("### ", "").replace(" ", "")
        ] = lines
        first = i

    for i in subsubsubheaders:
        if subsubsubheaders[i][1] == None:
            continue
        sub_spans = sorted_spans[subsubsubheaders[i][1] : subsubsubheaders[i][0]]
        lines = []
        for x in sub_spans:
            # Boundary: any shallower header ends this #### section.
            if (
                merged_spans[x][1] == "## Subheader"
                or merged_spans[x][1] == "### Subsubheader"
            ):
                break
            else:
                lines.append(merged_spans[x][0])
        lines = "".join(lines)

        subsubsubheaders_strings[
            merged_spans[i][0].replace("#### ", "").replace("**", "").replace("\n", "")
        ] = lines

    return (
        headers_strings,
        subheaders_strings,
        subsubheaders_strings,
        subsubsubheaders_strings,
    )
416
+
417
+
418
def extract_it(text_to_retrieve):
    """Pull one model-card section's text out of the parsed markdown spans.

    Args:
        text_to_retrieve: Session-state style key, e.g. "Model_details_text"
            or "Model_uses".

    Returns:
        The concatenated text gathered for that section (with a trailing
        space appended).

    Raises:
        KeyError: if ``text_to_retrieve`` is not one of the known keys.
    """
    print("Span\t\tType\t\tText")
    print("-------------------------------------")
    # NOTE(review): the four locals below are never read — leftovers from an
    # earlier revision.
    found_subheader = False
    current_subheader = " "
    page_state = " "
    help_text = " "
    #st.write("in cs- body here")

    # One dict per header depth: {cleaned header name: section body text}.
    (
        headers_strings,
        subheaders_strings,
        subsubheaders_strings,
        subsubsubheaders_strings,
    ) = stringify()

    h_keys = list(headers_strings.keys())
    sh_keys = list(subheaders_strings.keys())
    ssh_keys = list(subsubheaders_strings.keys())
    sssh_keys = list(subsubsubheaders_strings.keys())

    # Case-insensitive substring "needles" used to match section names.
    # NOTE(review): stringify() strips spaces out of header keys, so the
    # multi-word needles "model details" / "more information" may never
    # match a key — verify against real span data.
    needed = [
        "model details",
        "howto",
        "limitations",
        "uses",
        "training",
        "evaluation",
        "environmental",
        "citation",
        "glossary",
        "more information",
        "authors",
        "contact",
    ] # not sure what keyword should be used for citation, howto, and contact
    # info_strings = {
    # "details": "## Model Details",
    # "howto": "## How to Get Started with the Model",
    # "limitations": "## Limitations and Biases",
    # "uses": "## Uses",
    # "training": "## Training",
    # "evaluation": "## Evaluation Results",
    # "environmental": "## Environmental Impact",
    # "citation": "## Citation Information",
    # "glossary": "## Glossary",
    # "more information": "## More Information",
    # "authors": "## Model Card Authors",
    # "contact": "## Model Card Contact",
    # }
    # Accumulator: text gathered for each needle across all header depths.
    info_strings = {
        "model details": "",
        "howto": "",
        "limitations": "",
        "uses": "",
        "training": "",
        "evaluation": "",
        "environmental": "",
        "citation": "",
        "glossary": "",
        "more information": "",
        "authors": "",
        "contact": "",
    }

    # Append the text of every section (at any depth) whose cleaned header
    # name contains the needle.
    for x in needed:
        for l in h_keys:
            if x in l.lower():
                info_strings[x] = info_strings[x] + headers_strings[l]
        for i in sh_keys:
            if x in i.lower():
                info_strings[x] = info_strings[x] + subheaders_strings[i]
        for z in ssh_keys:
            # Bare except guards against non-string keys.
            # NOTE(review): consider narrowing to AttributeError.
            try:
                if x in z.lower():
                    info_strings[x] = info_strings[x] + subsubheaders_strings[z]
            except:
                continue
        for y in sssh_keys:
            try:
                if x in y.lower():
                    info_strings[x] = info_strings[x] + subsubsubheaders_strings[y]
            except:
                continue

    # Map the public session-state keys onto the accumulated text.
    # NOTE(review): Technical_specs / Model_examin map to literal header
    # strings rather than extracted content — confirm that is intended.
    extracted_info = {
        "Model_details_text": info_strings["model details"],
        "Model_how_to": info_strings["howto"],
        "Model_Limits_n_Risks": info_strings["limitations"],
        "Model_uses": info_strings["uses"],
        "Model_training": info_strings["training"],
        "Model_Eval": info_strings["evaluation"],
        "Model_carbon": info_strings["environmental"],
        "Model_cite": info_strings["citation"],
        "Glossary": info_strings["glossary"],
        "More_info": info_strings["more information"],
        "Model_card_authors": info_strings["authors"],
        "Model_card_contact": info_strings["contact"],
        "Technical_specs": "## Technical specifications",
        "Model_examin": "## Model Examination",
    }

    #text_to_retrieve = "Model_details_text"

    new_t = extracted_info[text_to_retrieve] + " "

    return(new_t)
524
+
525
+
526
+ if __name__ == "__main__":
527
+
528
+ main()
style.css ADDED
File without changes
temp_uploaded_filed_Dir/modelcard_template_new_spec.md ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ {{card_data}}
3
+ ---
4
+
5
+ # {{ model_id }}
6
+
7
+ <!-- Provide a quick summary of what the model is/does. -->
8
+
9
+ # Table of Contents
10
+
11
+ 1. [Model Details](#model-details)
12
+ 2. [Uses](#uses)
13
+ 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations)
14
+ 4. [Training Details](#training-details)
15
+ 5. [Evaluation](#evaluation)
16
+ 6. [Model Examination](#model-examination)
17
+ 7. [Environmental Impact](#environmental-impact)
18
+ 8. [Technical Specifications](#technical-specifications-optional)
19
+ 9. [Citation](#citation)
20
+ 10. [Glossary](#glossary-optional)
21
+ 11. [More Information](#more-information-optional)
22
+ 12. [Model Card Authors](#model-card-authors-optional)
23
+ 13. [Model Card Contact](#model-card-contact)
24
+ 14. [How To Get Started With the Model](#how-to-get-started-with-the-model)
25
+
26
+
27
+ # Model Details
28
+
29
+ ## Model Description
30
+
31
+ <!-- Provide a longer summary of what this model is. -->
32
+
33
+ - **Developed by:** {{ developers | default("More information needed", true)}}
34
+ - **Shared by [Optional]:** {{ shared_by | default("More information needed", true)}}
35
+ - **Model type:** Language model
36
+ - **Language(s) (NLP):** {{ language | default("More information needed", true)}}
37
+ - **License:** {{ license | default("More information needed", true)}}
38
+ - **Related Models:** {{ related_models | default("More information needed", true)}}
39
+ - **Parent Model:** {{ parent_model | default("More information needed", true)}}
40
+ - **Resources for more information:** {{ more_resources | default("More information needed", true)}}
41
+
42
+ # Uses
43
+
44
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
45
+
46
+ ## Direct Use
47
+
48
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
49
+
50
+ {{ direct_use | default("More information needed", true)}}
51
+
52
+ ## Downstream Use [Optional]
53
+
54
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app. -->
55
+
56
+ {{ downstream_use | default("More information needed", true)}}
57
+
58
+ ## Out-of-Scope Use
59
+
60
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
61
+
62
+ {{ out_of_scope_use | default("More information needed", true)}}
63
+
64
+ # Bias, Risks, and Limitations
65
+
66
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
67
+
68
+ {{ bias_risks_limitations | default("More information needed", true)}}
69
+
70
+ ## Recommendations
71
+
72
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
73
+
74
+ {{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", true)}}
75
+
76
+ # Training Details
77
+
78
+ ## Training Data
79
+
80
+ <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ {{ training_data | default("More information needed", true)}}
83
+
84
+ ## Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ ### Preprocessing
89
+
90
+ {{ preprocessing | default("More information needed", true)}}
91
+
92
+ ### Speeds, Sizes, Times
93
+
94
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
95
+
96
+ {{ speeds_sizes_times | default("More information needed", true)}}
97
+
98
+ # Evaluation
99
+
100
+ <!-- This section describes the evaluation protocols and provides the results. -->
101
+
102
+ ## Testing Data, Factors & Metrics
103
+
104
+ ### Testing Data
105
+
106
+ <!-- This should link to a Data Card if possible. -->
107
+
108
+ {{ testing_data | default("More information needed", true)}}
109
+
110
+ ### Factors
111
+
112
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
113
+
114
+ {{ testing_factors | default("More information needed", true)}}
115
+
116
+ ### Metrics
117
+
118
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
119
+
120
+ {{ testing_metrics | default("More information needed", true)}}
121
+
122
+ ## Results
123
+
124
+ {{ results | default("More information needed", true)}}
125
+
126
+ # Model Examination
127
+
128
+ {{ model_examination | default("More information needed", true)}}
129
+
130
+ # Environmental Impact
131
+
132
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly. -->
133
+
134
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
135
+
136
+ - **Hardware Type:** {{ hardware | default("More information needed", true)}}
137
+ - **Hours used:** {{ hours_used | default("More information needed", true)}}
138
+ - **Cloud Provider:** {{ cloud_provider | default("More information needed", true)}}
139
+ - **Compute Region:** {{ cloud_region | default("More information needed", true)}}
140
+ - **Carbon Emitted:** {{ co2_emitted | default("More information needed", true)}}
141
+
142
+ # Technical Specifications [optional]
143
+
144
+ ## Model Architecture and Objective
145
+
146
+ {{ model_specs | default("More information needed", true)}}
147
+
148
+ ## Compute Infrastructure
149
+
150
+ {{ compute_infrastructure | default("More information needed", true)}}
151
+
152
+ ### Hardware
153
+
154
+ {{ hardware | default("More information needed", true)}}
155
+
156
+ ### Software
157
+
158
+ {{ software | default("More information needed", true)}}
159
+
160
+ # Citation
161
+
162
+ <!-- Provide citation details for this model, in BibTeX and APA formats. -->
163
+
164
+ **BibTeX:**
165
+
166
+ {{ citation_bibtex | default("More information needed", true)}}
167
+
168
+ **APA:**
169
+
170
+ {{ citation_apa | default("More information needed", true)}}
171
+
172
+ # Glossary [optional]
173
+
174
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
175
+
176
+ {{ glossary | default("More information needed", true)}}
177
+
178
+ # More Information [optional]
179
+
180
+ {{ more_information | default("More information needed", true)}}
181
+
182
+ # Model Card Authors [optional]
183
+
184
+ {{ model_card_authors | default("More information needed", true)}}
185
+
186
+ # Model Card Contact
187
+
188
+ {{ model_card_contact | default("More information needed", true)}}
189
+
190
+ # How to Get Started with the Model
191
+
192
+ Use the code below to get started with the model.
193
+
194
+ <details>
195
+ <summary> Click to expand </summary>
196
+
197
+ {{ get_started_code | default("More information needed", true)}}
198
+
199
+ </details>
200
+
201
+
template.md ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ {{ card_data }}
3
+ ---
4
+
5
+ # {{ model_id | default("MyModelName", true)}}
6
+
7
+ ## Table of Contents
8
+ - [{{ model_id | default("MyModelName", true)}}](#-model_id--defaultmymodelname-true)
9
+ - [Table of Contents](#table-of-contents)
10
+ - [Model Details](#model-details)
11
+ - [How to Get Started with the Model](#how-to-get-started-with-the-model)
12
+ - [Uses](#uses)
13
+ - [Direct Use](#direct-use)
14
+ - [Downstream Use](#downstream-use)
15
+ - [Misuse and Out-of-scope Use](#misuse-and-out-of-scope-use)
16
+ - [Limitations and Biases](#limitations-and-biases)
17
+ - [Training](#training)
18
+ - [Training Data](#training-data)
19
+ - [Training Procedure](#training-procedure)
20
+ - [Evaluation Results](#evaluation-results)
21
+ - [Environmental Impact](#environmental-impact)
22
+ - [Citation Information](#citation-information)
23
+
24
+
25
+ <model_details>
26
+ ## Model Details
27
+
28
+ <!-- Give an overview of your model, the relevant research paper, who trained it, etc. -->
29
+
30
+ {{ model_description if model_description else "[More Information Needed]" }}
31
+
32
+ - Developed by: {{ authors if authors }}
33
+ - Language(s): {{ languages }}
34
+ - License: This model is licensed under the {{ license }}{{ " license" if "license" not in license.lower() }}
35
+ - Resources for more information:
36
+ {{ " - [Research Paper](" + paper_url + ")" if paper_url }}
37
+ {{ " - [GitHub Repo](" + github_url + ")" if github_url }}
38
+
39
+ </model_details>
40
+
41
+ <how_to_start>
42
+ ## How to Get Started with the Model
43
+
44
+ Use the code below to get started with the model.
45
+
46
+ ```python
47
+ # A nice code snippet here that describes how to use the model...
48
+ ```
49
+ </how_to_start>
50
+
51
+ <uses>
52
+
53
+ ## Uses
54
+
55
+ #### Direct Use
56
+
57
+ <!-- Describe what kind of tasks this model can be used for directly or problems it can solve. -->
58
+
59
+ [More Information Needed]
60
+
61
+ #### Downstream Use
62
+
63
+ <!-- Describe how this model could be leveraged by a downstream model (if applicable) -->
64
+
65
+ [More Information Needed]
66
+
67
+ #### Misuse and Out-of-scope Use
68
+
69
+ <!-- Describe ways in which this model ***should not*** be used. -->
70
+
71
+ [More Information Needed]
72
+ </uses>
73
+
74
+ <Limitations_and_Biases>
75
+
76
+ ## Limitations and Biases
77
+
78
+ <!-- Describe limitations and biases of this model or models of it's type. -->
79
+
80
+ **CONTENT WARNING: Readers should be aware this section contains content that is disturbing, offensive, and can propagate historical and current stereotypes.**
81
+
82
+ [More Information Needed]
83
+
84
+ </Limitations_and_Biases>
85
+
86
+ <Training>
87
+
88
+ ## Training
89
+
90
+ #### Training Data
91
+
92
+ <!-- Describe the dataset used to train this model. -->
93
+ <!-- Refer to data card if dataset is provided and exists on the hub -->
94
+
95
+ See the data card for additional information.
96
+
97
+ #### Training Procedure
98
+
99
+ <!-- Describe the preprocessing, hardware used, training hyperparameters, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ </Training>
104
+
105
+ <Eval_Results>
106
+ ## Evaluation Results
107
+
108
+ <!-- Describe evaluation results of this model across any datasets it was evaluated on. -->
109
+
110
+ [More Information Needed]
111
+
112
+ </Eval_Results>
113
+
114
+ <E_Impact>
115
+ ## Environmental Impact
116
+
117
+ <!-- Provide information to document the environmental impact of this model -->
118
+
119
+ You can estimate carbon emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700)
120
+
121
+ - **Hardware Type:**
122
+ - **Hours used:**
123
+ - **Cloud Provider:**
124
+ - **Compute Region:**
125
+ - **Carbon Emitted:** {{ emissions if emissions }}
126
+
127
+ </E_Impact>
128
+
129
+ <Cite>
130
+
131
+ ## Citation Information
132
+
133
+ ```bibtex
134
+ {{ bibtex_citations if bibtex_citations }}
135
+ ```
136
+ </Cite>
test_markdown_out.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from jinja2 import Environment, FileSystemLoader
4
+
5
def parse_into_jinja_markdown():
    """Render the uploaded model-card Jinja template from Streamlit state.

    Loads the template whose path is stored in
    ``st.session_state.markdown_upload`` and fills each placeholder with the
    corresponding widget value from the session state.

    Returns:
        The rendered model-card markdown string.
    """
    env = Environment(loader=FileSystemLoader('.'), autoescape=True)
    temp = env.get_template(st.session_state.markdown_upload)

    return (temp.render(
        model_id=st.session_state["model_name"],
        the_model_description=st.session_state["model_description"],
        developers=st.session_state["Model_developers"],
        shared_by=st.session_state["shared_by"],
        license=st.session_state['license'],
        direct_use=st.session_state["Direct_Use"],
        downstream_use=st.session_state["Downstream_Use"],
        out_of_scope_use=st.session_state["Out-of-Scope_Use"],
        bias_risks_limitations=st.session_state["Model_Limits_n_Risks"],
        bias_recommendations=st.session_state['Recommendations'],
        model_examination=st.session_state['Model_examin'],
        hardware=st.session_state['Model_hardware'],
        hours_used=st.session_state['hours_used'],
        cloud_provider=st.session_state['Model_cloud_provider'],
        cloud_region=st.session_state['Model_cloud_region'],
        co2_emitted=st.session_state['Model_c02_emitted'],
        # Bug fix: the two citation kwargs were swapped — the template's
        # "BibTeX:" slot received the APA citation and vice versa.
        citation_bibtex=st.session_state['bibtex_citation'],
        citation_apa=st.session_state["APA_citation"],
        training_data=st.session_state['training_data'],
        preprocessing=st.session_state['preprocessing'],
        speeds_sizes_times=st.session_state['Speeds_Sizes_Times'],
        model_specs=st.session_state['Model_specs'],
        compute_infrastructure=st.session_state['compute_infrastructure'],
        software=st.session_state['technical_specs_software'],
        glossary=st.session_state['Glossary'],
        more_information=st.session_state['More_info'],
        model_card_authors=st.session_state['the_authors'],
        model_card_contact=st.session_state['Model_card_contact'],
        get_started_code=st.session_state["Model_how_to"],
    ))
24
+
25
def main():
    """Render the filled-in model card and display it in the app."""
    rendered = parse_into_jinja_markdown()
    st.write(rendered)
27
+
28
# Restore persisted widget state before rendering the preview.
if __name__ == '__main__':
    load_widget_state()
    main()
testing_layout.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ import pandas as pd
4
+ import requests
5
+
6
+
7
+
8
+
9
@st.cache
def get_cached_data():
    """Fetch and cache the mapping of license full names to HF identifiers.

    This page only consumes the license map (the broader fetch lived in the
    main form page), so the original's extra requests for languages, metrics,
    and model tags — whose results were discarded before the return — have
    been removed to avoid four unused network round-trips on every cache miss.

    Returns:
        dict: ``{license full name: license identifier for model cards}``.
    """
    license_df = pd.read_html("https://huggingface.co/docs/hub/repositories-licenses")[0]
    license_map = pd.Series(
        license_df["License identifier (to use in model card)"].values, index=license_df.Fullname
    ).to_dict()
    return license_map
27
+
28
+
29
+
30
+
31
+
32
def main():
    """Prototype layout for the Model Details page (two-column label/input grid)."""
    license_map= get_cached_data()
    #st.set_page_config(layout="wide")
    st.markdown('## Model Details')
    st.markdown('### Model Description')
    # persist() keeps the value in session state across page switches.
    st.text_area("Provide a 1-2 sentence summary of what this model is.", help="The model description provides basic details about the model. This includes the architecture, version, if it was introduced in a paper, if an original implementation is available, the author, and general information about the model. Any copyright should be attributed here. General information about training procedures, parameters, and important disclaimers can also be mentioned in this section.", key=persist('model_description'))

    # Left column: field labels; right column: the matching input widgets.
    # The repeated st.write("\n") calls are vertical spacers to line the
    # labels up with their inputs.
    left, right = st.columns([2,6], gap="small")
    with left:
        st.write("\n")
        st.write("\n")
        st.markdown('### Developed By:')
        st.write("\n")
        st.write("\n")
        #st.write("\n")
        st.markdown('### Shared By [optional]:')
        st.write("\n")
        st.write("\n")
        st.markdown('### Model Type:')
        st.write("\n")
        st.write("\n")
        st.markdown('### License:')
    with right:
        st.write("\n")
        st.write("\n")
        st.text_input("",help="Developed By work", key=persist("Model_developers"))
        st.write("\n")
        st.write("\n")

        st.text_input("",help="Shared By work",key=persist("shared_by"))
        # NOTE(review): no persist key here, so the Model Type value is lost
        # on page switch — confirm whether that is intentional.
        st.text_input("",help="Model Type work")
        #st.write("\n")
        st.selectbox("",[""] + list(license_map.values()), help="Licenses work", key=persist("license"))
65
+
66
+
67
+
68
+
69
# Restore persisted widget state before building the layout.
if __name__ == '__main__':
    load_widget_state()
    main()
viewCardProgress(old).py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from persist import persist, load_widget_state
3
+ from modelcards import CardData, ModelCard
4
+ from huggingface_hub import create_repo
5
+
6
+
7
def is_float(value):
    """Return True if *value* can be converted to a float, else False.

    Accepts anything ``float()`` accepts (str, int, float, etc.); ``None``
    and non-numeric strings return False.
    """
    try:
        float(value)
        return True
    # Narrowed from a bare except: float() raises ValueError for bad
    # strings and TypeError for non-convertible types like None.
    except (TypeError, ValueError):
        return False
13
+
14
def get_card():
    """Build a ModelCard from the current Streamlit session state.

    Validates that languages and a license were provided — stopping the app
    with a single aggregated error message otherwise — then renders
    ``template.md`` with the collected metadata.

    Returns:
        The populated ModelCard instance.
    """
    state = st.session_state

    # Normalize empty widget values to None so the card omits them.
    languages = state.languages or None
    license_value = state.license or None
    library_name = state.library_name or None
    tags = [t.strip() for t in state.tags.split(',') if t.strip()]
    tags.append("autogenerated-modelcard")
    datasets = [d.strip() for d in state.datasets.split(',') if d.strip()] or None
    metrics = state.metrics or None
    model_name = state.model_name or None
    model_description = state.model_description or None
    authors = state.authors or None
    paper_url = state.paper_url or None
    github_url = state.github_url or None
    bibtex_citations = state.bibtex_citations or None
    # BUG (original note): a non-numeric emissions field silently becomes None.
    emissions = float(state.emissions) if is_float(state.emissions) else None

    # Required fields: accumulate everything missing into one warning.
    warning_msg = "Warning: The following fields are required but have not been filled in: "
    do_warn = False
    if not languages:
        warning_msg += "\n- Languages"
        do_warn = True
    if not license_value:
        warning_msg += "\n- License"
        do_warn = True
    if do_warn:
        st.error(warning_msg)
        st.stop()

    # Generate and return the card.
    card_data = CardData(
        language=languages,
        license=license_value,
        library_name=library_name,
        tags=tags,
        datasets=datasets,
        metrics=metrics,
    )
    if emissions:
        card_data.co2_eq_emissions = {'emissions': emissions}

    return ModelCard.from_template(
        card_data,
        template_path='template.md',
        model_id=model_name,
        # Template kwargs:
        model_description=model_description,
        license=license_value,
        authors=authors,
        paper_url=paper_url,
        github_url=github_url,
        bibtex_citations=bibtex_citations,
        emissions=emissions,
    )
71
+
72
+
73
def main():
    """Render the generated model card and offer an upload-to-Hub sidebar form.

    Saves the card to ``current_card.md``, shows it (raw or rendered per the
    sidebar checkbox), and pushes it to a Hugging Face repo when the sidebar
    form is submitted with a valid ``username/repo-name`` ID and token.
    """
    card = get_card()
    card.save('current_card.md')
    view_raw = st.sidebar.checkbox("View Raw")
    if view_raw:
        st.text(card)
    else:
        st.markdown(card.text, unsafe_allow_html=True)

    with st.sidebar:
        with st.form("Upload to πŸ€— Hub"):
            st.markdown("Use a token with write access from [here](https://hf.co/settings/tokens)")
            token = st.text_input("Token", type='password')
            repo_id = st.text_input("Repo ID")
            submit = st.form_submit_button('Upload to πŸ€— Hub')

    if submit:
        if len(repo_id.split('/')) == 2:
            repo_url = create_repo(repo_id, exist_ok=True, token=token)
            card.push_to_hub(repo_id, token=token)
            # Bug fix: the markdown link was unterminated — "[here]({repo_url}!"
            # lacked the closing ")" and rendered as broken text.
            st.success(f"Pushed the card to the repo [here]({repo_url})!")
        else:
            st.error("Repo ID invalid. It should be username/repo-name. For example: nateraw/food")
97
+
98
+
99
+ if __name__ == "__main__":
100
+ load_widget_state()
101
+ main()