Quentin Lhoest committed on
Commit
8abf5fd
2 Parent(s): 2f4f38e 9ff0632

Merge pull request #16 from lewtun/fix-extended-datasets

Browse files
Files changed (3) hide show
  1. .gitignore +139 -0
  2. requirements.txt +1 -1
  3. tagging_app.py +7 -11
.gitignore CHANGED
@@ -1,3 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  .idea
2
  metadata_*.json
3
  datasets/
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98
+ __pypackages__/
99
+
100
+ # Celery stuff
101
+ celerybeat-schedule
102
+ celerybeat.pid
103
+
104
+ # SageMath parsed files
105
+ *.sage.py
106
+
107
+ # Environments
108
+ .env
109
+ .venv
110
+ env/
111
+ venv/
112
+ ENV/
113
+ env.bak/
114
+ venv.bak/
115
+
116
+ # Spyder project settings
117
+ .spyderproject
118
+ .spyproject
119
+
120
+ # Rope project settings
121
+ .ropeproject
122
+
123
+ # mkdocs documentation
124
+ /site
125
+
126
+ # mypy
127
+ .mypy_cache/
128
+ .dmypy.json
129
+ dmypy.json
130
+
131
+ # Pyre type checker
132
+ .pyre/
133
+
134
+ # pytype static type analyzer
135
+ .pytype/
136
+
137
+ # Cython debug symbols
138
+ cython_debug/
139
+
140
  .idea
141
  metadata_*.json
142
  datasets/
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
  pyyaml
2
- datasets
3
  streamlit
4
  langcodes[data]
 
1
  pyyaml
2
+ datasets==1.7.0
3
  streamlit
4
  langcodes[data]
tagging_app.py CHANGED
@@ -314,17 +314,16 @@ if "original" in state["source_datasets"]:
314
  pre_select_ext_a += ["original"]
315
  if any([p.startswith("extended") for p in state["source_datasets"]]):
316
  pre_select_ext_a += ["extended"]
317
- state["extended"] = multiselect(
318
  leftcol,
319
  "Relations to existing work",
320
  "Does the dataset contain original data and/or was it extended from other datasets?",
321
  values=pre_select_ext_a,
322
  valid_set=["original", "extended"],
323
  )
324
- state["source_datasets"] = ["original"] if "original" in state["extended"] else []
325
 
326
- if "extended" in state["extended"]:
327
- pre_select_ext_b = [p.split("|")[1] for p in state["source_datasets"] if p.startswith("extended")]
328
  extended_sources = multiselect(
329
  leftcol,
330
  "Linked datasets",
@@ -332,13 +331,8 @@ if "extended" in state["extended"]:
332
  values=pre_select_ext_b,
333
  valid_set=dataset_ids + ["other"],
334
  )
335
- if "other" in extended_sources:
336
- other_extended_sources = leftcol.text_input(
337
- "You selected 'other' dataset. Please enter a short hyphen-separated description:",
338
- value="my-dataset",
339
- )
340
- leftcol.write(f"Registering other-{other_extended_sources} dataset")
341
- extended_sources[extended_sources.index("other")] = f"other-{other_extended_sources}"
342
  state["source_datasets"] += [f"extended|{src}" for src in extended_sources]
343
 
344
 
@@ -358,6 +352,8 @@ current_size_cats = state.get("size_categories") or ["unknown"]
358
  ok, nonok = split_known(current_size_cats, known_size_categories)
359
  if len(nonok) > 0:
360
  leftcol.markdown(f"**Found bad codes in existing tagset**:\n{nonok}")
 
 
361
 
362
 
363
  ########################
 
314
  pre_select_ext_a += ["original"]
315
  if any([p.startswith("extended") for p in state["source_datasets"]]):
316
  pre_select_ext_a += ["extended"]
317
+ state["source_datasets"] = multiselect(
318
  leftcol,
319
  "Relations to existing work",
320
  "Does the dataset contain original data and/or was it extended from other datasets?",
321
  values=pre_select_ext_a,
322
  valid_set=["original", "extended"],
323
  )
 
324
 
325
+ if "extended" in state["source_datasets"]:
326
+ pre_select_ext_b = [p.split("|")[1] for p in state["source_datasets"] if p.startswith("extended|")]
327
  extended_sources = multiselect(
328
  leftcol,
329
  "Linked datasets",
 
331
  values=pre_select_ext_b,
332
  valid_set=dataset_ids + ["other"],
333
  )
334
+ # flush placeholder
335
+ state["source_datasets"].remove("extended")
 
 
 
 
 
336
  state["source_datasets"] += [f"extended|{src}" for src in extended_sources]
337
 
338
 
 
352
  ok, nonok = split_known(current_size_cats, known_size_categories)
353
  if len(nonok) > 0:
354
  leftcol.markdown(f"**Found bad codes in existing tagset**:\n{nonok}")
355
+ else:
356
+ state["size_categories"] = [initial_size_cats]
357
 
358
 
359
  ########################