malvika2003 committed
Commit db5855f
1 Parent(s): f29da5b

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .binder/apt.txt +3 -0
  2. .binder/requirements.txt +1 -0
  3. .binder/runtime.txt +1 -0
  4. .ci/aggregate_notebooks_reports.py +59 -0
  5. .ci/check_links.py +102 -0
  6. .ci/check_notebooks.py +79 -0
  7. .ci/ci-requirements.txt +3 -0
  8. .ci/convert_notebooks.py +131 -0
  9. .ci/convert_notebooks.sh +31 -0
  10. .ci/dev-requirements.txt +16 -0
  11. .ci/heavy_ubuntu_gpu.txt +22 -0
  12. .ci/heavy_win_gpu.txt +8 -0
  13. .ci/ignore_convert_execution.txt +64 -0
  14. .ci/ignore_convert_full.txt +2 -0
  15. .ci/ignore_pip_conflicts.txt +29 -0
  16. .ci/ignore_treon_docker.txt +71 -0
  17. .ci/ignore_treon_linux.txt +72 -0
  18. .ci/ignore_treon_mac.txt +74 -0
  19. .ci/ignore_treon_py38.txt +2 -0
  20. .ci/ignore_treon_win.txt +70 -0
  21. .ci/keywords.json +27 -0
  22. .ci/patch_notebooks.py +168 -0
  23. .ci/pip_conflicts_check.sh +70 -0
  24. .ci/spellcheck/.pyspelling.wordlist.txt +877 -0
  25. .ci/spellcheck/.pyspelling.yml +49 -0
  26. .ci/spellcheck/ipynb_filter.py +48 -0
  27. .ci/spellcheck/run_spellcheck.py +25 -0
  28. .ci/table_of_content.py +167 -0
  29. .ci/tagger.py +44 -0
  30. .ci/test_notebooks.py +89 -0
  31. .ci/validate_notebooks.py +335 -0
  32. .docker/.aicoe-ci.yaml +9 -0
  33. .docker/.jupyter/custom/custom.css +1 -0
  34. .docker/.jupyter/nbconfig/common.json +30 -0
  35. .docker/.s2i/bin/assemble +145 -0
  36. .docker/.s2i/bin/assemble.orig +131 -0
  37. .docker/.s2i/bin/run +7 -0
  38. .docker/.s2i/bin/test +28 -0
  39. .docker/.s2i/bin/test_precommit +28 -0
  40. .docker/.thoth.yaml +27 -0
  41. .docker/Pipfile +59 -0
  42. .docker/Pipfile.lock +0 -0
  43. .docker/buildconfig.yaml +41 -0
  44. .docker/builder/assemble +40 -0
  45. .docker/builder/image_metadata.json +9 -0
  46. .docker/builder/run +73 -0
  47. .docker/builder/save-artifacts +3 -0
  48. .docker/gateway/logger.js +16 -0
  49. .docker/gateway/package.json +14 -0
  50. .docker/gateway/routes/webdav.js +14 -0
.binder/apt.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
.binder/requirements.txt ADDED
@@ -0,0 +1 @@
+ -r ../requirements.txt
.binder/runtime.txt ADDED
@@ -0,0 +1 @@
+ python-3.8
.ci/aggregate_notebooks_reports.py ADDED
@@ -0,0 +1,59 @@
+ import csv
+ import json
+ from pathlib import Path
+ from typing import Dict
+ from itertools import product
+
+ REPORTS_DIR = "test_reports"
+
+
+ class ValidationMatrix:
+     os = ("ubuntu-20.04", "ubuntu-22.04", "windows-2019", "macos-12")
+     python = ("3.8", "3.9", "3.10")
+
+     @classmethod
+     def values(cls):
+         return product(cls.os, cls.python)
+
+
+ def get_report_file_path(os: str, python: str) -> Path:
+     return Path(REPORTS_DIR) / f"{os}-{python}" / "test_report.csv"
+
+
+ def get_default_status_dict(notebook_name: str) -> Dict:
+     default_status = None
+
+     def _get_python_status_dict():
+         return dict((python, default_status) for python in ValidationMatrix.python)
+
+     return {
+         "name": notebook_name,
+         "status": dict((os, _get_python_status_dict()) for os in ValidationMatrix.os),
+     }
+
+
+ def write_json_file(filename: str, data: Dict):
+     with open(filename, "w") as file:
+         json.dump(data, file, indent=2)
+
+
+ def main():
+     NOTEBOOKS_STATUS_MAP = {}
+     for os, python in ValidationMatrix.values():
+         report_file_path = get_report_file_path(os, python)
+         if not report_file_path.exists():
+             print(f'Report file "{report_file_path}" does not exist.')
+             continue
+         print(f'Processing report file "{report_file_path}".')
+         with open(report_file_path, "r") as report_file:
+             for row in csv.DictReader(report_file):
+                 name = row["name"]
+                 status = row["status"]
+                 if name not in NOTEBOOKS_STATUS_MAP:
+                     NOTEBOOKS_STATUS_MAP[name] = get_default_status_dict(name)
+                 NOTEBOOKS_STATUS_MAP[name]["status"][os][python] = status
+     write_json_file(Path(REPORTS_DIR) / "notebooks-status-map.json", NOTEBOOKS_STATUS_MAP)
+
+
+ if __name__ == "__main__":
+     main()
.ci/check_links.py ADDED
@@ -0,0 +1,102 @@
+ #!/usr/bin/env python3
+
+ import sys
+ import mistune
+ import requests
+ import urllib.parse
+
+ from pathlib import Path
+
+ NOTEBOOKS_ROOT = Path(__file__).resolve().parents[1]
+
+ EXCEPTIONS_URLs = [
+     "medium.com",
+     "https://www.paddlepaddle.org.cn/",
+     "mybinder.org",
+     "https://arxiv.org",
+     "http://host.robots.ox.ac.uk",
+     "https://gitee.com/",
+     "https://openai.com/",
+ ]
+
+
+ def get_all_ast_nodes(ast_nodes):
+     for node in ast_nodes:
+         yield node
+         if "children" in node:
+             yield from get_all_ast_nodes(node["children"])
+
+
+ def get_all_references_from_md(md_path):
+     parse_markdown = mistune.create_markdown(renderer=mistune.AstRenderer())
+     ast = parse_markdown(md_path.read_text(encoding="UTF-8"))
+
+     for node in get_all_ast_nodes(ast):
+         if node["type"] == "image":
+             yield node["src"]
+         elif node["type"] == "link":
+             yield node["link"]
+
+
+ def validate_colab_url(url: str) -> None:
+     OPENVINO_COLAB_URL_PREFIX = "https://colab.research.google.com/github/openvinotoolkit/openvino_notebooks/blob/latest/"
+
+     if not url.startswith(OPENVINO_COLAB_URL_PREFIX):
+         return
+
+     notebook_path = url.split(OPENVINO_COLAB_URL_PREFIX)[1]
+     absolute_notebook_path = NOTEBOOKS_ROOT / notebook_path
+
+     if not absolute_notebook_path.exists():
+         raise ValueError(f"notebook not found for colab url {url!r}")
+
+
+ def main():
+     all_passed = True
+
+     def complain(message):
+         nonlocal all_passed
+         all_passed = False
+         print(message, file=sys.stderr)
+
+     for md_path in NOTEBOOKS_ROOT.glob("**/*README*.md"):
+         for url in get_all_references_from_md(md_path):
+             try:
+                 components = urllib.parse.urlparse(url)
+             except ValueError:
+                 complain(f"{md_path}: invalid URL reference {url!r}")
+                 continue
+
+             if not components.path:  # self-link
+                 continue
+
+             if not components.scheme and not components.netloc:
+                 # check if it is a relative path to a file in the repo
+                 file_name = md_path.parent / components.path
+                 if not file_name.exists():
+                     complain(f"{md_path}: invalid URL reference {url!r}")
+                 continue
+
+             try:
+                 validate_colab_url(url)
+             except ValueError as err:
+                 complain(f"{md_path}: {err}")
+
+             try:
+                 get = requests.get(url, timeout=10)
+                 if get.status_code != 200:
+                     if get.status_code in [500, 429, 443, 403] and any([known_url in url for known_url in EXCEPTIONS_URLs]):
+                         print(f"SKIP - {md_path}: URL cannot be reached {url!r}, status code {get.status_code}")
+                         continue
+                     complain(f"{md_path}: URL cannot be reached {url!r}, status code {get.status_code}")
+             except Exception as err:
+                 if any([known_url in url for known_url in EXCEPTIONS_URLs]):
+                     print(f"SKIP - {md_path}: URL cannot be reached {url!r}, error {err}")
+                 else:
+                     complain(f"{md_path}: URL cannot be reached {url!r}, error {err}")
+
+     sys.exit(0 if all_passed else 1)
+
+
+ if __name__ == "__main__":
+     main()
.ci/check_notebooks.py ADDED
@@ -0,0 +1,79 @@
+ import sys
+ import json
+ from table_of_content import find_tc_in_cell
+ from patch_notebooks import DEVICE_WIDGET
+ from pathlib import Path
+
+ NOTEBOOKS_ROOT = Path(__file__).resolve().parents[1]
+
+ EXPECTED_NO_DEVICE = [
+     Path("notebooks/auto-device/auto-device.ipynb"),  # auto device expected to be used
+     Path("notebooks/convert-to-openvino/convert-to-openvino.ipynb"),  # device-agnostic
+     Path("notebooks/convert-to-openvino/legacy-mo-convert-to-openvino.ipynb"),  # device-agnostic
+     Path("notebooks/gpu-device/gpu-device.ipynb"),  # gpu device expected to be used
+     Path("notebooks/hello-npu/hello-npu.ipynb"),  # npu device expected to be used
+     Path("notebooks/model-server/model-server.ipynb"),  # cannot change device in docker configuration on the fly
+     Path("notebooks/openvino-tokenizers/openvino-tokenizers.ipynb"),  # cpu required for loading extensions
+     Path("notebooks/sparsity-optimization/sparsity-optimization.ipynb"),  # cpu expected to be used
+ ]
+
+
+ def find_device_in_cell(cell):
+     for line_idx, line in enumerate(cell["source"]):
+         if DEVICE_WIDGET in line:
+             return line_idx
+     return None
+
+
+ def main():
+     all_passed = True
+     no_tocs = []
+     no_device = []
+
+     def complain(message):
+         nonlocal all_passed
+         all_passed = False
+         print(message, file=sys.stderr)
+
+     for nb_path in NOTEBOOKS_ROOT.glob("notebooks/**/*.ipynb"):
+         with open(nb_path, "r", encoding="utf-8") as notebook_file:
+             notebook_json = json.load(notebook_file)
+         toc_found = False
+         device_found = False
+         if nb_path.relative_to(NOTEBOOKS_ROOT) in EXPECTED_NO_DEVICE:
+             print(f"SKIPPED: {nb_path.relative_to(NOTEBOOKS_ROOT)} for device widget check")
+             device_found = True
+         for cell in notebook_json["cells"]:
+             if not toc_found and cell["cell_type"] == "markdown":
+                 tc_cell, tc_line = find_tc_in_cell(cell)
+                 if tc_line is not None:
+                     toc_found = True
+
+             if not device_found and find_device_in_cell(cell) is not None:
+                 device_found = True
+
+             if toc_found and device_found:
+                 break
+         if not toc_found:
+             no_tocs.append(str(nb_path.relative_to(NOTEBOOKS_ROOT)))
+             complain(f"FAILED: {nb_path.relative_to(NOTEBOOKS_ROOT)}: table of contents not found")
+         if not device_found:
+             no_device.append(str(nb_path.relative_to(NOTEBOOKS_ROOT)))
+             complain(f"FAILED: {nb_path.relative_to(NOTEBOOKS_ROOT)}: device widget not found")
+
+     if not all_passed:
+         print("SUMMARY:")
+         print("==================================")
+         if no_tocs:
+             print("NO TABLE OF CONTENTS:")
+             print("\n".join(no_tocs))
+             print("==================================")
+         if no_device:
+             print("NO DEVICE SELECTION:")
+             print("\n".join(no_device))
+
+     sys.exit(0 if all_passed else 1)
+
+
+ if __name__ == "__main__":
+     main()
.ci/ci-requirements.txt ADDED
@@ -0,0 +1,3 @@
+ -r dev-requirements.txt
+
+ pandoc
.ci/convert_notebooks.py ADDED
@@ -0,0 +1,132 @@
+ import argparse
+ import shutil
+ import subprocess  # nosec - disable B404:import-subprocess check
+ import time
+ from pathlib import Path
+ import nbformat
+
+
+ def disable_gradio_debug(notebook_path):
+     nb = nbformat.read(notebook_path, as_version=nbformat.NO_CONVERT)
+     found = False
+     for cell in nb["cells"]:
+         if "gradio" in cell["source"] and "debug" in cell["source"]:
+             found = True
+             cell["source"] = cell["source"].replace("debug=True", "debug=False")
+
+     if found:
+         print(f"Disabled gradio debug mode for {notebook_path}")
+         nbformat.write(nb, str(notebook_path), version=nbformat.NO_CONVERT)
+
+
+ def arguments():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--exclude_execution_file")
+     parser.add_argument("--exclude_conversion_file")
+     parser.add_argument("--timeout", type=float, default=7200, help="timeout for notebook execution")
+     parser.add_argument("--rst_dir", type=Path, help="rst files output directory", default=Path("rst"))
+
+     return parser.parse_args()
+
+
+ def prepare_ignore_list(input_file):
+     with Path(input_file).open("r") as f:
+         lines = f.readlines()
+     return list(map(str.strip, lines))
+
+
+ def main():
+     args = arguments()
+     ignore_conversion_list = []
+     ignore_execution_list = []
+     failed_notebooks = []
+     rst_failed = []
+     if args.exclude_conversion_file is not None:
+         ignore_conversion_list = prepare_ignore_list(args.exclude_conversion_file)
+     if args.exclude_execution_file is not None:
+         ignore_execution_list = prepare_ignore_list(args.exclude_execution_file)
+     root = Path(__file__).parents[1]
+     notebooks_dir = root / "notebooks"
+     notebooks = sorted(list(notebooks_dir.rglob("**/*.ipynb")))
+     for notebook in notebooks:
+         notebook_path = notebook.relative_to(root)
+         if str(notebook_path) in ignore_conversion_list:
+             continue
+         disable_gradio_debug(notebook_path)
+         notebook_executed = notebook_path.parent / notebook_path.name.replace(".ipynb", "-with-output.ipynb")
+         start = time.perf_counter()
+         print(f"Convert {notebook_path}")
+         if str(notebook_path) not in ignore_execution_list:
+             try:
+                 retcode = subprocess.run(
+                     [
+                         "jupyter",
+                         "nbconvert",
+                         "--log-level=INFO",
+                         "--execute",
+                         "--to",
+                         "notebook",
+                         "--output",
+                         str(notebook_executed),
+                         "--output-dir",
+                         str(root),
+                         "--ExecutePreprocessor.kernel_name=python3",
+                         str(notebook_path),
+                     ],
+                     timeout=args.timeout,
+                 ).returncode
+             except subprocess.TimeoutExpired:
+                 retcode = -42
+                 print(f"TIMEOUT: {notebook_path}")
+             if retcode:
+                 failed_notebooks.append(str(notebook_path))
+                 continue
+         else:
+             shutil.copyfile(notebook_path, notebook_executed)
+         rst_retcode = subprocess.run(
+             [
+                 "jupyter",
+                 "nbconvert",
+                 "--to",
+                 "rst",
+                 str(notebook_executed),
+                 "--output-dir",
+                 str(args.rst_dir),
+                 "--TagRemovePreprocessor.remove_all_outputs_tags=hide_output",
+                 "--TagRemovePreprocessor.enabled=True",
+             ],
+             timeout=args.timeout,
+         ).returncode
+         notebook_rst = args.rst_dir / notebook_executed.name.replace(".ipynb", ".rst")
+         # remove all non-printable characters
+         subprocess.run(
+             [
+                 "sed",
+                 "-i",
+                 "-e",
+                 "s/\x1b\[[0-9;]*m//g",
+                 "-e",
+                 "s/\x1b\[?25h//g",
+                 "-e",
+                 "s/\x1b\[?25l//g",
+                 str(notebook_rst),
+             ],
+             timeout=args.timeout,
+         )
+
+         end = time.perf_counter() - start
+         print(f"Notebook conversion took: {end:.4f} s")
+         if rst_retcode:
+             rst_failed.append(str(notebook_path))
+
+     if failed_notebooks:
+         print("EXECUTION FAILED:")
+         print("\n".join(failed_notebooks))
+
+     if rst_failed:
+         print("RST CONVERSION FAILED:")
+         print("\n".join(rst_failed))
+
+
+ if __name__ == "__main__":
+     main()
.ci/convert_notebooks.sh ADDED
@@ -0,0 +1,31 @@
+ # Execute notebooks and convert them to Markdown and HTML
+ # Output from notebook cells with tag "hide_output" will be hidden in converted notebooks
+
+ ignore_list=$*
+ rstdir=$PWD"/rst_files"
+ binderlist=$rstdir"/notebooks_with_binder_buttons.txt"
+ colablist=$rstdir"/notebooks_with_colab_buttons.txt"
+ notebooklist=$rstdir"/all_notebooks_paths.txt"
+ tagslist=$rstdir"/notebooks_tags.json"
+ mkdir -p $rstdir
+
+ # List all notebooks that contain binder or colab buttons based on readme
+ for n in $(git ls-files '*.md'); do
+     cat $n | grep -oP "https://mybinder.org/v2.*?-.*?ipynb" | sed 's#%2F#/#g' | sed -e 's|[^/]*/||g' -e 's|.ipynb$||' | sort | uniq >> $binderlist
+     cat $n | grep -oP "https://colab.research.google.com/github.*?-.*?ipynb" | sed -e 's|[^/]*/||g' -e 's|.ipynb$||' | sort | uniq >> $colablist
+ done
+ find notebooks -maxdepth 2 -name "*.ipynb" | sort > $notebooklist
+ taggerpath=$(git ls-files "*tagger.py")
+ notebookspath=$(git ls-files "*.ipynb" | head -n 1)
+ keywordspath=$(git ls-files "*keywords.json")
+ python $taggerpath $notebookspath $keywordspath > $tagslist
+
+ echo "start converting notebooks"
+ python $PWD"/.ci/convert_notebooks.py" --rst_dir $rstdir --exclude_execution_file $PWD"/.ci/ignore_convert_execution.txt"
+
+ # Remove download links to local files. They only work after executing the notebook
+ # Replace relative links to other notebooks with relative links to documentation HTML pages
+ for f in "$rstdir"/*.rst; do
+     sed -i "s/<a href=[\'\"][^%].*download>\(.*\)<\/a>/\1/" "$f"
+     sed -r -i "s/(<)\.\.\/(.*)\/.*ipynb(>)/\1\2-with-output.html\3/g" "$f"
+ done
.ci/dev-requirements.txt ADDED
@@ -0,0 +1,16 @@
+ # Recommended packages for notebook developers
+
+ # black==21.8 requires typing-extensions>3.10 which is incompatible
+ # with other packages
+ -r ../requirements.txt
+ black[jupyter]==24.3.0 # format Python code
+ isort # sort imports
+ jupyterlab-code-formatter # format code in notebooks in Jupyter Lab
+ jupyterlab-git # checkout and commit code in Jupyter Lab
+ nbqa[toolchain] # automatic code quality checking
+ nbval # pytest plugin for automatic notebook testing
+ treon # test framework for Jupyter Notebooks
+ toml # used for validating docker requirements
+ mistune==2.0.4 # used for parsing README.md
+ requests==2.32.0 # used for checking links
+ pyspelling # automates spell checking
.ci/heavy_ubuntu_gpu.txt ADDED
@@ -0,0 +1,22 @@
+ notebooks/decidiffusion-image-generation/decidiffusion-image-generation.ipynb
+ notebooks/distil-whisper-asr/distil-whisper-asr.ipynb
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/grounded-segment-anything/grounded-segment-anything.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/knowledge-graphs-conve/knowledge-graphs-conve.ipynb
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb
+ notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/paint-by-example/paint-by-example.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/quantizing-model-with-accuracy-control/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+ notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-data2vec.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image.ipynb
+ notebooks/style-transfer-webcam/style-transfer.ipynb
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb
+ notebooks/whisper-subtitles-generation/whisper-nncf-quantize.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
+ notebooks/zeroscope-text2video/zeroscope-text2video.ipynb
.ci/heavy_win_gpu.txt ADDED
@@ -0,0 +1,8 @@
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb
+ notebooks/knowledge-graphs-conve/knowledge-graphs-conve.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/sdxl-turbo/sdxl-turbo.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
.ci/ignore_convert_execution.txt ADDED
@@ -0,0 +1,64 @@
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-data2vec.ipynb
+ notebooks/gpu-device/gpu-device.ipynb
+ notebooks/model-server/model-server.ipynb
+ notebooks/quantizing-model-with-accuracy-control/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+ notebooks/speech-to-text/speech-to-text.ipynb
+ notebooks/grammar-correction/grammar-correction.ipynb
+ notebooks/image-inpainting/image-inpainting.ipynb
+ notebooks/cross-lingual-books-alignment/cross-lingual-books-alignment.ipynb
+ notebooks/stable-diffusion-text-to-image/stable-diffusion-text-to-image.ipynb
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb
+ notebooks/whisper-subtitles-generation/whisper-nncf-quantize.ipynb
+ notebooks/clip-zero-shot-image-classification/clip-zero-shot-classification.ipynb
+ notebooks/yolov8-optimization/yolov8-instance-segmentation.ipynb
+ notebooks/yolov8-optimization/yolov8-keypoint-detection.ipynb
+ notebooks/yolov8-optimization/yolov8-object-detection.ipynb
+ notebooks/yolov8-optimization/yolov8-obb.ipynb
+ notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb
+ notebooks/clip-language-saliency-map/clip-language-saliency-map.ipynb
+ notebooks/blip-visual-language-processing/blip-visual-language-processing.ipynb
+ notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-infinite-zoom.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-optimum-demo-comparison.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-optimum-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image-demo.ipynb
+ notebooks/segment-anything/segment-anything.ipynb
+ notebooks/image-bind/image-bind.ipynb
+ notebooks/dolly-2-instruction-following/dolly-2-instruction-following.ipynb
+ notebooks/riffusion-text-to-music/riffusion-text-to-music.ipynb
+ notebooks/named-entity-recognition/named-entity-recognition.ipynb
+ notebooks/stable-diffusion-xl/stable-diffusion-xl.ipynb
+ notebooks/stable-diffusion-xl/segmind-vegart.ipynb
+ notebooks/stable-diffusion-xl/ssd-b1.ipynb
+ notebooks/oneformer-segmentation/oneformer-segmentation.ipynb
+ notebooks/tiny-sd-image-generation/tiny-sd-image-generation.ipynb
+ notebooks/zeroscope-text2video/zeroscope-text2video.ipynb
+ notebooks/llm-chatbot/llm-chatbot.ipynb
+ notebooks/llm-rag-langchain/llm-rag-langchain.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/bark-text-to-audio/bark-text-to-audio.ipynb
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb
+ notebooks/llava-multimodal-chatbot/videollava-multimodal-chatbot.ipynb
+ notebooks/decidiffusion-image-generation/decidiffusion-image-generation.ipynb
+ notebooks/pix2struct-docvqa/pix2struct-docvqa.ipynb
+ notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-image-generation.ipynb
+ notebooks/latent-consistency-models-image-generation/lcm-lora-controlnet.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
+ notebooks/speculative-sampling/speculative-sampling.ipynb
+ notebooks/distil-whisper-asr/distil-whisper-asr.ipynb
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/sound-generation-audioldm2/sound-generation-audioldm2.ipynb
+ notebooks/sdxl-turbo/sdxl-turbo.ipynb
+ notebooks/paint-by-example/paint-by-example.ipynb
+ notebooks/stable-zephyr-3b-chatbot/stable-zephyr-3b-chatbot.ipynb
+ notebooks/llm-question-answering/llm-question-answering.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb
+ notebooks/stable-video-diffusion/stable-video-diffusion.ipynb
+ notebooks/llm-agent-langchain/llm-agent-langchain.ipynb
+ notebooks/hello-npu/hello-npu.ipynb
+ notebooks/yolov10-optimization/yolov10-optimization.ipynb
.ci/ignore_convert_full.txt ADDED
@@ -0,0 +1,2 @@
+ notebooks/ct-segmentation-quantize/data-preparation-ct-scan.ipynb
+ notebooks/ct-segmentation-quantize/pytorch-monai-training.ipynb
.ci/ignore_pip_conflicts.txt ADDED
@@ -0,0 +1,29 @@
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb # ultralytics==8.0.43
+ notebooks/detectron2-to-openvino/detectron2-to-openvino.ipynb # detectron2@https://github.com/facebookresearch/detectron2
+ notebooks/speech-to-text/speech-to-text.ipynb # numpy<1.24
+ notebooks/pyannote-speaker-diarization/pyannote-speaker-diarization.ipynb # pyannote-audio@https://github.com/eaidova/pyannote-audio
+ notebooks/yolov8-optimization/yolov8-instance-segmentation.ipynb # ultralytics==8.0.43
+ notebooks/yolov8-optimization/yolov8-keypoint-detection.ipynb # ultralytics==8.0.159
+ notebooks/yolov8-optimization/yolov8-object-detection.ipynb # ultralytics==8.0.43
+ notebooks/yolov8-optimization/yolov8-obb.ipynb # ultralytics==8.1.24
+ notebooks/llm-chatbot/llm-chatbot.ipynb # nncf@https://github.com/openvinotoolkit/nncf/tree/release_v280
+ notebooks/llm-rag-langchain/llm-rag-langchain.ipynb # nncf@https://github.com/openvinotoolkit/nncf/tree/release_v280
+ notebooks/bark-text-to-audio/bark-text-to-audio.ipynb # torch==1.13
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb # transformers<4.35
+ notebooks/llava-multimodal-chatbot/videollava-multimodal-chatbot.ipynb # transformers<4.35
+ notebooks/paint-by-example/paint-by-example.ipynb # gradio==3.44.1
+ notebooks/stable-zephyr-3b-chatbot/stable-zephyr-3b-chatbot.ipynb # install requirements.txt after clone repo
+ notebooks/mobilevlm-language-assistant/mobilevlm-language-assistant.ipynb # transformers<4.35
+ notebooks/depth-anything/depth-anything.ipynb # install requirements.txt after clone repo
+ notebooks/surya-line-level-text-detection/surya-line-level-text-detection.ipynb # requires python >=3.9
+ notebooks/mobileclip-video-search/mobileclip-video-search.ipynb # install requirements.txt inside
+ notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb # requires python >=3.9
+ notebooks/triposr-3d-reconstruction/triposr-3d-reconstruction.ipynb # requires torch that installing in the notebook
+ notebooks/animate-anyone/animate-anyone.ipynb # diffusers<=0.24
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb # pytube
+ notebooks/grounded-segment-anything/grounded-segment-anything.ipynb # deprecated installer
+ notebooks/vision-paddlegan-anime/vision-paddlegan-anime.ipynb # old scipy
+ notebooks/vision-paddlegan-superresolution/vision-paddlegan-superresolution.ipynb # old scipy and scikit-image
+ notebooks/stable-diffusion-torchdynamo-backend/stable-diffusion-torchdynamo-backend.ipynb
+ notebooks/sketch-to-image-pix2pix-turbo/sketch-to-image-pix2pix-turbo.ipynb
+ notebooks/yolov10-optimization/yolov10-optimization.ipynb
.ci/ignore_treon_docker.txt ADDED
@@ -0,0 +1,71 @@
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-data2vec.ipynb
+ notebooks/gpu-device/gpu-device.ipynb
+ notebooks/model-server/model-server.ipynb
+ notebooks/quantizing-model-with-accuracy-control/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+ notebooks/big-transfer-quantization/tensorflow-bit-image-classification-nncf-quantization.ipynb
+ notebooks/cross-lingual-books-alignment/cross-lingual-books-alignment.ipynb
+ notebooks/stable-diffusion-text-to-image/stable-diffusion-text-to-image.ipynb
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb
+ notebooks/whisper-subtitles-generation/whisper-nncf-quantize.ipynb
+ notebooks/clip-zero-shot-image-classification/clip-zero-shot-classification.ipynb
+ notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb
+ notebooks/blip-visual-language-processing/blip-visual-language-processing.ipynb
+ notebooks/encodec-audio-compression/encodec-audio-compression.ipynb
+ notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-optimum-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-infinite-zoom.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image.ipynb
+ notebooks/segment-anything/segment-anything.ipynb
+ notebooks/image-bind/image-bind.ipynb
+ notebooks/dolly-2-instruction-following/dolly-2-instruction-following.ipynb
+ notebooks/riffusion-text-to-music/riffusion-text-to-music.ipynb
+ notebooks/freevc-voice-conversion/freevc-voice-conversion.ipynb
+ notebooks/stable-diffusion-xl/stable-diffusion-xl.ipynb
+ notebooks/stable-diffusion-xl/segmind-vegart.ipynb
+ notebooks/oneformer-segmentation/oneformer-segmentation.ipynb
+ notebooks/music-generation/music-generation.ipynb
+ notebooks/tiny-sd-image-generation/tiny-sd-image-generation.ipynb
+ notebooks/zeroscope-text2video/zeroscope-text2video.ipynb
+ notebooks/llm-chatbot/llm-chatbot.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/bark-text-to-audio/bark-text-to-audio.ipynb
+ notebooks/llava-multimodal-chatbot/videollava-multimodal-chatbot.ipynb
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb
+ notebooks/decidiffusion-image-generation/decidiffusion-image-generation.ipynb
+ notebooks/pix2struct-docvqa/pix2struct-docvqa.ipynb
+ notebooks/fast-segment-anything/fast-segment-anything.ipynb
+ notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-image-generation.ipynb
+ notebooks/latent-consistency-models-image-generation/lcm-lora-controlnet.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-optimum-demo.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
+ notebooks/speculative-sampling/speculative-sampling.ipynb
+ notebooks/distil-whisper-asr/distil-whisper-asr.ipynb
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/sound-generation-audioldm2/sound-generation-audioldm2.ipynb
+ notebooks/sdxl-turbo/sdxl-turbo.ipynb
+ notebooks/paint-by-example/paint-by-example.ipynb
+ notebooks/stable-zephyr-3b-chatbot/stable-zephyr-3b-chatbot.ipynb
+ notebooks/llm-question-answering/llm-question-answering.ipynb
+ notebooks/stable-diffusion-torchdynamo-backend/stable-diffusion-torchdynamo-backend.ipynb
+ notebooks/stable-diffusion-ip-adapter/stable-diffusion-ip-adapter.ipynb
+ notebooks/kosmos2-multimodal-large-language-model/kosmos2-multimodal-large-language-model.ipynb
+ notebooks/photo-maker/photo-maker.ipynb
+ notebooks/openvoice/openvoice.ipynb
+ notebooks/surya-line-level-text-detection/surya-line-level-text-detection.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
+ notebooks/tensorflow-quantization-aware-training/tensorflow-quantization-aware-training.ipynb
+ notebooks/animate-anyone/animate-anyone.ipynb
+ notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb
+ notebooks/llm-rag-langchain/llm-rag-langchain.ipynb
+ notebooks/stable-video-diffusion/stable-video-diffusion.ipynb
+ notebooks/llm-agent-langchain/llm-agent-langchain.ipynb
+ notebooks/hello-npu/hello-npu.ipynb
+ notebooks/stable-cascade-image-generation/stable-cascade-image-generation.ipynb
+ notebooks/dynamicrafter-animating-images/dynamicrafter-animating-images.ipynb
+ notebooks/yolov10-optimization/yolov10-optimization.ipynb
.ci/ignore_treon_linux.txt ADDED
@@ -0,0 +1,72 @@
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-data2vec.ipynb
+ notebooks/gpu-device/gpu-device.ipynb
+ notebooks/pytorch-post-training-quantization-nncf/pytorch-post-training-quantization-nncf.ipynb
+ notebooks/model-server/model-server.ipynb
+ notebooks/quantizing-model-with-accuracy-control/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+ notebooks/big-transfer-quantization/tensorflow-bit-image-classification-nncf-quantization.ipynb
+ notebooks/grammar-correction/grammar-correction.ipynb
+ notebooks/cross-lingual-books-alignment/cross-lingual-books-alignment.ipynb
+ notebooks/stable-diffusion-text-to-image/stable-diffusion-text-to-image.ipynb
+ notebooks/yolov7-optimization/yolov7-optimization.ipynb
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb
+ notebooks/whisper-subtitles-generation/whisper-nncf-quantize.ipynb
+ notebooks/clip-zero-shot-image-classification/clip-zero-shot-classification.ipynb
+ notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb
+ notebooks/blip-visual-language-processing/blip-visual-language-processing.ipynb
+ notebooks/encodec-audio-compression/encodec-audio-compression.ipynb
+ notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-optimum-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-infinite-zoom.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image.ipynb
+ notebooks/segment-anything/segment-anything.ipynb
+ notebooks/image-bind/image-bind.ipynb
+ notebooks/dolly-2-instruction-following/dolly-2-instruction-following.ipynb
+ notebooks/riffusion-text-to-music/riffusion-text-to-music.ipynb
+ notebooks/freevc-voice-conversion/freevc-voice-conversion.ipynb
+ notebooks/stable-diffusion-xl/stable-diffusion-xl.ipynb
+ notebooks/stable-diffusion-xl/segmind-vegart.ipynb
+ notebooks/oneformer-segmentation/oneformer-segmentation.ipynb
+ notebooks/music-generation/music-generation.ipynb
+ notebooks/tiny-sd-image-generation/tiny-sd-image-generation.ipynb
+ notebooks/zeroscope-text2video/zeroscope-text2video.ipynb
+ notebooks/llm-chatbot/llm-chatbot.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/bark-text-to-audio/bark-text-to-audio.ipynb
+ notebooks/llava-multimodal-chatbot/videollava-multimodal-chatbot.ipynb
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb
+ notebooks/decidiffusion-image-generation/decidiffusion-image-generation.ipynb
+ notebooks/pix2struct-docvqa/pix2struct-docvqa.ipynb
+ notebooks/fast-segment-anything/fast-segment-anything.ipynb
+ notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-image-generation.ipynb
+ notebooks/latent-consistency-models-image-generation/lcm-lora-controlnet.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-optimum-demo.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
+ notebooks/speculative-sampling/speculative-sampling.ipynb
+ notebooks/distil-whisper-asr/distil-whisper-asr.ipynb
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/sound-generation-audioldm2/sound-generation-audioldm2.ipynb
+ notebooks/sdxl-turbo/sdxl-turbo.ipynb
+ notebooks/paint-by-example/paint-by-example.ipynb
+ notebooks/stable-zephyr-3b-chatbot/stable-zephyr-3b-chatbot.ipynb
+ notebooks/llm-question-answering/llm-question-answering.ipynb
+ notebooks/stable-diffusion-torchdynamo-backend/stable-diffusion-torchdynamo-backend.ipynb
+ notebooks/stable-diffusion-ip-adapter/stable-diffusion-ip-adapter.ipynb
+ notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
+ notebooks/kosmos2-multimodal-large-language-model/kosmos2-multimodal-large-language-model.ipynb
+ notebooks/photo-maker/photo-maker.ipynb
+ notebooks/openvoice/openvoice.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/animate-anyone/animate-anyone.ipynb
+ notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb
+ notebooks/llm-rag-langchain/llm-rag-langchain.ipynb
+ notebooks/stable-video-diffusion/stable-video-diffusion.ipynb
+ notebooks/llm-agent-langchain/llm-agent-langchain.ipynb
+ notebooks/hello-npu/hello-npu.ipynb
+ notebooks/stable-cascade-image-generation/stable-cascade-image-generation.ipynb
+ notebooks/dynamicrafter-animating-images/dynamicrafter-animating-images.ipynb
+ notebooks/yolov10-optimization/yolov10-optimization.ipynb
.ci/ignore_treon_mac.txt ADDED
@@ -0,0 +1,74 @@
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-data2vec.ipynb
+ notebooks/gpu-device/gpu-device.ipynb
+ notebooks/model-server/model-server.ipynb
+ notebooks/quantizing-model-with-accuracy-control/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+ notebooks/big-transfer-quantization/tensorflow-bit-image-classification-nncf-quantization.ipynb
+ notebooks/openvino-tokenizers/openvino-tokenizers.ipynb
+ notebooks/cross-lingual-books-alignment/cross-lingual-books-alignment.ipynb
+ notebooks/stable-diffusion-text-to-image/stable-diffusion-text-to-image.ipynb
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb
+ notebooks/whisper-subtitles-generation/whisper-nncf-quantize.ipynb
+ notebooks/clip-zero-shot-image-classification/clip-zero-shot-classification.ipynb
+ notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb
+ notebooks/blip-visual-language-processing/blip-visual-language-processing.ipynb
+ notebooks/encodec-audio-compression/encodec-audio-compression.ipynb
+ notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-optimum-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-infinite-zoom.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image.ipynb
+ notebooks/segment-anything/segment-anything.ipynb
+ notebooks/image-bind/image-bind.ipynb
+ notebooks/dolly-2-instruction-following/dolly-2-instruction-following.ipynb
+ notebooks/riffusion-text-to-music/riffusion-text-to-music.ipynb
+ notebooks/freevc-voice-conversion/freevc-voice-conversion.ipynb
+ notebooks/stable-diffusion-xl/stable-diffusion-xl.ipynb
+ notebooks/stable-diffusion-xl/segmind-vegart.ipynb
+ notebooks/oneformer-segmentation/oneformer-segmentation.ipynb
+ notebooks/music-generation/music-generation.ipynb
+ notebooks/tiny-sd-image-generation/tiny-sd-image-generation.ipynb
+ notebooks/zeroscope-text2video/zeroscope-text2video.ipynb
+ notebooks/llm-chatbot/llm-chatbot.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/bark-text-to-audio/bark-text-to-audio.ipynb
+ notebooks/llava-multimodal-chatbot/videollava-multimodal-chatbot.ipynb
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb
+ notebooks/decidiffusion-image-generation/decidiffusion-image-generation.ipynb
+ notebooks/pix2struct-docvqa/pix2struct-docvqa.ipynb
+ notebooks/fast-segment-anything/fast-segment-anything.ipynb
+ notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-image-generation.ipynb
+ notebooks/latent-consistency-models-image-generation/lcm-lora-controlnet.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-optimum-demo.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
+ notebooks/speculative-sampling/speculative-sampling.ipynb
+ notebooks/distil-whisper-asr/distil-whisper-asr.ipynb
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/sound-generation-audioldm2/sound-generation-audioldm2.ipynb
+ notebooks/sdxl-turbo/sdxl-turbo.ipynb
+ notebooks/paint-by-example/paint-by-example.ipynb
+ notebooks/stable-zephyr-3b-chatbot/stable-zephyr-3b-chatbot.ipynb
+ notebooks/llm-question-answering/llm-question-answering.ipynb
+ notebooks/stable-diffusion-torchdynamo-backend/stable-diffusion-torchdynamo-backend.ipynb
+ notebooks/stable-diffusion-ip-adapter/stable-diffusion-ip-adapter.ipynb
+ notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
+ notebooks/mobilevlm-language-assistant/mobilevlm-language-assistant.ipynb
+ notebooks/kosmos2-multimodal-large-language-model/kosmos2-multimodal-large-language-model.ipynb
+ notebooks/photo-maker/photo-maker.ipynb
+ notebooks/openvoice/openvoice.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/grounded-segment-anything/grounded-segment-anything.ipynb
+ notebooks/triposr-3d-reconstruction/triposr-3d-reconstruction.ipynb
+ notebooks/animate-anyone/animate-anyone.ipynb
+ notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb
+ notebooks/llm-rag-langchain/llm-rag-langchain.ipynb
+ notebooks/stable-video-diffusion/stable-video-diffusion.ipynb
+ notebooks/llm-agent-langchain/llm-agent-langchain.ipynb
+ notebooks/hello-npu/hello-npu.ipynb
+ notebooks/stable-cascade-image-generation/stable-cascade-image-generation.ipynb
+ notebooks/dynamicrafter-animating-images/dynamicrafter-animating-images.ipynb
+ notebooks/yolov10-optimization/yolov10-optimization.ipynb
+ notebooks/nano-llava-multimodal-chatbot/nano-llava-multimodal-chatbot.ipynb
.ci/ignore_treon_py38.txt ADDED
@@ -0,0 +1,2 @@
+ notebooks/surya-line-level-text-detection/surya-line-level-text-detection.ipynb
+ notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
.ci/ignore_treon_win.txt ADDED
@@ -0,0 +1,70 @@
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/speech-recognition-quantization/speech-recognition-quantization-data2vec.ipynb
+ notebooks/gpu-device/gpu-device.ipynb
+ notebooks/model-server/model-server.ipynb
+ notebooks/quantizing-model-with-accuracy-control/speech-recognition-quantization-wav2vec2.ipynb
+ notebooks/quantizing-model-with-accuracy-control/yolov8-quantization-with-accuracy-control.ipynb
+ notebooks/big-transfer-quantization/tensorflow-bit-image-classification-nncf-quantization.ipynb
+ notebooks/cross-lingual-books-alignment/cross-lingual-books-alignment.ipynb
+ notebooks/text-prediction/text-prediction.ipynb
+ notebooks/stable-diffusion-text-to-image/stable-diffusion-text-to-image.ipynb
+ notebooks/whisper-subtitles-generation/whisper-convert.ipynb
+ notebooks/whisper-subtitles-generation/whisper-nncf-quantize.ipynb
+ notebooks/clip-zero-shot-image-classification/clip-zero-shot-classification.ipynb
+ notebooks/instruct-pix2pix-image-editing/instruct-pix2pix-image-editing.ipynb
+ notebooks/blip-visual-language-processing/blip-visual-language-processing.ipynb
+ notebooks/encodec-audio-compression/encodec-audio-compression.ipynb
+ notebooks/controlnet-stable-diffusion/controlnet-stable-diffusion.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-optimum-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-infinite-zoom.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image-demo.ipynb
+ notebooks/stable-diffusion-v2/stable-diffusion-v2-text-to-image.ipynb
+ notebooks/segment-anything/segment-anything.ipynb
+ notebooks/image-bind/image-bind.ipynb
+ notebooks/dolly-2-instruction-following/dolly-2-instruction-following.ipynb
+ notebooks/riffusion-text-to-music/riffusion-text-to-music.ipynb
+ notebooks/freevc-voice-conversion/freevc-voice-conversion.ipynb
+ notebooks/stable-diffusion-xl/stable-diffusion-xl.ipynb
+ notebooks/stable-diffusion-xl/segmind-vegart.ipynb
+ notebooks/oneformer-segmentation/oneformer-segmentation.ipynb
+ notebooks/music-generation/music-generation.ipynb
+ notebooks/tiny-sd-image-generation/tiny-sd-image-generation.ipynb
+ notebooks/zeroscope-text2video/zeroscope-text2video.ipynb
+ notebooks/llm-chatbot/llm-chatbot.ipynb
+ notebooks/mms-massively-multilingual-speech/mms-massively-multilingual-speech.ipynb
+ notebooks/bark-text-to-audio/bark-text-to-audio.ipynb
+ notebooks/llava-multimodal-chatbot/videollava-multimodal-chatbot.ipynb
+ notebooks/llava-multimodal-chatbot/llava-multimodal-chatbot.ipynb
+ notebooks/decidiffusion-image-generation/decidiffusion-image-generation.ipynb
+ notebooks/pix2struct-docvqa/pix2struct-docvqa.ipynb
+ notebooks/fast-segment-anything/fast-segment-anything.ipynb
+ notebooks/softvc-voice-conversion/softvc-voice-conversion.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-image-generation.ipynb
+ notebooks/latent-consistency-models-image-generation/lcm-lora-controlnet.ipynb
+ notebooks/latent-consistency-models-image-generation/latent-consistency-models-optimum-demo.ipynb
+ notebooks/qrcode-monster/qrcode-monster.ipynb
+ notebooks/wuerstchen-image-generation/wuerstchen-image-generation.ipynb
+ notebooks/speculative-sampling/speculative-sampling.ipynb
+ notebooks/distil-whisper-asr/distil-whisper-asr.ipynb
+ notebooks/film-slowmo/film-slowmo.ipynb
+ notebooks/sound-generation-audioldm2/sound-generation-audioldm2.ipynb
+ notebooks/sdxl-turbo/sdxl-turbo.ipynb
+ notebooks/stable-diffusion-keras-cv/stable-diffusion-keras-cv.ipynb
+ notebooks/paint-by-example/paint-by-example.ipynb
+ notebooks/stable-zephyr-3b-chatbot/stable-zephyr-3b-chatbot.ipynb
+ notebooks/llm-question-answering/llm-question-answering.ipynb
+ notebooks/stable-diffusion-torchdynamo-backend/stable-diffusion-torchdynamo-backend.ipynb
+ notebooks/stable-diffusion-ip-adapter/stable-diffusion-ip-adapter.ipynb
+ notebooks/kosmos2-multimodal-large-language-model/kosmos2-multimodal-large-language-model.ipynb
+ notebooks/photo-maker/photo-maker.ipynb
+ notebooks/openvoice/openvoice.ipynb
+ notebooks/instant-id/instant-id.ipynb
+ notebooks/animate-anyone/animate-anyone.ipynb
+ notebooks/llava-next-multimodal-chatbot/llava-next-multimodal-chatbot.ipynb
+ notebooks/llm-rag-langchain/llm-rag-langchain.ipynb
+ notebooks/stable-video-diffusion/stable-video-diffusion.ipynb
+ notebooks/llm-agent-langchain/llm-agent-langchain.ipynb
+ notebooks/hello-npu/hello-npu.ipynb
+ notebooks/stable-cascade-image-generation/stable-cascade-image-generation.ipynb
+ notebooks/dynamicrafter-animating-images/dynamicrafter-animating-images.ipynb
+ notebooks/yolov10-optimization/yolov10-optimization.ipynb
.ci/keywords.json ADDED
@@ -0,0 +1,27 @@
+ {
+     "tags": {
+         "GPU": ["device_name = \"GPU\"", "device_name=\"GPU\""],
+         "Auto device": ["device_name=\"AUTO\"", "device_name = \"AUTO\""],
+
+         "Dynamic Shape": [".partial_shape", "Dimension("],
+         "Reshape Model": ["model.reshape("],
+         "Async Inference": [".start_async("],
+
+         "Download Model": ["omz_downloader"],
+         "Convert Model": ["omz_converter"],
+         "Optimize Model": ["import openvino.tools.mo", "from openvino.tools.mo", "!mo "],
+         "Benchmark Model": ["benchmark_app"],
+         "OMZ Info Dumper": ["omz_info_dumper"],
+
+         "Paddle": ["import paddle", "from paddle"],
+         "Torchvision": ["import torchvision", "from torchvision"],
+         "Compression": ["import compression", "from compression"],
+         "Pytorch": ["import torch", "from torch"],
+         "NNCF": ["import nncf", "from nncf"],
+         "Transformers": ["import transformers", "from transformers"],
+         "Tensorflow": ["import tensorflow", "from tensorflow"],
+
+         "ONNX": [".onnx"],
+         "Train Model": ["model.fit(", "model.train()"]
+     }
+ }
.ci/patch_notebooks.py ADDED
@@ -0,0 +1,168 @@
+ import argparse
+ import re
+ from pathlib import Path
+ import nbformat
+ import nbconvert
+ from traitlets.config import Config
+
+
+ # Notebooks that are excluded from the CI tests
+ EXCLUDED_NOTEBOOKS = ["data-preparation-ct-scan.ipynb", "pytorch-monai-training.ipynb"]
+
+ DEVICE_WIDGET = "device = widgets.Dropdown("
+
+
+ def disable_gradio_debug(nb, notebook_path):
+     found = False
+     for cell in nb["cells"]:
+         if "gradio" in cell["source"] and "debug" in cell["source"]:
+             found = True
+             cell["source"] = cell["source"].replace("debug=True", "debug=False")
+
+     if found:
+         print(f"Disabled gradio debug mode for {notebook_path}")
+     return nb
+
+
+ def disable_skip_ext(nb, notebook_path, test_device=""):
+     found = False
+
+     skip_for_device = None if test_device else False
+     for cell in nb["cells"]:
+         if test_device is not None and skip_for_device is None:
+             if (
+                 'skip_for_device = "{}" in device.value'.format(test_device) in cell["source"]
+                 and "to_quantize = widgets.Checkbox(value=not skip_for_device" in cell["source"]
+             ):
+                 skip_for_device = True
+
+         if "%%skip" in cell["source"]:
+             found = True
+             if not skip_for_device:
+                 cell["source"] = re.sub(r"%%skip.*.\n", "\n", cell["source"])
+             else:
+                 cell["source"] = '"""\n' + cell["source"] + '\n"""'
+     if found:
+         print(f"Disabled skip extension mode for {notebook_path}")
+     return nb
+
+
+ def remove_ov_install(cell):
+     updated_lines = []
+
+     def has_additional_deps(str_part):
+         if "%pip" in str_part:
+             return False
+         if "install" in str_part:
+             return False
+         if str_part.startswith("-"):
+             return False
+         if str_part.startswith("https://"):
+             return False
+         return True
+
+     lines = cell["source"].split("\n")
+     for line in lines:
+         if "openvino" in line:
+             updated_line_content = []
+             empty = True
+             package_found = False
+             for part in line.split(" "):
+                 if "openvino-dev" in part:
+                     package_found = True
+                     continue
+                 if "openvino-nightly" in part:
+                     package_found = True
+                     continue
+                 if "openvino-tokenizers" in part:
+                     package_found = True
+                     continue
+                 if "openvino>" in part or "openvino=" in part or "openvino" == part:
+                     package_found = True
+                     continue
+                 if empty:
+                     empty = not has_additional_deps(part)
+                 updated_line_content.append(part)
+
+             if package_found:
+                 if not empty:
+                     updated_line = " ".join(updated_line_content)
+                     if line.startswith(" "):
+                         for token in line:
+                             if token != " ":
+                                 break
+                             # keep indentation
+                             updated_line = " " + updated_line
+                     updated_lines.append(updated_line + "\n# " + line)
+             else:
+                 updated_lines.append(line)
+         else:
+             updated_lines.append(line)
+     cell["source"] = "\n".join(updated_lines)
+
+
+ def patch_notebooks(notebooks_dir, test_device="", skip_ov_install=False):
+     """
+     Patch notebooks in notebooks directory with replacement values
+     found in notebook metadata to speed up test execution.
+     This function is specific for the OpenVINO notebooks
+     Github Actions CI.
+
+     For example: change nr of epochs from 15 to 1 in
+     tensorflow-training-openvino-nncf.ipynb by adding
+     {"test_replace": {"epochs = 15": "epochs = 1"}} to the cell
+     metadata of the cell that contains `epochs = 15`
+
+     :param notebooks_dir: Directory that contains the notebook subdirectories.
+                           For example: openvino_notebooks/notebooks
+     """
+
+     nb_convert_config = Config()
+     nb_convert_config.NotebookExporter.preprocessors = ["nbconvert.preprocessors.ClearOutputPreprocessor"]
+     output_remover = nbconvert.NotebookExporter(nb_convert_config)
+     for notebookfile in Path(notebooks_dir).glob("**/*.ipynb"):
+         if not str(notebookfile.name).startswith("test_") and notebookfile.name not in EXCLUDED_NOTEBOOKS:
+             nb = nbformat.read(notebookfile, as_version=nbformat.NO_CONVERT)
+             found = False
+             device_found = False
+             for cell in nb["cells"]:
+                 if skip_ov_install and "%pip" in cell["source"]:
+                     remove_ov_install(cell)
+                 if test_device and DEVICE_WIDGET in cell["source"]:
+                     device_found = True
+                     cell["source"] = re.sub(r"value=.*,", f"value='{test_device.upper()}',", cell["source"])
+                     cell["source"] = re.sub(
+                         r"options=",
+                         f"options=['{test_device.upper()}'] + ",
+                         cell["source"],
+                     )
+                     print(f"Replaced testing device with {test_device}")
+                 replace_dict = cell.get("metadata", {}).get("test_replace")
+                 if replace_dict is not None:
+                     found = True
+                     for source_value, target_value in replace_dict.items():
+                         if source_value not in cell["source"]:
+                             raise ValueError(f"Processing {notebookfile} failed: {source_value} does not exist in cell")
+                         cell["source"] = cell["source"].replace(source_value, target_value)
+                         cell["source"] = "# Modified for testing\n" + cell["source"]
+                         print(f"Processed {notebookfile}: {source_value} -> {target_value}")
+             if test_device and not device_found:
+                 print(f"No device replacement found for {notebookfile}")
+             if not found:
+                 print(f"No replacements found for {notebookfile}")
+             disable_gradio_debug(nb, notebookfile)
+             disable_skip_ext(nb, notebookfile, test_device)
+             nb_without_out, _ = output_remover.from_notebook_node(nb)
+             with notebookfile.with_name(f"test_{notebookfile.name}").open("w", encoding="utf-8") as out_file:
+                 out_file.write(nb_without_out)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser("Notebook patcher")
+     parser.add_argument("notebooks_dir", default=".")
+     parser.add_argument("-td", "--test_device", default="")
+     parser.add_argument("--skip_ov_install", action="store_true")
+     args = parser.parse_args()
+     if not Path(args.notebooks_dir).is_dir():
+         raise ValueError(f"'{args.notebooks_dir}' is not an existing directory")
+     patch_notebooks(args.notebooks_dir, args.test_device, args.skip_ov_install)
.ci/pip_conflicts_check.sh ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ #!/bin/bash
+ set -e
+ 
+ PARSER_REGEX="get_ipython\(\)\.run_line_magic\('pip', 'install \K([^#'\)]*)(?='\))"
+ INSTALL_OPTIONS_REGEX="(^-\S+)"
+ TMP_DIR="./tmp"
+ REQ_FILE="$TMP_DIR/requirements.txt"
+ EXCLUDE_INSTALL_OPTIONS="(--upgrade-strategy \S+)"
+ 
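+ # Illustrative example (hypothetical package and URL): after `jupyter nbconvert --to script`
+ # flattens a notebook cell, a line such as
+ #   get_ipython().run_line_magic('pip', 'install -q "openvino>=2024.1" --extra-index-url https://example.org/simple')
+ # is matched by PARSER_REGEX, which captures everything after 'install ':
+ #   -q "openvino>=2024.1" --extra-index-url https://example.org/simple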
+ 
+ while [[ $# -gt 0 ]]; do
+     case "$1" in
+         --ignore)
+             ignore_file="$2"
+             shift
+             ;;
+         *)
+             echo "Unknown option: $1" >&2
+             exit 1
+             ;;
+     esac
+     shift
+ done
+ 
+ mkdir -p "$TMP_DIR"
+ trap "rm -rf $TMP_DIR" EXIT
+ 
+ # Iterate over all `.ipynb` files in the current folder and subfolders
+ find "$(pwd)" -type f -name "*.ipynb" -exec realpath --relative-to="$(pwd)" {} + | while read -r file; do
+     if [[ -v ignore_file ]]; then
+         grep -qF "$file" "$ignore_file" && continue
+     fi
+     nb_filename="$file" # notebook path relative to the repo root
+     extended_nb_filename="${nb_filename//\//_}"
+     req_file_name="$TMP_DIR/${extended_nb_filename%.ipynb}_requirements.txt"
+ 
+     # Convert to Python script first to flatten multi-line commands
+     output=$(jupyter nbconvert --no-prompt --to script --stdout "$file")
+     matched=$(grep -Po "$PARSER_REGEX" <<< "$output" || true)
+     if [ -z "$matched" ]; then
+         echo "ERROR: No '%pip install' command found in $file"
+         exit 1
+     fi
+ 
+     while IFS= read -r line; do
+         index_url=$(grep -Po "(--index-url \S+)" <<< "$line" || true)
+         extra_index_url=$(grep -Po "(--extra-index-url \S+)" <<< "$line" || true)
+         if [ -n "$index_url" ]; then
+             line=$(sed -r "s,${index_url},,g" <<< "$line") # remove option from line
+             echo "--extra-${index_url:2}" >> "$req_file_name" # add index url as an extra index to avoid overriding index-url
+         fi
+         if [ -n "$extra_index_url" ]; then
+             line=$(sed -r "s,${extra_index_url},,g" <<< "$line") # remove option from line
+             echo "$extra_index_url" >> "$req_file_name" # add extra index url to the requirements file
+         fi
+         line=$(sed -E "s/$EXCLUDE_INSTALL_OPTIONS//g" <<< "$line") # drop ignored install options
+         packages=$(sed -r "s/ /\n/g" <<< "$line") # one package per line for iteration
+         for p in $packages; do
+             option=$(grep -Po "$INSTALL_OPTIONS_REGEX" <<< "$p" || true)
+             if [ -z "$option" ]; then
+                 pure_p=$(sed -r "s/\"//g" <<< "$p") # remove quotes
+                 echo "$pure_p" >> "$req_file_name" # write package to file
+             fi
+         done
+     done <<< "$matched"
+     echo "-r ${req_file_name##*/}" >> "$REQ_FILE" # add partial requirements to the main file
+ done
+ echo "Checking requirements..."
+ python -m pip install -r "$REQ_FILE" --dry-run --ignore-installed
.ci/spellcheck/.pyspelling.wordlist.txt ADDED
@@ -0,0 +1,877 @@
+ 3D
+ abstractive
+ accelerometers
+ accelerometer
+ acknowledgement
+ activations
+ adaptively
+ adas
+ ADE
+ adversarially
+ AE
+ aeroplane
+ affective
+ ai
+ al
+ Alibaba
+ ALiBi
+ AlpacaEval
+ aMUSEd
+ analytics
+ AnimateAnyone
+ AnimeGAN
+ AnimateLCM
+ Antelopev
+ api
+ APIs
+ Arcface
+ argmax
+ artstation
+ ASPP
+ ASR
+ asr
+ async
+ AsyncInferQueue
+ Atrous
+ audiobooks
+ audioldm
+ AudioLDM
+ autoencoder
+ autogenerated
+ autoregressive
+ autoregressively
+ AutoTokenizer
+ backend
+ backends
+ Baevski
+ Baichuan
+ baichuan
+ BaseSpeakerTTS
+ BasicUNet
+ bboxes
+ BEiT
+ Belrose
+ Benchmarking
+ benchmarking
+ bert
+ BERT's
+ BetterTransformer
+ Bewley
+ bfloat
+ BGE
+ bge
+ BGR
+ BMP
+ Bicubic
+ bicubic
+ bilinear
+ biometrics
+ BiT
+ blockwise
+ boolean
+ CentOS
+ CFG
+ charlist
+ charlists
+ chatbot
+ chatbots
+ chatglm
+ ChatGLM
+ ChatGPT
+ chinese
+ CIN
+ ckpt
+ CHW
+ Cifar
+ cityscape
+ Cityscapes
+ CLI
+ cli
+ CLIP's
+ codebook
+ codebooks
+ codec
+ codecs
+ Codecs
+ CoLA
+ Colab
+ colormap
+ CompVis
+ conditionings
+ config
+ configs
+ Connectionist
+ ContentVec
+ Contrastive
+ contrastive
+ controllability
+ ControlNet
+ ControlNets
+ controlnet
+ ConvE
+ conve
+ ConvNet
+ ConvNets
+ ConvNext
+ ConvNeXt
+ ConvNeXts
+ Convolutional
+ convolutional
+ coreference
+ CoSENT
+ cpm
+ CPUs
+ cpu
+ CRNN
+ CSV
+ CTC
+ CUDA
+ CVF
+ CVPR
+ Databricks
+ databricks
+ dataloader
+ dataloaders
+ DataLoader
+ DataLoaders
+ DataModule
+ dataset
+ datasets
+ DDIM
+ DDPMs
+ dDPO
+ de
+ Deblur
+ deblur
+ DeblurGAN
+ DeblurGANv
+ deblurred
+ Deblurring
+ deblurring
+ deconvolution
+ decidiffusion
+ Deci
+ DeciDiffusion
+ DeciDiffusion's
+ deduplicated
+ DeepFloyd
+ DeepLabV
+ denoise
+ denoised
+ denoises
+ denoising
+ denormalization
+ denormalized
+ depainting
+ DepthAnything
+ detections
+ detokenization
+ detokenizer
+ Dettmers
+ dev
+ detectron
+ Detectron
+ dGPU
+ dGPUs
+ DialoGPT
+ diarization
+ Diffusers
+ diffusers
+ dimensionality
+ Distil
+ distil
+ DistilBERT
+ distilbert
+ distiluse
+ DL
+ DocLayNet
+ docstring
+ DocVQA
+ docvqa
+ DocumentLoaders
+ doi
+ Dollár
+ donut
+ DOTA
+ DOTAv
+ Downloader
+ downloader
+ downsample
+ downsampled
+ DPO
+ dpo
+ dpredictor
+ DreamBooth
+ Dreamshaper
+ dropdown
+ DynamiCrafter
+ ECCV
+ editability
+ EfficientNet
+ EfficientSAM
+ EfficientSAMs
+ embeddings
+ EnCodec
+ encodec
+ enum
+ et
+ Evol
+ EVS
+ facto
+ fastcomposer
+ FastComposer
+ FastSAM
+ FC
+ feedforward
+ FFmpeg
+ FIL
+ FEIL
+ finetuned
+ finetuning
+ FLAC
+ floyd
+ Formatter
+ formatter
+ fp
+ FP
+ FPN
+ FreeVC
+ freevc
+ frisbee
+ Frontend
+ frontend
+ GAN
+ Gante
+ Gaudi
+ gaussian
+ Gb
+ gcc
+ GEC
+ GELU
+ GELAN
+ Gemma
+ gemma
+ genai
+ genAI
+ Girshick
+ Gitee
+ GitHub
+ GLA
+ GMCNN
+ GNA
+ Golang
+ googlenet
+ GOPRO
+ GPT
+ gpu
+ GPU's
+ GPUs
+ Gradio
+ gradio
+ grayscale
+ GroundedSAM
+ GroundingDINO
+ gRPC
+ Gu
+ Gutendex
+ Hafner
+ HugginFaceH
+ HandBrake
+ heatmap
+ HC
+ HED
+ HH
+ hoc
+ HuggingFace
+ huggingfacehub
+ Husain
+ HWC
+ hyperparameters
+ ICIP
+ ICPR
+ iGPU
+ IdentityNet
+ iGPUs
+ Ilija
+ ImageBind
+ imagenet
+ Imagenet
+ ImageNet
+ Imagenette
+ ImgPipeline
+ impactful
+ IMU
+ IMUs
+ InceptionResNetV
+ inferencing
+ InferRequest
+ InferRequests
+ inpainting
+ InsightFace
+ installable
+ InstantID
+ instantiation
+ InstructGPT
+ InstructPix
+ intel
+ InternLM
+ internlm
+ invertible
+ intervaling
+ im
+ img
+ ip
+ IRs
+ iteratively
+ JFLEG
+ JIT
+ Joao
+ JS
+ JSON
+ json
+ JT
+ JuggernautXL
+ Jupyter
+ Jupyter's
+ JupyterLab
+ Kaiming
+ Kalman
+ kbps
+ KD
+ keras
+ KerasCV
+ keypoint
+ keypoints
+ KiTS
+ Koltun
+ Kondate
+ Kosaraju
+ kosmos
+ Kosmos
+ KOSMOS
+ KServe
+ Kubernetes
+ Kupyn
+ KV
+ Labelling
+ labour
+ labse
+ LaBSE
+ Lai
+ LAION
+ LangChain
+ Lasinger
+ latents
+ LCMs
+ LCMScheduler
+ LDM
+ LDMs
+ LDP
+ learnable
+ LeViT
+ LibriSpeech
+ librispeech
+ Lim
+ Liu
+ LLama
+ LLaMa
+ LLaVA
+ llava
+ llm
+ LLM
+ LLMs
+ LMS
+ logits
+ LogSoftmax
+ LoRA
+ LoRAs
+ lraspp
+ LRASPP
+ LTS
+ LSTM
+ LSTMs
+ Luo
+ LVLM
+ macOS
+ Magika
+ Mahalanobis
+ Mapillary
+ Markovian
+ Martyniuk
+ maskrcnn
+ mathbf
+ MatMul
+ MBs
+ MediaPipe
+ mel
+ Mels
+ MERCHANTABILITY
+ MF
+ MiB
+ microservices
+ MiDaS
+ MidasNet
+ Midjourney
+ MiniCPM
+ MiniLM
+ mistralai
+ MLS
+ mms
+ MMS
+ MLLM
+ MLLMs
+ MMVLM
+ MLP
+ MobileCLIP
+ MobileLLaMA
+ mobilenet
+ MobileNet
+ MobileNetV
+ mobilevlm
+ MobileVLM
+ modelled
+ ModelBest
+ Modelscope
+ Mody
+ MONAI
+ MONAI's
+ Monodepth
+ monodepth
+ MosaicPretrainedTransformer
+ mpnet
+ mpt
+ MPT
+ MRPC
+ Multiclass
+ multiclass
+ MultiHeadAttention
+ multilayer
+ multimodal
+ Multimodality
+ MusicGen
+ Müller
+ Nakayosi
+ nano
+ nanoLLaVA
+ nar
+ NAS
+ natively
+ NCE
+ NEOX
+ NER
+ NETP
+ NeuSpell
+ NeXT
+ NLP
+ NMS
+ nn
+ NNCF
+ nncf
+ noncomparable
+ NONINFRINGEMENT
+ Notus
+ notus
+ nsamples
+ nsfw
+ NSFW
+ numpy
+ NumPy
+ NPU
+ NPUs
+ OASST
+ OBB
+ obb
+ ocr
+ OCRv
+ odometry
+ OMZ
+ OneFormer
+ oneformer
+ ONNX
+ onnx
+ ontologies
+ OpenAI
+ openai
+ OpenAI's
+ OpenCL
+ OpenCLIP
+ OpenCLIP's
+ OpenCV
+ OpenPose
+ OpenShift
+ OpenVINO
+ openvino
+ OpenVino
+ OpenVINO's
+ openvoice
+ OpenVoice
+ OpenVoiceBaseClass
+ opset
+ optimizable
+ Orca
+ OVC
+ overfitting
+ overlayed
+ OV
+ OVModel
+ OVModelForXXX
+ OVMS
+ OVStableDiffusionPipeline
+ OvStableDiffusionInpaintingPipeline
+ PaddleClas
+ PaddleGAN
+ paddlegan
+ PaddleGAN's
+ PaddleHub
+ PaddleOCR
+ PaddlePaddle
+ PaddlePaddle's
+ PAF
+ PAFs
+ Panoptic
+ panoptic
+ parallelized
+ parameterization
+ parametrize
+ parsers
+ perceptron
+ Patil
+ PEFT
+ performant
+ PersonaGPT
+ PGI
+ PhotoMaker
+ photorealism
+ photorealistic
+ Piotr
+ Pipelining
+ pixelwise
+ PNDM
+ Pointilism
+ PointNet
+ Postfuse
+ postprocess
+ postprocesses
+ postprocessing
+ Postprocessing
+ PowerShell
+ PPYOLOv
+ PR
+ Prateek
+ pre
+ Precisions
+ precomputed
+ prefetching
+ preformatted
+ PrePostProcessing
+ prepostprocessing
+ prepostprocessor
+ Preprocess
+ preprocess
+ preprocessed
+ preprocesses
+ preprocessing
+ preprocessor
+ pretrain
+ pretrained
+ Pretraining
+ pretraining
+ processings
+ promptable
+ proto
+ protobuf
+ PRs
+ psychoacoustics
+ PTQ
+ px
+ py
+ pyannote
+ PyPI
+ Pythia
+ pytorch
+ PyTorch
+ PyTorchVideo
+ QFormer
+ Qianwen
+ Qi
+ QKV
+ qrcode
+ quant
+ quantized
+ quantizer
+ quantizers
+ Quantizing
+ quantizing
+ QuartzNet
+ qwen
+ Qwen
+ Radiopaedia
+ Radosavovic
+ Raj
+ Ranftl
+ RASPP
+ rcnn
+ ReAct
+ RealSense
+ RealSR
+ Realtime
+ realtime
+ rebase
+ redistributable
+ RedPajama
+ ReferenceNet
+ RegNet
+ RegNetY
+ regressively
+ reidentification
+ ReLU
+ René
+ repo
+ reproducibility
+ rerank
+ Rerank
+ reranker
+ rescale
+ rescaling
+ Rescaling
+ ResNet
+ resnet
+ RetinaFace
+ RetroMAE
+ RGB
+ Riffusion
+ riffusion
+ Rinna
+ rinna
+ RLHF
+ RMBG
+ RoBERTa
+ roberta
+ ROI
+ Ruizhongtai
+ Runtime
+ runtime
+ runtimes
+ SageMaker
+ sagittal
+ SALICON
+ Saliency
+ saliency
+ SAMI
+ sam
+ SavedModel
+ scalability
+ Scalable
+ scalable
+ ScaleMapLearner
+ Schuster
+ sd
+ SDEdit
+ SDXL
+ sdxl
+ Segformer
+ Segmentations
+ segmentations
+ Segmenter
+ segmenter
+ Segmind
+ segmind
+ serializable
+ sft
+ ShapeNet
+ ShareGPT
+ Shazeer
+ Shutterstock
+ siggraph
+ sigmoid
+ SigLIP
+ siglip
+ SISR
+ SlimOrca
+ SlowFast
+ slowfast
+ slowmo
+ SML
+ sml
+ softmax
+ softvc
+ SoftVC
+ SOTA
+ Sovits
+ sparsity
+ Sparisty
+ sparsified
+ sparsify
+ spectrogram
+ spectrograms
+ splitters
+ SPS
+ SQA
+ SQuAD
+ SRT
+ SSD
+ SSDLite
+ sst
+ StableCascade
+ StableDiffusionInpaintPipeline
+ StableDiffusionPipeline
+ StableDiffusionImg
+ StableDiffusionImg2ImgPipeline
+ stabilityai
+ STFT
+ stateful
+ streamable
+ Struct
+ struct
+ stylization
+ subchunk
+ subchunks
+ subdirectories
+ subdirectory
+ subgraph
+ subgraphs
+ sublicense
+ subpackage
+ subtask
+ summarization
+ Suno
+ superresolution
+ superset
+ Suraj
+ surya
+ svc
+ SVD
+ SVTR
+ Swin
+ SwiGLU
+ SwinV
+ TaskManager
+ TartanAir
+ tbb
+ TensorBoard
+ tensorflow
+ tf
+ TFLite
+ tflite
+ th
+ timestep
+ timesteps
+ TinyLlama
+ Tokenization
+ tokenization
+ tokenize
+ tokenized
+ tokenizer
+ Tokenizer
+ tokenizers
+ Tokenizers
+ tokenizes
+ tokenizing
+ ToneColorConverter
+ Tongyi
+ TorchDynamo
+ torchdynamo
+ TorchMetrics
+ TorchScript
+ torchvision
+ TorchVision
+ transformative
+ Tripo
+ TripoSR
+ TTS
+ Tsinghua
+ TsinghuaNLP
+ tunable
+ tv
+ TypeScript
+ Udnie
+ UI
+ UIs
+ UINT
+ Ultralytics
+ Uncheck
+ unCLIP
+ uncomment
+ uncompressing
+ UMD
+ UNet
+ UNets
+ Unet
+ Unimodal
+ unsqueeze
+ uncurated
+ Uparrow
+ uparrow
+ upcast
+ upcasts
+ Upcroft
+ upsample
+ upsampled
+ upsamples
+ upsampling
+ Upscaled
+ upscaler
+ upscales
+ upscaling
+ utils
+ VAE
+ Validator
+ validator
+ variational
+ VCTK
+ Vec
+ VectorStore
+ vec
+ VegaRT
+ videpth
+ VIO
+ virtualenv
+ ViT
+ vit
+ vits
+ VITS
+ vitt
+ VM
+ Vladlen
+ VOC
+ Vocoder
+ VQ
+ VQA
+ VQGAN
+ VQVAE
+ waveform
+ waveforms
+ Wav
+ WavLM
+ WebGL
+ WebUI
+ WER
+ WIKISQL
+ WikiTable
+ WIKITQ
+ Wofk
+ WTQ
+ wuerstchen
+ WuerstchenDiffNeXt
+ Würstchen
+ XCode
+ Xeon
+ xl
+ xt
+ xvector
+ xxl
+ XYWH
+ Yiqin
+ YOLO
+ YOLOv
+ yolov
+ Youri
+ youri
+ ZavyChromaXL
+ Zongyuan
+ ZeroScope
+ zeroscope
+ zh
+ xformers
.ci/spellcheck/.pyspelling.yml ADDED
@@ -0,0 +1,49 @@
+ spellchecker: aspell
+ 
+ matrix:
+   - name: markdown
+     aspell:
+       lang: en
+       d: en_US
+     dictionary:
+       wordlists:
+         - .ci/spellcheck/.pyspelling.wordlist.txt
+       output: .ci/spellcheck/dictionary/pyspelling.dic
+     pipeline:
+       - pyspelling.filters.url
+       - pyspelling.filters.markdown:
+           markdown_extensions:
+             - markdown.extensions.extra
+       - pyspelling.filters.html:
+           comments: false
+           ignores:
+             - code
+             - pre
+             - spell
+     sources:
+       - README.md
+       - CONTRIBUTING.md
+       - notebooks/**/README.md
+ 
+   - name: notebooks
+     aspell:
+       lang: en
+       d: en_US
+     dictionary:
+       wordlists:
+         - .ci/spellcheck/.pyspelling.wordlist.txt
+       output: .ci/spellcheck/dictionary/pyspelling.dic
+     pipeline:
+       - ipynb_filter
+       - pyspelling.filters.url
+       - pyspelling.filters.markdown:
+           markdown_extensions:
+             - markdown.extensions.extra
+       - pyspelling.filters.html:
+           comments: false
+           ignores:
+             - code
+             - pre
+             - spell
+     sources:
+       - notebooks/**/*.ipynb
.ci/spellcheck/ipynb_filter.py ADDED
@@ -0,0 +1,48 @@
+ """
+ PySpelling plugin for filtering Jupyter Notebook files (*.ipynb)
+ """
+ 
+ from pyspelling import filters
+ import nbformat
+ 
+ 
+ class IpynbFilter(filters.Filter):
+     """Spellchecking of Jupyter Notebook (ipynb) cells"""
+ 
+     def __init__(self, options, default_encoding="utf-8"):
+         """Initialization."""
+         super().__init__(options, default_encoding)
+ 
+     def get_default_config(self):
+         """Get default configuration."""
+         return {
+             "cell_type": "markdown",  # Cell type to filter (markdown or code)
+         }
+
22
+ def setup(self):
23
+ """Setup."""
24
+ self.cell_type = self.config["cell_type"]
25
+
26
+ def filter(self, source_file, encoding): # noqa A001
27
+ """Open and filter the file from disk."""
28
+ nb: nbformat.NotebookNode = nbformat.read(source_file, as_version=nbformat.NO_CONVERT)
29
+
30
+ return [filters.SourceText(self._filter(nb), source_file, encoding, "ipynb")]
31
+
32
+ def _filter(self, nb):
33
+ """Filter ipynb."""
34
+ text_list = []
35
+ for cell in nb.cells:
36
+ if cell["cell_type"] == self.cell_type:
37
+ text_list.append(cell["source"])
38
+
39
+ return "\n".join(text_list)
40
+
41
+ def sfilter(self, source):
42
+ """Execute filter."""
43
+ return [filters.SourceText(self._filter(source.text), source.context, source.encoding, "ipynb")]
44
+
45
+
46
+ def get_plugin():
47
+ """Return the filter."""
48
+ return IpynbFilter
.ci/spellcheck/run_spellcheck.py ADDED
@@ -0,0 +1,25 @@
+ import os
+ import sys
+ import subprocess  # nosec - disable B404:import-subprocess check
+ from pathlib import Path
+ 
+ spellcheck_dir = Path(__file__).parent
+ 
+ spellcheck_config_filename = ".pyspelling.yml"
+ 
+ # Add the spellcheck directory to PYTHONPATH to use the custom PySpelling plugin for Jupyter Notebooks
+ PYTHONPATH = ":".join([os.environ.get("PYTHONPATH") or "", str(spellcheck_dir)])
+ 
+ # Run the PySpelling tool
+ result = subprocess.run(
+     args=["pyspelling", "--config", f"{spellcheck_dir / spellcheck_config_filename}"],
+     universal_newlines=True,
+     stdout=subprocess.PIPE,
+     stderr=subprocess.PIPE,
+     env=dict(os.environ, PYTHONPATH=PYTHONPATH),
+ )
+ 
+ result_output = result.stdout.strip("\n") if result.stdout else result.stderr.strip("\n")
+ 
+ print(result_output, file=sys.stderr if result.returncode else sys.stdout, flush=True)
+ 
+ sys.exit(result.returncode)
.ci/table_of_content.py ADDED
@@ -0,0 +1,167 @@
+ import json
+ import pathlib
+ import argparse
+ import re
+ 
+ TABLE_OF_CONTENT = r"#+\s+Table of content:?"
+ 
+ 
+ def find_tc_in_cell(cell):
+     tc_cell = None
+     tc_line_number = None
+     for i, line in enumerate(cell["source"]):
+         if re.match(TABLE_OF_CONTENT, line):
+             tc_cell = cell
+             tc_line_number = i
+             break
+ 
+     return tc_cell, tc_line_number
+ 
+ 
+ def create_title_for_tc(title):
+     title_for_tc = title.lstrip("#").lstrip()
+     title_for_tc = re.sub(r"[\[\]\n]", "", title_for_tc)
+     title_for_tc = re.sub(r"\(http.*\)", "", title_for_tc)
+ 
+     return title_for_tc
+ 
+ 
+ def create_link_for_tc(title):
+     link = re.sub(r"[`$^]", "", title)
+     link = link.replace(" ", "-")
+ 
+     return link
+ 
+ 
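+ # Example of the two helpers above (illustrative heading): "## Convert the model"
+ # yields the table-of-contents entry "- [Convert the model](#Convert-the-model)"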
+ def remove_old_tc(cell, idx):
+     if cell is not None:
+         for line in cell["source"][idx:]:
+             if re.match(r"\s*-\s*\[.*\]\(#.*\).*", line) or re.match(TABLE_OF_CONTENT, line):
+                 cell["source"].remove(line)
+     return cell
+ 
+ 
+ def get_tc_line(title, title_for_tc, link, tc_list, titles_list):
+     # calculate the indents for the table of contents entry
+     try:
+         indents_num = (title.index(" ") - 2) * 4
+     except ValueError:
+         indents_num = -1
+ 
+     if len(tc_list) == 0 or indents_num < 0:
+         # if the first list item had more than one indent, the alignment would be broken
+         indents_num = 0
+     elif indents_num - tc_list[-1].index("-") > 4:
+         # if the previous list item has n indents and the current one has n + 4 + 1, the alignment breaks
+         indents_num = tc_list[-1].index("-") + 4
+     elif indents_num != tc_list[-1].index("-") and title.index(" ") == titles_list[-1].index(" "):
+         # several titles in a row share the same wrong alignment
+         indents_num = tc_list[-1].index("-")
+ 
+     indents = " " * indents_num + "-" + " "
+     line = f"{indents}[{title_for_tc}](#{link})\n"
+ 
+     return line
+ 
+ 
+ def is_ref_to_top_exists(cell, idx):
+     ref_exists = False
+     for row in cell[idx + 1 :]:
+         row = row.strip()
+         if "[back to top ⬆️](#Table-of-content" in row:
+             ref_exists = True
+             break
+         elif row != "":
+             # the content of the block has started
+             break
+     return ref_exists
+ 
+ 
+ def is_markdown(cell):
+     return "markdown" == cell["cell_type"]
+ 
+ 
+ def is_title(line):
+     return line.strip().startswith("#") and line.strip().lstrip("#").lstrip()
+ 
+ 
+ def generate_table_of_content(notebook_path: pathlib.Path):
+     table_of_content = []
+ 
+     table_of_content_cell = None
+     table_of_content_cell_idx = None
+ 
+     with open(notebook_path, "r", encoding="utf-8") as notebook_file:
+         notebook_json = json.load(notebook_file)
+ 
+     if not notebook_json["cells"]:
+         return
+ 
+     table_of_content_cell, table_of_content_cell_idx = find_tc_in_cell(notebook_json["cells"][0])
+ 
+     all_titles = []
+     for cell in filter(is_markdown, notebook_json["cells"][1:]):
+         if table_of_content_cell is None:
+             table_of_content_cell, table_of_content_cell_idx = find_tc_in_cell(cell)
+             if table_of_content_cell is not None:
+                 continue
+ 
+         titles = [line for line in cell["source"] if is_title(line)]
+         for title in titles:
+             idx = cell["source"].index(title)
+             if not is_ref_to_top_exists(cell["source"], idx):
+                 if not title.endswith("\n"):
+                     cell["source"].insert(idx, title + "\n")
+                 cell["source"].insert(idx + 1, "[back to top ⬆️](#Table-of-contents:)\n")
+                 cell["source"].insert(idx + 2, "")
+ 
+             title = title.strip()
+             title_for_tc = create_title_for_tc(title)
+             link_for_tc = create_link_for_tc(title_for_tc)
+             new_line = get_tc_line(title, title_for_tc, link_for_tc, table_of_content, all_titles)
+ 
+             if table_of_content.count(new_line) > 0:
+                 print(
+                     f'WARNING: the title "{title_for_tc}" has already been used.\n'
+                     + "Navigation will not work correctly; the link will only point to "
+                     + "the first encountered title."
+                 )
+ 
+             table_of_content.append(new_line)
+             all_titles.append(title)
+ 
+     table_of_content = ["\n", "#### Table of contents:\n\n"] + table_of_content + ["\n"]
+ 
+     if table_of_content_cell is not None:
+         table_of_content_cell = remove_old_tc(table_of_content_cell, table_of_content_cell_idx)
+ 
+     if table_of_content_cell is not None:
+         table_of_content_cell["source"].extend(table_of_content)
+     else:
+         notebook_json["cells"][0]["source"].extend(table_of_content)
+ 
+     with open(notebook_path, "w", encoding="utf-8") as in_f:
+         json.dump(notebook_json, in_f, ensure_ascii=False, indent=1)
+ 
+ 
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+ 
+     parser.add_argument(
+         "-s",
+         "--source",
+         help="Please specify a notebook or a folder with notebooks. \
+ A table of contents will be added to or modified in each.",
+         required=True,
+     )
+ 
+     args = parser.parse_args()
+     path_to_source = pathlib.Path(args.source)
+     if not path_to_source.exists():
+         print(f"Incorrect path to notebook(s): {path_to_source}")
+         exit()
+     elif path_to_source.is_file():
+         generate_table_of_content(path_to_source)
+     elif path_to_source.is_dir():
+         for notebook in path_to_source.glob("**/*.ipynb"):
+             generate_table_of_content(notebook)
.ci/tagger.py ADDED
@@ -0,0 +1,44 @@
+ import json
+ import glob
+ import mmap
+ import sys
+ 
+ 
+ def get_notebooks(path: str):
+     return glob.glob(f"{path}/*/[0-9]*.ipynb")
+ 
+ 
+ def get_tags(path: str):
+     with open(path) as f:
+         return json.load(f)
+ 
+ 
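+ # Expected shape of the keywords file (illustrative tag and keyword names,
+ # not the real contents of .ci/keywords.json):
+ # {"tags": {"NLP": ["tokenizer", "bert"], "CV": ["segmentation", "yolo"]}}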
+ def find_tags_for_notebook(notebook_path: str, tags: dict):
+     nb_tags = []
+     with open(notebook_path) as file:
+         f = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
+         for tag, keywords in tags.items():
+             for keyword in keywords:
+                 if f.find(bytes(keyword, "utf-8")) != -1:
+                     nb_tags.append(tag)
+                     break
+     return nb_tags
+ 
+ 
+ def find_tags_for_all_notebooks(notebooks: list, tags: dict):
+     notebooks_tags = {}
+     for notebook in notebooks:
+         nb_tags = sorted(find_tags_for_notebook(notebook, tags))
+         if nb_tags:
+             notebooks_tags[notebook.split("/")[-1].split(".")[0]] = nb_tags
+     return notebooks_tags
+ 
+ 
+ if __name__ == "__main__":
+     if len(sys.argv) == 1:
+         notebooks_paths = sorted(get_notebooks("notebooks"))
+         tags = get_tags(".ci/keywords.json")["tags"]
+     else:
+         notebooks_paths = sorted(get_notebooks("/".join(sys.argv[1].split("/")[:-2])))
+         tags = get_tags(sys.argv[2])["tags"]
+     all_notebooks_tags = find_tags_for_all_notebooks(notebooks_paths, tags)
+     print(json.dumps(all_notebooks_tags, indent=4))
.ci/test_notebooks.py ADDED
@@ -0,0 +1,89 @@
+ import requests
+ from pathlib import Path
+ from typing import Set
+ import pytest
+ 
+ import toml
+ from pip._internal.req import parse_requirements
+ 
+ 
+ def get_parsed_requirements(requirements_file: str) -> Set:
+     """
+     Returns a set of requirements that are defined in `requirements_file`,
+     without versions
+     """
+     requirements_set = set()
+     ignore_list = [
+         "paddlenlp",
+         "paddle2onnx",
+         "paddlepaddle",
+     ]  # temporarily ignore paddle packages
+     parsed_requirements = parse_requirements(requirements_file, session=False)
+     separators = ("=", "<", ">", "[")
+     for req in parsed_requirements:
+         requirement = req.requirement
+         # skip requirements that apply to Windows or macOS only
+         if ";" in requirement and "linux" not in requirement:
+             continue
+         if requirement.startswith("git+"):
+             requirement = requirement.split("#egg=")[-1]
+         for separator in separators:
+             requirement = requirement.replace(separator, "|")
+         reqname = requirement.split("|")[0]
+         if reqname not in ignore_list:
+             requirements_set.add(reqname)
+ 
+     return requirements_set
+ 
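+ # Examples of the parsing above (illustrative requirement strings):
+ #   "torch>=2.1.0"                                  -> "torch"
+ #   "optimum[openvino]==1.17"                       -> "optimum"
+ #   "git+https://github.com/org/repo.git#egg=mypkg" -> "mypkg"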
+ 
+ def test_readme():
+     """
+     Test that all notebooks have a README file
+     """
+     for item in Path("notebooks").iterdir():
+         if item.is_dir():
+             # item is a notebook directory
+             notebook_dir = item.relative_to(Path("notebooks"))
+             if str(notebook_dir)[0].isdigit():
+                 assert "README.md" in [filename.name for filename in item.iterdir()], f"README not found in {item}"
+ 
+ 
+ def test_requirements_docker():
+     """
+     Test that requirements.txt is a subset of the Docker requirements in the Pipfile.
+     This test does not check requirement versions; it only verifies existence.
+     """
+     with open(".docker/Pipfile") as f:
+         pipfile_contents = toml.load(f)
+     docker_requirements = set(list(pipfile_contents["packages"].keys()))
+ 
+     pip_requirements = get_parsed_requirements("requirements.txt")
+     assert pip_requirements.issubset(docker_requirements), f"Docker Pipfile is missing: {pip_requirements.difference(docker_requirements)}"
+ 
+ 
+ def test_requirements_binder():
+     """
+     Test that requirements.txt is a subset of the Binder requirements.
+     This test does not check requirement versions; it only verifies existence.
+     """
+     pip_requirements = get_parsed_requirements("requirements.txt")
+     binder_requirements = get_parsed_requirements(".binder/requirements.txt")
+     assert pip_requirements.issubset(binder_requirements), f"Binder requirements are missing: {pip_requirements.difference(binder_requirements)}"
+ 
+ 
+ @pytest.mark.skip(reason="URL existence is tested in docker_treon")
+ def test_urls_exist():
+     """
+     Test that URLs that may be cached still exist on the server
+     """
+     urls = [
+         "http://cs231n.stanford.edu/tiny-imagenet-200.zip",
+         "https://github.com/onnx/models/raw/main/vision/style_transfer/fast_neural_style/model/pointilism-9.onnx",
+         "https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_00030.zip",
+     ]
+     headers = {"User-Agent": "Mozilla/5.0"}
+     for url in urls:
+         response = requests.get(url=url, headers=headers)
+         # a bare `raise` here would fail with "no active exception"; raise an explicit error instead
+         if response.status_code != 200:
+             raise RuntimeError(f"Downloading {url} failed with status code {response.status_code}")
.ci/validate_notebooks.py ADDED
@@ -0,0 +1,335 @@
+ import sys
+ import time
+ import os
+ import subprocess  # nosec - disable B404:import-subprocess check
+ import csv
+ import json
+ import shutil
+ import platform
+ 
+ from argparse import ArgumentParser
+ from pathlib import Path
+ from typing import Dict, List, Optional, Tuple, TypedDict
+ 
+ 
+ ROOT = Path(__file__).parents[1]
+ 
+ NOTEBOOKS_DIR = Path("notebooks")
+ 
+ 
+ class NotebookStatus:
+     SUCCESS = "SUCCESS"
+     FAILED = "FAILED"
+     TIMEOUT = "TIMEOUT"
+     SKIPPED = "SKIPPED"
+     NOT_RUN = "NOT_RUN"
+     EMPTY = "EMPTY"
+ 
+ 
+ class NotebookReport(TypedDict):
+     # TypedDict fields cannot carry default values; `duration` is always set
+     # explicitly when a report is created.
+     status: str
+     path: Path
+     duration: float
+ 
+ 
+ TestPlan = Dict[Path, NotebookReport]
+ 
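+ # A test plan maps a notebook path (relative to the notebooks directory) to its
+ # report, e.g. (hypothetical notebook name):
+ # {Path("subdir/demo.ipynb"): NotebookReport(status="", path=ROOT / "notebooks/subdir/demo.ipynb", duration=0)}
+ 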
+ def parse_arguments():
+     parser = ArgumentParser()
+     parser.add_argument("--ignore_list", required=False, nargs="+")
+     parser.add_argument("--test_list", required=False, nargs="+")
+     parser.add_argument("--early_stop", action="store_true")
+     parser.add_argument("--report_dir", default="report")
+     parser.add_argument("--keep_artifacts", action="store_true")
+     parser.add_argument("--collect_reports", action="store_true")
+     parser.add_argument("--move_notebooks_dir")
+     parser.add_argument("--job_name")
+     parser.add_argument("--device_used")
+     parser.add_argument("--upload_to_db")
+     parser.add_argument(
+         "--timeout",
+         type=int,
+         default=7200,
+         help="Timeout for running a single notebook, in seconds",
+     )
+     return parser.parse_args()
+ 
+ 
+ def move_notebooks(nb_dir):
+     current_notebooks_dir = ROOT / NOTEBOOKS_DIR
+     shutil.copytree(current_notebooks_dir, nb_dir)
+ 
+ 
+ def collect_python_packages(output_file: Path):
+     reqs = subprocess.check_output(
+         [sys.executable, "-m", "pip", "freeze"],
+         shell=(platform.system() == "Windows"),
+     )
+     with output_file.open("wb") as f:
+         f.write(reqs)
+ 
+ 
+ def prepare_test_plan(test_list: Optional[List[str]], ignore_list: List[str], nb_dir: Optional[Path] = None) -> TestPlan:
+     orig_nb_dir = ROOT / NOTEBOOKS_DIR
+     notebooks_dir = nb_dir or orig_nb_dir
+     notebooks: List[Path] = sorted(list([n for n in notebooks_dir.rglob("**/*.ipynb") if not n.name.startswith("test_")]))
+ 
+     test_plan: TestPlan = {notebook.relative_to(notebooks_dir): NotebookReport(status="", path=notebook, duration=0) for notebook in notebooks}
+ 
+     ignored_notebooks: List[Path] = []
+     if ignore_list is not None:
+         for ignore_item in ignore_list:
+             if ignore_item.endswith(".txt"):
+                 # Paths to ignore files are provided to the `--ignore_list` argument
+                 with open(ignore_item, "r") as f:
+                     ignored_notebooks.extend(list(map(lambda line: Path(line.strip()), f.readlines())))
+             else:
+                 # Ignored notebooks are provided as several items to the `--ignore_list` argument
+                 ignored_notebooks.append(Path(ignore_item))
+         try:
+             ignored_notebooks = list(set(map(lambda n: n.relative_to(NOTEBOOKS_DIR), ignored_notebooks)))
+         except ValueError:
+             raise ValueError(
+                 f"Ignore list items should be relative to the repo root (e.g. 'notebooks/subdir/notebook.ipynb').\nInvalid ignored notebooks: {ignored_notebooks}"
+             )
+     print(f"Ignored notebooks: {ignored_notebooks}")
+ 
+     testing_notebooks: List[Path] = []
+     if not test_list:
+         testing_notebooks = [Path(n) for n in test_plan.keys()]
+     elif len(test_list) == 1 and test_list[0].endswith(".txt"):
+         with open(test_list[0], "r") as f:
+             for line in f.readlines():
+                 changed_file_path = Path(line.strip())
+                 if changed_file_path.resolve() == (ROOT / "requirements.txt").resolve():
+                     print("requirements.txt changed, checking all notebooks")
+                     testing_notebooks = [Path(n) for n in test_plan.keys()]
+                     break
+                 if changed_file_path.suffix != ".ipynb":
+                     continue
+                 try:
+                     testing_notebook_path = changed_file_path.relative_to(NOTEBOOKS_DIR)
+                 except ValueError:
+                     raise ValueError(
+                         "Items in the test list file should be relative to the repo root (e.g. 'notebooks/subdir/notebook.ipynb').\n"
+                         f"Invalid line: {changed_file_path}"
+                     )
+                 testing_notebooks.append(testing_notebook_path)
+     else:
+         raise ValueError(
+             "Testing notebooks should be provided to the '--test_list' argument as a txt file, or the argument should be empty to test all notebooks.\n"
+             f"Received test list: {test_list}"
+         )
+     testing_notebooks = list(set(testing_notebooks))
+     print(f"Testing notebooks: {testing_notebooks}")
+ 
+     for notebook in test_plan:
+         if notebook not in testing_notebooks:
+             test_plan[notebook]["status"] = NotebookStatus.SKIPPED
+         if notebook in ignored_notebooks:
+             test_plan[notebook]["status"] = NotebookStatus.SKIPPED
+     return test_plan
+ 
+ 
+ def clean_test_artifacts(before_test_files: List[Path], after_test_files: List[Path]):
+     for file_path in after_test_files:
+         if file_path in before_test_files or not file_path.exists():
+             continue
+         if file_path.is_file():
+             try:
+                 file_path.unlink()
+             except Exception:
+                 pass
+         else:
+             shutil.rmtree(file_path, ignore_errors=True)
+ 
+ 
+ def get_openvino_version() -> str:
+     try:
+         import openvino as ov
+ 
+         version = ov.get_version()
+     except ImportError:
+         print("OpenVINO is missing in the validation environment.")
+         version = "OpenVINO is missing"
+     return version
+ 
+ 
+ def run_test(notebook_path: Path, root, timeout=7200, keep_artifacts=False, report_dir=".") -> Optional[Tuple[str, int, float, str, str]]:
+     os.environ["HUGGINGFACE_HUB_CACHE"] = str(notebook_path.parent)
+     print(f"RUN {notebook_path.relative_to(root)}", flush=True)
+     result = None
+ 
+     if notebook_path.is_dir():
+         print(f'Notebook path "{notebook_path}" is a directory, but a path to a "*.ipynb" file was expected.')
+         return result
+     if notebook_path.suffix != ".ipynb":
+         print(f'Notebook path "{notebook_path}" should have the "*.ipynb" extension.')
+         return result
+ 
+     with cd(notebook_path.parent):
+         files_before_test = sorted(Path(".").iterdir())
+         ov_version_before = get_openvino_version()
+         patched_notebook = Path(f"test_{notebook_path.name}")
+         if not patched_notebook.exists():
+             print(f'Patched notebook "{patched_notebook}" does not exist.')
+             return result
+ 
+         collect_python_packages(report_dir / (patched_notebook.stem + "_env_before.txt"))
+ 
+         main_command = [sys.executable, "-m", "treon", str(patched_notebook)]
+         start = time.perf_counter()
+         try:
+             retcode = subprocess.run(
+                 main_command,
+                 shell=(platform.system() == "Windows"),
+                 timeout=timeout,
+             ).returncode
+         except subprocess.TimeoutExpired:
+             retcode = -42  # sentinel return code used to mark a timeout
+         duration = time.perf_counter() - start
+         ov_version_after = get_openvino_version()
+         result = (str(patched_notebook), retcode, duration, ov_version_before, ov_version_after)
+ 
+         if not keep_artifacts:
+             clean_test_artifacts(files_before_test, sorted(Path(".").iterdir()))
+         collect_python_packages(report_dir / (patched_notebook.stem + "_env_after.txt"))
+ 
+     return result
+ 
+ 
+ def finalize_status(failed_notebooks: List[str], timeout_notebooks: List[str], test_plan: TestPlan, report_dir: Path, root: Path) -> int:
+     return_status = 0
+     if failed_notebooks:
+         return_status = 1
+         print("FAILED: \n{}".format("\n".join(failed_notebooks)))
+     if timeout_notebooks:
+         print("FAILED BY TIMEOUT: \n{}".format("\n".join(timeout_notebooks)))
+     test_report = []
+     for notebook, status in test_plan.items():
+         test_status = status["status"] or NotebookStatus.NOT_RUN
+         test_report.append(
+             {"name": notebook.as_posix(), "status": test_status, "full_path": str(status["path"].relative_to(root)), "duration": status["duration"]}
+         )
+     with (report_dir / "test_report.csv").open("w") as f:
+         writer = csv.DictWriter(f, fieldnames=["name", "status", "full_path", "duration"])
+         writer.writeheader()
+         writer.writerows(test_report)
+     return return_status
+ 
+ 
+ class cd:
+     """Context manager for changing the current working directory"""
+ 
+     def __init__(self, new_path):
+         self.new_path = os.path.expanduser(new_path)
+ 
+     def __enter__(self):
+         self.saved_path = os.getcwd()
+         os.chdir(self.new_path)
+ 
+     def __exit__(self, etype, value, traceback):
+         os.chdir(self.saved_path)
+ 
+ 
+ def write_single_notebook_report(
+     base_version: str,
+     notebook_name: str,
+     status_code: int,
+     duration: float,
+     ov_version_before: str,
+     ov_version_after: str,
+     job_name: str,
+     device_used: str,
+     saving_dir: Path,
+ ) -> Path:
+     report_file = saving_dir / notebook_name.replace(".ipynb", ".json")
+     report = {
+         "version": base_version,
+         "notebook_name": notebook_name.replace("test_", ""),
+         "status": status_code,
+         "duration": duration,
+         "ov_version_before": ov_version_before,
+         "ov_version_after": ov_version_after,
+         "job_name": job_name,
+         "device_used": device_used,
+     }
+     with report_file.open("w") as f:
+         json.dump(report, f)
+     return report_file
+ 
+ 
+ def main():
+     failed_notebooks = []
+     timeout_notebooks = []
+     args = parse_arguments()
+     reports_dir = Path(args.report_dir)
+     reports_dir.mkdir(exist_ok=True, parents=True)
+     notebooks_moving_dir = args.move_notebooks_dir
+     root = ROOT
+     if notebooks_moving_dir is not None:
+         notebooks_moving_dir = Path(notebooks_moving_dir)
+         root = notebooks_moving_dir.parent
+         move_notebooks(notebooks_moving_dir)
+ 
+     keep_artifacts = args.keep_artifacts
+ 
+     base_version = get_openvino_version()
+ 
+     test_plan = prepare_test_plan(args.test_list, args.ignore_list, notebooks_moving_dir)
+     for notebook, report in test_plan.items():
+         if report["status"] == NotebookStatus.SKIPPED:
+             continue
+         test_result = run_test(report["path"], root, args.timeout, keep_artifacts, reports_dir.absolute())
+         timing = 0
+         if not test_result:
+             print(f'Test result for notebook "{notebook}" was not collected.')
+             report["status"] = NotebookStatus.EMPTY
+             report["duration"] = timing
+         else:
+             patched_notebook, status_code, duration, ov_version_before, ov_version_after = test_result
+             if status_code:
+                 if status_code == -42:
+                     status = NotebookStatus.TIMEOUT
+                     timeout_notebooks.append(patched_notebook)
+                 else:
+                     status = NotebookStatus.FAILED
+                     failed_notebooks.append(patched_notebook)
+                 report["status"] = status
+             else:
+                 report["status"] = NotebookStatus.SUCCESS if report["status"] not in [NotebookStatus.TIMEOUT, NotebookStatus.FAILED] else report["status"]
+ 
+             timing += duration
+             report["duration"] = timing
+             if args.collect_reports:
+                 job_name = args.job_name or "Unknown"
+                 device_used = args.device_used or "Unknown"
+                 report_path = write_single_notebook_report(
+                     base_version, patched_notebook, status_code, duration, ov_version_before, ov_version_after, job_name, device_used, reports_dir
+                 )
+                 if args.upload_to_db:
+                     cmd = [sys.executable, args.upload_to_db, report_path]
+                     print(f"\nUploading {report_path} to the database. CMD: {cmd}")
+                     try:
+                         dbprocess = subprocess.Popen(
+                             cmd, shell=(platform.system() == "Windows"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True
+                         )
+                         for line in dbprocess.stdout:
+                             sys.stdout.write(line)
+                             sys.stdout.flush()
+                     except subprocess.CalledProcessError as e:
+                         print(e.output)
+ 
+         if args.early_stop:
+             break
+ 
+     exit_status = finalize_status(failed_notebooks, timeout_notebooks, test_plan, reports_dir, root)
+     return exit_status
+ 
+ 
+ if __name__ == "__main__":
+     exit_code = main()
+     sys.exit(exit_code)
.docker/.aicoe-ci.yaml ADDED
@@ -0,0 +1,9 @@
+ check:
+   - thoth-build
+ build:
+   base-image: "quay.io/thoth-station/s2i-thoth-ubi8-py38:v0.20.1"
+   build-stratergy: "Source"
+   registry: "quay.io"
+   registry-org: "thoth-station"
+   registry-project: "s2i-minimal-py38-notebook"
+   registry-secret: "thoth-station-thoth-pusher-secret"
.docker/.jupyter/custom/custom.css ADDED
@@ -0,0 +1 @@
+ .container { width:80% !important; }
.docker/.jupyter/nbconfig/common.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "load_extensions": {
+     "nbextensions_configurator/config_menu/main": true,
+     "codefolding/main": true,
+     "collapsible_headings/main": true,
+     "contrib_nbextensions_help_item/main": true,
+     "execute_time/ExecuteTime": true,
+     "hinterland/hinterland": false,
+     "init_cell/main": true,
+     "python-markdown/main": true,
+     "publish": true,
+     "ruler/main": true,
+     "toc2/main": true,
+     "toggle_all_line_numbers/main": true
+   },
+   "ruler_column": [
+     "110"
+   ],
+   "toc2": {
+     "skip_h1_title": true,
+     "toc_cell": true
+   },
+   "init_cell": {
+     "run_on_kernel_ready": false
+   },
+   "Notebook": {
+     "Header": true
+   },
+   "scrollDownIsEnabled": true
+ }
.docker/.s2i/bin/assemble ADDED
@@ -0,0 +1,145 @@
+ #!/bin/bash
+ 
+ set -x
+ 
+ set -eo pipefail
+ 
+ APP_ROOT=${APP_ROOT:-/opt/app-root/src}
+ 
+ 
+ # Run the original Python S2I assemble script to install packages.
+ 
+ /usr/libexec/s2i/assemble
+ 
+ # Remove the cached package dependency files generated by the s2i assemble script.
+ rm -rf /tmp/Pipfile.lock
+ rm -rf /tmp/requirements.txt
+ 
+ ########################################################################
+ # INFO: Install everything that's required for Jupyter notebooks here.
+ ########################################################################
+ 
+ # Install pipenv for jupyter-nbrequirements to use
+ # TODO: This should be removed once nbrequirements can use Thoth + micropipenv only
+ pip install pipenv==2020.11.15
+ 
+ # Install mod_wsgi for use in optional webdav support.
+ 
+ pip install 'mod_wsgi==4.6.8'
+ 
+ # Install base packages needed for running Jupyter Notebooks.
+ 
+ npm cache clean --force
+ 
+ rm -rf $HOME/.cache/yarn
+ rm -rf $HOME/.node-gyp
+ 
+ # Copy into place default config files for Jupyter and Apache webdav.
+ 
+ cp ${APP_ROOT}/src/jupyter_notebook_config.py /opt/app-root/etc/
+ cp ${APP_ROOT}/src/jupyter_kernel_gateway_config.py /opt/app-root/etc/
+ cp ${APP_ROOT}/src/httpd-webdav.conf /opt/app-root/etc/
+ 
+ # This S2I assemble script is only used when creating the custom image.
+ # For when running the image, or using it as an S2I builder, we use a second
+ # set of custom S2I scripts. We now need to move these into the correct
+ # location and have the custom image use those by dropping in an image
+ # metadata file which overrides the labels of the base image.
+ 
+ mkdir -p ${APP_ROOT}/.s2i
+ 
+ mv ${APP_ROOT}/src/builder/image_metadata.json ${APP_ROOT}/.s2i/image_metadata.json
+ 
+ mv ${APP_ROOT}/src/builder /opt/app-root/builder
+ 
+ mv ${APP_ROOT}/src/supervisor /opt/app-root/etc/supervisor
+ 
+ mv ${APP_ROOT}/src/gateway /opt/app-root/gateway
+ 
+ mv ${APP_ROOT}/src/*.sh /opt/app-root/bin
+ 
+ # Install the oc command line client for the OpenShift cluster.
+ 
+ curl -L -o ${APP_ROOT}/oc.tar.gz https://mirror.openshift.com/pub/openshift-v3/clients/3.11.153/linux/oc.tar.gz
+ 
+ # Check that oc.tar.gz is not empty
+ if [ -s ${APP_ROOT}/oc.tar.gz ]; then
+     tar -C /opt/app-root/bin -zxf ${APP_ROOT}/oc.tar.gz oc && \
+     mv /opt/app-root/bin/oc /opt/app-root/bin/oc-3.11 && \
+     rm ${APP_ROOT}/oc.tar.gz
+ else
+     echo "ERROR: Couldn't download the OCP 3.11 client binary."
+     exit 1
+ fi
+ 
+ curl -L -o ${APP_ROOT}/oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz
+ 
+ # Check that oc.tar.gz is not empty
+ if [ -s ${APP_ROOT}/oc.tar.gz ]; then
+     tar -C /opt/app-root/bin -zxf ${APP_ROOT}/oc.tar.gz oc && \
+     mv /opt/app-root/bin/oc /opt/app-root/bin/oc-4 && \
+     rm ${APP_ROOT}/oc.tar.gz
+ else
+     echo "ERROR: Couldn't download the OCP 4 client binary."
+     exit 1
+ fi
+ 
+ ln -s /opt/app-root/bin/oc-wrapper.sh /opt/app-root/bin/oc
+ 
+ # Ensure the passwd/group file intercept happens for any shell environment.
+ 
+ echo "source /opt/app-root/etc/generate_container_user" >> /opt/app-root/etc/scl_enable
+ 
+ # Install packages required by the proxy process.
+ 
+ (cd /opt/app-root/gateway && npm install --production)
+ 
+ # Create additional directories.
+ 
+ echo " -----> Creating additional directories."
+ 
+ mkdir -p /opt/app-root/data
+ 
+ # Generate the default supervisord.conf file.
+ 
+ echo_supervisord_conf | \
+     sed -e 's%^logfile=/tmp/supervisord.log%logfile=/dev/fd/1%' \
+         -e 's%^logfile_maxbytes=50MB%logfile_maxbytes=0%' > \
+     /opt/app-root/etc/supervisord.conf
+ 
+ cat >> /opt/app-root/etc/supervisord.conf << EOF
+ [include]
+ files = /opt/app-root/etc/supervisor/*.conf
+ EOF
+ 
+ # Install and enable default nbextensions
+ 
+ jupyter contrib nbextension install --sys-prefix
+ 
+ jupyter nbextension install --sys-prefix https://raw.githubusercontent.com/vpavlin/jupyter-publish-extension/master/publish.js
+ jupyter nbextension enable --sys-prefix publish
+ 
+ # Enable the extensions configurator
+ 
+ pip install -U jupyter_nbextensions_configurator
+ 
+ jupyter nbextensions_configurator enable --sys-prefix
+ 
+ # Apply custom notebook configuration
+ 
+ if [ -d "$APP_ROOT/src/.jupyter/" ]; then
+     rsync \
+         --link-dest="$APP_ROOT/src/.jupyter/" \
+         --recursive \
+         --verbose \
+         "$APP_ROOT/src/.jupyter/" ${APP_ROOT}/etc/jupyter
+ fi
+ 
+ # Make sure the S2I source directory is empty, as we will use the image
+ # produced to run further S2I builds.
+ 
+ (shopt -s dotglob ; rm -rf ${APP_ROOT}/src/*)
+ 
+ # Fix up permissions on directories and files.
+ 
+ fix-permissions /opt/app-root
.docker/.s2i/bin/assemble.orig ADDED
@@ -0,0 +1,131 @@
+ #!/bin/bash
+ 
+ set -x
+ 
+ set -eo pipefail
+ 
+ APP_ROOT=${APP_ROOT:-/opt/app-root/src}
+ 
+ 
+ # Run the original Python S2I assemble script to install packages.
+ 
+ /usr/libexec/s2i/assemble
+ 
+ # Remove the cached package dependencies files generated from s2i assemble script.
+ rm -rf /tmp/Pipfile.lock
+ rm -rf /tmp/requirements.txt
+ 
+ ########################################################################
+ # INFO: Install everything that's required for Jupyter notebooks here.
+ ########################################################################
+ 
+ # Install pipenv for jupyter-nbrequirements to use
+ # TODO: This should be removed once nbrequirements can use Thoth + micropipenv only
+ pip install pipenv==2020.11.15
+ 
+ # Install mod_wsgi for use in optional webdav support.
+ 
+ pip install 'mod_wsgi==4.6.8'
+ 
+ # Install base packages needed for running Jupyter Notebooks.
+ 
+ npm cache clean --force
+ 
+ rm -rf $HOME/.cache/yarn
+ rm -rf $HOME/.node-gyp
+ 
+ # Copy into place default config files for Jupyter and Apache webdav.
+ 
+ cp ${APP_ROOT}/src/jupyter_notebook_config.py /opt/app-root/etc/
+ cp ${APP_ROOT}/src/jupyter_kernel_gateway_config.py /opt/app-root/etc/
+ cp ${APP_ROOT}/src/httpd-webdav.conf /opt/app-root/etc/
+ 
+ # This S2I assemble script is only used when creating the custom image.
+ # For when running the image, or using it as a S2I builder, we use a second
+ # set of custom S2I scripts. We now need to move these into the correct
+ # location and have the custom image use those by dropping in an image
+ # metadata file which overrides the labels of the base image.
+ 
+ mkdir -p ${APP_ROOT}/.s2i
+ 
+ mv ${APP_ROOT}/src/builder/image_metadata.json ${APP_ROOT}/.s2i/image_metadata.json
+ 
+ mv ${APP_ROOT}/src/builder /opt/app-root/builder
+ 
+ mv ${APP_ROOT}/src/supervisor /opt/app-root/etc/supervisor
+ 
+ mv ${APP_ROOT}/src/gateway /opt/app-root/gateway
+ 
+ mv ${APP_ROOT}/src/*.sh /opt/app-root/bin
+ 
+ # Install oc command line client for OpenShift cluster.
+ 
+ curl -s -o ${APP_ROOT}/oc.tar.gz https://mirror.openshift.com/pub/openshift-v3/clients/3.11.153/linux/oc.tar.gz && \
+     tar -C /opt/app-root/bin -zxf ${APP_ROOT}/oc.tar.gz oc && \
+     mv /opt/app-root/bin/oc /opt/app-root/bin/oc-3.11 && \
+     rm ${APP_ROOT}/oc.tar.gz
+ 
+ curl -s -o ${APP_ROOT}/oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/oc/latest/linux/oc.tar.gz && \
+     tar -C /opt/app-root/bin -zxf ${APP_ROOT}/oc.tar.gz oc && \
+     mv /opt/app-root/bin/oc /opt/app-root/bin/oc-4 && \
+     rm ${APP_ROOT}/oc.tar.gz
+ 
+ ln -s /opt/app-root/bin/oc-wrapper.sh /opt/app-root/bin/oc
+ 
+ # Ensure passwd/group file intercept happens for any shell environment.
+ 
+ echo "source /opt/app-root/etc/generate_container_user" >> /opt/app-root/etc/scl_enable
+ 
+ # Install packages required by the proxy process.
+ 
+ (cd /opt/app-root/gateway && npm install --production)
+ 
+ # Create additional directories.
+ 
+ echo " -----> Creating additional directories."
+ 
+ mkdir -p /opt/app-root/data
+ 
+ # Generate default supervisord.conf file.
+ 
+ echo_supervisord_conf | \
+     sed -e 's%^logfile=/tmp/supervisord.log%logfile=/dev/fd/1%' \
+         -e 's%^logfile_maxbytes=50MB%logfile_maxbytes=0%' > \
+     /opt/app-root/etc/supervisord.conf
+ 
+ cat >> /opt/app-root/etc/supervisord.conf << EOF
+ [include]
+ files = /opt/app-root/etc/supervisor/*.conf
+ EOF
+ 
+ # Install and enable default nbextensions
+ 
+ jupyter contrib nbextension install --sys-prefix
+ 
+ jupyter nbextension install --sys-prefix https://raw.githubusercontent.com/vpavlin/jupyter-publish-extension/master/publish.js
+ jupyter nbextension enable --sys-prefix publish
+ 
+ # Enable the extensions configurator
+ 
+ pip install -U jupyter_nbextensions_configurator
+ 
+ jupyter nbextensions_configurator enable --sys-prefix
+ 
+ # Apply custom notebook configuration
+ 
+ if [ -d "$APP_ROOT/src/.jupyter/" ]; then
+     rsync \
+         --link-dest="$APP_ROOT/src/.jupyter/" \
+         --recursive \
+         --verbose \
+         "$APP_ROOT/src/.jupyter/" ${APP_ROOT}/etc/jupyter
+ fi
+ 
+ # Make sure the S2I source directory is empty as we will use the image
+ # produced to run further S2I builds
+ 
+ (shopt -s dotglob ; rm -rf ${APP_ROOT}/src/*)
+ 
+ # Fixup permissions on directories and files.
+ 
+ fix-permissions /opt/app-root
.docker/.s2i/bin/run ADDED
@@ -0,0 +1,7 @@
+ #!/bin/bash
+ 
+ set -eo pipefail
+ 
+ # Execute the run script from the customised builder.
+ 
+ exec /opt/app-root/builder/run
.docker/.s2i/bin/test ADDED
@@ -0,0 +1,28 @@
1
+ #!/bin/bash
2
+
3
+ set -eo pipefail
4
+
5
+ # Test that jupyterhub and ovmsclient imports work (required for image to work in RedHat cluster)
6
+ python -c "from jupyterhub.singleuser.mixins import make_singleuser_app"
7
+ python -c "import ovmsclient"
8
+
9
+ # Required for PaddlePaddle
10
+ export HUB_HOME=/tmp
11
+
12
+ # Download CT scan image for 110 training notebook
13
+ mkdir /opt/app-root/notebooks/ct-segmentation-quantize/kits19
14
+ cd /opt/app-root/notebooks/ct-segmentation-quantize/kits19
15
+ curl -O -C - https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_00030.zip
16
+ unzip case_00030.zip
17
+ mkdir kits19_frames
18
+ mv case_00030 kits19_frames
19
+ cp -r kits19_frames/case_00030 kits19_frames/case_00001
20
+
21
+ cd "/tmp"
22
+ # Patch notebooks to speed up test execution
23
+ python /tmp/scripts/patch_notebooks.py /opt/app-root/notebooks
24
+ # Move notebooks for validation
25
+ mv /opt/app-root/notebooks/ /tmp/
26
+
27
+ # Test notebooks
28
+ python /tmp/scripts/validate_notebooks.py --ignore_list /tmp/scripts/ignore_treon_docker.txt
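After the download steps above, the data directory looks like this; copying case_00030 to case_00001 gives the notebook the two cases it expects without a second download:

    ct-segmentation-quantize/kits19/
    └── kits19_frames/
        ├── case_00030/   # extracted from case_00030.zip
        └── case_00001/   # copy of case_00030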
.docker/.s2i/bin/test_precommit ADDED
@@ -0,0 +1,28 @@
+ #!/bin/bash
+
+ set -eo pipefail
+
+ # Test that the jupyterhub and ovmsclient imports work (required for the image to work in a Red Hat cluster)
+ python -c "from jupyterhub.singleuser.mixins import make_singleuser_app"
+ python -c "import ovmsclient"
+
+ # Required for PaddlePaddle
+ export HUB_HOME=/tmp
+
+ # Download a CT scan for the ct-segmentation-quantize training notebook
+ mkdir -p /opt/app-root/notebooks/ct-segmentation-quantize/kits19
+ cd /opt/app-root/notebooks/ct-segmentation-quantize/kits19
+ curl -O -C - https://storage.openvinotoolkit.org/data/test_data/openvino_notebooks/kits19/case_00030.zip
+ unzip case_00030.zip
+ mkdir kits19_frames
+ mv case_00030 kits19_frames
+ cp -r kits19_frames/case_00030 kits19_frames/case_00001
+
+ cd "/tmp"
+ # Patch notebooks to speed up test execution
+ python /tmp/scripts/patch_notebooks.py /opt/app-root/notebooks
+ # Move notebooks for validation
+ mv /opt/app-root/notebooks/ /tmp/
+
+ # Test notebooks
+ python /tmp/scripts/validate_notebooks.py --ignore_list /tmp/scripts/ignore_treon_docker.txt --test_list /tmp/scripts/test_notebooks.txt
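The only difference from the full test script is the --test_list flag, which restricts validation to an explicit subset of notebooks. A hypothetical test_notebooks.txt (the real file ships with the image's scripts) would name one notebook per line, for example:

    hello-world
    ct-segmentation-quantize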
.docker/.thoth.yaml ADDED
@@ -0,0 +1,27 @@
+ host: khemenu.thoth-station.ninja
+ tls_verify: false
+ requirements_format: pipenv
+
+ runtime_environments:
+   - name: rhel:8
+     operating_system:
+       name: rhel
+       version: "8"
+     python_version: "3.8"
+     recommendation_type: latest
+
+ managers:
+   - name: update
+     configuration:
+       labels: [bot]
+   - name: info
+   - name: version
+     configuration:
+       maintainers:
+         - goern
+         - harshad16
+         - vpavlin
+       assignees:
+         - sesheta
+       labels: [bot]
+       changelog_file: true
.docker/Pipfile ADDED
@@ -0,0 +1,59 @@
+ [[source]]
+ url = "https://pypi.org/simple"
+ verify_ssl = true
+ name = "pypi"
+
+ [[source]]
+ url = "https://download.pytorch.org/whl/"
+ verify_ssl = true
+ name = "pytorch-wheels"
+
+ [packages]
+ boto3 = "*"
+ gdown = "*"
+ ipykernel = ">=5.*"
+ ipython = ">=7.16.3" # pin to avoid vulnerability
+ ipywidgets = "*"
+ jedi = ">=0.17.2"
+ jupyter-nbrequirements = "*"
+ jupyterhub = "==4.1.*"
+ jupyterlab = "*"
+ jupyterlab-git = "==0.30"
+ librosa = ">=0.8.1"
+ matplotlib = ">=3.4"
+ monai = ">=0.9.1,<1.0.0"
+ nbval = "*"
+ notebook = "<7.0.0"
+ nncf = "==2.10.0"
+ numpy = ">=1.21.0"
+ onnx = ">=1.11.0"
+ opencv-python = "*"
+ openvino-dev = {version = "==2024.1.0"}
+ openvino-telemetry = "==2023.2.1"
+ ovmsclient = "*"
+ Pillow = ">=8.3.2"
+ psutil = "*"
+ pyclipper = ">=1.2.1"
+ pygments = ">=2.7.4"
+ pytorch-lightning = "*"
+ pytube = ">=12.1.0"
+ rsa = ">=4.7"
+ scikit-image = ">=0.19.2"
+ scipy = "*"
+ seaborn = ">=0.11.0"
+ setuptools = ">56.0.0"
+ shapely = ">=1.7.1"
+ supervisor = ">=4.1.0"
+ tensorflow = ">=2.5,<=2.12"
+ tensorflow-datasets = "==4.2.0"
+ treon = "*"
+ torch = {index = "pytorch-wheels", version = "==2.1.0+cpu"}
+ torchaudio = {index = "pytorch-wheels", version = "==2.1.0+cpu"}
+ torchmetrics = ">=0.11.0"
+ torchvision = {index = "pytorch-wheels", version = "==0.16.0+cpu"}
+ transformers = ">=4.21.1"
+
+ [dev-packages]
+
+ [requires]
+ python_version = "3.8"
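To resolve this Pipfile locally, something like the following should work (a sketch; pipenv consults both declared sources, so the CPU-only torch wheels come from the pytorch-wheels index):

    cd .docker
    pip install pipenv
    pipenv --python 3.8 install
    # Sanity check that the pinned CPU build of torch was selected:
    pipenv run python -c "import torch; print(torch.__version__)"   # expect 2.1.0+cpu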
.docker/Pipfile.lock ADDED
The diff for this file is too large to render. See raw diff
 
.docker/buildconfig.yaml ADDED
@@ -0,0 +1,41 @@
+ ---
+ apiVersion: v1
+ kind: ImageStream
+ metadata:
+   name: s2i-openvino-notebook
+   labels:
+     application: s2i-openvino-notebook
+     opendatahub.io/notebook-image: "true"
+   annotations:
+     opendatahub.io/notebook-image-url: "quay.io/modh/s2i-openvino-notebook"
+     opendatahub.io/notebook-image-name: "Notebook Image with OpenVINO components"
+     opendatahub.io/notebook-image-desc: "Jupyter notebook image with a minimal dependency set to start experimenting with OpenVINO in a Jupyter environment."
+ ---
+ kind: BuildConfig
+ apiVersion: build.openshift.io/v1
+ metadata:
+   labels:
+     build: s2i-openvino-notebook
+   name: s2i-openvino-notebook
+ spec:
+   output:
+     to:
+       kind: ImageStreamTag
+       name: s2i-openvino-notebook:local-build
+   source:
+     git:
+       uri: https://github.com/mmgaggle/s2i-openvino-notebook
+       ref: wip-python38
+     type: Git
+   strategy:
+     dockerStrategy:
+       from:
+         kind: DockerImage
+         name: 'quay.io/thoth-station/s2i-thoth-ubi8-py38:v0.26.0'
+       noCache: true
+       dockerfilePath: Dockerfile
+       env:
+         - name: PIPFILE
+           value: pipfiles/openvino
+     type: Docker
+   triggers:
+     - type: ConfigChange
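Loading this configuration into a cluster is then a matter of (a sketch, assuming you are logged in to an OpenShift project with oc):

    oc apply -f .docker/buildconfig.yaml
    oc start-build s2i-openvino-notebook --follow
    # The built image lands in the ImageStream declared above:
    oc get imagestreamtag s2i-openvino-notebook:local-build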
.docker/builder/assemble ADDED
@@ -0,0 +1,40 @@
+ #!/bin/bash
+
+ set -x
+
+ set -eo pipefail
+
+ APP_ROOT=${APP_ROOT:-/opt/app-root}
+
+ # Run the original Python S2I assemble script to install packages.
+
+ /usr/libexec/s2i/assemble
+
+ # Enable required extensions.
+
+ if [ -f "$APP_ROOT/src/extensions.in" ]; then
+     while IFS= read -r extension; do
+         jupyter nbextension enable --sys-prefix $extension
+     done < "$APP_ROOT/src/extensions.in"
+ fi
+
+ # Apply custom notebook configuration.
+
+ if [ -d "$APP_ROOT/src/.jupyter/" ]; then
+     rsync \
+         --link-dest="$APP_ROOT/src/.jupyter/" \
+         --recursive \
+         --verbose \
+         "$APP_ROOT/src/.jupyter/" ${APP_ROOT}/etc/jupyter
+ fi
+
+ # Move files from the application source directory to the master files
+ # directory if one has been specified. This facilitates later copying
+ # of files into a persistent volume on startup of an instance.
+
+ if [ x"$JUPYTER_MASTER_FILES" != x"" ]; then
+     mkdir -p $JUPYTER_MASTER_FILES
+     shopt -s dotglob
+     mv $APP_ROOT/src/* $JUPYTER_MASTER_FILES
+ fi
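The extensions.in file consumed by the loop above is optional and, when shipped with the application source, simply names one nbextension per line. A hypothetical example (the publish extension is the one installed by the image's own assemble script):

    # $APP_ROOT/src/extensions.in (hypothetical contents)
    publish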
.docker/builder/image_metadata.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "labels": [
+     {"io.k8s.display-name": "Jupyter Notebook"},
+     {"io.k8s.description": "Minimal Thoth S2I Jupyter Notebook."},
+     {"io.openshift.s2i.scripts-url": "image:///opt/app-root/builder"},
+     {"io.s2i.scripts-url": "image:///opt/app-root/builder"},
+     {"io.openshift.tags": "aicoe,builder,minimal,notebook,jupyter,python"}
+   ]
+ }
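The scripts-url labels point S2I at /opt/app-root/builder inside the image, so the customised assemble/run scripts above are used for downstream builds. A hedged usage sketch (source repo and output tag are hypothetical):

    s2i build https://github.com/example/my-notebooks quay.io/modh/s2i-openvino-notebook my-notebooks-image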
.docker/builder/run ADDED
@@ -0,0 +1,73 @@
+ #!/bin/bash
+
+ set -eo pipefail
+
+ set -x
+
+ # Generate the htdigest password file for webdav access if it doesn't exist.
+
+ JUPYTER_ENABLE_WEBDAV=${JUPYTER_ENABLE_WEBDAV:-false}
+ export HUB_HOME=/tmp
+
+ export JUPYTER_ENABLE_WEBDAV
+
+ if [ -f /var/run/secrets/kubernetes.io/serviceaccount/namespace ]; then
+     DEPLOYMENT=`echo $HOSTNAME | sed -e 's/^\(.*\)-[^-]*-[^-]*$/\1/'`
+     NAMESPACE=`cat /var/run/secrets/kubernetes.io/serviceaccount/namespace`
+     WEBDAV_REALM=$NAMESPACE/$DEPLOYMENT
+ else
+     WEBDAV_REALM=jupyter-on-openshift/jupyter-notebooks
+ fi
+
+ WEBDAV_USERFILE=/opt/app-root/etc/webdav.htdigest
+
+ export WEBDAV_REALM
+ export WEBDAV_USERFILE
+
+ if [ ! -f $WEBDAV_USERFILE ]; then
+     touch $WEBDAV_USERFILE
+     if [[ ! -z "${JUPYTER_NOTEBOOK_PASSWORD}" ]]; then
+         DIGEST="$( printf "%s:%s:%s" "jupyter" "$WEBDAV_REALM" "$JUPYTER_NOTEBOOK_PASSWORD" | md5sum | awk '{print $1}' )"
+         printf "%s:%s:%s\n" "jupyter" "$WEBDAV_REALM" "$DIGEST" >> $WEBDAV_USERFILE
+     fi
+ fi
+
+ # Pre-clone repositories defined in JUPYTER_PRELOAD_REPOS.
+ if [ -n "${JUPYTER_PRELOAD_REPOS}" ]; then
+     for repo in `echo ${JUPYTER_PRELOAD_REPOS} | tr ',' ' '`; do
+         # Check for the presence of "@branch" in the repo string.
+         REPO_BRANCH=$(echo ${repo} | cut -s -d'@' -f2)
+         if [[ -n ${REPO_BRANCH} ]]; then
+             # Remove the branch from the repo string and convert REPO_BRANCH to a git clone arg.
+             repo=$(echo ${repo} | cut -d'@' -f1)
+             REPO_BRANCH="-b ${REPO_BRANCH}"
+         fi
+         echo "Checking if repository $repo exists locally"
+         REPO_DIR=$(basename ${repo})
+         if ! [ -d "${REPO_DIR}" ]; then
+             GIT_SSL_NO_VERIFY=true git clone ${repo} ${REPO_DIR} ${REPO_BRANCH}
+         fi
+     done
+ fi
+
+ # Copy notebooks onto the user volume if they don't exist.
+ if [ -d /opt/app-root/notebooks ] && ! [ -d /opt/app-root/src/openvino_notebooks ]; then
+     cp -r /opt/app-root/notebooks /opt/app-root/src/openvino_notebooks
+ fi
+
+ # Start the Jupyter notebook instance. Run it under supervisord if that is
+ # enabled explicitly or if it is required for webdav access.
+
+ if [ x"$JUPYTER_ENABLE_WEBDAV" == x"true" ]; then
+     JUPYTER_ENABLE_SUPERVISORD=true
+ fi
+
+ if [[ ! -z "${JUPYTER_ENABLE_SUPERVISORD}" ]]; then
+     # Start supervisord against the configuration and keep it in the
+     # foreground so it becomes process ID 1 for the container.
+
+     exec /opt/app-root/bin/supervisord --nodaemon \
+         --configuration /opt/app-root/etc/supervisord.conf
+ else
+     . /opt/app-root/bin/start-notebook.sh "$@"
+ fi
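JUPYTER_PRELOAD_REPOS, parsed by the loop above, is a comma-separated list in which each entry may carry an optional @branch suffix. For example (values are illustrative):

    export JUPYTER_PRELOAD_REPOS="https://github.com/openvinotoolkit/openvino_notebooks@main,https://github.com/example/extra-material"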
.docker/builder/save-artifacts ADDED
@@ -0,0 +1,3 @@
+ #!/bin/bash
+
+ true
.docker/gateway/logger.js ADDED
@@ -0,0 +1,16 @@
+ const winston = require("winston");
+
+ const level = process.env.LOG_LEVEL || 'debug';
+
+ const logger = winston.createLogger({
+     transports: [
+         new winston.transports.Console({
+             level: level,
+             timestamp: function () {
+                 return (new Date()).toISOString();
+             }
+         })
+     ]
+ });
+
+ module.exports = logger;
.docker/gateway/package.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "name": "proxy",
+   "version": "1.0.0",
+   "description": "",
+   "scripts": {
+     "start": "node server.js"
+   },
+   "dependencies": {
+     "express": "4.19.2",
+     "http-proxy-middleware": "0.19.1",
+     "morgan": "1.9.1",
+     "winston": "3.2.0"
+   }
+ }
.docker/gateway/routes/webdav.js ADDED
@@ -0,0 +1,14 @@
+ var express = require('express');
+ var proxy = require('http-proxy-middleware');
+ var logger = require('../logger');
+
+ module.exports = function(app, prefix) {
+     var router = express.Router();
+
+     router.use(proxy(prefix, {
+         target: 'http://127.0.0.1:8081',
+         ws: true
+     }));
+
+     return router;
+ };
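The route factory above mounts a WebSocket-capable proxy at the given prefix, forwarding traffic to a backend on 127.0.0.1:8081. A hedged smoke test from inside the container, assuming the gateway listens on port 8080 and the route is mounted at /webdav (both assumptions; the credentials come from the htdigest file written by builder/run):

    curl -i --digest -u jupyter:$JUPYTER_NOTEBOOK_PASSWORD -X PROPFIND http://127.0.0.1:8080/webdav/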