dungmai committed on
Commit
8a6cf24
·
verified ·
1 Parent(s): 26bb70e

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitattributes +25 -0
  2. .github/workflows/update_space.yml +28 -0
  3. .gitignore +101 -0
  4. .gradio/certificate.pem +31 -0
  5. 0.19.4 +0 -0
  6. Comic_Generation.ipynb +3 -0
  7. LICENSE +201 -0
  8. README.md +158 -12
  9. app.py +750 -0
  10. cog.yaml +23 -0
  11. config/models.yaml +19 -0
  12. examples/Robert/images.jpeg +0 -0
  13. examples/lecun/yann-lecun2.png +0 -0
  14. examples/taylor/1-1.png +0 -0
  15. examples/twoperson/1.jpeg +0 -0
  16. examples/twoperson/2.png +0 -0
  17. fonts/Inkfree.ttf +0 -0
  18. fonts/ShadowsIntoLightTwo-Regular.ttf +0 -0
  19. fonts/iCielPequena-English.otf +0 -0
  20. gradio_app_sdxl_specific_id_low_vram.py +1346 -0
  21. images/logo.png +0 -0
  22. images/pad_images.png +0 -0
  23. myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER +1 -0
  24. myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst +28 -0
  25. myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA +93 -0
  26. myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD +14 -0
  27. myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL +5 -0
  28. myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt +1 -0
  29. myenv/Lib/site-packages/PIL/BdfFontFile.py +133 -0
  30. myenv/Lib/site-packages/PIL/BlpImagePlugin.py +488 -0
  31. myenv/Lib/site-packages/PIL/BmpImagePlugin.py +489 -0
  32. myenv/Lib/site-packages/PIL/BufrStubImagePlugin.py +76 -0
  33. myenv/Lib/site-packages/PIL/ContainerIO.py +121 -0
  34. myenv/Lib/site-packages/PIL/CurImagePlugin.py +75 -0
  35. myenv/Lib/site-packages/PIL/DcxImagePlugin.py +80 -0
  36. myenv/Lib/site-packages/PIL/DdsImagePlugin.py +575 -0
  37. myenv/Lib/site-packages/PIL/EpsImagePlugin.py +478 -0
  38. myenv/Lib/site-packages/PIL/ExifTags.py +381 -0
  39. myenv/Lib/site-packages/PIL/FitsImagePlugin.py +152 -0
  40. myenv/Lib/site-packages/PIL/FliImagePlugin.py +174 -0
  41. myenv/Lib/site-packages/PIL/FontFile.py +134 -0
  42. myenv/Lib/site-packages/PIL/FpxImagePlugin.py +255 -0
  43. myenv/Lib/site-packages/PIL/FtexImagePlugin.py +115 -0
  44. myenv/Lib/site-packages/PIL/GbrImagePlugin.py +103 -0
  45. myenv/Lib/site-packages/PIL/GdImageFile.py +102 -0
  46. myenv/Lib/site-packages/PIL/GifImagePlugin.py +1159 -0
  47. myenv/Lib/site-packages/PIL/GimpGradientFile.py +149 -0
  48. myenv/Lib/site-packages/PIL/GimpPaletteFile.py +58 -0
  49. myenv/Lib/site-packages/PIL/GribStubImagePlugin.py +76 -0
  50. myenv/Lib/site-packages/PIL/Hdf5StubImagePlugin.py +76 -0
.gitattributes CHANGED
@@ -33,3 +33,28 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Comic_Generation.ipynb filter=lfs diff=lfs merge=lfs -text
37
+ myenv/Lib/site-packages/gradio/_frontend_code/lite/dist/assets/Canvas3D-JkRyAMo4.js.map filter=lfs diff=lfs merge=lfs -text
38
+ myenv/Lib/site-packages/gradio/frpc_windows_amd64_v0.2 filter=lfs diff=lfs merge=lfs -text
39
+ myenv/Lib/site-packages/gradio/frpc_windows_amd64_v0.3 filter=lfs diff=lfs merge=lfs -text
40
+ myenv/Lib/site-packages/gradio/templates/frontend/assets/Canvas3D-Y-FPpH_E.js.map filter=lfs diff=lfs merge=lfs -text
41
+ myenv/Lib/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Canvas3D.rJxgLkEw.js.br filter=lfs diff=lfs merge=lfs -text
42
+ myenv/Lib/site-packages/gradio/templates/node/build/server/chunks/Canvas3D-qDqWI6YT.js.map filter=lfs diff=lfs merge=lfs -text
43
+ myenv/Lib/site-packages/gradio/templates/node/build/server/chunks/PlotlyPlot-P3y0I-J2.js.map filter=lfs diff=lfs merge=lfs -text
44
+ myenv/Lib/site-packages/numpy.libs/libopenblas64__v0.3.23-293-gc2f4bdbb-gcc_10_3_0-2bde3a66a51006b2b53eb373ff767a3f.dll filter=lfs diff=lfs merge=lfs -text
45
+ myenv/Lib/site-packages/torch/bin/fbgemm.dll filter=lfs diff=lfs merge=lfs -text
46
+ myenv/Lib/site-packages/torch/bin/protoc.exe filter=lfs diff=lfs merge=lfs -text
47
+ myenv/Lib/site-packages/torch/lib/dnnl.lib filter=lfs diff=lfs merge=lfs -text
48
+ myenv/Lib/site-packages/torch/lib/fbgemm.dll filter=lfs diff=lfs merge=lfs -text
49
+ myenv/Lib/site-packages/torch/lib/fbgemm.lib filter=lfs diff=lfs merge=lfs -text
50
+ myenv/Lib/site-packages/torch/lib/kineto.lib filter=lfs diff=lfs merge=lfs -text
51
+ myenv/Lib/site-packages/torch/lib/libiomp5md.dll filter=lfs diff=lfs merge=lfs -text
52
+ myenv/Lib/site-packages/torch/lib/libprotobuf-lite.lib filter=lfs diff=lfs merge=lfs -text
53
+ myenv/Lib/site-packages/torch/lib/libprotobuf.lib filter=lfs diff=lfs merge=lfs -text
54
+ myenv/Lib/site-packages/torch/lib/libprotoc.lib filter=lfs diff=lfs merge=lfs -text
55
+ myenv/Lib/site-packages/torch/lib/torch_cpu.dll filter=lfs diff=lfs merge=lfs -text
56
+ myenv/Lib/site-packages/torch/lib/torch_cpu.lib filter=lfs diff=lfs merge=lfs -text
57
+ myenv/Lib/site-packages/torch/lib/torch_python.dll filter=lfs diff=lfs merge=lfs -text
58
+ myenv/Lib/site-packages/torch/lib/XNNPACK.lib filter=lfs diff=lfs merge=lfs -text
59
+ myenv/Scripts/ruff.exe filter=lfs diff=lfs merge=lfs -text
60
+ results_examples/image1.png filter=lfs diff=lfs merge=lfs -text
.github/workflows/update_space.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Run Python script
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - Mytest
7
+
8
+ jobs:
9
+ build:
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - name: Checkout
14
+ uses: actions/checkout@v2
15
+
16
+ - name: Set up Python
17
+ uses: actions/setup-python@v2
18
+ with:
19
+ python-version: '3.9'
20
+
21
+ - name: Install Gradio
22
+ run: python -m pip install gradio
23
+
24
+ - name: Log in to Hugging Face
25
+ run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
26
+
27
+ - name: Deploy to Spaces
28
+ run: gradio deploy
.gitignore ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compile / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Jupyter Notebook
55
+ .ipynb_checkpoints
56
+
57
+ # pyenv
58
+ .python-version
59
+
60
+ # celery beat schedule file
61
+ celerybeat-schedule
62
+
63
+ # SageMath parsed files
64
+ *.sage.py
65
+
66
+ # Environments
67
+ .env
68
+ .venv
69
+ env/
70
+ venv/
71
+ ENV/
72
+ env.bak/
73
+ venv.bak/
74
+
75
+ # Spyder project settings
76
+ .spyderproject
77
+ .spyproject
78
+
79
+ # Rope project settings
80
+ .ropeproject
81
+
82
+ # mkdocs documentation
83
+ /site
84
+
85
+ # mypy
86
+ .mypy_cache/
87
+ .aider*
88
+
89
+ # ignore ALL .log files
90
+ *.log
91
+ # ignore ALL files in ANY directory named temp
92
+ .ipynb_checkpoints/
93
+ images/.ipynb_checkpoints/
94
+ results/
95
+ .vscode/settings.json
96
+ fonts/agency.ttf
97
+ fonts/calibri.ttf
98
+ data/
99
+
100
+ # myenv/
101
+ models/
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
0.19.4 ADDED
File without changes
Comic_Generation.ipynb ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6376c7f98b4d0419d9310a3debfaa76b403c6f5a994f8f0de687a65adc6564c
3
+ size 19371692
LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,12 +1,158 @@
1
- ---
2
- title: StoryDiffusion
3
- emoji: 🐠
4
- colorFrom: blue
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 5.9.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: StoryDiffusion
3
+ app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 3.41.2
6
+ ---
7
+ <p align="center">
8
+ <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/f79da6b7-0b3b-4dd7-8dd0-ba0b15306fe6" height=100>
9
+ </p>
10
+
11
+ <div align="center">
12
+
13
+ ## StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation [![Paper page](https://huggingface.co/datasets/huggingface/badges/resolve/main/paper-page-md-dark.svg)]()
14
+
15
+ [[Paper](https://arxiv.org/abs/2405.01434)] &emsp; [[Project Page](https://storydiffusion.github.io/)] &emsp; [[Jittor Version](https://github.com/JittorCV/jittordiffusion/tree/master)]&emsp; [[🤗 Comic Generation Demo ](https://huggingface.co/spaces/YupengZhou/StoryDiffusion)] [![Replicate](https://replicate.com/cjwbw/StoryDiffusion/badge)](https://replicate.com/cjwbw/StoryDiffusion) [![Run Comics Demo in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/HVision-NKU/StoryDiffusion/blob/main/Comic_Generation.ipynb) <br>
16
+ </div>
17
+
18
+
19
+ ---
20
+
21
+ Official implementation of **[StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation]()**.
22
+
23
+ ### **Demo Video**
24
+
25
+ https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/d5b80f8f-09b0-48cd-8b10-daff46d422af
26
+
27
+
28
+ ### Update History
29
+
30
+ ***You can visit [here](update.md) to see the update history.***
31
+
32
+ ### 🌠 **Key Features:**
33
+ StoryDiffusion can create a magic story by generating consistent images and videos. Our work mainly has two parts:
34
+ 1. Consistent self-attention for character-consistent image generation over long-range sequences. It is hot-pluggable and compatible with all SD1.5 and SDXL-based image diffusion models. For the current implementation, the user needs to provide at least 3 text prompts for the consistent self-attention module. We recommend at least 5 - 6 text prompts for better layout arrangement.
35
+ 2. Motion predictor for long-range video generation, which predicts motion between Condition Images in a compressed image semantic space, achieving larger motion prediction.
36
+
37
+
38
+
39
+ ## 🔥 **Examples**
40
+
41
+
42
+ ### Comics generation
43
+
44
+
45
+ ![1](https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/b3771cbc-b6ca-4e26-bdc5-d944daf9f266)
46
+
47
+
48
+
49
+ ### Image-to-Video generation (Results are HIGHLY compressed for speed)
50
+ Leveraging the images produced through our Consistent Self-Attention mechanism, we can extend the process to create videos by seamlessly transitioning between these images. This can be considered as a two-stage long video generation approach.
51
+
52
+ Note: results are **highly compressed** for speed, you can visit [our website](https://storydiffusion.github.io/) for the high-quality version.
53
+ #### Two-stage Long Videos Generation (New Update)
54
+ Combining the two parts, we can generate very long and high-quality AIGC videos.
55
+ | Video1 | Video2 | Video3 |
56
+ | --- | --- | --- |
57
+ | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/4e7e0f24-5f90-419b-9a1e-cdf36d361b26" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/f509343d-d691-4e2a-b615-7d96381ef7c1" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/4f0f7abb-4ae4-47a6-b692-5bdd8d9c8006" width=224> |
58
+
59
+
60
+ #### Long Video Results using Condition Images
61
+ Our Image-to-Video model can generate a video by providing a sequence of user-input condition images.
62
+ | Video1 | Video2 | Video3 |
63
+ | --- | --- | --- |
64
+ | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/af6f5c50-c773-4ef2-a757-6d7a46393f39" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/d58e4037-d8df-4f90-8c81-ce4b6d2d868e" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/40da15ba-f5c1-48d8-84d6-8d327207d696" width=224> |
65
+
66
+ | Video4 | Video5 | Video6 |
67
+ | --- | --- | --- |
68
+ | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/8f04c9fc-3031-49e3-9de8-83d582b80a1f" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/604107fb-8afe-4052-bda4-362c646a756e" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/b05fa6a0-12e6-4111-abf8-18b8cd84f3ff" width=224> |
69
+
70
+
71
+
72
+
73
+ #### Short Videos
74
+
75
+ | Video1 | Video2 | Video3 |
76
+ | --- | --- | --- |
77
+ | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/5e7f717f-daad-46f6-b3ba-c087bd843158" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/79aa52b2-bf37-4c9c-8555-c7050aec0cdf" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/9fdfd091-10e6-434e-9ce7-6d6e6d8f4b22" width=224> |
78
+
79
+
80
+
81
+ | Video4 | Video5 | Video6 |
82
+ | --- | --- | --- |
83
+ | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/0b219b60-a998-4820-9657-6abe1747cb6b" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/d387aef0-ffc8-41b0-914f-4b0392d9f8c5" width=224> | <img src="https://github.com/HVision-NKU/StoryDiffusion/assets/49511209/3c64958a-1079-4ca0-a9cf-e0486adbc57f" width=224> |
84
+
85
+
86
+
87
+
88
+ ## 🚩 **TODO/Updates**
89
+ - [x] Comic Results of StoryDiffusion.
90
+ - [x] Video Results of StoryDiffusion.
91
+ - [x] Source code of Comic Generation
92
+ - [x] Source code of gradio demo
93
+ - [ ] Source code of Video Generation Model
94
+ - [ ] Pretrained weight of Video Generation Model
95
+ ---
96
+
97
+ # 🔧 Dependencies and Installation
98
+
99
+ - Python >= 3.8 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
100
+ - [PyTorch >= 2.0.0](https://pytorch.org/)
101
+ ```bash
102
+ conda create --name storydiffusion python=3.10
103
+ conda activate storydiffusion
104
+ pip install -U pip
105
+
106
+ # Install requirements
107
+ pip install -r requirements.txt
108
+ ```
109
+ # How to use
110
+
111
+ Currently, we provide two ways for you to generate comics.
112
+
113
+ ## Use the jupyter notebook
114
+
115
+ You can open the `Comic_Generation.ipynb` and run the code.
116
+
117
+ ## Start a local gradio demo
118
+ Run the following command:
119
+
120
+
121
+ **(Recommend)** We provide a low GPU Memory cost version, it was tested on a machine with 24GB GPU-memory (Tesla A10) and 30GB RAM, and expected to work well with >20 GB GPU-memory.
122
+
123
+ ```bash
124
+ python gradio_app_sdxl_specific_id_low_vram.py
125
+ ```
126
+
127
+
128
+ ## Contact
129
+ If you have any questions, you are very welcome to email ypzhousdu@gmail.com and zhoudaquan21@gmail.com
130
+
131
+
132
+
133
+
134
+ # Disclaimer
135
+ This project strives to impact the domain of AI-driven image and video generation positively. Users are granted the freedom to create images and videos using this tool, but they are expected to comply with local laws and utilize it responsibly. The developers do not assume any responsibility for potential misuse by users.
136
+
137
+ # Related Resources
138
+ Following are some third-party implementations of StoryDiffusion.
139
+
140
+
141
+ ## API
142
+
143
+ - [runpod.io serverless worker](https://github.com/bes-dev/story-diffusion-runpod-serverless-worker) provided by [BeS](https://github.com/bes-dev).
144
+ - [Replicate worker](https://github.com/camenduru/StoryDiffusion-replicate) provided by [camenduru](https://github.com/camenduru).
145
+
146
+
147
+
148
+
149
+ # BibTeX
150
+ If you find StoryDiffusion useful for your research and applications, please cite using this BibTeX:
151
+
152
+ ```BibTeX
153
+ @article{zhou2024storydiffusion,
154
+ title={StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation},
155
+ author={Zhou, Yupeng and Zhou, Daquan and Cheng, Ming-Ming and Feng, Jiashi and Hou, Qibin},
156
+ journal={NeurIPS 2024},
157
+ year={2024}
158
+ }
app.py ADDED
@@ -0,0 +1,750 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from email.policy import default
2
+ import gradio as gr
3
+ import numpy as np
4
+ import spaces
5
+ import torch
6
+ import requests
7
+ import random
8
+ import os
9
+ import sys
10
+ import pickle
11
+ from PIL import Image
12
+ from tqdm.auto import tqdm
13
+ from datetime import datetime
14
+ from utils.gradio_utils import is_torch2_available
15
+ if is_torch2_available():
16
+ from utils.gradio_utils import \
17
+ AttnProcessor2_0 as AttnProcessor
18
+ # from utils.gradio_utils import SpatialAttnProcessor2_0
19
+ else:
20
+ from utils.gradio_utils import AttnProcessor
21
+
22
+ import diffusers
23
+ from diffusers import StableDiffusionXLPipeline
24
+ from utils import PhotoMakerStableDiffusionXLPipeline
25
+ from diffusers import DDIMScheduler
26
+ import torch.nn.functional as F
27
+ from utils.gradio_utils import cal_attn_mask_xl
28
+ import copy
29
+ import os
30
+ from huggingface_hub import hf_hub_download
31
+ from diffusers.utils import load_image
32
+ from utils.utils import get_comic
33
+ from utils.style_template import styles
34
+ image_encoder_path = "./data/models/ip_adapter/sdxl_models/image_encoder"
35
+ ip_ckpt = "./data/models/ip_adapter/sdxl_models/ip-adapter_sdxl_vit-h.bin"
36
+ os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
37
+ STYLE_NAMES = list(styles.keys())
38
+ DEFAULT_STYLE_NAME = "Japanese Anime"
39
+ global models_dict
40
+ use_va = True
41
+ models_dict = {
42
+ # "Juggernaut": "RunDiffusion/Juggernaut-XL-v8",
43
+ # "RealVision": "SG161222/RealVisXL_V4.0" ,
44
+ # "SDXL":"stabilityai/stable-diffusion-xl-base-1.0" ,
45
+ "Unstable": "stablediffusionapi/sdxl-unstable-diffusers-y"
46
+ }
47
+ photomaker_path = hf_hub_download(repo_id="TencentARC/PhotoMaker", filename="photomaker-v1.bin", repo_type="model")
48
+ MAX_SEED = np.iinfo(np.int32).max
49
def setup_seed(seed):
    """Seed every RNG the app touches (torch CPU/CUDA, numpy, stdlib random)
    and force deterministic cuDNN kernels, for reproducible generations."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed, random.seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
55
def set_text_unfinished():
    """Show the in-progress status banner while generation is running."""
    message = "<h3>(Not Finished) Generating ··· The intermediate results will be shown.</h3>"
    return gr.update(visible=True, value=message)
57
def set_text_finished():
    """Show the completed status banner once generation is done."""
    message = "<h3>Generation Finished</h3>"
    return gr.update(visible=True, value=message)
59
+ #################################################
60
def get_image_path_list(folder_name):
    """Return the full paths of every entry in *folder_name*, sorted alphabetically."""
    entries = os.listdir(folder_name)
    paths = [os.path.join(folder_name, entry) for entry in entries]
    paths.sort()
    return paths
64
+
65
+ #################################################
66
class SpatialAttnProcessor2_0(torch.nn.Module):
    r"""
    Paired (consistent) self-attention processor for PyTorch 2.0.

    In "write" mode it caches the hidden states of the identity frames per
    denoising step (``id_bank``); in "read" mode it concatenates those cached
    states into the key/value context so later frames attend to the identity
    frames, which is what keeps characters consistent across panels.

    Args:
        hidden_size (`int`, optional):
            The hidden size of the attention layer (stored, not used here).
        cross_attention_dim (`int`, optional):
            The number of channels in the `encoder_hidden_states` (stored, not used here).
        id_length (`int`, defaults to 4):
            Number of identity images whose features are shared.
        device (`str`, defaults to "cuda"):
            Device the cached identity features are moved to.
        dtype (`torch.dtype`, defaults to `torch.float16`):
            Dtype used when (re)computing the attention masks.
    """

    def __init__(self, hidden_size = None, cross_attention_dim=None,id_length = 4,device = "cuda",dtype = torch.float16):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
        self.device = device
        self.dtype = dtype
        self.hidden_size = hidden_size
        self.cross_attention_dim = cross_attention_dim
        # total_length = identity frames + 1 current frame.
        self.total_length = id_length + 1
        self.id_length = id_length
        # Per-denoising-step cache of identity hidden states (write mode fills it).
        self.id_bank = {}

    def __call__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None):
        # un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
        # un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
        # Module-level state shared with process_generation(): step/layer counters,
        # precomputed attention masks, paired-attention strengths and image size.
        global total_count,attn_count,cur_step,mask1024,mask4096
        global sa32, sa64
        global write
        global height,width
        if write:
            # Write mode: cache the identity-frame states for this denoising step.
            # NOTE(review): splits along the batch dim at id_length — presumably
            # [uncond-ids | cond-ids] halves; confirm against the pipeline's batching.
            self.id_bank[cur_step] = [hidden_states[:self.id_length], hidden_states[self.id_length:]]
        else:
            # Read mode: prepend the cached identity states to the current frame's
            # states so keys/values contain the identity context.
            encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device),hidden_states[:1],self.id_bank[cur_step][1].to(self.device),hidden_states[1:]))
        # Early steps always use plain attention; later steps use paired attention
        # with a probability that decays over the schedule (0.7 then 0.9).
        if cur_step <5:
            hidden_states = self.__call2__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)
        else: # 256 1024 4096
            # Draw a random number in [0, 1) to decide between paired / plain attention.
            random_number = random.random()
            if cur_step <20:
                rand_num = 0.3
            else:
                rand_num = 0.1
            if random_number > rand_num:
                # Pick the mask matching this layer's token count (32x32 vs 64x64 grid).
                if not write:
                    # Read mode: only the rows for the current frame(s).
                    if hidden_states.shape[1] == (height//32) * (width//32):
                        attention_mask = mask1024[mask1024.shape[0] // self.total_length * self.id_length:]
                    else:
                        attention_mask = mask4096[mask4096.shape[0] // self.total_length * self.id_length:]
                else:
                    # Write mode: the square sub-mask covering the identity frames.
                    if hidden_states.shape[1] == (height//32) * (width//32):
                        attention_mask = mask1024[:mask1024.shape[0] // self.total_length * self.id_length,:mask1024.shape[0] // self.total_length * self.id_length]
                    else:
                        attention_mask = mask4096[:mask4096.shape[0] // self.total_length * self.id_length,:mask4096.shape[0] // self.total_length * self.id_length]
                hidden_states = self.__call1__(attn, hidden_states,encoder_hidden_states,attention_mask,temb)
            else:
                # Fall back to plain self-attention (no cross-frame context).
                hidden_states = self.__call2__(attn, hidden_states,None,attention_mask,temb)
        # After all registered layers ran once, advance the step counter and
        # recompute the random attention masks for the next step.
        attn_count +=1
        if attn_count == total_count:
            attn_count = 0
            cur_step += 1
            mask1024,mask4096 = cal_attn_mask_xl(self.total_length,self.id_length,sa32,sa64,height,width, device=self.device, dtype= self.dtype)

        return hidden_states

    def __call1__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
    ):
        """Paired attention: flattens all frames of each guidance half into one
        long token sequence so every frame can attend to every other frame."""
        residual = hidden_states
        # if encoder_hidden_states is not None:
        #     raise Exception("not implement")
        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)
        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            # NOTE: height/width here are locals shadowing the module globals.
            total_batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(total_batch_size, channel, height * width).transpose(1, 2)
        total_batch_size,nums_token,channel = hidden_states.shape
        # Assumes the batch is doubled for classifier-free guidance — TODO confirm.
        img_nums = total_batch_size//2
        # Merge all frames of one guidance half into a single token sequence.
        hidden_states = hidden_states.view(-1,img_nums,nums_token,channel).reshape(-1,img_nums * nums_token,channel)

        batch_size, sequence_length, _ = hidden_states.shape

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states # B, N, C
        else:
            # Identity context: id_length cached frames plus the current frame.
            encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,nums_token,channel).reshape(-1,(self.id_length+1) * nums_token,channel)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(total_batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        # if input_ndim == 4:
        #     tile_hidden_states = tile_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        # if attn.residual_connection:
        #     tile_hidden_states = tile_hidden_states + residual

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(total_batch_size, channel, height, width)
        if attn.residual_connection:
            hidden_states = hidden_states + residual
        hidden_states = hidden_states / attn.rescale_output_factor
        return hidden_states

    def __call2__(
        self,
        attn,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None):
        """Plain scaled-dot-product attention (the stock AttnProcessor2_0 path)."""
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, channel = (
            hidden_states.shape
        )
        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states # B, N, C
        else:
            # Same identity-context reshape as in __call1__.
            encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,sequence_length,channel).reshape(-1,(self.id_length+1) * sequence_length,channel)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states
295
+
296
def set_attention_processor(unet,id_length,is_ipadapter = False):
    """Install the paired self-attention processors on *unet*.

    Every self-attention (``attn1``) layer in the up-blocks gets a
    SpatialAttnProcessor2_0 (the consistent self-attention that shares
    identity features across frames); all other layers keep the plain
    AttnProcessor. Also resets the module-level ``total_count`` so the
    processors know how many layers make up one denoising step.

    Args:
        unet: diffusers UNet whose ``attn_processors`` are replaced.
        id_length: number of identity images shared by the paired attention.
        is_ipadapter: when True, cross-attention layers get an IP-Adapter
            processor instead of the default one.
    """
    global total_count
    total_count = 0
    attn_procs = {}
    for name in unet.attn_processors.keys():
        # attn1 is self-attention (no text conditioning), attn2 is cross-attention.
        cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
        # Resolve this layer's hidden size from its position in the UNet.
        # Initialized to None so an unexpected layer name cannot leave it unbound.
        hidden_size = None
        if name.startswith("mid_block"):
            hidden_size = unet.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = unet.config.block_out_channels[block_id]
        if cross_attention_dim is None:
            # Self-attention: only up-block layers take part in paired attention.
            if name.startswith("up_blocks") :
                attn_procs[name] = SpatialAttnProcessor2_0(id_length = id_length)
                total_count +=1
            else:
                attn_procs[name] = AttnProcessor()
        else:
            if is_ipadapter:
                # NOTE(review): IPAttnProcessor2_0 is never imported in this file,
                # so this branch would raise NameError if is_ipadapter were True.
                # All call sites currently pass is_ipadapter=False.
                attn_procs[name] = IPAttnProcessor2_0(
                    hidden_size=hidden_size,
                    cross_attention_dim=cross_attention_dim,
                    scale=1,
                    num_tokens=4,
                ).to(unet.device, dtype=torch.float16)
            else:
                attn_procs[name] = AttnProcessor()

    unet.set_attn_processor(copy.deepcopy(attn_procs))
    print("successfully load paired self-attention")
    print(f"number of the processor : {total_count}")
330
+ #################################################
331
+ #################################################
332
+ canvas_html = "<div id='canvas-root' style='max-width:400px; margin: 0 auto'></div>"
333
+ load_js = """
334
+ async () => {
335
+ const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/sketch-canvas.js"
336
+ fetch(url)
337
+ .then(res => res.text())
338
+ .then(text => {
339
+ const script = document.createElement('script');
340
+ script.type = "module"
341
+ script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
342
+ document.head.appendChild(script);
343
+ });
344
+ }
345
+ """
346
+
347
+ get_js_colors = """
348
+ async (canvasData) => {
349
+ const canvasEl = document.getElementById("canvas-root");
350
+ return [canvasEl._data]
351
+ }
352
+ """
353
+
354
+ css = '''
355
+ #color-bg{display:flex;justify-content: center;align-items: center;}
356
+ .color-bg-item{width: 100%; height: 32px}
357
+ #main_button{width:100%}
358
+ <style>
359
+ '''
360
+
361
+
362
+ #################################################
363
+ title = r"""
364
+ <h1 align="center">StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation</h1>
365
+ """
366
+
367
+ description = r"""
368
+ <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/HVision-NKU/StoryDiffusion' target='_blank'><b>StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation</b></a>.<br>
369
+ ❗️❗️❗️[<b>Important</b>] Personalization steps:<br>
370
+ 1️⃣ Enter a Textual Description for Character, if you add the Ref-Image, making sure to <b>follow the class word</b> you want to customize with the <b>trigger word</b>: `img`, such as: `man img` or `woman img` or `girl img`.<br>
371
+ 2️⃣ Enter the prompt array, each line corrsponds to one generated image.<br>
372
+ 3️⃣ Choose your preferred style template.<br>
373
+ 4️⃣ Click the <b>Submit</b> button to start customizing.
374
+ """
375
+
376
+ article = r"""
377
+
378
+ If StoryDiffusion is helpful, please help to ⭐ the <a href='https://github.com/HVision-NKU/StoryDiffusion' target='_blank'>Github Repo</a>. Thanks!
379
+ [![GitHub Stars](https://img.shields.io/github/stars/HVision-NKU/StoryDiffusion?style=social)](https://github.com/HVision-NKU/StoryDiffusion)
380
+ ---
381
+ 📝 **Citation**
382
+ <br>
383
+ If our work is useful for your research, please consider citing:
384
+
385
+ ```bibtex
386
+ @article{Zhou2024storydiffusion,
387
+ title={StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation},
388
+ author={Zhou, Yupeng and Zhou, Daquan and Cheng, Ming-Ming and Feng, Jiashi and Hou, Qibin},
389
+ year={2024}
390
+ }
391
+ ```
392
+ 📋 **License**
393
+ <br>
394
+ The Contents you create are under Apache-2.0 LICENSE. The Code are under Attribution-NonCommercial 4.0 International.
395
+
396
+ 📧 **Contact**
397
+ <br>
398
+ If you have any questions, please feel free to reach me out at <b>ypzhousdu@gmail.com</b>.
399
+ """
400
+ version = r"""
401
+ <h3 align="center">StoryDiffusion Version 0.01 (test version)</h3>
402
+
403
+ <h5 >1. Support image ref image. (Cartoon Ref image is not support now)</h5>
404
+ <h5 >2. Support Typesetting Style and Captioning.(By default, the prompt is used as the caption for each image. If you need to change the caption, add a # at the end of each line. Only the part after the # will be added as a caption to the image.)</h5>
405
+ <h5 >3. [NC]symbol (The [NC] symbol is used as a flag to indicate that no characters should be present in the generated scene images. If you want do that, prepend the "[NC]" at the beginning of the line. For example, to generate a scene of falling leaves without any character, write: "[NC] The leaves are falling."),Currently, support is only using Textual Description</h5>
406
+ <h5 align="center">Tips: Not Ready Now! Just Test</h5>
407
+ """
408
+ #################################################
409
+ global attn_count, total_count, id_length, total_length,cur_step, cur_model_type
410
+ global write
411
+ global sa32, sa64
412
+ global height,width
413
+ attn_count = 0
414
+ total_count = 0
415
+ cur_step = 0
416
+ id_length = 4
417
+ total_length = 5
418
+ cur_model_type = ""
419
+ device="cuda"
420
+ global attn_procs,unet
421
+ attn_procs = {}
422
+ ###
423
+ write = False
424
+ ###
425
+ sa32 = 0.5
426
+ sa64 = 0.5
427
+ height = 768
428
+ width = 768
429
+ ###
430
+ global sd_model_path
431
+ sd_model_path = models_dict["Unstable"]#"SG161222/RealVisXL_V4.0"
432
+ use_safetensors= False
433
+ ### LOAD Stable Diffusion Pipeline
434
+ pipe1 = StableDiffusionXLPipeline.from_pretrained(sd_model_path, torch_dtype=torch.float16, use_safetensors= use_safetensors)
435
+ pipe1 = pipe1.to("cuda")
436
+ pipe1.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
437
+ # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
438
+ pipe1.scheduler.set_timesteps(50)
439
+ ###
440
+ pipe2 = PhotoMakerStableDiffusionXLPipeline.from_pretrained(
441
+ sd_model_path, torch_dtype=torch.float16, use_safetensors=use_safetensors)
442
+ pipe2 = pipe2.to("cuda")
443
+ pipe2.load_photomaker_adapter(
444
+ os.path.dirname(photomaker_path),
445
+ subfolder="",
446
+ weight_name=os.path.basename(photomaker_path),
447
+ trigger_word="img" # define the trigger word
448
+ )
449
+ pipe2 = pipe2.to("cuda")
450
+ pipe2.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
451
+ pipe2.fuse_lora()
452
+
453
+ ######### Gradio Fuction #############
454
+
455
def swap_to_gallery(images):
    """Show *images* in the gallery, reveal the clear button, hide the uploader."""
    gallery_update = gr.update(value=images, visible=True)
    clear_btn_update = gr.update(visible=True)
    uploader_update = gr.update(visible=False)
    return gallery_update, clear_btn_update, uploader_update
457
+
458
def upload_example_to_gallery(images, prompt, style, negative_prompt):
    """Populate the gallery from an example row.

    Only *images* is used; prompt/style/negative_prompt are accepted to match
    the example-row signature Gradio passes in.
    """
    gallery_update = gr.update(value=images, visible=True)
    clear_btn_update = gr.update(visible=True)
    uploader_update = gr.update(visible=False)
    return gallery_update, clear_btn_update, uploader_update
460
+
461
def remove_back_to_files():
    """Hide the gallery and clear button; show the file uploader again."""
    hidden = gr.update(visible=False)
    shown = gr.update(visible=True)
    return hidden, gr.update(visible=False), shown
463
+
464
def remove_tips():
    """Hide the generation-details markdown panel."""
    update = gr.update(visible=False)
    return update
466
+
467
def apply_style_positive(style_name: str, positive: str) -> str:
    """Render *positive* through the chosen style's positive-prompt template.

    Falls back to the DEFAULT_STYLE_NAME template when *style_name* is unknown.
    The style's negative template is intentionally discarded here (callers use
    apply_style() when they need it), so it is unpacked to ``_``.
    """
    template, _ = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return template.replace("{prompt}", positive)
470
+
471
def apply_style(style_name: str, positives: list, negative: str = ""):
    """Apply the style template to every positive prompt and build the
    combined negative prompt (style negative + user negative).

    Falls back to the DEFAULT_STYLE_NAME template when *style_name* is unknown.
    Returns (styled_positives, combined_negative).
    """
    template, style_negative = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    styled = [template.replace("{prompt}", text) for text in positives]
    return styled, style_negative + ' ' + negative
474
+
475
def change_visiale_by_model_type(_model_type):
    """Toggle the ref-image widgets to match the selected character-control mode.

    Returns visibility updates for (image-upload group, style-strength slider,
    IP-Adapter-strength slider).
    """
    if _model_type == "Only Using Textual Description":
        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
    if _model_type == "Using Ref Images":
        return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
    raise ValueError("Invalid model type",_model_type)
482
+
483
+
484
+ ######### Image Generation ##############
485
@spaces.GPU
def process_generation(_sd_type,_model_type,_upload_images, _num_steps,style_name, _Ip_Adapter_Strength ,_style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt,prompt_array,G_height,G_width,_comic_type):
    """Generator driving the whole comic-generation pipeline.

    Stage 1 generates the first ``id_length_`` "identity" frames with the
    paired self-attention in write mode (caching identity features); stage 2
    generates the remaining frames in read mode; stage 3 optionally typesets
    all frames into a comic page. Yields the accumulated image list after
    each stage so Gradio can display intermediate results.

    Args mirror the Gradio widgets. ``_model_type`` selects between the plain
    SDXL pipeline ("original") and the PhotoMaker pipeline ("Photomaker");
    ``_Ip_Adapter_Strength`` is currently unused in this function.
    """
    # Map the UI label onto the internal pipeline name.
    _model_type = "Photomaker" if _model_type == "Using Ref Images" else "original"
    if _model_type == "Photomaker" and "img" not in general_prompt:
        raise gr.Error("Please add the triger word \" img \" behind the class word you want to customize, such as: man img or woman img")
    if _upload_images is None and _model_type != "original":
        raise gr.Error(f"Cannot find any input face image!")
    # This function drives the attention processors through module-level state.
    global sa32, sa64,id_length,total_length,attn_procs,unet,cur_model_type,device
    global write
    global cur_step,attn_count
    global height,width
    height = G_height
    width = G_width
    global pipe1,pipe2
    global sd_model_path,models_dict
    sd_model_path = models_dict[_sd_type]
    use_safe_tensor = True
    # Pick the preloaded pipeline and (re)install the paired attention processors.
    if _model_type == "original":
        pipe = pipe1
        set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
    elif _model_type == "Photomaker":
        pipe = pipe2
        set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
    else:
        raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
    ##### ########################
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
    cur_model_type = _sd_type+"-"+_model_type+""+str(id_length_)
    # PhotoMaker needs the uploaded reference face images.
    if _model_type != "original":
        input_id_images = []
        for img in _upload_images:
            print(img)
            input_id_images.append(load_image(img))
    prompts = prompt_array.splitlines()
    # PhotoMaker style-merge step derived from the style-strength percentage,
    # capped at 30 steps.
    start_merge_step = int(float(_style_strength_ratio) / 100 * _num_steps)
    if start_merge_step > 30:
        start_merge_step = 30
    print(f"start_merge_step:{start_merge_step}")
    generator = torch.Generator(device="cuda").manual_seed(seed_)
    sa32, sa64 = sa32_, sa64_
    id_length = id_length_
    clipped_prompts = prompts[:]
    # [NC] marks "no character" lines: they skip the character description.
    prompts = [general_prompt + "," + prompt if "[NC]" not in prompt else prompt.replace("[NC]","") for prompt in clipped_prompts]
    # Everything after '#' is only a caption, not part of the diffusion prompt.
    prompts = [prompt.rpartition('#')[0] if "#" in prompt else prompt for prompt in prompts]
    print(prompts)
    # First id_length prompts build the identity cache; the rest consume it.
    id_prompts = prompts[:id_length]
    real_prompts = prompts[id_length:]
    torch.cuda.empty_cache()
    write = True
    cur_step = 0

    attn_count = 0
    id_prompts, negative_prompt = apply_style(style_name, id_prompts, negative_prompt)
    setup_seed(seed_)
    total_results = []
    # Stage 1: generate identity frames (write mode fills id_bank).
    if _model_type == "original":
        id_images = pipe(id_prompts, num_inference_steps=_num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images
    elif _model_type == "Photomaker":
        id_images = pipe(id_prompts,input_id_images=input_id_images, num_inference_steps=_num_steps, guidance_scale=guidance_scale, start_merge_step = start_merge_step, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images
    else:
        raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
    total_results = id_images + total_results
    yield total_results
    # Stage 2: generate the remaining frames one by one (read mode).
    real_images = []
    write = False
    for real_prompt in real_prompts:
        setup_seed(seed_)
        cur_step = 0
        real_prompt = apply_style_positive(style_name, real_prompt)
        if _model_type == "original":
            real_images.append(pipe(real_prompt, num_inference_steps=_num_steps, guidance_scale=guidance_scale, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images[0])
        elif _model_type == "Photomaker":
            real_images.append(pipe(real_prompt, input_id_images=input_id_images, num_inference_steps=_num_steps, guidance_scale=guidance_scale, start_merge_step = start_merge_step, height = height, width = width,negative_prompt = negative_prompt,generator = generator).images[0])
        else:
            raise NotImplementedError("You should choice between original and Photomaker!",f"But you choice {_model_type}")
        # Newest image goes to the front of the gallery.
        total_results = [real_images[-1]] + total_results
        yield total_results
    # Stage 3: optional comic typesetting with per-frame captions.
    if _comic_type != "No typesetting (default)":
        captions= prompt_array.splitlines()
        captions = [caption.replace("[NC]","") for caption in captions]
        # The part after '#' (if any) is the caption shown on the panel.
        captions = [caption.split('#')[-1] if "#" in caption else caption for caption in captions]
        from PIL import ImageFont
        total_results = get_comic(id_images + real_images, _comic_type,captions= captions,font=ImageFont.truetype("./fonts/Inkfree.ttf", int(45))) + total_results
    # Reset the attention processors so the next run starts clean.
    set_attention_processor(pipe.unet,id_length_,is_ipadapter = False)
    yield total_results
571
+
572
+
573
+
574
def array2string(arr):
    """Join the prompt lines in *arr* into one newline-separated string.

    Replaces the original index-tracking concatenation loop with the
    idiomatic ``str.join``, which is linear-time and naturally yields ""
    for an empty list and no trailing newline for the last element —
    exactly the original behavior.
    """
    return "\n".join(arr)
583
+
584
+
585
+ #################################################
586
+ #################################################
587
+ ### define the interface
588
+ with gr.Blocks(css=css) as demo:
589
+ binary_matrixes = gr.State([])
590
+ color_layout = gr.State([])
591
+
592
+ # gr.Markdown(logo)
593
+ gr.Markdown(title)
594
+ gr.Markdown(description)
595
+
596
+ with gr.Row():
597
+ with gr.Group(elem_id="main-image"):
598
+ # button_run = gr.Button("generate id images ! 😺", elem_id="main_button", interactive=True)
599
+
600
+ prompts = []
601
+ colors = []
602
+ # with gr.Column(visible=False) as post_sketch:
603
+ # for n in range(MAX_COLORS):
604
+ # if n == 0 :
605
+ # with gr.Row(visible=False) as color_row[n]:
606
+ # colors.append(gr.Image(shape=(100, 100), label="background", type="pil", image_mode="RGB", width=100, height=100))
607
+ # prompts.append(gr.Textbox(label="Prompt for the background (white region)", value=""))
608
+ # else:
609
+ # with gr.Row(visible=False) as color_row[n]:
610
+ # colors.append(gr.Image(shape=(100, 100), label="segment "+str(n), type="pil", image_mode="RGB", width=100, height=100))
611
+ # prompts.append(gr.Textbox(label="Prompt for the segment "+str(n)))
612
+
613
+ # get_genprompt_run = gr.Button("(2) I've finished segment labeling ! 😺", elem_id="prompt_button", interactive=True)
614
+
615
+ with gr.Column(visible=True) as gen_prompt_vis:
616
+ sd_type = gr.Dropdown(choices=list(models_dict.keys()), value = "Unstable",label="sd_type", info="Select pretrained model")
617
+ model_type = gr.Radio(["Only Using Textual Description", "Using Ref Images"], label="model_type", value = "Only Using Textual Description", info="Control type of the Character")
618
+ with gr.Group(visible=False) as control_image_input:
619
+ files = gr.Files(
620
+ label="Drag (Select) 1 or more photos of your face",
621
+ file_types=["image"],
622
+ )
623
+ uploaded_files = gr.Gallery(label="Your images", visible=False, columns=5, rows=1, height=200)
624
+ with gr.Column(visible=False) as clear_button:
625
+ remove_and_reupload = gr.ClearButton(value="Remove and upload new ones", components=files, size="sm")
626
+ general_prompt = gr.Textbox(value='', label="(1) Textual Description for Character", interactive=True)
627
+ negative_prompt = gr.Textbox(value='', label="(2) Negative_prompt", interactive=True)
628
+ style = gr.Dropdown(label="Style template", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
629
+ prompt_array = gr.Textbox(lines = 3,value='', label="(3) Comic Description (each line corresponds to a frame).", interactive=True)
630
+ with gr.Accordion("(4) Tune the hyperparameters", open=True):
631
+ #sa16_ = gr.Slider(label=" (The degree of Paired Attention at 16 x 16 self-attention layers) ", minimum=0, maximum=1., value=0.3, step=0.1)
632
+ sa32_ = gr.Slider(label=" (The degree of Paired Attention at 32 x 32 self-attention layers) ", minimum=0, maximum=1., value=0.7, step=0.1)
633
+ sa64_ = gr.Slider(label=" (The degree of Paired Attention at 64 x 64 self-attention layers) ", minimum=0, maximum=1., value=0.7, step=0.1)
634
+ id_length_ = gr.Slider(label= "Number of id images in total images" , minimum=2, maximum=4, value=2, step=1)
635
+ # total_length_ = gr.Slider(label= "Number of total images", minimum=1, maximum=20, value=1, step=1)
636
+ seed_ = gr.Slider(label="Seed", minimum=-1, maximum=MAX_SEED, value=0, step=1)
637
+ num_steps = gr.Slider(
638
+ label="Number of sample steps",
639
+ minimum=20,
640
+ maximum=100,
641
+ step=1,
642
+ value=50,
643
+ )
644
+ G_height = gr.Slider(
645
+ label="height",
646
+ minimum=256,
647
+ maximum=1024,
648
+ step=32,
649
+ value=768,
650
+ )
651
+ G_width = gr.Slider(
652
+ label="width",
653
+ minimum=256,
654
+ maximum=1024,
655
+ step=32,
656
+ value=768,
657
+ )
658
+ comic_type = gr.Radio(["No typesetting (default)", "Four Pannel", "Classic Comic Style"], value = "Classic Comic Style", label="Typesetting Style", info="Select the typesetting style ")
659
+ guidance_scale = gr.Slider(
660
+ label="Guidance scale",
661
+ minimum=0.1,
662
+ maximum=10.0,
663
+ step=0.1,
664
+ value=5,
665
+ )
666
+ style_strength_ratio = gr.Slider(
667
+ label="Style strength of Ref Image (%)",
668
+ minimum=15,
669
+ maximum=50,
670
+ step=1,
671
+ value=20,
672
+ visible=False
673
+ )
674
+ Ip_Adapter_Strength = gr.Slider(
675
+ label="Ip_Adapter_Strength",
676
+ minimum=0,
677
+ maximum=1,
678
+ step=0.1,
679
+ value=0.5,
680
+ visible=False
681
+ )
682
+ final_run_btn = gr.Button("Generate ! 😺")
683
+
684
+
685
+ with gr.Column():
686
+ out_image = gr.Gallery(label="Result", columns=2, height='auto')
687
+ generated_information = gr.Markdown(label="Generation Details", value="",visible=False)
688
+ gr.Markdown(version)
689
+ model_type.change(fn = change_visiale_by_model_type , inputs = model_type, outputs=[control_image_input,style_strength_ratio,Ip_Adapter_Strength])
690
+ files.upload(fn=swap_to_gallery, inputs=files, outputs=[uploaded_files, clear_button, files])
691
+ remove_and_reupload.click(fn=remove_back_to_files, outputs=[uploaded_files, clear_button, files])
692
+
693
+ final_run_btn.click(fn=set_text_unfinished, outputs = generated_information
694
+ ).then(process_generation, inputs=[sd_type,model_type,files, num_steps,style, Ip_Adapter_Strength,style_strength_ratio, guidance_scale, seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt, prompt_array,G_height,G_width,comic_type], outputs=out_image
695
+ ).then(fn=set_text_finished,outputs = generated_information)
696
+
697
+
698
+ gr.Examples(
699
+ examples=[
700
+ [1,0.5,0.5,3,"a woman img, wearing a white T-shirt, blue loose hair",
701
+ "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
702
+ array2string(["wake up in the bed",
703
+ "have breakfast",
704
+ "is on the road, go to company",
705
+ "work in the company",
706
+ "Take a walk next to the company at noon",
707
+ "lying in bed at night"]),
708
+ "Japanese Anime", "Using Ref Images",get_image_path_list('./examples/taylor'),768,768
709
+ ],
710
+ [0,0.5,0.5,2,"a man, wearing black jacket",
711
+ "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
712
+ array2string(["wake up in the bed",
713
+ "have breakfast",
714
+ "is on the road, go to the company, close look",
715
+ "work in the company",
716
+ "laughing happily",
717
+ "lying in bed at night"
718
+ ]),
719
+ "Japanese Anime","Only Using Textual Description",get_image_path_list('./examples/taylor'),768,768
720
+ ],
721
+ [0,0.3,0.5,2,"a girl, wearing white shirt, black skirt, black tie, yellow hair",
722
+ "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
723
+ array2string([
724
+ "at home #at home, began to go to drawing",
725
+ "sitting alone on a park bench.",
726
+ "reading a book on a park bench.",
727
+ "[NC]A squirrel approaches, peeking over the bench. ",
728
+ "look around in the park. # She looks around and enjoys the beauty of nature.",
729
+ "[NC]leaf falls from the tree, landing on the sketchbook.",
730
+ "picks up the leaf, examining its details closely.",
731
+ "starts sketching the leaf with intricate lines.",
732
+ "holds up the sketch drawing of the leaf.",
733
+ "[NC]The brown squirrel appear.",
734
+ "is very happy # She is very happy to see the squirrel again",
735
+ "[NC]The brown squirrel takes the cracker and scampers up a tree. # She gives the squirrel cracker",
736
+ "laughs and tucks the leaf into her book as a keepsake.",
737
+ "ready to leave.",]),
738
+ "Japanese Anime","Only Using Textual Description",get_image_path_list('./examples/taylor'),768,768
739
+ ]
740
+ ],
741
+ inputs=[seed_, sa32_, sa64_, id_length_, general_prompt, negative_prompt, prompt_array,style,model_type,files,G_height,G_width],
742
+ # outputs=[post_sketch, binary_matrixes, *color_row, *colors, *prompts, gen_prompt_vis, general_prompt, seed_],
743
+ # run_on_click=True,
744
+ label='😺 Examples 😺',
745
+ )
746
+ gr.Markdown(article)
747
+
748
+ # demo.load(None, None, None, _js=load_js)
749
+
750
+ demo.launch(server_name="0.0.0.0", share = True if use_va else False)
cog.yaml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Configuration for Cog ⚙️
2
+ # Reference: https://cog.run/yaml
3
+
4
+ build:
5
+ gpu: true
6
+ system_packages:
7
+ - "libgl1-mesa-glx"
8
+ - "libglib2.0-0"
9
+ python_version: "3.11"
10
+ python_packages:
11
+ - xformers==0.0.20
12
+ - torch==2.0.1
13
+ - torchvision==0.15.2
14
+ - diffusers==0.25.0
15
+ - transformers==4.36.2
16
+ - gradio==3.48.0
17
+ - accelerate
18
+ - safetensors
19
+ - peft
20
+ - Pillow==9.5.0
21
+ run:
22
+ - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.6.0/pget_linux_x86_64" && chmod +x /usr/local/bin/pget
23
+ predict: "predict.py:Predictor"
config/models.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Juggernaut:
2
+ path: "https://huggingface.co/RunDiffusion/Juggernaut-XL-v9/blob/main/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors"
3
+ single_files: true ### if true, is a civitai model
4
+ use_safetensors: true
5
+
6
+ RealVision:
7
+ path: "SG161222/RealVisXL_V4.0"
8
+ single_files: false
9
+ use_safetensors: true
10
+
11
+ SDXL:
12
+ path: "stabilityai/stable-diffusion-xl-base-1.0"
13
+ single_files: false
14
+ use_safetensors: true
15
+
16
+ Unstable:
17
+ path: "stablediffusionapi/sdxl-unstable-diffusers-y"
18
+ single_files: false
19
+ use_safetensors: false
examples/Robert/images.jpeg ADDED
examples/lecun/yann-lecun2.png ADDED
examples/taylor/1-1.png ADDED
examples/twoperson/1.jpeg ADDED
examples/twoperson/2.png ADDED
fonts/Inkfree.ttf ADDED
Binary file (41.2 kB). View file
 
fonts/ShadowsIntoLightTwo-Regular.ttf ADDED
Binary file (35.9 kB). View file
 
fonts/iCielPequena-English.otf ADDED
Binary file (46.6 kB). View file
 
gradio_app_sdxl_specific_id_low_vram.py ADDED
@@ -0,0 +1,1346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from this import d
2
+ import gradio as gr
3
+ import numpy as np
4
+ import torch
5
+ import gc
6
+ import copy
7
+ import os
8
+ import random
9
+ import datetime
10
+ from PIL import ImageFont
11
+ from utils.gradio_utils import (
12
+ character_to_dict,
13
+ process_original_prompt,
14
+ get_ref_character,
15
+ cal_attn_mask_xl,
16
+ cal_attn_indice_xl_effcient_memory,
17
+ is_torch2_available,
18
+ )
19
+
20
+ if is_torch2_available():
21
+ from utils.gradio_utils import AttnProcessor2_0 as AttnProcessor
22
+ else:
23
+ from utils.gradio_utils import AttnProcessor
24
+ from huggingface_hub import hf_hub_download
25
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import (
26
+ StableDiffusionXLPipeline,
27
+ )
28
+ from diffusers.schedulers.scheduling_ddim import DDIMScheduler
29
+ import torch.nn.functional as F
30
+ from diffusers.utils.loading_utils import load_image
31
+ from utils.utils import get_comic
32
+ from utils.style_template import styles
33
+ from utils.load_models_utils import get_models_dict, load_models
34
+
35
+ STYLE_NAMES = list(styles.keys())
36
+ DEFAULT_STYLE_NAME = "Japanese Anime"
37
+ global models_dict
38
+
39
+ models_dict = get_models_dict()
40
+
41
+ # Automatically select the device
42
+ device = (
43
+ "cuda"
44
+ if torch.cuda.is_available()
45
+ else "mps" if torch.backends.mps.is_available() else "cpu"
46
+ )
47
+ print(f"@@device:{device}")
48
+
49
+
50
+ # check if the file exists locally at a specified path before downloading it.
51
+ # if the file doesn't exist, it uses `hf_hub_download` to download the file
52
+ # and optionally move it to a specific directory. If the file already
53
+ # exists, it simply uses the local path.
54
+ local_dir = "data/"
55
+ photomaker_local_path = f"{local_dir}photomaker-v1.bin"
56
+ if not os.path.exists(photomaker_local_path):
57
+ photomaker_path = hf_hub_download(
58
+ repo_id="TencentARC/PhotoMaker",
59
+ filename="photomaker-v1.bin",
60
+ repo_type="model",
61
+ local_dir=local_dir,
62
+ )
63
+ else:
64
+ photomaker_path = photomaker_local_path
65
+
66
+ MAX_SEED = np.iinfo(np.int32).max
67
+
68
+
69
+ def setup_seed(seed):
70
+ torch.manual_seed(seed)
71
+ if device == "cuda":
72
+ torch.cuda.manual_seed_all(seed)
73
+ np.random.seed(seed)
74
+ random.seed(seed)
75
+ torch.backends.cudnn.deterministic = True
76
+
77
+
78
+ def set_text_unfinished():
79
+ return gr.update(
80
+ visible=True,
81
+ value="<h3>(Not Finished) Generating ··· The intermediate results will be shown.</h3>",
82
+ )
83
+
84
+
85
+ def set_text_finished():
86
+ return gr.update(visible=True, value="<h3>Generation Finished</h3>")
87
+
88
+
89
+ #################################################
90
+ def get_image_path_list(folder_name):
91
+ image_basename_list = os.listdir(folder_name)
92
+ image_path_list = sorted(
93
+ [os.path.join(folder_name, basename) for basename in image_basename_list]
94
+ )
95
+ return image_path_list
96
+
97
+
98
+ #################################################
99
+ class SpatialAttnProcessor2_0(torch.nn.Module):
100
+ r"""
101
+ Attention processor for IP-Adapater for PyTorch 2.0.
102
+ Args:
103
+ hidden_size (`int`):
104
+ The hidden size of the attention layer.
105
+ cross_attention_dim (`int`):
106
+ The number of channels in the `encoder_hidden_states`.
107
+ text_context_len (`int`, defaults to 77):
108
+ The context length of the text features.
109
+ scale (`float`, defaults to 1.0):
110
+ the weight scale of image prompt.
111
+ """
112
+
113
+ def __init__(
114
+ self,
115
+ hidden_size=None,
116
+ cross_attention_dim=None,
117
+ id_length=4,
118
+ device=device,
119
+ dtype=torch.float16,
120
+ ):
121
+ super().__init__()
122
+ if not hasattr(F, "scaled_dot_product_attention"):
123
+ raise ImportError(
124
+ "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
125
+ )
126
+ self.device = device
127
+ self.dtype = dtype
128
+ self.hidden_size = hidden_size
129
+ self.cross_attention_dim = cross_attention_dim
130
+ self.total_length = id_length + 1
131
+ self.id_length = id_length
132
+ self.id_bank = {}
133
+
134
+ def __call__(
135
+ self,
136
+ attn,
137
+ hidden_states,
138
+ encoder_hidden_states=None,
139
+ attention_mask=None,
140
+ temb=None,
141
+ ):
142
+ # un_cond_hidden_states, cond_hidden_states = hidden_states.chunk(2)
143
+ # un_cond_hidden_states = self.__call2__(attn, un_cond_hidden_states,encoder_hidden_states,attention_mask,temb)
144
+ # 生成一个0到1之间的随机数
145
+ global total_count, attn_count, cur_step, indices1024, indices4096
146
+ global sa32, sa64
147
+ global write
148
+ global height, width
149
+ global character_dict, character_index_dict, invert_character_index_dict, cur_character, ref_indexs_dict, ref_totals, cur_character
150
+ if attn_count == 0 and cur_step == 0:
151
+ indices1024, indices4096 = cal_attn_indice_xl_effcient_memory(
152
+ self.total_length,
153
+ self.id_length,
154
+ sa32,
155
+ sa64,
156
+ height,
157
+ width,
158
+ device=self.device,
159
+ dtype=self.dtype,
160
+ )
161
+ if write:
162
+ assert len(cur_character) == 1
163
+ if hidden_states.shape[1] == (height // 32) * (width // 32):
164
+ indices = indices1024
165
+ else:
166
+ indices = indices4096
167
+ # print(f"white:{cur_step}")
168
+ total_batch_size, nums_token, channel = hidden_states.shape
169
+ img_nums = total_batch_size // 2
170
+ hidden_states = hidden_states.reshape(-1, img_nums, nums_token, channel)
171
+ # print(img_nums,len(indices),hidden_states.shape,self.total_length)
172
+ if cur_character[0] not in self.id_bank:
173
+ self.id_bank[cur_character[0]] = {}
174
+ self.id_bank[cur_character[0]][cur_step] = [
175
+ hidden_states[:, img_ind, indices[img_ind], :]
176
+ .reshape(2, -1, channel)
177
+ .clone()
178
+ for img_ind in range(img_nums)
179
+ ]
180
+ hidden_states = hidden_states.reshape(-1, nums_token, channel)
181
+ # self.id_bank[cur_step] = [hidden_states[:self.id_length].clone(), hidden_states[self.id_length:].clone()]
182
+ else:
183
+ # encoder_hidden_states = torch.cat((self.id_bank[cur_step][0].to(self.device),self.id_bank[cur_step][1].to(self.device)))
184
+ # TODO: ADD Multipersion Control
185
+ encoder_arr = []
186
+ for character in cur_character:
187
+ encoder_arr = encoder_arr + [
188
+ tensor.to(self.device)
189
+ for tensor in self.id_bank[character][cur_step]
190
+ ]
191
+ # 判断随机数是否大于0.5
192
+ if cur_step < 1:
193
+ hidden_states = self.__call2__(
194
+ attn, hidden_states, None, attention_mask, temb
195
+ )
196
+ else: # 256 1024 4096
197
+ random_number = random.random()
198
+ if cur_step < 20:
199
+ rand_num = 0.3
200
+ else:
201
+ rand_num = 0.1
202
+ # print(f"hidden state shape {hidden_states.shape[1]}")
203
+ if random_number > rand_num:
204
+ if hidden_states.shape[1] == (height // 32) * (width // 32):
205
+ indices = indices1024
206
+ else:
207
+ indices = indices4096
208
+ # print("before attention",hidden_states.shape,attention_mask.shape,encoder_hidden_states.shape if encoder_hidden_states is not None else "None")
209
+ if write:
210
+ total_batch_size, nums_token, channel = hidden_states.shape
211
+ img_nums = total_batch_size // 2
212
+ hidden_states = hidden_states.reshape(
213
+ -1, img_nums, nums_token, channel
214
+ )
215
+ encoder_arr = [
216
+ hidden_states[:, img_ind, indices[img_ind], :].reshape(
217
+ 2, -1, channel
218
+ )
219
+ for img_ind in range(img_nums)
220
+ ]
221
+ for img_ind in range(img_nums):
222
+ # print(img_nums)
223
+ # assert img_nums != 1
224
+ img_ind_list = [i for i in range(img_nums)]
225
+ # print(img_ind_list,img_ind)
226
+ img_ind_list.remove(img_ind)
227
+ # print(img_ind,invert_character_index_dict[img_ind])
228
+ # print(character_index_dict[invert_character_index_dict[img_ind]])
229
+ # print(img_ind_list)
230
+ # print(img_ind,img_ind_list)
231
+ encoder_hidden_states_tmp = torch.cat(
232
+ [encoder_arr[img_ind] for img_ind in img_ind_list]
233
+ + [hidden_states[:, img_ind, :, :]],
234
+ dim=1,
235
+ )
236
+
237
+ hidden_states[:, img_ind, :, :] = self.__call2__(
238
+ attn,
239
+ hidden_states[:, img_ind, :, :],
240
+ encoder_hidden_states_tmp,
241
+ None,
242
+ temb,
243
+ )
244
+ else:
245
+ _, nums_token, channel = hidden_states.shape
246
+ # img_nums = total_batch_size // 2
247
+ # encoder_hidden_states = encoder_hidden_states.reshape(-1,img_nums,nums_token,channel)
248
+ hidden_states = hidden_states.reshape(2, -1, nums_token, channel)
249
+ # print(len(indices))
250
+ # encoder_arr = [encoder_hidden_states[:,img_ind,indices[img_ind],:].reshape(2,-1,channel) for img_ind in range(img_nums)]
251
+ encoder_hidden_states_tmp = torch.cat(
252
+ encoder_arr + [hidden_states[:, 0, :, :]], dim=1
253
+ )
254
+ # print(len(encoder_arr),encoder_hidden_states_tmp.shape)
255
+ hidden_states[:, 0, :, :] = self.__call2__(
256
+ attn,
257
+ hidden_states[:, 0, :, :],
258
+ encoder_hidden_states_tmp,
259
+ None,
260
+ temb,
261
+ )
262
+ hidden_states = hidden_states.reshape(-1, nums_token, channel)
263
+ else:
264
+ hidden_states = self.__call2__(
265
+ attn, hidden_states, None, attention_mask, temb
266
+ )
267
+ attn_count += 1
268
+ if attn_count == total_count:
269
+ attn_count = 0
270
+ cur_step += 1
271
+ indices1024, indices4096 = cal_attn_indice_xl_effcient_memory(
272
+ self.total_length,
273
+ self.id_length,
274
+ sa32,
275
+ sa64,
276
+ height,
277
+ width,
278
+ device=self.device,
279
+ dtype=self.dtype,
280
+ )
281
+
282
+ return hidden_states
283
+
284
+ def __call2__(
285
+ self,
286
+ attn,
287
+ hidden_states,
288
+ encoder_hidden_states=None,
289
+ attention_mask=None,
290
+ temb=None,
291
+ ):
292
+ residual = hidden_states
293
+
294
+ if attn.spatial_norm is not None:
295
+ hidden_states = attn.spatial_norm(hidden_states, temb)
296
+
297
+ input_ndim = hidden_states.ndim
298
+
299
+ if input_ndim == 4:
300
+ batch_size, channel, height, width = hidden_states.shape
301
+ hidden_states = hidden_states.view(
302
+ batch_size, channel, height * width
303
+ ).transpose(1, 2)
304
+
305
+ batch_size, sequence_length, channel = hidden_states.shape
306
+ # print(hidden_states.shape)
307
+ if attention_mask is not None:
308
+ attention_mask = attn.prepare_attention_mask(
309
+ attention_mask, sequence_length, batch_size
310
+ )
311
+ # scaled_dot_product_attention expects attention_mask shape to be
312
+ # (batch, heads, source_length, target_length)
313
+ attention_mask = attention_mask.view(
314
+ batch_size, attn.heads, -1, attention_mask.shape[-1]
315
+ )
316
+
317
+ if attn.group_norm is not None:
318
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
319
+ 1, 2
320
+ )
321
+
322
+ query = attn.to_q(hidden_states)
323
+
324
+ if encoder_hidden_states is None:
325
+ encoder_hidden_states = hidden_states # B, N, C
326
+ # else:
327
+ # encoder_hidden_states = encoder_hidden_states.view(-1,self.id_length+1,sequence_length,channel).reshape(-1,(self.id_length+1) * sequence_length,channel)
328
+
329
+ key = attn.to_k(encoder_hidden_states)
330
+ value = attn.to_v(encoder_hidden_states)
331
+
332
+ inner_dim = key.shape[-1]
333
+ head_dim = inner_dim // attn.heads
334
+
335
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
336
+
337
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
338
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
339
+
340
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
341
+ # TODO: add support for attn.scale when we move to Torch 2.1
342
+ hidden_states = F.scaled_dot_product_attention(
343
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
344
+ )
345
+
346
+ hidden_states = hidden_states.transpose(1, 2).reshape(
347
+ batch_size, -1, attn.heads * head_dim
348
+ )
349
+ hidden_states = hidden_states.to(query.dtype)
350
+
351
+ # linear proj
352
+ hidden_states = attn.to_out[0](hidden_states)
353
+ # dropout
354
+ hidden_states = attn.to_out[1](hidden_states)
355
+
356
+ if input_ndim == 4:
357
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
358
+ batch_size, channel, height, width
359
+ )
360
+
361
+ if attn.residual_connection:
362
+ hidden_states = hidden_states + residual
363
+
364
+ hidden_states = hidden_states / attn.rescale_output_factor
365
+
366
+ return hidden_states
367
+
368
+
369
+ def set_attention_processor(unet, id_length, is_ipadapter=False):
370
+ global attn_procs
371
+ attn_procs = {}
372
+ for name in unet.attn_processors.keys():
373
+ cross_attention_dim = (
374
+ None
375
+ if name.endswith("attn1.processor")
376
+ else unet.config.cross_attention_dim
377
+ )
378
+ if name.startswith("mid_block"):
379
+ hidden_size = unet.config.block_out_channels[-1]
380
+ elif name.startswith("up_blocks"):
381
+ block_id = int(name[len("up_blocks.")])
382
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
383
+ elif name.startswith("down_blocks"):
384
+ block_id = int(name[len("down_blocks.")])
385
+ hidden_size = unet.config.block_out_channels[block_id]
386
+ if cross_attention_dim is None:
387
+ if name.startswith("up_blocks"):
388
+ attn_procs[name] = SpatialAttnProcessor2_0(id_length=id_length)
389
+ else:
390
+ attn_procs[name] = AttnProcessor()
391
+ else:
392
+ if is_ipadapter:
393
+ attn_procs[name] = IPAttnProcessor2_0(
394
+ hidden_size=hidden_size,
395
+ cross_attention_dim=cross_attention_dim,
396
+ scale=1,
397
+ num_tokens=4,
398
+ ).to(unet.device, dtype=torch.float16)
399
+ else:
400
+ attn_procs[name] = AttnProcessor()
401
+
402
+ unet.set_attn_processor(copy.deepcopy(attn_procs))
403
+
404
+
405
+ #################################################
406
+ #################################################
407
+ canvas_html = "<div id='canvas-root' style='max-width:400px; margin: 0 auto'></div>"
408
+ load_js = """
409
+ async () => {
410
+ const url = "https://huggingface.co/datasets/radames/gradio-components/raw/main/sketch-canvas.js"
411
+ fetch(url)
412
+ .then(res => res.text())
413
+ .then(text => {
414
+ const script = document.createElement('script');
415
+ script.type = "module"
416
+ script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' }));
417
+ document.head.appendChild(script);
418
+ });
419
+ }
420
+ """
421
+
422
+ get_js_colors = """
423
+ async (canvasData) => {
424
+ const canvasEl = document.getElementById("canvas-root");
425
+ return [canvasEl._data]
426
+ }
427
+ """
428
+
429
+ css = """
430
+ #color-bg{display:flex;justify-content: center;align-items: center;}
431
+ .color-bg-item{width: 100%; height: 32px}
432
+ #main_button{width:100%}
433
+ <style>
434
+ """
435
+
436
+
437
+ def save_single_character_weights(unet, character, description, filepath):
438
+ """
439
+ 保存 attention_processor 类中的 id_bank GPU Tensor 列表到指定文件中。
440
+ 参数:
441
+ - model: 包含 attention_processor 类实例的模型。
442
+ - filepath: 权重要保存到的文件路径。
443
+ """
444
+ weights_to_save = {}
445
+ weights_to_save["description"] = description
446
+ weights_to_save["character"] = character
447
+ for attn_name, attn_processor in unet.attn_processors.items():
448
+ if isinstance(attn_processor, SpatialAttnProcessor2_0):
449
+ # 将每个 Tensor 转到 CPU 并转为列表,以确保它可以被序列化
450
+ weights_to_save[attn_name] = {}
451
+ for step_key in attn_processor.id_bank[character].keys():
452
+ weights_to_save[attn_name][step_key] = [
453
+ tensor.cpu()
454
+ for tensor in attn_processor.id_bank[character][step_key]
455
+ ]
456
+ # 使用torch.save保存权重
457
+ torch.save(weights_to_save, filepath)
458
+
459
+
460
+ def load_single_character_weights(unet, filepath):
461
+ """
462
+ 从指定文件中加载权重到 attention_processor 类的 id_bank 中。
463
+ 参数:
464
+ - model: 包含 attention_processor 类实例的模型。
465
+ - filepath: 权重文件的路径。
466
+ """
467
+ # 使用torch.load来读取权重
468
+ weights_to_load = torch.load(filepath, map_location=torch.device("cpu"))
469
+ character = weights_to_load["character"]
470
+ description = weights_to_load["description"]
471
+ for attn_name, attn_processor in unet.attn_processors.items():
472
+ if isinstance(attn_processor, SpatialAttnProcessor2_0):
473
+ # 转移权重到GPU(如果GPU可用的话)并赋值给id_bank
474
+ attn_processor.id_bank[character] = {}
475
+ for step_key in weights_to_load[attn_name].keys():
476
+ attn_processor.id_bank[character][step_key] = [
477
+ tensor.to(unet.device)
478
+ for tensor in weights_to_load[attn_name][step_key]
479
+ ]
480
+
481
+
482
+ def save_results(unet, img_list):
483
+
484
+ timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
485
+ folder_name = f"results/{timestamp}"
486
+ weight_folder_name = f"{folder_name}/weights"
487
+ # 创建文件夹
488
+ if not os.path.exists(folder_name):
489
+ os.makedirs(folder_name)
490
+ os.makedirs(weight_folder_name)
491
+
492
+ for idx, img in enumerate(img_list):
493
+ file_path = os.path.join(folder_name, f"image_{idx}.png") # 图片文件名
494
+ img.save(file_path)
495
+ global character_dict
496
+ # for char in character_dict:
497
+ # description = character_dict[char]
498
+ # save_single_character_weights(unet,char,description,os.path.join(weight_folder_name, f'{char}.pt'))
499
+
500
+
501
+ #################################################
502
+ title = r"""
503
+ <h1 align="center">StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation</h1>
504
+ """
505
+
506
+ description = r"""
507
+ <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/HVision-NKU/StoryDiffusion' target='_blank'><b>StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation</b></a>.<br>
508
+ ❗️❗️❗️[<b>Important</b>] Personalization steps:<br>
509
+ 1️⃣ Enter a Textual Description for Character, if you add the Ref-Image, making sure to <b>follow the class word</b> you want to customize with the <b>trigger word</b>: `img`, such as: `man img` or `woman img` or `girl img`.<br>
510
+ 2️⃣ Enter the prompt array, each line corrsponds to one generated image.<br>
511
+ 3️⃣ Choose your preferred style template.<br>
512
+ 4️⃣ Click the <b>Submit</b> button to start customizing.
513
+ """
514
+
515
+ article = r"""
516
+
517
+ If StoryDiffusion is helpful, please help to ⭐ the <a href='https://github.com/HVision-NKU/StoryDiffusion' target='_blank'>Github Repo</a>. Thanks!
518
+ [![GitHub Stars](https://img.shields.io/github/stars/HVision-NKU/StoryDiffusion?style=social)](https://github.com/HVision-NKU/StoryDiffusion)
519
+ ---
520
+ 📝 **Citation**
521
+ <br>
522
+ If our work is useful for your research, please consider citing:
523
+
524
+ ```bibtex
525
+ @article{Zhou2024storydiffusion,
526
+ title={StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation},
527
+ author={Zhou, Yupeng and Zhou, Daquan and Cheng, Ming-Ming and Feng, Jiashi and Hou, Qibin},
528
+ year={2024}
529
+ }
530
+ ```
531
+ 📋 **License**
532
+ <br>
533
+ Apache-2.0 LICENSE.
534
+
535
+ 📧 **Contact**
536
+ <br>
537
+ If you have any questions, please feel free to reach me out at <b>ypzhousdu@gmail.com</b>.
538
+ """
539
+ version = r"""
540
+ <h3 align="center">StoryDiffusion Version 0.02 (test version)</h3>
541
+
542
+ <h5 >1. Support image ref image. (Cartoon Ref image is not support now)</h5>
543
+ <h5 >2. Support Typesetting Style and Captioning.(By default, the prompt is used as the caption for each image. If you need to change the caption, add a # at the end of each line. Only the part after the # will be added as a caption to the image.)</h5>
544
+ <h5 >3. [NC]symbol (The [NC] symbol is used as a flag to indicate that no characters should be present in the generated scene images. If you want do that, prepend the "[NC]" at the beginning of the line. For example, to generate a scene of falling leaves without any character, write: "[NC] The leaves are falling.")</h5>
545
+ <h5 align="center">Tips: </h4>
546
+ """
547
+ #################################################
548
+ global attn_count, total_count, id_length, total_length, cur_step, cur_model_type
549
+ global write
550
+ global sa32, sa64
551
+ global height, width
552
+ attn_count = 0
553
+ total_count = 0
554
+ cur_step = 0
555
+ id_length = 4
556
+ total_length = 5
557
+ cur_model_type = ""
558
+ global attn_procs, unet
559
+ attn_procs = {}
560
+ ###
561
+ write = False
562
+ ###
563
+ sa32 = 0.5
564
+ sa64 = 0.5
565
+ height = 768
566
+ width = 768
567
+ ###
568
+ global pipe
569
+ global sd_model_path
570
+ pipe = None
571
+ sd_model_path = models_dict["Unstable"]["path"] # "SG161222/RealVisXL_V4.0"
572
+ single_files = models_dict["Unstable"]["single_files"]
573
+ ### LOAD Stable Diffusion Pipeline
574
+ if single_files:
575
+ pipe = StableDiffusionXLPipeline.from_single_file(
576
+ sd_model_path, torch_dtype=torch.float16
577
+ )
578
+ else:
579
+ pipe = StableDiffusionXLPipeline.from_pretrained(
580
+ sd_model_path, torch_dtype=torch.float16, use_safetensors=False
581
+ )
582
+ pipe = pipe.to(device)
583
+ pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
584
+ # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
585
+ pipe.scheduler.set_timesteps(50)
586
+ pipe.enable_vae_slicing()
587
+ if device != "mps":
588
+ pipe.enable_model_cpu_offload()
589
+ unet = pipe.unet
590
+ cur_model_type = "Unstable" + "-" + "original"
591
+ ### Insert PairedAttention
592
+ for name in unet.attn_processors.keys():
593
+ cross_attention_dim = (
594
+ None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
595
+ )
596
+ if name.startswith("mid_block"):
597
+ hidden_size = unet.config.block_out_channels[-1]
598
+ elif name.startswith("up_blocks"):
599
+ block_id = int(name[len("up_blocks.")])
600
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
601
+ elif name.startswith("down_blocks"):
602
+ block_id = int(name[len("down_blocks.")])
603
+ hidden_size = unet.config.block_out_channels[block_id]
604
+ if cross_attention_dim is None and (name.startswith("up_blocks")):
605
+ attn_procs[name] = SpatialAttnProcessor2_0(id_length=id_length)
606
+ total_count += 1
607
+ else:
608
+ attn_procs[name] = AttnProcessor()
609
+ print("successsfully load paired self-attention")
610
+ print(f"number of the processor : {total_count}")
611
+ unet.set_attn_processor(copy.deepcopy(attn_procs))
612
+ global mask1024, mask4096
613
+ mask1024, mask4096 = cal_attn_mask_xl(
614
+ total_length,
615
+ id_length,
616
+ sa32,
617
+ sa64,
618
+ height,
619
+ width,
620
+ device=device,
621
+ dtype=torch.float16,
622
+ )
623
+
624
+ ######### Gradio Fuction #############
625
+
626
+
627
+ def swap_to_gallery(images):
628
+ return (
629
+ gr.update(value=images, visible=True),
630
+ gr.update(visible=True),
631
+ gr.update(visible=False),
632
+ )
633
+
634
+
635
+ def upload_example_to_gallery(images, prompt, style, negative_prompt):
636
+ return (
637
+ gr.update(value=images, visible=True),
638
+ gr.update(visible=True),
639
+ gr.update(visible=False),
640
+ )
641
+
642
+
643
+ def remove_back_to_files():
644
+ return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
645
+
646
+
647
+ def remove_tips():
648
+ return gr.update(visible=False)
649
+
650
+
651
+ def apply_style_positive(style_name: str, positive: str):
652
+ p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
653
+ return p.replace("{prompt}", positive)
654
+
655
+
656
def apply_style(style_name: str, positives: list, negative: str = ""):
    """Apply a style template to a batch of positive prompts.

    Returns the templated positive prompts and the style's negative prompt
    concatenated (space-separated) with the caller-supplied *negative*.
    Unknown style names fall back to ``DEFAULT_STYLE_NAME``.
    """
    template, style_negative = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    styled_positives = [template.replace("{prompt}", text) for text in positives]
    combined_negative = style_negative + " " + negative
    return styled_positives, combined_negative
661
+
662
+
663
def change_visiale_by_model_type(_model_type):
    """Toggle the ref-image widgets according to the chosen model type.

    Returns visibility updates for (control_image_input,
    style_strength_ratio, Ip_Adapter_Strength).

    Raises:
        ValueError: for any *_model_type* other than the two known modes.
    """
    if _model_type == "Only Using Textual Description":
        # Text-only mode: no reference-image controls are shown.
        return (
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=False),
        )
    if _model_type == "Using Ref Images":
        # Ref-image mode: reveal the upload group and style-strength slider.
        return (
            gr.update(visible=True),
            gr.update(visible=True),
            gr.update(visible=False),
        )
    raise ValueError("Invalid model type", _model_type)
678
+
679
+
680
def load_character_files(character_files: str):
    """Read saved character weight files and return their combined text.

    *character_files* is a newline-separated list of file paths.  Each file
    is expected to be a ``torch.save`` dict containing ``"character"`` and
    ``"description"`` string entries; the concatenations are joined into one
    newline-separated string via ``array2string`` (used to refill the
    general-prompt textbox).

    Raises:
        gr.Error: if *character_files* is the empty string.
    """
    if character_files == "":
        raise gr.Error("Please set a character file!")
    character_files_arr = character_files.splitlines()
    primarytext = []
    for character_file_name in character_files_arr:
        # NOTE(review): torch.load unpickles arbitrary objects — loading an
        # untrusted character file can execute code; only use trusted paths.
        character_file = torch.load(
            character_file_name, map_location=torch.device("cpu")
        )
        primarytext.append(character_file["character"] + character_file["description"])
    return array2string(primarytext)
691
+
692
+
693
def load_character_files_on_running(unet, character_files: str):
    """Load every listed character weight file into *unet*.

    *character_files* is a newline-separated list of paths.  Returns False
    when the string is empty (nothing loaded), True otherwise.
    """
    if character_files == "":
        return False
    for weights_path in character_files.splitlines():
        load_single_character_weights(unet, weights_path)
    return True
700
+
701
+
702
+ ######### Image Generation ##############
703
def process_generation(
    _sd_type,
    _model_type,
    _upload_images,
    _num_steps,
    style_name,
    _Ip_Adapter_Strength,
    _style_strength_ratio,
    guidance_scale,
    seed_,
    sa32_,
    sa64_,
    id_length_,
    general_prompt,
    negative_prompt,
    prompt_array,
    G_height,
    G_width,
    _comic_type,
    font_choice,
    _char_files,
):  # Corrected font_choice usage
    """Generate the comic as a Gradio streaming generator.

    Validates the inputs, (re)loads the diffusion pipeline when the model
    selection changed, runs one consistent-attention pass per character to
    build the identity images, then renders the remaining frames, and
    finally applies the chosen comic typesetting.  Yields the partial image
    list after each stage so the gallery updates progressively.

    NOTE(review): this function mutates a large set of module globals
    (pipe, unet, write, cur_step, height, width, ...) — it is not
    re-entrant and assumes a single concurrent generation.
    """
    # At most two characters are supported (VRAM limitation).
    if len(general_prompt.splitlines()) >= 3:
        raise gr.Error(
            "Support for more than three characters is temporarily unavailable due to VRAM limitations, but this issue will be resolved soon."
        )
    # Map the UI label onto the internal model-type token.
    _model_type = "Photomaker" if _model_type == "Using Ref Images" else "original"
    if _model_type == "Photomaker" and "img" not in general_prompt:
        raise gr.Error(
            'Please add the triger word " img " behind the class word you want to customize, such as: man img or woman img'
        )
    if _upload_images is None and _model_type != "original":
        raise gr.Error(f"Cannot find any input face image!")
    global sa32, sa64, id_length, total_length, attn_procs, unet, cur_model_type
    global write
    global cur_step, attn_count
    global height, width
    height = G_height
    width = G_width
    global pipe
    global sd_model_path, models_dict
    sd_model_path = models_dict[_sd_type]
    use_safe_tensor = True
    # Reset the paired self-attention caches from any previous run so stale
    # identity features don't leak into this generation.
    for attn_processor in pipe.unet.attn_processors.values():
        if isinstance(attn_processor, SpatialAttnProcessor2_0):
            for values in attn_processor.id_bank.values():
                del values
            attn_processor.id_bank = {}
            attn_processor.id_length = id_length
            attn_processor.total_length = id_length + 1
    gc.collect()
    torch.cuda.empty_cache()
    if cur_model_type != _sd_type + "-" + _model_type:
        # Model selection changed: drop the old pipeline and load a new one.
        # apply the style template
        ##### load pipe
        del pipe
        gc.collect()
        if device == "cuda":
            torch.cuda.empty_cache()
        model_info = models_dict[_sd_type]
        model_info["model_type"] = _model_type
        pipe = load_models(model_info, device=device, photomaker_path=photomaker_path)
        set_attention_processor(pipe.unet, id_length_, is_ipadapter=False)
        ##### ########################
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
        cur_model_type = _sd_type + "-" + _model_type
        pipe.enable_vae_slicing()
        if device != "mps":
            pipe.enable_model_cpu_offload()
    else:
        unet = pipe.unet
        # unet.set_attn_processor(copy.deepcopy(attn_procs))

    # Optionally preload saved per-character attention weights.
    load_chars = load_character_files_on_running(unet, character_files=_char_files)

    prompts = prompt_array.splitlines()
    global character_dict, character_index_dict, invert_character_index_dict, ref_indexs_dict, ref_totals
    character_dict, character_list = character_to_dict(general_prompt)

    # Photomaker identity merge starts at this denoising step (capped at 30).
    start_merge_step = int(float(_style_strength_ratio) / 100 * _num_steps)
    if start_merge_step > 30:
        start_merge_step = 30
    print(f"start_merge_step:{start_merge_step}")
    generator = torch.Generator(device=device).manual_seed(seed_)
    sa32, sa64 = sa32_, sa64_
    id_length = id_length_
    clipped_prompts = prompts[:]
    # [NC] marks frames with no character; they may not appear among the
    # first id_length identity rows.
    nc_indexs = []
    for ind, prompt in enumerate(clipped_prompts):
        if "[NC]" in prompt:
            nc_indexs.append(ind)
            if ind < id_length:
                raise gr.Error(
                    f"The first {id_length} row is id prompts, cannot use [NC]!"
                )
    prompts = [
        prompt if "[NC]" not in prompt else prompt.replace("[NC]", "")
        for prompt in clipped_prompts
    ]

    # Text after '#' is a caption only — strip it from the diffusion prompt.
    prompts = [
        prompt.rpartition("#")[0] if "#" in prompt else prompt for prompt in prompts
    ]
    print(prompts)
    # id_prompts = prompts[:id_length]
    (
        character_index_dict,
        invert_character_index_dict,
        replace_prompts,
        ref_indexs_dict,
        ref_totals,
    ) = process_original_prompt(character_dict, prompts.copy(), id_length)
    if _model_type != "original":
        # One reference image per character, matched by upload order.
        input_id_images_dict = {}
        if len(_upload_images) != len(character_dict.keys()):
            raise gr.Error(
                f"You upload images({len(_upload_images)}) is not equal to the number of characters({len(character_dict.keys())})!"
            )
        for ind, img in enumerate(_upload_images):
            input_id_images_dict[character_list[ind]] = [load_image(img)]
    print(character_dict)
    print(character_index_dict)
    print(invert_character_index_dict)
    # real_prompts = prompts[id_length:]
    if device == "cuda":
        torch.cuda.empty_cache()
    # write=True: attention processors record identity features this pass.
    write = True
    cur_step = 0

    attn_count = 0
    # id_prompts, negative_prompt = apply_style(style_name, id_prompts, negative_prompt)
    # print(id_prompts)
    setup_seed(seed_)
    total_results = []
    id_images = []
    results_dict = {}
    global cur_character
    if not load_chars:
        # Phase 1: per-character identity batches (fills the id banks).
        for character_key in character_dict.keys():
            cur_character = [character_key]
            ref_indexs = ref_indexs_dict[character_key]
            print(character_key, ref_indexs)
            current_prompts = [replace_prompts[ref_ind] for ref_ind in ref_indexs]
            print(current_prompts)
            setup_seed(seed_)
            generator = torch.Generator(device=device).manual_seed(seed_)
            cur_step = 0
            cur_positive_prompts, negative_prompt = apply_style(
                style_name, current_prompts, negative_prompt
            )
            if _model_type == "original":
                id_images = pipe(
                    cur_positive_prompts,
                    num_inference_steps=_num_steps,
                    guidance_scale=guidance_scale,
                    height=height,
                    width=width,
                    negative_prompt=negative_prompt,
                    generator=generator,
                ).images
            elif _model_type == "Photomaker":
                id_images = pipe(
                    cur_positive_prompts,
                    input_id_images=input_id_images_dict[character_key],
                    num_inference_steps=_num_steps,
                    guidance_scale=guidance_scale,
                    start_merge_step=start_merge_step,
                    height=height,
                    width=width,
                    negative_prompt=negative_prompt,
                    generator=generator,
                ).images
            else:
                raise NotImplementedError(
                    "You should choice between original and Photomaker!",
                    f"But you choice {_model_type}",
                )

            # total_results = id_images + total_results
            # yield total_results
            print(id_images)
            for ind, img in enumerate(id_images):
                print(ref_indexs[ind])
                results_dict[ref_indexs[ind]] = img
            # real_images = []
            yield [results_dict[ind] for ind in results_dict.keys()]
    # write=False: subsequent frames read (not record) the identity banks.
    write = False
    if not load_chars:
        real_prompts_inds = [
            ind for ind in range(len(prompts)) if ind not in ref_totals
        ]
    else:
        real_prompts_inds = [ind for ind in range(len(prompts))]
    print(real_prompts_inds)

    # Phase 2: render every remaining frame one at a time.
    for real_prompts_ind in real_prompts_inds:
        real_prompt = replace_prompts[real_prompts_ind]
        cur_character = get_ref_character(prompts[real_prompts_ind], character_dict)
        print(cur_character, real_prompt)
        setup_seed(seed_)
        if len(cur_character) > 1 and _model_type == "Photomaker":
            raise gr.Error(
                "Temporarily Not Support Multiple character in Ref Image Mode!"
            )
        generator = torch.Generator(device=device).manual_seed(seed_)
        cur_step = 0
        real_prompt = apply_style_positive(style_name, real_prompt)
        if _model_type == "original":
            results_dict[real_prompts_ind] = pipe(
                real_prompt,
                num_inference_steps=_num_steps,
                guidance_scale=guidance_scale,
                height=height,
                width=width,
                negative_prompt=negative_prompt,
                generator=generator,
            ).images[0]
        elif _model_type == "Photomaker":
            results_dict[real_prompts_ind] = pipe(
                real_prompt,
                # [NC] frames have no character; fall back to the first
                # uploaded image (ignored downstream via nc_flag).
                input_id_images=(
                    input_id_images_dict[cur_character[0]]
                    if real_prompts_ind not in nc_indexs
                    else input_id_images_dict[character_list[0]]
                ),
                num_inference_steps=_num_steps,
                guidance_scale=guidance_scale,
                start_merge_step=start_merge_step,
                height=height,
                width=width,
                negative_prompt=negative_prompt,
                generator=generator,
                nc_flag=True if real_prompts_ind in nc_indexs else False,
            ).images[0]
        else:
            raise NotImplementedError(
                "You should choice between original and Photomaker!",
                f"But you choice {_model_type}",
            )
        yield [results_dict[ind] for ind in results_dict.keys()]
    # Re-assemble frames in prompt order and optionally typeset the comic.
    total_results = [results_dict[ind] for ind in range(len(prompts))]
    if _comic_type != "No typesetting (default)":
        captions = prompt_array.splitlines()
        captions = [caption.replace("[NC]", "") for caption in captions]
        captions = [
            caption.split("#")[-1] if "#" in caption else caption
            for caption in captions
        ]
        font_path = os.path.join("fonts", font_choice)
        font = ImageFont.truetype(font_path, int(45))
        total_results = (
            get_comic(total_results, _comic_type, captions=captions, font=font)
            + total_results
        )
    save_results(pipe.unet, total_results)

    yield total_results
961
+
962
+
963
def array2string(arr):
    """Join a list of strings with newlines (no trailing newline).

    Replaces the previous manual accumulation loop — which rebuilt the
    string on every iteration (quadratic) — with a single linear-time
    ``str.join``.  Behavior is identical, including the empty-list case
    (returns ``""``).
    """
    return "\n".join(arr)
972
+
973
+
974
+ #################################################
975
+ #################################################
976
+ ### define the interface
977
+
978
# Build the Gradio UI: left column holds all inputs/hyperparameters,
# right column shows the streamed result gallery.
with gr.Blocks(css=css) as demo:
    binary_matrixes = gr.State([])
    color_layout = gr.State([])

    # gr.Markdown(logo)
    gr.Markdown(title)
    gr.Markdown(description)

    with gr.Row():
        with gr.Group(elem_id="main-image"):

            prompts = []
            colors = []

            with gr.Column(visible=True) as gen_prompt_vis:
                sd_type = gr.Dropdown(
                    choices=list(models_dict.keys()),
                    value="Unstable",
                    label="sd_type",
                    info="Select pretrained model",
                )
                model_type = gr.Radio(
                    ["Only Using Textual Description", "Using Ref Images"],
                    label="model_type",
                    value="Only Using Textual Description",
                    info="Control type of the Character",
                )
                # Reference-image upload group; hidden until the user picks
                # "Using Ref Images" (see change_visiale_by_model_type).
                with gr.Group(visible=False) as control_image_input:
                    files = gr.Files(
                        label="Drag (Select) 1 or more photos of your face",
                        file_types=["image"],
                    )
                    uploaded_files = gr.Gallery(
                        label="Your images",
                        visible=False,
                        columns=5,
                        rows=1,
                        height=200,
                    )
                    with gr.Column(visible=False) as clear_button:
                        remove_and_reupload = gr.ClearButton(
                            value="Remove and upload new ones",
                            components=files,
                            size="sm",
                        )
                general_prompt = gr.Textbox(
                    value="",
                    lines=2,
                    label="(1) Textual Description for Character",
                    interactive=True,
                )
                negative_prompt = gr.Textbox(
                    value="", label="(2) Negative_prompt", interactive=True
                )
                style = gr.Dropdown(
                    label="Style template",
                    choices=STYLE_NAMES,
                    value=DEFAULT_STYLE_NAME,
                )
                prompt_array = gr.Textbox(
                    lines=3,
                    value="",
                    label="(3) Comic Description (each line corresponds to a frame).",
                    interactive=True,
                )
                char_path = gr.Textbox(
                    lines=2,
                    value="",
                    visible=False,
                    label="(Optional) Character files",
                    interactive=True,
                )
                char_btn = gr.Button("Load Character files", visible=False)
                with gr.Accordion("(4) Tune the hyperparameters", open=True):
                    font_choice = gr.Dropdown(
                        label="Select Font",
                        choices=[
                            f for f in os.listdir("./fonts") if f.endswith(".ttf")
                        ],
                        value="Inkfree.ttf",
                        info="Select font for the final slide.",
                        interactive=True,
                    )
                    sa32_ = gr.Slider(
                        label=" (The degree of Paired Attention at 32 x 32 self-attention layers) ",
                        minimum=0,
                        maximum=1.0,
                        value=0.5,
                        step=0.1,
                    )
                    sa64_ = gr.Slider(
                        label=" (The degree of Paired Attention at 64 x 64 self-attention layers) ",
                        minimum=0,
                        maximum=1.0,
                        value=0.5,
                        step=0.1,
                    )
                    id_length_ = gr.Slider(
                        label="Number of id images in total images",
                        minimum=1,
                        maximum=4,
                        value=1,
                        step=1,
                    )
                    with gr.Row():
                        seed_ = gr.Slider(
                            label="Seed", minimum=-1, maximum=MAX_SEED, value=0, step=1
                        )
                        randomize_seed_btn = gr.Button("🎲", size="sm")
                    num_steps = gr.Slider(
                        label="Number of sample steps",
                        minimum=20,
                        maximum=100,
                        step=1,
                        value=35,
                    )
                    G_height = gr.Slider(
                        label="height",
                        minimum=256,
                        maximum=1024,
                        step=32,
                        value=768,
                    )
                    G_width = gr.Slider(
                        label="width",
                        minimum=256,
                        maximum=1024,
                        step=32,
                        value=768,
                    )
                    comic_type = gr.Radio(
                        [
                            "No typesetting (default)",
                            "Four Pannel",
                            "Classic Comic Style",
                        ],
                        value="Classic Comic Style",
                        label="Typesetting Style",
                        info="Select the typesetting style ",
                    )
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.1,
                        maximum=10.0,
                        step=0.1,
                        value=5,
                    )
                    style_strength_ratio = gr.Slider(
                        label="Style strength of Ref Image (%)",
                        minimum=15,
                        maximum=50,
                        step=1,
                        value=20,
                        visible=False,
                    )
                    Ip_Adapter_Strength = gr.Slider(
                        label="Ip_Adapter_Strength",
                        minimum=0,
                        maximum=1,
                        step=0.1,
                        value=0.5,
                        visible=False,
                    )
                final_run_btn = gr.Button("Generate ! 😺")

        with gr.Column():
            out_image = gr.Gallery(label="Result", columns=2, height="auto")
            generated_information = gr.Markdown(
                label="Generation Details", value="", visible=False
            )
            gr.Markdown(version)
    # ---- Event wiring ----
    model_type.change(
        fn=change_visiale_by_model_type,
        inputs=model_type,
        outputs=[control_image_input, style_strength_ratio, Ip_Adapter_Strength],
    )
    files.upload(
        fn=swap_to_gallery, inputs=files, outputs=[uploaded_files, clear_button, files]
    )
    remove_and_reupload.click(
        fn=remove_back_to_files, outputs=[uploaded_files, clear_button, files]
    )
    char_btn.click(fn=load_character_files, inputs=char_path, outputs=[general_prompt])

    randomize_seed_btn.click(
        fn=lambda: random.randint(-1, MAX_SEED),
        inputs=[],
        outputs=seed_,
    )

    # Main pipeline: banner -> streamed generation -> done banner.
    final_run_btn.click(fn=set_text_unfinished, outputs=generated_information).then(
        process_generation,
        inputs=[
            sd_type,
            model_type,
            files,
            num_steps,
            style,
            Ip_Adapter_Strength,
            style_strength_ratio,
            guidance_scale,
            seed_,
            sa32_,
            sa64_,
            id_length_,
            general_prompt,
            negative_prompt,
            prompt_array,
            G_height,
            G_width,
            comic_type,
            font_choice,
            char_path,
        ],
        outputs=out_image,
    ).then(fn=set_text_finished, outputs=generated_information)

    # Canned example rows: (seed, sa32, sa64, id_length, general_prompt,
    # negative_prompt, prompt_array, style, model_type, files, height, width).
    gr.Examples(
        examples=[
            [
                0,
                0.5,
                0.5,
                2,
                "[Bob] A man, wearing a black suit\n[Alice]a woman, wearing a white shirt",
                "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
                array2string(
                    [
                        "[Bob] at home, read new paper #at home, The newspaper says there is a treasure house in the forest.",
                        "[Bob] on the road, near the forest",
                        "[Alice] is make a call at home # [Bob] invited [Alice] to join him on an adventure.",
                        "[NC]A tiger appeared in the forest, at night ",
                        "[NC] The car on the road, near the forest #They drives to the forest in search of treasure.",
                        "[Bob] very frightened, open mouth, in the forest, at night",
                        "[Alice] very frightened, open mouth, in the forest, at night",
                        "[Bob] and [Alice] running very fast, in the forest, at night",
                        "[NC] A house in the forest, at night #Suddenly, They discovers the treasure house!",
                        "[Bob] and [Alice] in the house filled with treasure, laughing, at night #He is overjoyed inside the house.",
                    ]
                ),
                "Comic book",
                "Only Using Textual Description",
                get_image_path_list("./examples/taylor"),
                768,
                768,
            ],
            [
                0,
                0.5,
                0.5,
                2,
                "[Bob] A man img, wearing a black suit\n[Alice]a woman img, wearing a white shirt",
                "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
                array2string(
                    [
                        "[Bob] at home, read new paper #at home, The newspaper says there is a treasure house in the forest.",
                        "[Bob] on the road, near the forest",
                        "[Alice] is make a call at home # [Bob] invited [Alice] to join him on an adventure.",
                        "[NC] The car on the road, near the forest #They drives to the forest in search of treasure.",
                        "[NC]A tiger appeared in the forest, at night ",
                        "[Bob] very frightened, open mouth, in the forest, at night",
                        "[Alice] very frightened, open mouth, in the forest, at night",
                        "[Bob] running very fast, in the forest, at night",
                        "[NC] A house in the forest, at night #Suddenly, They discovers the treasure house!",
                        "[Bob] in the house filled with treasure, laughing, at night #They are overjoyed inside the house.",
                    ]
                ),
                "Comic book",
                "Using Ref Images",
                get_image_path_list("./examples/twoperson"),
                1024,
                1024,
            ],
            [
                1,
                0.5,
                0.5,
                3,
                "[Taylor]a woman img, wearing a white T-shirt, blue loose hair",
                "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
                array2string(
                    [
                        "[Taylor]wake up in the bed",
                        "[Taylor]have breakfast",
                        "[Taylor]is on the road, go to company",
                        "[Taylor]work in the company",
                        "[Taylor]Take a walk next to the company at noon",
                        "[Taylor]lying in bed at night",
                    ]
                ),
                "Japanese Anime",
                "Using Ref Images",
                get_image_path_list("./examples/taylor"),
                768,
                768,
            ],
            [
                0,
                0.5,
                0.5,
                3,
                "[Bob]a man, wearing black jacket",
                "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
                array2string(
                    [
                        "[Bob]wake up in the bed",
                        "[Bob]have breakfast",
                        "[Bob]is on the road, go to the company, close look",
                        "[Bob]work in the company",
                        "[Bob]laughing happily",
                        "[Bob]lying in bed at night",
                    ]
                ),
                "Japanese Anime",
                "Only Using Textual Description",
                get_image_path_list("./examples/taylor"),
                768,
                768,
            ],
            [
                0,
                0.3,
                0.5,
                3,
                "[Kitty]a girl, wearing white shirt, black skirt, black tie, yellow hair",
                "bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs",
                array2string(
                    [
                        "[Kitty]at home #at home, began to go to drawing",
                        "[Kitty]sitting alone on a park bench.",
                        "[Kitty]reading a book on a park bench.",
                        "[NC]A squirrel approaches, peeking over the bench. ",
                        "[Kitty]look around in the park. # She looks around and enjoys the beauty of nature.",
                        "[NC]leaf falls from the tree, landing on the sketchbook.",
                        "[Kitty]picks up the leaf, examining its details closely.",
                        "[NC]The brown squirrel appear.",
                        "[Kitty]is very happy # She is very happy to see the squirrel again",
                        "[NC]The brown squirrel takes the cracker and scampers up a tree. # She gives the squirrel cracker",
                    ]
                ),
                "Japanese Anime",
                "Only Using Textual Description",
                get_image_path_list("./examples/taylor"),
                768,
                768,
            ],
        ],
        inputs=[
            seed_,
            sa32_,
            sa64_,
            id_length_,
            general_prompt,
            negative_prompt,
            prompt_array,
            style,
            model_type,
            files,
            G_height,
            G_width,
        ],
        # outputs=[post_sketch, binary_matrixes, *color_row, *colors, *prompts, gen_prompt_vis, general_prompt, seed_],
        # run_on_click=True,
        label="😺 Examples 😺",
    )
    gr.Markdown(article)


# NOTE(review): binds to all interfaces and creates a public share link —
# confirm this exposure is intended before deploying.
demo.launch(server_name="0.0.0.0", share=True)
images/logo.png ADDED
images/pad_images.png ADDED
myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2010 Pallets
2
+
3
+ Redistribution and use in source and binary forms, with or without
4
+ modification, are permitted provided that the following conditions are
5
+ met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ 3. Neither the name of the copyright holder nor the names of its
15
+ contributors may be used to endorse or promote products derived from
16
+ this software without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
24
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: MarkupSafe
3
+ Version: 2.1.5
4
+ Summary: Safely add untrusted strings to HTML/XML markup.
5
+ Home-page: https://palletsprojects.com/p/markupsafe/
6
+ Maintainer: Pallets
7
+ Maintainer-email: contact@palletsprojects.com
8
+ License: BSD-3-Clause
9
+ Project-URL: Donate, https://palletsprojects.com/donate
10
+ Project-URL: Documentation, https://markupsafe.palletsprojects.com/
11
+ Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
12
+ Project-URL: Source Code, https://github.com/pallets/markupsafe/
13
+ Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
14
+ Project-URL: Chat, https://discord.gg/pallets
15
+ Classifier: Development Status :: 5 - Production/Stable
16
+ Classifier: Environment :: Web Environment
17
+ Classifier: Intended Audience :: Developers
18
+ Classifier: License :: OSI Approved :: BSD License
19
+ Classifier: Operating System :: OS Independent
20
+ Classifier: Programming Language :: Python
21
+ Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
22
+ Classifier: Topic :: Text Processing :: Markup :: HTML
23
+ Requires-Python: >=3.7
24
+ Description-Content-Type: text/x-rst
25
+ License-File: LICENSE.rst
26
+
27
+ MarkupSafe
28
+ ==========
29
+
30
+ MarkupSafe implements a text object that escapes characters so it is
31
+ safe to use in HTML and XML. Characters that have special meanings are
32
+ replaced so that they display as the actual characters. This mitigates
33
+ injection attacks, meaning untrusted user input can safely be displayed
34
+ on a page.
35
+
36
+
37
+ Installing
38
+ ----------
39
+
40
+ Install and update using `pip`_:
41
+
42
+ .. code-block:: text
43
+
44
+ pip install -U MarkupSafe
45
+
46
+ .. _pip: https://pip.pypa.io/en/stable/getting-started/
47
+
48
+
49
+ Examples
50
+ --------
51
+
52
+ .. code-block:: pycon
53
+
54
+ >>> from markupsafe import Markup, escape
55
+
56
+ >>> # escape replaces special characters and wraps in Markup
57
+ >>> escape("<script>alert(document.cookie);</script>")
58
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
59
+
60
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
61
+ >>> Markup("<strong>Hello</strong>")
62
+ Markup('<strong>hello</strong>')
63
+
64
+ >>> escape(Markup("<strong>Hello</strong>"))
65
+ Markup('<strong>hello</strong>')
66
+
67
+ >>> # Markup is a str subclass
68
+ >>> # methods and operators escape their arguments
69
+ >>> template = Markup("Hello <em>{name}</em>")
70
+ >>> template.format(name='"World"')
71
+ Markup('Hello <em>&#34;World&#34;</em>')
72
+
73
+
74
+ Donate
75
+ ------
76
+
77
+ The Pallets organization develops and supports MarkupSafe and other
78
+ popular packages. In order to grow the community of contributors and
79
+ users, and allow the maintainers to devote more time to the projects,
80
+ `please donate today`_.
81
+
82
+ .. _please donate today: https://palletsprojects.com/donate
83
+
84
+
85
+ Links
86
+ -----
87
+
88
+ - Documentation: https://markupsafe.palletsprojects.com/
89
+ - Changes: https://markupsafe.palletsprojects.com/changes/
90
+ - PyPI Releases: https://pypi.org/project/MarkupSafe/
91
+ - Source Code: https://github.com/pallets/markupsafe/
92
+ - Issue Tracker: https://github.com/pallets/markupsafe/issues/
93
+ - Chat: https://discord.gg/pallets
myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503
3
+ MarkupSafe-2.1.5.dist-info/METADATA,sha256=icNlaniV7YIQZ1BScCVqNaRtm7MAgfw8d3OBmoSVyAY,3096
4
+ MarkupSafe-2.1.5.dist-info/RECORD,,
5
+ MarkupSafe-2.1.5.dist-info/WHEEL,sha256=ircjsfhzblqgSzO8ow7-0pXK-RVqDqNRGQ8F650AUNM,102
6
+ MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
7
+ markupsafe/__init__.py,sha256=m1ysNeqf55zbEoJtaovca40ivrkEFolPlw5bGoC5Gi4,11290
8
+ markupsafe/__pycache__/__init__.cpython-311.pyc,,
9
+ markupsafe/__pycache__/_native.cpython-311.pyc,,
10
+ markupsafe/_native.py,sha256=_Q7UsXCOvgdonCgqG3l5asANI6eo50EKnDM-mlwEC5M,1776
11
+ markupsafe/_speedups.c,sha256=n3jzzaJwXcoN8nTFyA53f3vSqsWK2vujI-v6QYifjhQ,7403
12
+ markupsafe/_speedups.cp311-win_amd64.pyd,sha256=MEqnkyBOHmstwQr50hKitovHjrHhMJ0gYmya4Fu1DK0,15872
13
+ markupsafe/_speedups.pyi,sha256=f5QtwIOP0eLrxh2v5p6SmaYmlcHIGIfmz0DovaqL0OU,238
14
+ markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.42.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp311-cp311-win_amd64
5
+
myenv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ markupsafe
myenv/Lib/site-packages/PIL/BdfFontFile.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # bitmap distribution font (bdf) file parser
6
+ #
7
+ # history:
8
+ # 1996-05-16 fl created (as bdf2pil)
9
+ # 1997-08-25 fl converted to FontFile driver
10
+ # 2001-05-25 fl removed bogus __init__ call
11
+ # 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
12
+ # 2003-04-22 fl more robustification (from Graham Dumpleton)
13
+ #
14
+ # Copyright (c) 1997-2003 by Secret Labs AB.
15
+ # Copyright (c) 1997-2003 by Fredrik Lundh.
16
+ #
17
+ # See the README file for information on usage and redistribution.
18
+ #
19
+
20
+ """
21
+ Parse X Bitmap Distribution Format (BDF)
22
+ """
23
+ from __future__ import annotations
24
+
25
+ from typing import BinaryIO
26
+
27
+ from . import FontFile, Image
28
+
29
+ bdf_slant = {
30
+ "R": "Roman",
31
+ "I": "Italic",
32
+ "O": "Oblique",
33
+ "RI": "Reverse Italic",
34
+ "RO": "Reverse Oblique",
35
+ "OT": "Other",
36
+ }
37
+
38
+ bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"}
39
+
40
+
41
+ def bdf_char(
42
+ f: BinaryIO,
43
+ ) -> (
44
+ tuple[
45
+ str,
46
+ int,
47
+ tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]],
48
+ Image.Image,
49
+ ]
50
+ | None
51
+ ):
52
+ # skip to STARTCHAR
53
+ while True:
54
+ s = f.readline()
55
+ if not s:
56
+ return None
57
+ if s[:9] == b"STARTCHAR":
58
+ break
59
+ id = s[9:].strip().decode("ascii")
60
+
61
+ # load symbol properties
62
+ props = {}
63
+ while True:
64
+ s = f.readline()
65
+ if not s or s[:6] == b"BITMAP":
66
+ break
67
+ i = s.find(b" ")
68
+ props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
69
+
70
+ # load bitmap
71
+ bitmap = bytearray()
72
+ while True:
73
+ s = f.readline()
74
+ if not s or s[:7] == b"ENDCHAR":
75
+ break
76
+ bitmap += s[:-1]
77
+
78
+ # The word BBX
79
+ # followed by the width in x (BBw), height in y (BBh),
80
+ # and x and y displacement (BBxoff0, BByoff0)
81
+ # of the lower left corner from the origin of the character.
82
+ width, height, x_disp, y_disp = (int(p) for p in props["BBX"].split())
83
+
84
+ # The word DWIDTH
85
+ # followed by the width in x and y of the character in device pixels.
86
+ dwx, dwy = (int(p) for p in props["DWIDTH"].split())
87
+
88
+ bbox = (
89
+ (dwx, dwy),
90
+ (x_disp, -y_disp - height, width + x_disp, -y_disp),
91
+ (0, 0, width, height),
92
+ )
93
+
94
+ try:
95
+ im = Image.frombytes("1", (width, height), bitmap, "hex", "1")
96
+ except ValueError:
97
+ # deal with zero-width characters
98
+ im = Image.new("1", (width, height))
99
+
100
+ return id, int(props["ENCODING"]), bbox, im
101
+
102
+
103
+ class BdfFontFile(FontFile.FontFile):
104
+ """Font file plugin for the X11 BDF format."""
105
+
106
+ def __init__(self, fp: BinaryIO) -> None:
107
+ super().__init__()
108
+
109
+ s = fp.readline()
110
+ if s[:13] != b"STARTFONT 2.1":
111
+ msg = "not a valid BDF file"
112
+ raise SyntaxError(msg)
113
+
114
+ props = {}
115
+ comments = []
116
+
117
+ while True:
118
+ s = fp.readline()
119
+ if not s or s[:13] == b"ENDPROPERTIES":
120
+ break
121
+ i = s.find(b" ")
122
+ props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii")
123
+ if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
124
+ if s.find(b"LogicalFontDescription") < 0:
125
+ comments.append(s[i + 1 : -1].decode("ascii"))
126
+
127
+ while True:
128
+ c = bdf_char(fp)
129
+ if not c:
130
+ break
131
+ id, ch, (xy, dst, src), im = c
132
+ if 0 <= ch < len(self.glyph):
133
+ self.glyph[ch] = xy, dst, src, im
myenv/Lib/site-packages/PIL/BlpImagePlugin.py ADDED
@@ -0,0 +1,488 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Blizzard Mipmap Format (.blp)
3
+ Jerome Leclanche <jerome@leclan.ch>
4
+
5
+ The contents of this file are hereby released in the public domain (CC0)
6
+ Full text of the CC0 license:
7
+ https://creativecommons.org/publicdomain/zero/1.0/
8
+
9
+ BLP1 files, used mostly in Warcraft III, are not fully supported.
10
+ All types of BLP2 files used in World of Warcraft are supported.
11
+
12
+ The BLP file structure consists of a header, up to 16 mipmaps of the
13
+ texture
14
+
15
+ Texture sizes must be powers of two, though the two dimensions do
16
+ not have to be equal; 512x256 is valid, but 512x200 is not.
17
+ The first mipmap (mipmap #0) is the full size image; each subsequent
18
+ mipmap halves both dimensions. The final mipmap should be 1x1.
19
+
20
+ BLP files come in many different flavours:
21
+ * JPEG-compressed (type == 0) - only supported for BLP1.
22
+ * RAW images (type == 1, encoding == 1). Each mipmap is stored as an
23
+ array of 8-bit values, one per pixel, left to right, top to bottom.
24
+ Each value is an index to the palette.
25
+ * DXT-compressed (type == 1, encoding == 2):
26
+ - DXT1 compression is used if alpha_encoding == 0.
27
+ - An additional alpha bit is used if alpha_depth == 1.
28
+ - DXT3 compression is used if alpha_encoding == 1.
29
+ - DXT5 compression is used if alpha_encoding == 7.
30
+ """
31
+
32
+ from __future__ import annotations
33
+
34
+ import abc
35
+ import os
36
+ import struct
37
+ from enum import IntEnum
38
+ from io import BytesIO
39
+ from typing import IO
40
+
41
+ from . import Image, ImageFile
42
+
43
+
44
class Format(IntEnum):
    # Top-level compression type stored in the BLP header.
    JPEG = 0
46
+
47
+
48
class Encoding(IntEnum):
    # Pixel encoding used by type-1 (non-JPEG) BLP files.
    UNCOMPRESSED = 1
    DXT = 2
    UNCOMPRESSED_RAW_BGRA = 3
52
+
53
+
54
class AlphaEncoding(IntEnum):
    # DXT variant selector; values match the on-disk alpha_encoding field.
    DXT1 = 0
    DXT3 = 1
    DXT5 = 7
58
+
59
+
60
def unpack_565(i: int) -> tuple[int, int, int]:
    """Expand a packed RGB565 value into 8-bit (r, g, b) components."""
    red = (i >> 11) & 0x1F
    green = (i >> 5) & 0x3F
    blue = i & 0x1F
    # Shift the 5/6-bit fields up to the top of an 8-bit range.
    return red << 3, green << 2, blue << 3
62
+
63
+
64
def decode_dxt1(
    data: bytes, alpha: bool = False
) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4*width pixels)

    Returns four row accumulators, one per scanline of the 4x4 blocks.
    Each pixel is appended as [r, g, b] or [r, g, b, a] when *alpha* is set.
    """

    blocks = len(data) // 8  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        # Decode next 8-byte block.
        idx = block_index * 8
        color0, color1, bits = struct.unpack_from("<HHI", data, idx)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        # Decode this block into 4x4 pixels
        # Accumulate the results onto our 4 row accumulators
        for j in range(4):
            for i in range(4):
                # get next control op and generate a pixel

                control = bits & 3
                bits = bits >> 2

                a = 0xFF
                if control == 0:
                    r, g, b = r0, g0, b0
                elif control == 1:
                    r, g, b = r1, g1, b1
                elif control == 2:
                    # color0 > color1 selects 4-color mode (two
                    # interpolants); otherwise 3-color mode (midpoint).
                    if color0 > color1:
                        r = (2 * r0 + r1) // 3
                        g = (2 * g0 + g1) // 3
                        b = (2 * b0 + b1) // 3
                    else:
                        r = (r0 + r1) // 2
                        g = (g0 + g1) // 2
                        b = (b0 + b1) // 2
                elif control == 3:
                    if color0 > color1:
                        r = (2 * r1 + r0) // 3
                        g = (2 * g1 + g0) // 3
                        b = (2 * b1 + b0) // 3
                    else:
                        # 3-color mode: code 3 means transparent black.
                        r, g, b, a = 0, 0, 0, 0

                if alpha:
                    ret[j].extend([r, g, b, a])
                else:
                    ret[j].extend([r, g, b])

    return ret
119
+
120
+
121
def decode_dxt3(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4*width pixels)

    DXT3 stores 4-bit explicit alpha for each pixel ahead of the DXT1-style
    color block.  Returns four RGBA row accumulators.
    """

    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        idx = block_index * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block.
        bits = struct.unpack_from("<8B", block)  # 16 x 4-bit alpha values
        color0, color1 = struct.unpack_from("<HH", block, 8)

        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            high = False  # Do we want the higher bits?
            for i in range(4):
                alphacode_index = (4 * j + i) // 2
                a = bits[alphacode_index]
                if high:
                    high = False
                    a >>= 4
                else:
                    high = True
                    a &= 0xF
                a *= 17  # We get a value between 0 and 15

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret
172
+
173
+
174
def decode_dxt5(data: bytes) -> tuple[bytearray, bytearray, bytearray, bytearray]:
    """
    input: one "row" of data (i.e. will produce 4 * width pixels)

    DXT5 stores two 8-bit alpha endpoints plus 3-bit interpolation codes,
    followed by a DXT1-style color block.  Returns four RGBA row accumulators.
    """

    blocks = len(data) // 16  # number of blocks in row
    ret = (bytearray(), bytearray(), bytearray(), bytearray())

    for block_index in range(blocks):
        idx = block_index * 16
        block = data[idx : idx + 16]
        # Decode next 16-byte block.
        a0, a1 = struct.unpack_from("<BB", block)

        # Reassemble the 48 bits of 3-bit alpha codes into two words.
        bits = struct.unpack_from("<6B", block, 2)
        alphacode1 = bits[2] | (bits[3] << 8) | (bits[4] << 16) | (bits[5] << 24)
        alphacode2 = bits[0] | (bits[1] << 8)

        color0, color1 = struct.unpack_from("<HH", block, 8)

        (code,) = struct.unpack_from("<I", block, 12)

        r0, g0, b0 = unpack_565(color0)
        r1, g1, b1 = unpack_565(color1)

        for j in range(4):
            for i in range(4):
                # get next control op and generate a pixel
                alphacode_index = 3 * (4 * j + i)

                if alphacode_index <= 12:
                    alphacode = (alphacode2 >> alphacode_index) & 0x07
                elif alphacode_index == 15:
                    # This code straddles the boundary between the two words.
                    alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06)
                else:  # alphacode_index >= 18 and alphacode_index <= 45
                    alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07

                if alphacode == 0:
                    a = a0
                elif alphacode == 1:
                    a = a1
                elif a0 > a1:
                    # 8-alpha mode: six interpolated values.
                    a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7
                elif alphacode == 6:
                    a = 0
                elif alphacode == 7:
                    a = 255
                else:
                    # 6-alpha mode: four interpolated values.
                    a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5

                color_code = (code >> 2 * (4 * j + i)) & 0x03

                if color_code == 0:
                    r, g, b = r0, g0, b0
                elif color_code == 1:
                    r, g, b = r1, g1, b1
                elif color_code == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                elif color_code == 3:
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3

                ret[j].extend([r, g, b, a])

    return ret
242
+
243
+
244
class BLPFormatError(NotImplementedError):
    # Raised for BLP features or variants this plugin does not handle.
    pass
246
+
247
+
248
+ def _accept(prefix: bytes) -> bool:
249
+ return prefix[:4] in (b"BLP1", b"BLP2")
250
+
251
+
252
class BlpImageFile(ImageFile.ImageFile):
    """
    Blizzard Mipmap Format
    """

    format = "BLP"
    format_description = "Blizzard Mipmap Format"

    def _open(self) -> None:
        # Read the 4-byte magic ("BLP1" or "BLP2") to pick the decoder.
        self.magic = self.fp.read(4)

        # Skip compression (4) + encoding (1); read alpha depth.
        self.fp.seek(5, os.SEEK_CUR)
        (self._blp_alpha_depth,) = struct.unpack("<b", self.fp.read(1))

        # Skip alpha encoding + mips; read (width, height).
        self.fp.seek(2, os.SEEK_CUR)
        self._size = struct.unpack("<II", self.fp.read(8))

        if self.magic in (b"BLP1", b"BLP2"):
            # Decoder name matches the magic; the full header is
            # re-parsed by the decoder itself.
            decoder = self.magic.decode()
        else:
            msg = f"Bad BLP magic {repr(self.magic)}"
            raise BLPFormatError(msg)

        self._mode = "RGBA" if self._blp_alpha_depth else "RGB"
        self.tile = [(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))]
277
+
278
+
279
class _BLPBaseDecoder(ImageFile.PyDecoder):
    """Shared machinery for BLP1/BLP2 decoding: header parsing plus
    palette and indexed-pixel reading.  Subclasses implement _load()."""

    _pulls_fd = True

    def decode(self, buffer: bytes) -> tuple[int, int]:
        # Any short read surfaces as struct.error; report it as a
        # truncated file rather than a cryptic unpack failure.
        try:
            self._read_blp_header()
            self._load()
        except struct.error as e:
            msg = "Truncated BLP file"
            raise OSError(msg) from e
        return -1, 0

    @abc.abstractmethod
    def _load(self) -> None:
        pass

    def _read_blp_header(self) -> None:
        assert self.fd is not None
        self.fd.seek(4)  # skip the 4-byte magic
        (self._blp_compression,) = struct.unpack("<i", self._safe_read(4))

        (self._blp_encoding,) = struct.unpack("<b", self._safe_read(1))
        (self._blp_alpha_depth,) = struct.unpack("<b", self._safe_read(1))
        (self._blp_alpha_encoding,) = struct.unpack("<b", self._safe_read(1))
        self.fd.seek(1, os.SEEK_CUR)  # mips

        self.size = struct.unpack("<II", self._safe_read(8))

        if isinstance(self, BLP1Decoder):
            # Only present for BLP1
            (self._blp_encoding,) = struct.unpack("<i", self._safe_read(4))
            self.fd.seek(4, os.SEEK_CUR)  # subtype

        # Offset/length tables for up to 16 mipmap levels.
        self._blp_offsets = struct.unpack("<16I", self._safe_read(16 * 4))
        self._blp_lengths = struct.unpack("<16I", self._safe_read(16 * 4))

    def _safe_read(self, length: int) -> bytes:
        return ImageFile._safe_read(self.fd, length)

    def _read_palette(self) -> list[tuple[int, int, int, int]]:
        # Palette is up to 256 BGRA entries; stop early when truncated.
        ret = []
        for i in range(256):
            try:
                b, g, r, a = struct.unpack("<4B", self._safe_read(4))
            except struct.error:
                break
            ret.append((b, g, r, a))
        return ret

    def _read_bgra(self, palette: list[tuple[int, int, int, int]]) -> bytearray:
        # Expand the 8-bit palette indices of mipmap #0 into RGB(A) bytes.
        data = bytearray()
        _data = BytesIO(self._safe_read(self._blp_lengths[0]))
        while True:
            try:
                (offset,) = struct.unpack("<B", _data.read(1))
            except struct.error:
                break
            b, g, r, a = palette[offset]
            d: tuple[int, ...] = (r, g, b)
            if self._blp_alpha_depth:
                d += (a,)
            data.extend(d)
        return data
342
+
343
+
344
class BLP1Decoder(_BLPBaseDecoder):
    """Decoder for BLP1 files (used mostly by Warcraft III).

    Supports JPEG-compressed streams (Format.JPEG) and palette-indexed
    raw data (compression 1, encodings 4/5); anything else raises
    BLPFormatError.
    """

    def _load(self) -> None:
        if self._blp_compression == Format.JPEG:
            self._decode_jpeg_stream()

        elif self._blp_compression == 1:
            if self._blp_encoding in (4, 5):
                palette = self._read_palette()
                data = self._read_bgra(palette)
                self.set_as_raw(data)
            else:
                msg = f"Unsupported BLP encoding {repr(self._blp_encoding)}"
                raise BLPFormatError(msg)
        else:
            # Fixed: report the unsupported *compression* value; the
            # message previously echoed self._blp_encoding, producing a
            # misleading diagnostic for unknown compression types.
            msg = f"Unsupported BLP compression {repr(self._blp_compression)}"
            raise BLPFormatError(msg)

    def _decode_jpeg_stream(self) -> None:
        from .JpegImagePlugin import JpegImageFile

        # A shared JPEG header is stored once up front and must be
        # prepended to the mipmap's own JPEG data before decoding.
        (jpeg_header_size,) = struct.unpack("<I", self._safe_read(4))
        jpeg_header = self._safe_read(jpeg_header_size)
        assert self.fd is not None
        self._safe_read(self._blp_offsets[0] - self.fd.tell())  # What IS this?
        data = self._safe_read(self._blp_lengths[0])
        data = jpeg_header + data
        image = JpegImageFile(BytesIO(data))
        Image._decompression_bomb_check(image.size)
        if image.mode == "CMYK":
            # Force the raw JPEG decoder to treat the data as CMYK.
            decoder_name, extents, offset, args = image.tile[0]
            image.tile = [(decoder_name, extents, offset, (args[0], "CMYK"))]
        # BLP1 JPEG data is stored with red/blue swapped; swap back.
        r, g, b = image.convert("RGB").split()
        reversed_image = Image.merge("RGB", (b, g, r))
        self.set_as_raw(reversed_image.tobytes())
378
+
379
+
380
class BLP2Decoder(_BLPBaseDecoder):
    """Decoder for BLP2 files (World of Warcraft): uncompressed
    palette-indexed data or DXT1/3/5 compressed data."""

    def _load(self) -> None:
        palette = self._read_palette()

        assert self.fd is not None
        self.fd.seek(self._blp_offsets[0])  # jump to mipmap #0

        if self._blp_compression == 1:
            # Uncompressed or DirectX compression

            if self._blp_encoding == Encoding.UNCOMPRESSED:
                data = self._read_bgra(palette)

            elif self._blp_encoding == Encoding.DXT:
                data = bytearray()
                if self._blp_alpha_encoding == AlphaEncoding.DXT1:
                    # One row of 4x4 blocks; DXT1 blocks are 8 bytes.
                    linesize = (self.size[0] + 3) // 4 * 8
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt1(
                            self._safe_read(linesize), alpha=bool(self._blp_alpha_depth)
                        ):
                            data += d

                elif self._blp_alpha_encoding == AlphaEncoding.DXT3:
                    # DXT3/DXT5 blocks are 16 bytes.
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt3(self._safe_read(linesize)):
                            data += d

                elif self._blp_alpha_encoding == AlphaEncoding.DXT5:
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt5(self._safe_read(linesize)):
                            data += d
                else:
                    msg = f"Unsupported alpha encoding {repr(self._blp_alpha_encoding)}"
                    raise BLPFormatError(msg)
            else:
                msg = f"Unknown BLP encoding {repr(self._blp_encoding)}"
                raise BLPFormatError(msg)

        else:
            msg = f"Unknown BLP compression {repr(self._blp_compression)}"
            raise BLPFormatError(msg)

        self.set_as_raw(data)
426
+
427
+
428
class BLPEncoder(ImageFile.PyEncoder):
    """Encoder writing uncompressed, palette-indexed BLP pixel data."""

    _pushes_fd = True

    def _write_palette(self) -> bytes:
        # Emit the image palette as 256 BGRA entries, zero-padded.
        data = b""
        assert self.im is not None
        palette = self.im.getpalette("RGBA", "RGBA")
        for i in range(len(palette) // 4):
            r, g, b, a = palette[i * 4 : (i + 1) * 4]
            data += struct.pack("<4B", b, g, r, a)
        while len(data) < 256 * 4:
            data += b"\x00" * 4
        return data

    def encode(self, bufsize: int) -> tuple[int, int, bytes]:
        palette_data = self._write_palette()

        # Mipmap #0 starts right after the 20-byte fixed header, the
        # two 16-entry offset/length tables, and the palette.
        offset = 20 + 16 * 4 * 2 + len(palette_data)
        data = struct.pack("<16I", offset, *((0,) * 15))

        assert self.im is not None
        w, h = self.im.size
        data += struct.pack("<16I", w * h, *((0,) * 15))

        data += palette_data

        # One palette index byte per pixel, row-major.
        for y in range(h):
            for x in range(w):
                data += struct.pack("<B", self.im.getpixel((x, y)))

        return len(data), 0, data
459
+
460
+
461
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save a palette-mode ("P") image as an uncompressed BLP file.

    Writes a BLP2 header by default; pass ``blp_version="BLP1"`` in
    ``encoderinfo`` to write a BLP1 header instead.

    :raises ValueError: if the image mode is not "P".
    """
    if im.mode != "P":
        msg = "Unsupported BLP image mode"
        raise ValueError(msg)

    magic = b"BLP1" if im.encoderinfo.get("blp_version") == "BLP1" else b"BLP2"
    fp.write(magic)

    fp.write(struct.pack("<i", 1))  # Uncompressed or DirectX compression
    fp.write(struct.pack("<b", Encoding.UNCOMPRESSED))
    fp.write(struct.pack("<b", 1 if im.palette.mode == "RGBA" else 0))
    fp.write(struct.pack("<b", 0))  # alpha encoding
    fp.write(struct.pack("<b", 0))  # mips
    fp.write(struct.pack("<II", *im.size))
    if magic == b"BLP1":
        fp.write(struct.pack("<i", 5))  # encoding (BLP1-only header field)
        fp.write(struct.pack("<i", 0))  # subtype (BLP1-only header field)

    ImageFile._save(im, fp, [("BLP", (0, 0) + im.size, 0, im.mode)])
480
+
481
+
482
# Register one opener for both versions; BlpImageFile._open selects the
# "BLP1" or "BLP2" decoder from the file magic.
Image.register_open(BlpImageFile.format, BlpImageFile, _accept)
Image.register_extension(BlpImageFile.format, ".blp")
Image.register_decoder("BLP1", BLP1Decoder)
Image.register_decoder("BLP2", BLP2Decoder)

Image.register_save(BlpImageFile.format, _save)
Image.register_encoder("BLP", BLPEncoder)
myenv/Lib/site-packages/PIL/BmpImagePlugin.py ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # BMP file handler
6
+ #
7
+ # Windows (and OS/2) native bitmap storage format.
8
+ #
9
+ # history:
10
+ # 1995-09-01 fl Created
11
+ # 1996-04-30 fl Added save
12
+ # 1997-08-27 fl Fixed save of 1-bit images
13
+ # 1998-03-06 fl Load P images as L where possible
14
+ # 1998-07-03 fl Load P images as 1 where possible
15
+ # 1998-12-29 fl Handle small palettes
16
+ # 2002-12-30 fl Fixed load of 1-bit palette images
17
+ # 2003-04-21 fl Fixed load of 1-bit monochrome images
18
+ # 2003-04-23 fl Added limited support for BI_BITFIELDS compression
19
+ #
20
+ # Copyright (c) 1997-2003 by Secret Labs AB
21
+ # Copyright (c) 1995-2003 by Fredrik Lundh
22
+ #
23
+ # See the README file for information on usage and redistribution.
24
+ #
25
+ from __future__ import annotations
26
+
27
+ import os
28
+ from typing import IO
29
+
30
+ from . import Image, ImageFile, ImagePalette
31
+ from ._binary import i16le as i16
32
+ from ._binary import i32le as i32
33
+ from ._binary import o8
34
+ from ._binary import o16le as o16
35
+ from ._binary import o32le as o32
36
+
37
+ #
38
+ # --------------------------------------------------------------------
39
+ # Read BMP file
40
+
41
# Map BMP bit depth to (PIL image mode, raw decoder layout).
BIT2MODE = {
    # bits => mode, rawmode
    1: ("P", "P;1"),
    4: ("P", "P;4"),
    8: ("P", "P"),
    16: ("RGB", "BGR;15"),
    24: ("RGB", "BGR"),
    32: ("RGB", "BGRX"),
}
50
+
51
+
52
+ def _accept(prefix: bytes) -> bool:
53
+ return prefix[:2] == b"BM"
54
+
55
+
56
def _dib_accept(prefix: bytes) -> bool:
    """Return True if *prefix* starts with a known DIB header size."""
    header_size = i32(prefix)
    return header_size in (12, 40, 52, 56, 64, 108, 124)
58
+
59
+
60
+ # =============================================================================
61
+ # Image plugin for the Windows BMP format.
62
+ # =============================================================================
63
class BmpImageFile(ImageFile.ImageFile):
    """Image plugin for the Windows Bitmap format (BMP)"""

    # ------------------------------------------------------------- Description
    format_description = "Windows Bitmap"
    format = "BMP"

    # -------------------------------------------------- BMP Compression values
    COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5}
    # Expose each compression constant as a class attribute (self.RAW, ...).
    for k, v in COMPRESSIONS.items():
        vars()[k] = v

    def _bitmap(self, header=0, offset=0):
        """Read relevant info about the BMP.

        :param header: absolute offset of the bitmap info header, or 0
            to read from the current stream position.
        :param offset: absolute offset of the pixel data, or 0 to use
            the position reached after reading the palette.
        """
        read, seek = self.fp.read, self.fp.seek
        if header:
            seek(header)
        # read bmp header size @offset 14 (this is part of the header size)
        file_info = {"header_size": i32(read(4)), "direction": -1}

        # -------------------- If requested, read header at a specific position
        # read the rest of the bmp header, without its size
        header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4)

        # ------------------------------- Windows Bitmap v2, IBM OS/2 Bitmap v1
        # ----- This format has different offsets because of width/height types
        # 12: BITMAPCOREHEADER/OS21XBITMAPHEADER
        if file_info["header_size"] == 12:
            file_info["width"] = i16(header_data, 0)
            file_info["height"] = i16(header_data, 2)
            file_info["planes"] = i16(header_data, 4)
            file_info["bits"] = i16(header_data, 6)
            file_info["compression"] = self.RAW
            file_info["palette_padding"] = 3

        # --------------------------------------------- Windows Bitmap v3 to v5
        # 40: BITMAPINFOHEADER
        # 52: BITMAPV2HEADER
        # 56: BITMAPV3HEADER
        # 64: BITMAPCOREHEADER2/OS22XBITMAPHEADER
        # 108: BITMAPV4HEADER
        # 124: BITMAPV5HEADER
        elif file_info["header_size"] in (40, 52, 56, 64, 108, 124):
            # A negative (two's-complement) height means top-down rows.
            file_info["y_flip"] = header_data[7] == 0xFF
            file_info["direction"] = 1 if file_info["y_flip"] else -1
            file_info["width"] = i32(header_data, 0)
            file_info["height"] = (
                i32(header_data, 4)
                if not file_info["y_flip"]
                else 2**32 - i32(header_data, 4)
            )
            file_info["planes"] = i16(header_data, 8)
            file_info["bits"] = i16(header_data, 10)
            file_info["compression"] = i32(header_data, 12)
            # byte size of pixel data
            file_info["data_size"] = i32(header_data, 16)
            file_info["pixels_per_meter"] = (
                i32(header_data, 20),
                i32(header_data, 24),
            )
            file_info["colors"] = i32(header_data, 28)
            file_info["palette_padding"] = 4
            # 1 inch == 39.3701 / 1000 meters; convert ppm to dpi.
            self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"])
            if file_info["compression"] == self.BITFIELDS:
                masks = ["r_mask", "g_mask", "b_mask"]
                if len(header_data) >= 48:
                    if len(header_data) >= 52:
                        masks.append("a_mask")
                    else:
                        file_info["a_mask"] = 0x0
                    for idx, mask in enumerate(masks):
                        file_info[mask] = i32(header_data, 36 + idx * 4)
                else:
                    # 40 byte headers only have the three components in the
                    # bitfields masks, ref:
                    # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
                    # See also
                    # https://github.com/python-pillow/Pillow/issues/1293
                    # There is a 4th component in the RGBQuad, in the alpha
                    # location, but it is listed as a reserved component,
                    # and it is not generally an alpha channel
                    file_info["a_mask"] = 0x0
                    for mask in masks:
                        file_info[mask] = i32(read(4))
                file_info["rgb_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                )
                file_info["rgba_mask"] = (
                    file_info["r_mask"],
                    file_info["g_mask"],
                    file_info["b_mask"],
                    file_info["a_mask"],
                )
        else:
            msg = f"Unsupported BMP header type ({file_info['header_size']})"
            raise OSError(msg)

        # ------------------ Special case : header is reported 40, which
        # ---------------------- is shorter than real size for bpp >= 16
        self._size = file_info["width"], file_info["height"]

        # ------- If color count was not found in the header, compute from bits
        file_info["colors"] = (
            file_info["colors"]
            if file_info.get("colors", 0)
            else (1 << file_info["bits"])
        )
        if offset == 14 + file_info["header_size"] and file_info["bits"] <= 8:
            offset += 4 * file_info["colors"]

        # ---------------------- Check bit depth for unusual unsupported values
        self._mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None))
        if self.mode is None:
            msg = f"Unsupported BMP pixel depth ({file_info['bits']})"
            raise OSError(msg)

        # ---------------- Process BMP with Bitfields compression (not palette)
        decoder_name = "raw"
        if file_info["compression"] == self.BITFIELDS:
            SUPPORTED = {
                32: [
                    (0xFF0000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0x0),
                    (0xFF000000, 0xFF00, 0xFF, 0x0),
                    (0xFF000000, 0xFF0000, 0xFF00, 0xFF),
                    (0xFF, 0xFF00, 0xFF0000, 0xFF000000),
                    (0xFF0000, 0xFF00, 0xFF, 0xFF000000),
                    (0xFF000000, 0xFF00, 0xFF, 0xFF0000),
                    (0x0, 0x0, 0x0, 0x0),
                ],
                24: [(0xFF0000, 0xFF00, 0xFF)],
                16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
            }
            MASK_MODES = {
                (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR",
                (32, (0xFF000000, 0xFF00, 0xFF, 0x0)): "BGXR",
                (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): "ABGR",
                (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA",
                (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
                (32, (0xFF000000, 0xFF00, 0xFF, 0xFF0000)): "BGAR",
                (32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
                (24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
                (16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
                (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
            }
            if file_info["bits"] in SUPPORTED:
                if (
                    file_info["bits"] == 32
                    and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
                ):
                    raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
                    self._mode = "RGBA" if "A" in raw_mode else self.mode
                elif (
                    file_info["bits"] in (24, 16)
                    and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
                ):
                    raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
                else:
                    msg = "Unsupported BMP bitfields layout"
                    raise OSError(msg)
            else:
                msg = "Unsupported BMP bitfields layout"
                raise OSError(msg)
        elif file_info["compression"] == self.RAW:
            if file_info["bits"] == 32 and header == 22:  # 32-bit .cur offset
                raw_mode, self._mode = "BGRA", "RGBA"
        elif file_info["compression"] in (self.RLE8, self.RLE4):
            decoder_name = "bmp_rle"
        else:
            msg = f"Unsupported BMP compression ({file_info['compression']})"
            raise OSError(msg)

        # --------------- Once the header is processed, process the palette/LUT
        if self.mode == "P":  # Paletted for 1, 4 and 8 bit images
            # ---------------------------------------------------- 1-bit images
            if not (0 < file_info["colors"] <= 65536):
                msg = f"Unsupported BMP Palette size ({file_info['colors']})"
                raise OSError(msg)
            else:
                padding = file_info["palette_padding"]
                palette = read(padding * file_info["colors"])
                grayscale = True
                indices = (
                    (0, 255)
                    if file_info["colors"] == 2
                    else list(range(file_info["colors"]))
                )

                # ----------------- Check if grayscale and ignore palette if so
                for ind, val in enumerate(indices):
                    rgb = palette[ind * padding : ind * padding + 3]
                    if rgb != o8(val) * 3:
                        grayscale = False

                # ------- If all colors are gray, white or black, ditch palette
                if grayscale:
                    self._mode = "1" if file_info["colors"] == 2 else "L"
                    raw_mode = self.mode
                else:
                    self._mode = "P"
                    self.palette = ImagePalette.raw(
                        "BGRX" if padding == 4 else "BGR", palette
                    )

        # ---------------------------- Finally set the tile data for the plugin
        self.info["compression"] = file_info["compression"]
        args = [raw_mode]
        if decoder_name == "bmp_rle":
            args.append(file_info["compression"] == self.RLE4)
        else:
            # Row stride padded up to a 4-byte boundary.
            args.append(((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3))
        args.append(file_info["direction"])
        self.tile = [
            (
                decoder_name,
                (0, 0, file_info["width"], file_info["height"]),
                offset or self.fp.tell(),
                tuple(args),
            )
        ]

    def _open(self) -> None:
        """Open file, check magic number and read header"""
        # read 14 bytes: magic number, filesize, reserved, header final offset
        head_data = self.fp.read(14)
        # choke if the file does not have the required magic bytes
        if not _accept(head_data):
            msg = "Not a BMP file"
            raise SyntaxError(msg)
        # read the start position of the BMP image data (u32)
        offset = i32(head_data, 10)
        # load bitmap information (offset=raster info)
        self._bitmap(offset=offset)
299
+
300
+
301
class BmpRleDecoder(ImageFile.PyDecoder):
    """Decoder for RLE8/RLE4-compressed BMP pixel data.

    args: (rawmode, rle4, direction) — *rle4* selects the 4-bit
    (two pixels per byte) variant of the run-length encoding.
    """

    _pulls_fd = True

    def decode(self, buffer: bytes) -> tuple[int, int]:
        assert self.fd is not None
        rle4 = self.args[1]
        data = bytearray()
        x = 0
        dest_length = self.state.xsize * self.state.ysize
        while len(data) < dest_length:
            # Each RLE record is a (count, value) byte pair.
            pixels = self.fd.read(1)
            byte = self.fd.read(1)
            if not pixels or not byte:
                break
            num_pixels = pixels[0]
            if num_pixels:
                # encoded mode: repeat the value num_pixels times
                if x + num_pixels > self.state.xsize:
                    # Too much data for row
                    num_pixels = max(0, self.state.xsize - x)
                if rle4:
                    first_pixel = o8(byte[0] >> 4)
                    second_pixel = o8(byte[0] & 0x0F)
                    for index in range(num_pixels):
                        if index % 2 == 0:
                            data += first_pixel
                        else:
                            data += second_pixel
                else:
                    data += byte * num_pixels
                x += num_pixels
            else:
                # count == 0: escape codes
                if byte[0] == 0:
                    # end of line: pad the row out with zeros
                    while len(data) % self.state.xsize != 0:
                        data += b"\x00"
                    x = 0
                elif byte[0] == 1:
                    # end of bitmap
                    break
                elif byte[0] == 2:
                    # delta: skip (right, up) pixels, filling with zeros
                    bytes_read = self.fd.read(2)
                    if len(bytes_read) < 2:
                        break
                    # Bug fix: unpack the two delta bytes already read
                    # instead of reading two more — the original
                    # `self.fd.read(2)` consumed 4 bytes per delta and
                    # desynchronized the RLE stream.
                    right, up = bytes_read
                    data += b"\x00" * (right + up * self.state.xsize)
                    x = len(data) % self.state.xsize
                else:
                    # absolute mode: byte[0] literal pixels follow
                    if rle4:
                        # 2 pixels per byte
                        byte_count = byte[0] // 2
                        bytes_read = self.fd.read(byte_count)
                        for byte_read in bytes_read:
                            data += o8(byte_read >> 4)
                            data += o8(byte_read & 0x0F)
                    else:
                        byte_count = byte[0]
                        bytes_read = self.fd.read(byte_count)
                        data += bytes_read
                    if len(bytes_read) < byte_count:
                        break
                    x += byte[0]

                    # align to 16-bit word boundary
                    if self.fd.tell() % 2 != 0:
                        self.fd.seek(1, os.SEEK_CUR)
        rawmode = "L" if self.mode == "L" else "P"
        self.set_as_raw(bytes(data), (rawmode, 0, self.args[-1]))
        return -1, 0
372
+
373
+
374
+ # =============================================================================
375
+ # Image plugin for the DIB format (BMP alias)
376
+ # =============================================================================
377
class DibImageFile(BmpImageFile):
    # A DIB stream is a BMP without the 14-byte file header; parsing
    # starts directly at the bitmap info header.
    format = "DIB"
    format_description = "Windows Bitmap"

    def _open(self) -> None:
        self._bitmap()
383
+
384
+
385
+ #
386
+ # --------------------------------------------------------------------
387
+ # Write BMP file
388
+
389
+
390
# Map PIL image mode to (rawmode, bit depth, palette color count) for saving.
SAVE = {
    "1": ("1", 1, 2),
    "L": ("L", 8, 256),
    "P": ("P", 8, 256),
    "RGB": ("BGR", 24, 0),
    "RGBA": ("BGRA", 32, 0),
}
397
+
398
+
399
def _dib_save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* as a DIB: a BMP stream without the 14-byte file header."""
    _save(im, fp, filename, False)
401
+
402
+
403
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, bitmap_header: bool = True
) -> None:
    """Save *im* to *fp* as an uncompressed BMP.

    :param bitmap_header: write the 14-byte BMP file header; False
        produces a raw DIB stream (see :func:`_dib_save`).
    :raises OSError: if the image mode cannot be written as BMP.
    :raises ValueError: if the encoded file would exceed 4 GiB.
    """
    try:
        rawmode, bits, colors = SAVE[im.mode]
    except KeyError as e:
        msg = f"cannot write mode {im.mode} as BMP"
        raise OSError(msg) from e

    info = im.encoderinfo

    dpi = info.get("dpi", (96, 96))

    # 1 meter == 39.3701 inches
    ppm = tuple(int(x * 39.3701 + 0.5) for x in dpi)

    # Rows are padded to a 4-byte boundary.
    stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3)
    header = 40  # or 64 for OS/2 version 2
    image = stride * im.size[1]

    if im.mode == "1":
        palette = b"".join(o8(i) * 4 for i in (0, 255))
    elif im.mode == "L":
        palette = b"".join(o8(i) * 4 for i in range(256))
    elif im.mode == "P":
        palette = im.im.getpalette("RGB", "BGRX")
        colors = len(palette) // 4
    else:
        palette = None

    # bitmap header
    if bitmap_header:
        offset = 14 + header + colors * 4
        file_size = offset + image
        if file_size > 2**32 - 1:
            msg = "File size is too large for the BMP format"
            raise ValueError(msg)
        fp.write(
            b"BM"  # file type (magic)
            + o32(file_size)  # file size
            + o32(0)  # reserved
            + o32(offset)  # image data offset
        )

    # bitmap info header
    fp.write(
        o32(header)  # info header size
        + o32(im.size[0])  # width
        + o32(im.size[1])  # height
        + o16(1)  # planes
        + o16(bits)  # depth
        + o32(0)  # compression (0=uncompressed)
        + o32(image)  # size of bitmap
        + o32(ppm[0])  # resolution
        + o32(ppm[1])  # resolution
        + o32(colors)  # colors used
        + o32(colors)  # colors important
    )

    fp.write(b"\0" * (header - 40))  # padding (for OS/2 format)

    if palette:
        fp.write(palette)

    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))])
468
+
469
+
470
+ #
471
+ # --------------------------------------------------------------------
472
+ # Registry
473
+
474
+
475
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
Image.register_save(BmpImageFile.format, _save)

Image.register_extension(BmpImageFile.format, ".bmp")

Image.register_mime(BmpImageFile.format, "image/bmp")

Image.register_decoder("bmp_rle", BmpRleDecoder)

Image.register_open(DibImageFile.format, DibImageFile, _dib_accept)
Image.register_save(DibImageFile.format, _dib_save)

Image.register_extension(DibImageFile.format, ".dib")

# DIB shares the BMP MIME type.
Image.register_mime(DibImageFile.format, "image/bmp")
myenv/Lib/site-packages/PIL/BufrStubImagePlugin.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # BUFR stub adapter
6
+ #
7
+ # Copyright (c) 1996-2003 by Fredrik Lundh
8
+ #
9
+ # See the README file for information on usage and redistribution.
10
+ #
11
+ from __future__ import annotations
12
+
13
+ from typing import IO
14
+
15
+ from . import Image, ImageFile
16
+
17
+ _handler = None
18
+
19
+
20
+ def register_handler(handler: ImageFile.StubHandler | None) -> None:
21
+ """
22
+ Install application-specific BUFR image handler.
23
+
24
+ :param handler: Handler object.
25
+ """
26
+ global _handler
27
+ _handler = handler
28
+
29
+
30
+ # --------------------------------------------------------------------
31
+ # Image adapter
32
+
33
+
34
+ def _accept(prefix: bytes) -> bool:
35
+ return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"
36
+
37
+
38
+ class BufrStubImageFile(ImageFile.StubImageFile):
39
+ format = "BUFR"
40
+ format_description = "BUFR"
41
+
42
+ def _open(self) -> None:
43
+ offset = self.fp.tell()
44
+
45
+ if not _accept(self.fp.read(4)):
46
+ msg = "Not a BUFR file"
47
+ raise SyntaxError(msg)
48
+
49
+ self.fp.seek(offset)
50
+
51
+ # make something up
52
+ self._mode = "F"
53
+ self._size = 1, 1
54
+
55
+ loader = self._load()
56
+ if loader:
57
+ loader.open(self)
58
+
59
+ def _load(self) -> ImageFile.StubHandler | None:
60
+ return _handler
61
+
62
+
63
+ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
64
+ if _handler is None or not hasattr(_handler, "save"):
65
+ msg = "BUFR save handler not installed"
66
+ raise OSError(msg)
67
+ _handler.save(im, fp, filename)
68
+
69
+
70
+ # --------------------------------------------------------------------
71
+ # Registry
72
+
73
+ Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
74
+ Image.register_save(BufrStubImageFile.format, _save)
75
+
76
+ Image.register_extension(BufrStubImageFile.format, ".bufr")
myenv/Lib/site-packages/PIL/ContainerIO.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # a class to read from a container file
6
+ #
7
+ # History:
8
+ # 1995-06-18 fl Created
9
+ # 1995-09-07 fl Added readline(), readlines()
10
+ #
11
+ # Copyright (c) 1997-2001 by Secret Labs AB
12
+ # Copyright (c) 1995 by Fredrik Lundh
13
+ #
14
+ # See the README file for information on usage and redistribution.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ import io
19
+ from typing import IO, AnyStr, Generic, Literal
20
+
21
+
22
+ class ContainerIO(Generic[AnyStr]):
23
+ """
24
+ A file object that provides read access to a part of an existing
25
+ file (for example a TAR file).
26
+ """
27
+
28
+ def __init__(self, file: IO[AnyStr], offset: int, length: int) -> None:
29
+ """
30
+ Create file object.
31
+
32
+ :param file: Existing file.
33
+ :param offset: Start of region, in bytes.
34
+ :param length: Size of region, in bytes.
35
+ """
36
+ self.fh: IO[AnyStr] = file
37
+ self.pos = 0
38
+ self.offset = offset
39
+ self.length = length
40
+ self.fh.seek(offset)
41
+
42
+ ##
43
+ # Always false.
44
+
45
+ def isatty(self) -> bool:
46
+ return False
47
+
48
+ def seek(self, offset: int, mode: Literal[0, 1, 2] = io.SEEK_SET) -> None:
49
+ """
50
+ Move file pointer.
51
+
52
+ :param offset: Offset in bytes.
53
+ :param mode: Starting position. Use 0 for beginning of region, 1
54
+ for current offset, and 2 for end of region. You cannot move
55
+ the pointer outside the defined region.
56
+ """
57
+ if mode == 1:
58
+ self.pos = self.pos + offset
59
+ elif mode == 2:
60
+ self.pos = self.length + offset
61
+ else:
62
+ self.pos = offset
63
+ # clamp
64
+ self.pos = max(0, min(self.pos, self.length))
65
+ self.fh.seek(self.offset + self.pos)
66
+
67
+ def tell(self) -> int:
68
+ """
69
+ Get current file pointer.
70
+
71
+ :returns: Offset from start of region, in bytes.
72
+ """
73
+ return self.pos
74
+
75
+ def read(self, n: int = 0) -> AnyStr:
76
+ """
77
+ Read data.
78
+
79
+ :param n: Number of bytes to read. If omitted or zero,
80
+ read until end of region.
81
+ :returns: An 8-bit string.
82
+ """
83
+ if n:
84
+ n = min(n, self.length - self.pos)
85
+ else:
86
+ n = self.length - self.pos
87
+ if not n: # EOF
88
+ return b"" if "b" in self.fh.mode else "" # type: ignore[return-value]
89
+ self.pos = self.pos + n
90
+ return self.fh.read(n)
91
+
92
+ def readline(self) -> AnyStr:
93
+ """
94
+ Read a line of text.
95
+
96
+ :returns: An 8-bit string.
97
+ """
98
+ s: AnyStr = b"" if "b" in self.fh.mode else "" # type: ignore[assignment]
99
+ newline_character = b"\n" if "b" in self.fh.mode else "\n"
100
+ while True:
101
+ c = self.read(1)
102
+ if not c:
103
+ break
104
+ s = s + c
105
+ if c == newline_character:
106
+ break
107
+ return s
108
+
109
+ def readlines(self) -> list[AnyStr]:
110
+ """
111
+ Read multiple lines of text.
112
+
113
+ :returns: A list of 8-bit strings.
114
+ """
115
+ lines = []
116
+ while True:
117
+ s = self.readline()
118
+ if not s:
119
+ break
120
+ lines.append(s)
121
+ return lines
myenv/Lib/site-packages/PIL/CurImagePlugin.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # Windows Cursor support for PIL
6
+ #
7
+ # notes:
8
+ # uses BmpImagePlugin.py to read the bitmap data.
9
+ #
10
+ # history:
11
+ # 96-05-27 fl Created
12
+ #
13
+ # Copyright (c) Secret Labs AB 1997.
14
+ # Copyright (c) Fredrik Lundh 1996.
15
+ #
16
+ # See the README file for information on usage and redistribution.
17
+ #
18
+ from __future__ import annotations
19
+
20
+ from . import BmpImagePlugin, Image
21
+ from ._binary import i16le as i16
22
+ from ._binary import i32le as i32
23
+
24
+ #
25
+ # --------------------------------------------------------------------
26
+
27
+
28
+ def _accept(prefix: bytes) -> bool:
29
+ return prefix[:4] == b"\0\0\2\0"
30
+
31
+
32
+ ##
33
+ # Image plugin for Windows Cursor files.
34
+
35
+
36
+ class CurImageFile(BmpImagePlugin.BmpImageFile):
37
+ format = "CUR"
38
+ format_description = "Windows Cursor"
39
+
40
+ def _open(self) -> None:
41
+ offset = self.fp.tell()
42
+
43
+ # check magic
44
+ s = self.fp.read(6)
45
+ if not _accept(s):
46
+ msg = "not a CUR file"
47
+ raise SyntaxError(msg)
48
+
49
+ # pick the largest cursor in the file
50
+ m = b""
51
+ for i in range(i16(s, 4)):
52
+ s = self.fp.read(16)
53
+ if not m:
54
+ m = s
55
+ elif s[0] > m[0] and s[1] > m[1]:
56
+ m = s
57
+ if not m:
58
+ msg = "No cursors were found"
59
+ raise TypeError(msg)
60
+
61
+ # load as bitmap
62
+ self._bitmap(i32(m, 12) + offset)
63
+
64
+ # patch up the bitmap height
65
+ self._size = self.size[0], self.size[1] // 2
66
+ d, e, o, a = self.tile[0]
67
+ self.tile[0] = d, (0, 0) + self.size, o, a
68
+
69
+
70
+ #
71
+ # --------------------------------------------------------------------
72
+
73
+ Image.register_open(CurImageFile.format, CurImageFile, _accept)
74
+
75
+ Image.register_extension(CurImageFile.format, ".cur")
myenv/Lib/site-packages/PIL/DcxImagePlugin.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # DCX file handling
6
+ #
7
+ # DCX is a container file format defined by Intel, commonly used
8
+ # for fax applications. Each DCX file consists of a directory
9
+ # (a list of file offsets) followed by a set of (usually 1-bit)
10
+ # PCX files.
11
+ #
12
+ # History:
13
+ # 1995-09-09 fl Created
14
+ # 1996-03-20 fl Properly derived from PcxImageFile.
15
+ # 1998-07-15 fl Renamed offset attribute to avoid name clash
16
+ # 2002-07-30 fl Fixed file handling
17
+ #
18
+ # Copyright (c) 1997-98 by Secret Labs AB.
19
+ # Copyright (c) 1995-96 by Fredrik Lundh.
20
+ #
21
+ # See the README file for information on usage and redistribution.
22
+ #
23
+ from __future__ import annotations
24
+
25
+ from . import Image
26
+ from ._binary import i32le as i32
27
+ from .PcxImagePlugin import PcxImageFile
28
+
29
+ MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then?
30
+
31
+
32
+ def _accept(prefix: bytes) -> bool:
33
+ return len(prefix) >= 4 and i32(prefix) == MAGIC
34
+
35
+
36
+ ##
37
+ # Image plugin for the Intel DCX format.
38
+
39
+
40
+ class DcxImageFile(PcxImageFile):
41
+ format = "DCX"
42
+ format_description = "Intel DCX"
43
+ _close_exclusive_fp_after_loading = False
44
+
45
+ def _open(self) -> None:
46
+ # Header
47
+ s = self.fp.read(4)
48
+ if not _accept(s):
49
+ msg = "not a DCX file"
50
+ raise SyntaxError(msg)
51
+
52
+ # Component directory
53
+ self._offset = []
54
+ for i in range(1024):
55
+ offset = i32(self.fp.read(4))
56
+ if not offset:
57
+ break
58
+ self._offset.append(offset)
59
+
60
+ self._fp = self.fp
61
+ self.frame = -1
62
+ self.n_frames = len(self._offset)
63
+ self.is_animated = self.n_frames > 1
64
+ self.seek(0)
65
+
66
+ def seek(self, frame: int) -> None:
67
+ if not self._seek_check(frame):
68
+ return
69
+ self.frame = frame
70
+ self.fp = self._fp
71
+ self.fp.seek(self._offset[frame])
72
+ PcxImageFile._open(self)
73
+
74
+ def tell(self) -> int:
75
+ return self.frame
76
+
77
+
78
+ Image.register_open(DcxImageFile.format, DcxImageFile, _accept)
79
+
80
+ Image.register_extension(DcxImageFile.format, ".dcx")
myenv/Lib/site-packages/PIL/DdsImagePlugin.py ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A Pillow loader for .dds files (S3TC-compressed aka DXTC)
3
+ Jerome Leclanche <jerome@leclan.ch>
4
+
5
+ Documentation:
6
+ https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt
7
+
8
+ The contents of this file are hereby released in the public domain (CC0)
9
+ Full text of the CC0 license:
10
+ https://creativecommons.org/publicdomain/zero/1.0/
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import io
16
+ import struct
17
+ import sys
18
+ from enum import IntEnum, IntFlag
19
+ from typing import IO
20
+
21
+ from . import Image, ImageFile, ImagePalette
22
+ from ._binary import i32le as i32
23
+ from ._binary import o8
24
+ from ._binary import o32le as o32
25
+
26
+ # Magic ("DDS ")
27
+ DDS_MAGIC = 0x20534444
28
+
29
+
30
+ # DDS flags
31
+ class DDSD(IntFlag):
32
+ CAPS = 0x1
33
+ HEIGHT = 0x2
34
+ WIDTH = 0x4
35
+ PITCH = 0x8
36
+ PIXELFORMAT = 0x1000
37
+ MIPMAPCOUNT = 0x20000
38
+ LINEARSIZE = 0x80000
39
+ DEPTH = 0x800000
40
+
41
+
42
+ # DDS caps
43
+ class DDSCAPS(IntFlag):
44
+ COMPLEX = 0x8
45
+ TEXTURE = 0x1000
46
+ MIPMAP = 0x400000
47
+
48
+
49
+ class DDSCAPS2(IntFlag):
50
+ CUBEMAP = 0x200
51
+ CUBEMAP_POSITIVEX = 0x400
52
+ CUBEMAP_NEGATIVEX = 0x800
53
+ CUBEMAP_POSITIVEY = 0x1000
54
+ CUBEMAP_NEGATIVEY = 0x2000
55
+ CUBEMAP_POSITIVEZ = 0x4000
56
+ CUBEMAP_NEGATIVEZ = 0x8000
57
+ VOLUME = 0x200000
58
+
59
+
60
+ # Pixel Format
61
+ class DDPF(IntFlag):
62
+ ALPHAPIXELS = 0x1
63
+ ALPHA = 0x2
64
+ FOURCC = 0x4
65
+ PALETTEINDEXED8 = 0x20
66
+ RGB = 0x40
67
+ LUMINANCE = 0x20000
68
+
69
+
70
+ # dxgiformat.h
71
+ class DXGI_FORMAT(IntEnum):
72
+ UNKNOWN = 0
73
+ R32G32B32A32_TYPELESS = 1
74
+ R32G32B32A32_FLOAT = 2
75
+ R32G32B32A32_UINT = 3
76
+ R32G32B32A32_SINT = 4
77
+ R32G32B32_TYPELESS = 5
78
+ R32G32B32_FLOAT = 6
79
+ R32G32B32_UINT = 7
80
+ R32G32B32_SINT = 8
81
+ R16G16B16A16_TYPELESS = 9
82
+ R16G16B16A16_FLOAT = 10
83
+ R16G16B16A16_UNORM = 11
84
+ R16G16B16A16_UINT = 12
85
+ R16G16B16A16_SNORM = 13
86
+ R16G16B16A16_SINT = 14
87
+ R32G32_TYPELESS = 15
88
+ R32G32_FLOAT = 16
89
+ R32G32_UINT = 17
90
+ R32G32_SINT = 18
91
+ R32G8X24_TYPELESS = 19
92
+ D32_FLOAT_S8X24_UINT = 20
93
+ R32_FLOAT_X8X24_TYPELESS = 21
94
+ X32_TYPELESS_G8X24_UINT = 22
95
+ R10G10B10A2_TYPELESS = 23
96
+ R10G10B10A2_UNORM = 24
97
+ R10G10B10A2_UINT = 25
98
+ R11G11B10_FLOAT = 26
99
+ R8G8B8A8_TYPELESS = 27
100
+ R8G8B8A8_UNORM = 28
101
+ R8G8B8A8_UNORM_SRGB = 29
102
+ R8G8B8A8_UINT = 30
103
+ R8G8B8A8_SNORM = 31
104
+ R8G8B8A8_SINT = 32
105
+ R16G16_TYPELESS = 33
106
+ R16G16_FLOAT = 34
107
+ R16G16_UNORM = 35
108
+ R16G16_UINT = 36
109
+ R16G16_SNORM = 37
110
+ R16G16_SINT = 38
111
+ R32_TYPELESS = 39
112
+ D32_FLOAT = 40
113
+ R32_FLOAT = 41
114
+ R32_UINT = 42
115
+ R32_SINT = 43
116
+ R24G8_TYPELESS = 44
117
+ D24_UNORM_S8_UINT = 45
118
+ R24_UNORM_X8_TYPELESS = 46
119
+ X24_TYPELESS_G8_UINT = 47
120
+ R8G8_TYPELESS = 48
121
+ R8G8_UNORM = 49
122
+ R8G8_UINT = 50
123
+ R8G8_SNORM = 51
124
+ R8G8_SINT = 52
125
+ R16_TYPELESS = 53
126
+ R16_FLOAT = 54
127
+ D16_UNORM = 55
128
+ R16_UNORM = 56
129
+ R16_UINT = 57
130
+ R16_SNORM = 58
131
+ R16_SINT = 59
132
+ R8_TYPELESS = 60
133
+ R8_UNORM = 61
134
+ R8_UINT = 62
135
+ R8_SNORM = 63
136
+ R8_SINT = 64
137
+ A8_UNORM = 65
138
+ R1_UNORM = 66
139
+ R9G9B9E5_SHAREDEXP = 67
140
+ R8G8_B8G8_UNORM = 68
141
+ G8R8_G8B8_UNORM = 69
142
+ BC1_TYPELESS = 70
143
+ BC1_UNORM = 71
144
+ BC1_UNORM_SRGB = 72
145
+ BC2_TYPELESS = 73
146
+ BC2_UNORM = 74
147
+ BC2_UNORM_SRGB = 75
148
+ BC3_TYPELESS = 76
149
+ BC3_UNORM = 77
150
+ BC3_UNORM_SRGB = 78
151
+ BC4_TYPELESS = 79
152
+ BC4_UNORM = 80
153
+ BC4_SNORM = 81
154
+ BC5_TYPELESS = 82
155
+ BC5_UNORM = 83
156
+ BC5_SNORM = 84
157
+ B5G6R5_UNORM = 85
158
+ B5G5R5A1_UNORM = 86
159
+ B8G8R8A8_UNORM = 87
160
+ B8G8R8X8_UNORM = 88
161
+ R10G10B10_XR_BIAS_A2_UNORM = 89
162
+ B8G8R8A8_TYPELESS = 90
163
+ B8G8R8A8_UNORM_SRGB = 91
164
+ B8G8R8X8_TYPELESS = 92
165
+ B8G8R8X8_UNORM_SRGB = 93
166
+ BC6H_TYPELESS = 94
167
+ BC6H_UF16 = 95
168
+ BC6H_SF16 = 96
169
+ BC7_TYPELESS = 97
170
+ BC7_UNORM = 98
171
+ BC7_UNORM_SRGB = 99
172
+ AYUV = 100
173
+ Y410 = 101
174
+ Y416 = 102
175
+ NV12 = 103
176
+ P010 = 104
177
+ P016 = 105
178
+ OPAQUE_420 = 106
179
+ YUY2 = 107
180
+ Y210 = 108
181
+ Y216 = 109
182
+ NV11 = 110
183
+ AI44 = 111
184
+ IA44 = 112
185
+ P8 = 113
186
+ A8P8 = 114
187
+ B4G4R4A4_UNORM = 115
188
+ P208 = 130
189
+ V208 = 131
190
+ V408 = 132
191
+ SAMPLER_FEEDBACK_MIN_MIP_OPAQUE = 189
192
+ SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE = 190
193
+
194
+
195
+ class D3DFMT(IntEnum):
196
+ UNKNOWN = 0
197
+ R8G8B8 = 20
198
+ A8R8G8B8 = 21
199
+ X8R8G8B8 = 22
200
+ R5G6B5 = 23
201
+ X1R5G5B5 = 24
202
+ A1R5G5B5 = 25
203
+ A4R4G4B4 = 26
204
+ R3G3B2 = 27
205
+ A8 = 28
206
+ A8R3G3B2 = 29
207
+ X4R4G4B4 = 30
208
+ A2B10G10R10 = 31
209
+ A8B8G8R8 = 32
210
+ X8B8G8R8 = 33
211
+ G16R16 = 34
212
+ A2R10G10B10 = 35
213
+ A16B16G16R16 = 36
214
+ A8P8 = 40
215
+ P8 = 41
216
+ L8 = 50
217
+ A8L8 = 51
218
+ A4L4 = 52
219
+ V8U8 = 60
220
+ L6V5U5 = 61
221
+ X8L8V8U8 = 62
222
+ Q8W8V8U8 = 63
223
+ V16U16 = 64
224
+ A2W10V10U10 = 67
225
+ D16_LOCKABLE = 70
226
+ D32 = 71
227
+ D15S1 = 73
228
+ D24S8 = 75
229
+ D24X8 = 77
230
+ D24X4S4 = 79
231
+ D16 = 80
232
+ D32F_LOCKABLE = 82
233
+ D24FS8 = 83
234
+ D32_LOCKABLE = 84
235
+ S8_LOCKABLE = 85
236
+ L16 = 81
237
+ VERTEXDATA = 100
238
+ INDEX16 = 101
239
+ INDEX32 = 102
240
+ Q16W16V16U16 = 110
241
+ R16F = 111
242
+ G16R16F = 112
243
+ A16B16G16R16F = 113
244
+ R32F = 114
245
+ G32R32F = 115
246
+ A32B32G32R32F = 116
247
+ CxV8U8 = 117
248
+ A1 = 118
249
+ A2B10G10R10_XR_BIAS = 119
250
+ BINARYBUFFER = 199
251
+
252
+ UYVY = i32(b"UYVY")
253
+ R8G8_B8G8 = i32(b"RGBG")
254
+ YUY2 = i32(b"YUY2")
255
+ G8R8_G8B8 = i32(b"GRGB")
256
+ DXT1 = i32(b"DXT1")
257
+ DXT2 = i32(b"DXT2")
258
+ DXT3 = i32(b"DXT3")
259
+ DXT4 = i32(b"DXT4")
260
+ DXT5 = i32(b"DXT5")
261
+ DX10 = i32(b"DX10")
262
+ BC4S = i32(b"BC4S")
263
+ BC4U = i32(b"BC4U")
264
+ BC5S = i32(b"BC5S")
265
+ BC5U = i32(b"BC5U")
266
+ ATI1 = i32(b"ATI1")
267
+ ATI2 = i32(b"ATI2")
268
+ MULTI2_ARGB8 = i32(b"MET1")
269
+
270
+
271
+ # Backward compatibility layer
272
+ module = sys.modules[__name__]
273
+ for item in DDSD:
274
+ assert item.name is not None
275
+ setattr(module, f"DDSD_{item.name}", item.value)
276
+ for item1 in DDSCAPS:
277
+ assert item1.name is not None
278
+ setattr(module, f"DDSCAPS_{item1.name}", item1.value)
279
+ for item2 in DDSCAPS2:
280
+ assert item2.name is not None
281
+ setattr(module, f"DDSCAPS2_{item2.name}", item2.value)
282
+ for item3 in DDPF:
283
+ assert item3.name is not None
284
+ setattr(module, f"DDPF_{item3.name}", item3.value)
285
+
286
+ DDS_FOURCC = DDPF.FOURCC
287
+ DDS_RGB = DDPF.RGB
288
+ DDS_RGBA = DDPF.RGB | DDPF.ALPHAPIXELS
289
+ DDS_LUMINANCE = DDPF.LUMINANCE
290
+ DDS_LUMINANCEA = DDPF.LUMINANCE | DDPF.ALPHAPIXELS
291
+ DDS_ALPHA = DDPF.ALPHA
292
+ DDS_PAL8 = DDPF.PALETTEINDEXED8
293
+
294
+ DDS_HEADER_FLAGS_TEXTURE = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PIXELFORMAT
295
+ DDS_HEADER_FLAGS_MIPMAP = DDSD.MIPMAPCOUNT
296
+ DDS_HEADER_FLAGS_VOLUME = DDSD.DEPTH
297
+ DDS_HEADER_FLAGS_PITCH = DDSD.PITCH
298
+ DDS_HEADER_FLAGS_LINEARSIZE = DDSD.LINEARSIZE
299
+
300
+ DDS_HEIGHT = DDSD.HEIGHT
301
+ DDS_WIDTH = DDSD.WIDTH
302
+
303
+ DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS.TEXTURE
304
+ DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS.COMPLEX | DDSCAPS.MIPMAP
305
+ DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS.COMPLEX
306
+
307
+ DDS_CUBEMAP_POSITIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEX
308
+ DDS_CUBEMAP_NEGATIVEX = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEX
309
+ DDS_CUBEMAP_POSITIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEY
310
+ DDS_CUBEMAP_NEGATIVEY = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEY
311
+ DDS_CUBEMAP_POSITIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_POSITIVEZ
312
+ DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2.CUBEMAP | DDSCAPS2.CUBEMAP_NEGATIVEZ
313
+
314
+ DXT1_FOURCC = D3DFMT.DXT1
315
+ DXT3_FOURCC = D3DFMT.DXT3
316
+ DXT5_FOURCC = D3DFMT.DXT5
317
+
318
+ DXGI_FORMAT_R8G8B8A8_TYPELESS = DXGI_FORMAT.R8G8B8A8_TYPELESS
319
+ DXGI_FORMAT_R8G8B8A8_UNORM = DXGI_FORMAT.R8G8B8A8_UNORM
320
+ DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = DXGI_FORMAT.R8G8B8A8_UNORM_SRGB
321
+ DXGI_FORMAT_BC5_TYPELESS = DXGI_FORMAT.BC5_TYPELESS
322
+ DXGI_FORMAT_BC5_UNORM = DXGI_FORMAT.BC5_UNORM
323
+ DXGI_FORMAT_BC5_SNORM = DXGI_FORMAT.BC5_SNORM
324
+ DXGI_FORMAT_BC6H_UF16 = DXGI_FORMAT.BC6H_UF16
325
+ DXGI_FORMAT_BC6H_SF16 = DXGI_FORMAT.BC6H_SF16
326
+ DXGI_FORMAT_BC7_TYPELESS = DXGI_FORMAT.BC7_TYPELESS
327
+ DXGI_FORMAT_BC7_UNORM = DXGI_FORMAT.BC7_UNORM
328
+ DXGI_FORMAT_BC7_UNORM_SRGB = DXGI_FORMAT.BC7_UNORM_SRGB
329
+
330
+
331
+ class DdsImageFile(ImageFile.ImageFile):
332
+ format = "DDS"
333
+ format_description = "DirectDraw Surface"
334
+
335
+ def _open(self) -> None:
336
+ if not _accept(self.fp.read(4)):
337
+ msg = "not a DDS file"
338
+ raise SyntaxError(msg)
339
+ (header_size,) = struct.unpack("<I", self.fp.read(4))
340
+ if header_size != 124:
341
+ msg = f"Unsupported header size {repr(header_size)}"
342
+ raise OSError(msg)
343
+ header_bytes = self.fp.read(header_size - 4)
344
+ if len(header_bytes) != 120:
345
+ msg = f"Incomplete header: {len(header_bytes)} bytes"
346
+ raise OSError(msg)
347
+ header = io.BytesIO(header_bytes)
348
+
349
+ flags, height, width = struct.unpack("<3I", header.read(12))
350
+ self._size = (width, height)
351
+ extents = (0, 0) + self.size
352
+
353
+ pitch, depth, mipmaps = struct.unpack("<3I", header.read(12))
354
+ struct.unpack("<11I", header.read(44)) # reserved
355
+
356
+ # pixel format
357
+ pfsize, pfflags, fourcc, bitcount = struct.unpack("<4I", header.read(16))
358
+ n = 0
359
+ rawmode = None
360
+ if pfflags & DDPF.RGB:
361
+ # Texture contains uncompressed RGB data
362
+ if pfflags & DDPF.ALPHAPIXELS:
363
+ self._mode = "RGBA"
364
+ mask_count = 4
365
+ else:
366
+ self._mode = "RGB"
367
+ mask_count = 3
368
+
369
+ masks = struct.unpack(f"<{mask_count}I", header.read(mask_count * 4))
370
+ self.tile = [("dds_rgb", extents, 0, (bitcount, masks))]
371
+ return
372
+ elif pfflags & DDPF.LUMINANCE:
373
+ if bitcount == 8:
374
+ self._mode = "L"
375
+ elif bitcount == 16 and pfflags & DDPF.ALPHAPIXELS:
376
+ self._mode = "LA"
377
+ else:
378
+ msg = f"Unsupported bitcount {bitcount} for {pfflags}"
379
+ raise OSError(msg)
380
+ elif pfflags & DDPF.PALETTEINDEXED8:
381
+ self._mode = "P"
382
+ self.palette = ImagePalette.raw("RGBA", self.fp.read(1024))
383
+ self.palette.mode = "RGBA"
384
+ elif pfflags & DDPF.FOURCC:
385
+ offset = header_size + 4
386
+ if fourcc == D3DFMT.DXT1:
387
+ self._mode = "RGBA"
388
+ self.pixel_format = "DXT1"
389
+ n = 1
390
+ elif fourcc == D3DFMT.DXT3:
391
+ self._mode = "RGBA"
392
+ self.pixel_format = "DXT3"
393
+ n = 2
394
+ elif fourcc == D3DFMT.DXT5:
395
+ self._mode = "RGBA"
396
+ self.pixel_format = "DXT5"
397
+ n = 3
398
+ elif fourcc in (D3DFMT.BC4U, D3DFMT.ATI1):
399
+ self._mode = "L"
400
+ self.pixel_format = "BC4"
401
+ n = 4
402
+ elif fourcc == D3DFMT.BC5S:
403
+ self._mode = "RGB"
404
+ self.pixel_format = "BC5S"
405
+ n = 5
406
+ elif fourcc in (D3DFMT.BC5U, D3DFMT.ATI2):
407
+ self._mode = "RGB"
408
+ self.pixel_format = "BC5"
409
+ n = 5
410
+ elif fourcc == D3DFMT.DX10:
411
+ offset += 20
412
+ # ignoring flags which pertain to volume textures and cubemaps
413
+ (dxgi_format,) = struct.unpack("<I", self.fp.read(4))
414
+ self.fp.read(16)
415
+ if dxgi_format in (
416
+ DXGI_FORMAT.BC1_UNORM,
417
+ DXGI_FORMAT.BC1_TYPELESS,
418
+ ):
419
+ self._mode = "RGBA"
420
+ self.pixel_format = "BC1"
421
+ n = 1
422
+ elif dxgi_format in (DXGI_FORMAT.BC4_TYPELESS, DXGI_FORMAT.BC4_UNORM):
423
+ self._mode = "L"
424
+ self.pixel_format = "BC4"
425
+ n = 4
426
+ elif dxgi_format in (DXGI_FORMAT.BC5_TYPELESS, DXGI_FORMAT.BC5_UNORM):
427
+ self._mode = "RGB"
428
+ self.pixel_format = "BC5"
429
+ n = 5
430
+ elif dxgi_format == DXGI_FORMAT.BC5_SNORM:
431
+ self._mode = "RGB"
432
+ self.pixel_format = "BC5S"
433
+ n = 5
434
+ elif dxgi_format == DXGI_FORMAT.BC6H_UF16:
435
+ self._mode = "RGB"
436
+ self.pixel_format = "BC6H"
437
+ n = 6
438
+ elif dxgi_format == DXGI_FORMAT.BC6H_SF16:
439
+ self._mode = "RGB"
440
+ self.pixel_format = "BC6HS"
441
+ n = 6
442
+ elif dxgi_format in (
443
+ DXGI_FORMAT.BC7_TYPELESS,
444
+ DXGI_FORMAT.BC7_UNORM,
445
+ DXGI_FORMAT.BC7_UNORM_SRGB,
446
+ ):
447
+ self._mode = "RGBA"
448
+ self.pixel_format = "BC7"
449
+ n = 7
450
+ if dxgi_format == DXGI_FORMAT.BC7_UNORM_SRGB:
451
+ self.info["gamma"] = 1 / 2.2
452
+ elif dxgi_format in (
453
+ DXGI_FORMAT.R8G8B8A8_TYPELESS,
454
+ DXGI_FORMAT.R8G8B8A8_UNORM,
455
+ DXGI_FORMAT.R8G8B8A8_UNORM_SRGB,
456
+ ):
457
+ self._mode = "RGBA"
458
+ if dxgi_format == DXGI_FORMAT.R8G8B8A8_UNORM_SRGB:
459
+ self.info["gamma"] = 1 / 2.2
460
+ else:
461
+ msg = f"Unimplemented DXGI format {dxgi_format}"
462
+ raise NotImplementedError(msg)
463
+ else:
464
+ msg = f"Unimplemented pixel format {repr(fourcc)}"
465
+ raise NotImplementedError(msg)
466
+ else:
467
+ msg = f"Unknown pixel format flags {pfflags}"
468
+ raise NotImplementedError(msg)
469
+
470
+ if n:
471
+ self.tile = [
472
+ ImageFile._Tile("bcn", extents, offset, (n, self.pixel_format))
473
+ ]
474
+ else:
475
+ self.tile = [ImageFile._Tile("raw", extents, 0, rawmode or self.mode)]
476
+
477
+ def load_seek(self, pos: int) -> None:
478
+ pass
479
+
480
+
481
+ class DdsRgbDecoder(ImageFile.PyDecoder):
482
+ _pulls_fd = True
483
+
484
+ def decode(self, buffer: bytes) -> tuple[int, int]:
485
+ assert self.fd is not None
486
+ bitcount, masks = self.args
487
+
488
+ # Some masks will be padded with zeros, e.g. R 0b11 G 0b1100
489
+ # Calculate how many zeros each mask is padded with
490
+ mask_offsets = []
491
+ # And the maximum value of each channel without the padding
492
+ mask_totals = []
493
+ for mask in masks:
494
+ offset = 0
495
+ if mask != 0:
496
+ while mask >> (offset + 1) << (offset + 1) == mask:
497
+ offset += 1
498
+ mask_offsets.append(offset)
499
+ mask_totals.append(mask >> offset)
500
+
501
+ data = bytearray()
502
+ bytecount = bitcount // 8
503
+ dest_length = self.state.xsize * self.state.ysize * len(masks)
504
+ while len(data) < dest_length:
505
+ value = int.from_bytes(self.fd.read(bytecount), "little")
506
+ for i, mask in enumerate(masks):
507
+ masked_value = value & mask
508
+ # Remove the zero padding, and scale it to 8 bits
509
+ data += o8(
510
+ int(((masked_value >> mask_offsets[i]) / mask_totals[i]) * 255)
511
+ )
512
+ self.set_as_raw(data)
513
+ return -1, 0
514
+
515
+
516
+ def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
517
+ if im.mode not in ("RGB", "RGBA", "L", "LA"):
518
+ msg = f"cannot write mode {im.mode} as DDS"
519
+ raise OSError(msg)
520
+
521
+ alpha = im.mode[-1] == "A"
522
+ if im.mode[0] == "L":
523
+ pixel_flags = DDPF.LUMINANCE
524
+ rawmode = im.mode
525
+ if alpha:
526
+ rgba_mask = [0x000000FF, 0x000000FF, 0x000000FF]
527
+ else:
528
+ rgba_mask = [0xFF000000, 0xFF000000, 0xFF000000]
529
+ else:
530
+ pixel_flags = DDPF.RGB
531
+ rawmode = im.mode[::-1]
532
+ rgba_mask = [0x00FF0000, 0x0000FF00, 0x000000FF]
533
+
534
+ if alpha:
535
+ r, g, b, a = im.split()
536
+ im = Image.merge("RGBA", (a, r, g, b))
537
+ if alpha:
538
+ pixel_flags |= DDPF.ALPHAPIXELS
539
+ rgba_mask.append(0xFF000000 if alpha else 0)
540
+
541
+ flags = DDSD.CAPS | DDSD.HEIGHT | DDSD.WIDTH | DDSD.PITCH | DDSD.PIXELFORMAT
542
+ bitcount = len(im.getbands()) * 8
543
+ pitch = (im.width * bitcount + 7) // 8
544
+
545
+ fp.write(
546
+ o32(DDS_MAGIC)
547
+ + struct.pack(
548
+ "<7I",
549
+ 124, # header size
550
+ flags, # flags
551
+ im.height,
552
+ im.width,
553
+ pitch,
554
+ 0, # depth
555
+ 0, # mipmaps
556
+ )
557
+ + struct.pack("11I", *((0,) * 11)) # reserved
558
+ # pfsize, pfflags, fourcc, bitcount
559
+ + struct.pack("<4I", 32, pixel_flags, 0, bitcount)
560
+ + struct.pack("<4I", *rgba_mask) # dwRGBABitMask
561
+ + struct.pack("<5I", DDSCAPS.TEXTURE, 0, 0, 0, 0)
562
+ )
563
+ ImageFile._save(
564
+ im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]
565
+ )
566
+
567
+
568
+ def _accept(prefix: bytes) -> bool:
569
+ return prefix[:4] == b"DDS "
570
+
571
+
572
+ Image.register_open(DdsImageFile.format, DdsImageFile, _accept)
573
+ Image.register_decoder("dds_rgb", DdsRgbDecoder)
574
+ Image.register_save(DdsImageFile.format, _save)
575
+ Image.register_extension(DdsImageFile.format, ".dds")
myenv/Lib/site-packages/PIL/EpsImagePlugin.py ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # EPS file handling
6
+ #
7
+ # History:
8
+ # 1995-09-01 fl Created (0.1)
9
+ # 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
10
+ # 1996-08-22 fl Don't choke on floating point BoundingBox values
11
+ # 1996-08-23 fl Handle files from Macintosh (0.3)
12
+ # 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
13
+ # 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
14
+ # 2014-05-07 e Handling of EPS with binary preview and fixed resolution
15
+ # resizing
16
+ #
17
+ # Copyright (c) 1997-2003 by Secret Labs AB.
18
+ # Copyright (c) 1995-2003 by Fredrik Lundh
19
+ #
20
+ # See the README file for information on usage and redistribution.
21
+ #
22
+ from __future__ import annotations
23
+
24
+ import io
25
+ import os
26
+ import re
27
+ import subprocess
28
+ import sys
29
+ import tempfile
30
+ from typing import IO
31
+
32
+ from . import Image, ImageFile
33
+ from ._binary import i32le as i32
34
+ from ._deprecate import deprecate
35
+
36
+ # --------------------------------------------------------------------
37
+
38
+
39
+ split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
40
+ field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
41
+
42
+ gs_binary: str | bool | None = None
43
+ gs_windows_binary = None
44
+
45
+
46
+ def has_ghostscript() -> bool:
47
+ global gs_binary, gs_windows_binary
48
+ if gs_binary is None:
49
+ if sys.platform.startswith("win"):
50
+ if gs_windows_binary is None:
51
+ import shutil
52
+
53
+ for binary in ("gswin32c", "gswin64c", "gs"):
54
+ if shutil.which(binary) is not None:
55
+ gs_windows_binary = binary
56
+ break
57
+ else:
58
+ gs_windows_binary = False
59
+ gs_binary = gs_windows_binary
60
+ else:
61
+ try:
62
+ subprocess.check_call(["gs", "--version"], stdout=subprocess.DEVNULL)
63
+ gs_binary = "gs"
64
+ except OSError:
65
+ gs_binary = False
66
+ return gs_binary is not False
67
+
68
+
69
def Ghostscript(tile, size, fp, scale=1, transparency=False):
    """Render an EPS image to a raster image using the Ghostscript binary.

    :param tile: single-entry tile list as built by ``EpsImageFile._open``
    :param size: output (width, height)
    :param fp: file object holding the PostScript source
    :param scale: integer supersampling factor for hi-res rendering
    :param transparency: render with an alpha channel (``pngalpha`` device)
    :returns: the core image object of the rendered result
    :raises OSError: if Ghostscript cannot be located
    """
    global gs_binary
    if not has_ghostscript():
        msg = "Unable to locate Ghostscript on paths"
        raise OSError(msg)

    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    # Hack to support hi-res rendering
    scale = int(scale) or 1
    width = size[0] * scale
    height = size[1] * scale
    # resolution is dependent on bbox and size
    res_x = 72.0 * width / (bbox[2] - bbox[0])
    res_y = 72.0 * height / (bbox[3] - bbox[1])

    out_fd, outfile = tempfile.mkstemp()
    os.close(out_fd)

    infile_temp = None
    if hasattr(fp, "name") and os.path.exists(fp.name):
        # input is a real file on disk; Ghostscript can read it directly
        infile = fp.name
    else:
        in_fd, infile_temp = tempfile.mkstemp()
        os.close(in_fd)
        infile = infile_temp

        # Ignore length and offset!
        # Ghostscript can read it
        # Copy whole file to read in Ghostscript
        with open(infile_temp, "wb") as f:
            # fetch length of fp
            fp.seek(0, io.SEEK_END)
            fsize = fp.tell()
            # ensure start position
            # go back
            fp.seek(0)
            lengthfile = fsize
            while lengthfile > 0:
                s = fp.read(min(lengthfile, 100 * 1024))
                if not s:
                    break
                lengthfile -= len(s)
                f.write(s)

    device = "pngalpha" if transparency else "ppmraw"

    # Build Ghostscript command
    command = [
        gs_binary,
        "-q",  # quiet mode
        f"-g{width:d}x{height:d}",  # set output geometry (pixels)
        f"-r{res_x:f}x{res_y:f}",  # set input DPI (dots per inch)
        "-dBATCH",  # exit after processing
        "-dNOPAUSE",  # don't pause between pages
        "-dSAFER",  # safe mode
        f"-sDEVICE={device}",
        f"-sOutputFile={outfile}",  # output file
        # adjust for image origin
        "-c",
        f"{-bbox[0]} {-bbox[1]} translate",
        "-f",
        infile,  # input file
        # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272)
        "-c",
        "showpage",
    ]

    # push data through Ghostscript
    try:
        startupinfo = None
        if sys.platform.startswith("win"):
            # suppress the console window Ghostscript would otherwise open
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        subprocess.check_call(command, startupinfo=startupinfo)
        out_im = Image.open(outfile)
        out_im.load()
    finally:
        # always clean up the temporary files, even when rendering failed
        try:
            os.unlink(outfile)
            if infile_temp:
                os.unlink(infile_temp)
        except OSError:
            pass

    im = out_im.im.copy()
    out_im.close()
    return im
160
+
161
+
162
class PSFile:
    """
    Wrapper for bytesio object that treats either CR or LF as end of line.
    This class is no longer used internally, but kept for backwards compatibility.
    """

    def __init__(self, fp):
        # Emit a deprecation warning on construction (removal scheduled for 11).
        deprecate(
            "PSFile",
            11,
            action="If you need the functionality of this class "
            "you will need to implement it yourself.",
        )
        self.fp = fp
        # holds a single looked-ahead byte between readline() calls
        self.char = None

    def seek(self, offset, whence=io.SEEK_SET):
        # Discard the look-ahead byte; it no longer matches the new position.
        self.char = None
        self.fp.seek(offset, whence)

    def readline(self) -> str:
        """Read one line, accepting CR, LF, CRLF or LFCR as the terminator."""
        s = [self.char or b""]
        self.char = None

        c = self.fp.read(1)
        while (c not in b"\r\n") and len(c):
            s.append(c)
            c = self.fp.read(1)

        # peek one byte ahead to swallow a two-character line ending
        self.char = self.fp.read(1)
        # line endings can be 1 or 2 of \r \n, in either order
        if self.char in b"\r\n":
            self.char = None

        return b"".join(s).decode("latin-1")
197
+
198
+
199
+ def _accept(prefix: bytes) -> bool:
200
+ return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5)
201
+
202
+
203
+ ##
204
+ # Image plugin for Encapsulated PostScript. This plugin supports only
205
+ # a few variants of this format.
206
+
207
+
208
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""

    format = "EPS"
    format_description = "Encapsulated Postscript"

    # mapping from Photoshop "ImageData" mode ids to PIL modes
    mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"}

    def _open(self) -> None:
        """Parse DSC header/trailer comments to determine mode and size."""
        (length, offset) = self._find_offset(self.fp)

        # go to offset - start of "%!PS"
        self.fp.seek(offset)

        self._mode = "RGB"
        self._size = None

        # reusable 255-byte line buffer (the DSC line-length limit)
        byte_arr = bytearray(255)
        bytes_mv = memoryview(byte_arr)
        bytes_read = 0
        reading_header_comments = True
        reading_trailer_comments = False
        trailer_reached = False

        def check_required_header_comments() -> None:
            """
            The EPS specification requires that some headers exist.
            This should be checked when the header comments formally end,
            when image data starts, or when the file ends, whichever comes first.
            """
            if "PS-Adobe" not in self.info:
                msg = 'EPS header missing "%!PS-Adobe" comment'
                raise SyntaxError(msg)
            if "BoundingBox" not in self.info:
                msg = 'EPS header missing "%%BoundingBox" comment'
                raise SyntaxError(msg)

        def _read_comment(s: str) -> bool:
            # Parse one "%%Key: value" DSC comment; returns True if it matched.
            nonlocal reading_trailer_comments
            try:
                m = split.match(s)
            except re.error as e:
                msg = "not an EPS file"
                raise SyntaxError(msg) from e

            if not m:
                return False

            k, v = m.group(1, 2)
            self.info[k] = v
            if k == "BoundingBox":
                if v == "(atend)":
                    # bounding box is deferred to the trailer section
                    reading_trailer_comments = True
                elif not self._size or (trailer_reached and reading_trailer_comments):
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        box = [int(float(i)) for i in v.split()]
                        self._size = box[2] - box[0], box[3] - box[1]
                        self.tile = [("eps", (0, 0) + self.size, offset, (length, box))]
                    except Exception:
                        pass
            return True

        while True:
            byte = self.fp.read(1)
            if byte == b"":
                # if we didn't read a byte we must be at the end of the file
                if bytes_read == 0:
                    if reading_header_comments:
                        check_required_header_comments()
                    break
            elif byte in b"\r\n":
                # if we read a line ending character, ignore it and parse what
                # we have already read. if we haven't read any other characters,
                # continue reading
                if bytes_read == 0:
                    continue
            else:
                # ASCII/hexadecimal lines in an EPS file must not exceed
                # 255 characters, not including line ending characters
                if bytes_read >= 255:
                    # only enforce this for lines starting with a "%",
                    # otherwise assume it's binary data
                    if byte_arr[0] == ord("%"):
                        msg = "not an EPS file"
                        raise SyntaxError(msg)
                    else:
                        if reading_header_comments:
                            check_required_header_comments()
                            reading_header_comments = False
                        # reset bytes_read so we can keep reading
                        # data until the end of the line
                        bytes_read = 0
                byte_arr[bytes_read] = byte[0]
                bytes_read += 1
                continue

            if reading_header_comments:
                # Load EPS header

                # if this line doesn't start with a "%",
                # or does start with "%%EndComments",
                # then we've reached the end of the header/comments
                if byte_arr[0] != ord("%") or bytes_mv[:13] == b"%%EndComments":
                    check_required_header_comments()
                    reading_header_comments = False
                    continue

                s = str(bytes_mv[:bytes_read], "latin-1")
                if not _read_comment(s):
                    m = field.match(s)
                    if m:
                        k = m.group(1)
                        if k[:8] == "PS-Adobe":
                            self.info["PS-Adobe"] = k[9:]
                        else:
                            self.info[k] = ""
                    elif s[0] == "%":
                        # handle non-DSC PostScript comments that some
                        # tools mistakenly put in the Comments section
                        pass
                    else:
                        msg = "bad EPS header"
                        raise OSError(msg)
            elif bytes_mv[:11] == b"%ImageData:":
                # Check for an "ImageData" descriptor
                # https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577413_pgfId-1035096

                # Values:
                # columns
                # rows
                # bit depth (1 or 8)
                # mode (1: L, 2: LAB, 3: RGB, 4: CMYK)
                # number of padding channels
                # block size (number of bytes per row per channel)
                # binary/ascii (1: binary, 2: ascii)
                # data start identifier (the image data follows after a single line
                #   consisting only of this quoted value)
                image_data_values = byte_arr[11:bytes_read].split(None, 7)
                columns, rows, bit_depth, mode_id = (
                    int(value) for value in image_data_values[:4]
                )

                if bit_depth == 1:
                    self._mode = "1"
                elif bit_depth == 8:
                    try:
                        # NOTE(review): a dict lookup raises KeyError, which this
                        # ValueError handler does not catch — verify intent.
                        self._mode = self.mode_map[mode_id]
                    except ValueError:
                        break
                else:
                    break

                self._size = columns, rows
                return
            elif bytes_mv[:5] == b"%%EOF":
                break
            elif trailer_reached and reading_trailer_comments:
                # Load EPS trailer
                s = str(bytes_mv[:bytes_read], "latin-1")
                _read_comment(s)
            elif bytes_mv[:9] == b"%%Trailer":
                trailer_reached = True
            bytes_read = 0

        if not self._size:
            msg = "cannot determine EPS bounding box"
            raise OSError(msg)

    def _find_offset(self, fp):
        """Return (length, offset) of the PostScript section inside *fp*."""
        s = fp.read(4)

        if s == b"%!PS":
            # for HEAD without binary preview
            fp.seek(0, io.SEEK_END)
            length = fp.tell()
            offset = 0
        elif i32(s) == 0xC6D3D0C5:
            # FIX for: Some EPS file not handled correctly / issue #302
            # EPS can contain binary data
            # or start directly with latin coding
            # more info see:
            # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
            s = fp.read(8)
            offset = i32(s)
            length = i32(s, 4)
        else:
            msg = "not an EPS file"
            raise SyntaxError(msg)

        return length, offset

    def load(self, scale=1, transparency=False):
        """Load EPS via Ghostscript, rendering the page at *scale*."""
        if self.tile:
            self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency)
            self._mode = self.im.mode
            self._size = self.im.size
            self.tile = []
        return Image.Image.load(self)

    def load_seek(self, pos: int) -> None:
        # we can't incrementally load, so force ImageFile.parser to
        # use our custom load method by defining this method.
        pass
415
+
416
+
417
+ # --------------------------------------------------------------------
418
+
419
+
420
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes, eps: int = 1) -> None:
    """EPS Writer for the Python Imaging Library.

    Writes *im* to *fp* as (Encapsulated) PostScript with hex-encoded
    pixel data. Only "L", "RGB" and "CMYK" modes are supported.
    """

    # make sure image data is available
    im.load()

    # determine PostScript image mode: (bits, bands, image operator)
    if im.mode == "L":
        operator = (8, 1, b"image")
    elif im.mode == "RGB":
        operator = (8, 3, b"false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, b"false 4 colorimage")
    else:
        msg = "image mode is not supported"
        raise ValueError(msg)

    if eps:
        # write EPS header
        fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write(b"%%Creator: PIL 0.1 EpsEncode\n")
        # fp.write("%%CreationDate: %s"...)
        fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write(b"%%Pages: 1\n")
        fp.write(b"%%EndComments\n")
        fp.write(b"%%Page: 1 1\n")
        fp.write(b"%%ImageData: %d %d " % im.size)
        fp.write(b'%d %d 0 1 1 "%s"\n' % operator)

    # image header
    fp.write(b"gsave\n")
    fp.write(b"10 dict begin\n")
    fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write(b"%d %d scale\n" % im.size)
    fp.write(b"%d %d 8\n" % im.size)  # <= bits
    fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write(b"{ currentfile buf readhexstring pop } bind\n")
    fp.write(operator[2] + b"\n")
    if hasattr(fp, "flush"):
        fp.flush()

    # write the hex-encoded pixel data via the registered "eps" encoder
    ImageFile._save(im, fp, [("eps", (0, 0) + im.size, 0, None)])

    fp.write(b"\n%%%%EndBinary\n")
    fp.write(b"grestore end\n")
    if hasattr(fp, "flush"):
        fp.flush()
467
+
468
+
469
+ # --------------------------------------------------------------------
470
+
471
+
472
# Register the EPS plugin with PIL's open/save machinery.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)

Image.register_save(EpsImageFile.format, _save)

Image.register_extensions(EpsImageFile.format, [".ps", ".eps"])

Image.register_mime(EpsImageFile.format, "application/postscript")
myenv/Lib/site-packages/PIL/ExifTags.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # EXIF tags
6
+ #
7
+ # Copyright (c) 2003 by Secret Labs AB
8
+ #
9
+ # See the README file for information on usage and redistribution.
10
+ #
11
+
12
+ """
13
+ This module provides constants and clear-text names for various
14
+ well-known EXIF tags.
15
+ """
16
+ from __future__ import annotations
17
+
18
+ from enum import IntEnum
19
+
20
+
21
class Base(IntEnum):
    """Tag ids of the main (0th/Exif) IFD, named per the EXIF/TIFF specs."""

    # possibly incomplete
    InteropIndex = 0x0001
    ProcessingSoftware = 0x000B
    NewSubfileType = 0x00FE
    SubfileType = 0x00FF
    ImageWidth = 0x0100
    ImageLength = 0x0101
    BitsPerSample = 0x0102
    Compression = 0x0103
    PhotometricInterpretation = 0x0106
    Thresholding = 0x0107
    CellWidth = 0x0108
    CellLength = 0x0109
    FillOrder = 0x010A
    DocumentName = 0x010D
    ImageDescription = 0x010E
    Make = 0x010F
    Model = 0x0110
    StripOffsets = 0x0111
    Orientation = 0x0112
    SamplesPerPixel = 0x0115
    RowsPerStrip = 0x0116
    StripByteCounts = 0x0117
    MinSampleValue = 0x0118
    MaxSampleValue = 0x0119
    XResolution = 0x011A
    YResolution = 0x011B
    PlanarConfiguration = 0x011C
    PageName = 0x011D
    FreeOffsets = 0x0120
    FreeByteCounts = 0x0121
    GrayResponseUnit = 0x0122
    GrayResponseCurve = 0x0123
    T4Options = 0x0124
    T6Options = 0x0125
    ResolutionUnit = 0x0128
    PageNumber = 0x0129
    TransferFunction = 0x012D
    Software = 0x0131
    DateTime = 0x0132
    Artist = 0x013B
    HostComputer = 0x013C
    Predictor = 0x013D
    WhitePoint = 0x013E
    PrimaryChromaticities = 0x013F
    ColorMap = 0x0140
    HalftoneHints = 0x0141
    TileWidth = 0x0142
    TileLength = 0x0143
    TileOffsets = 0x0144
    TileByteCounts = 0x0145
    SubIFDs = 0x014A
    InkSet = 0x014C
    InkNames = 0x014D
    NumberOfInks = 0x014E
    DotRange = 0x0150
    TargetPrinter = 0x0151
    ExtraSamples = 0x0152
    SampleFormat = 0x0153
    SMinSampleValue = 0x0154
    SMaxSampleValue = 0x0155
    TransferRange = 0x0156
    ClipPath = 0x0157
    XClipPathUnits = 0x0158
    YClipPathUnits = 0x0159
    Indexed = 0x015A
    JPEGTables = 0x015B
    OPIProxy = 0x015F
    JPEGProc = 0x0200
    JpegIFOffset = 0x0201
    JpegIFByteCount = 0x0202
    JpegRestartInterval = 0x0203
    JpegLosslessPredictors = 0x0205
    JpegPointTransforms = 0x0206
    JpegQTables = 0x0207
    JpegDCTables = 0x0208
    JpegACTables = 0x0209
    YCbCrCoefficients = 0x0211
    YCbCrSubSampling = 0x0212
    YCbCrPositioning = 0x0213
    ReferenceBlackWhite = 0x0214
    XMLPacket = 0x02BC
    RelatedImageFileFormat = 0x1000
    RelatedImageWidth = 0x1001
    RelatedImageLength = 0x1002
    Rating = 0x4746
    RatingPercent = 0x4749
    ImageID = 0x800D
    CFARepeatPatternDim = 0x828D
    BatteryLevel = 0x828F
    Copyright = 0x8298
    ExposureTime = 0x829A
    FNumber = 0x829D
    IPTCNAA = 0x83BB
    ImageResources = 0x8649
    ExifOffset = 0x8769
    InterColorProfile = 0x8773
    ExposureProgram = 0x8822
    SpectralSensitivity = 0x8824
    GPSInfo = 0x8825
    ISOSpeedRatings = 0x8827
    OECF = 0x8828
    Interlace = 0x8829
    TimeZoneOffset = 0x882A
    SelfTimerMode = 0x882B
    SensitivityType = 0x8830
    StandardOutputSensitivity = 0x8831
    RecommendedExposureIndex = 0x8832
    ISOSpeed = 0x8833
    ISOSpeedLatitudeyyy = 0x8834
    ISOSpeedLatitudezzz = 0x8835
    ExifVersion = 0x9000
    DateTimeOriginal = 0x9003
    DateTimeDigitized = 0x9004
    OffsetTime = 0x9010
    OffsetTimeOriginal = 0x9011
    OffsetTimeDigitized = 0x9012
    ComponentsConfiguration = 0x9101
    CompressedBitsPerPixel = 0x9102
    ShutterSpeedValue = 0x9201
    ApertureValue = 0x9202
    BrightnessValue = 0x9203
    ExposureBiasValue = 0x9204
    MaxApertureValue = 0x9205
    SubjectDistance = 0x9206
    MeteringMode = 0x9207
    LightSource = 0x9208
    Flash = 0x9209
    FocalLength = 0x920A
    Noise = 0x920D
    ImageNumber = 0x9211
    SecurityClassification = 0x9212
    ImageHistory = 0x9213
    TIFFEPStandardID = 0x9216
    MakerNote = 0x927C
    UserComment = 0x9286
    SubsecTime = 0x9290
    SubsecTimeOriginal = 0x9291
    SubsecTimeDigitized = 0x9292
    AmbientTemperature = 0x9400
    Humidity = 0x9401
    Pressure = 0x9402
    WaterDepth = 0x9403
    Acceleration = 0x9404
    CameraElevationAngle = 0x9405
    XPTitle = 0x9C9B
    XPComment = 0x9C9C
    XPAuthor = 0x9C9D
    XPKeywords = 0x9C9E
    XPSubject = 0x9C9F
    FlashPixVersion = 0xA000
    ColorSpace = 0xA001
    ExifImageWidth = 0xA002
    ExifImageHeight = 0xA003
    RelatedSoundFile = 0xA004
    ExifInteroperabilityOffset = 0xA005
    FlashEnergy = 0xA20B
    SpatialFrequencyResponse = 0xA20C
    FocalPlaneXResolution = 0xA20E
    FocalPlaneYResolution = 0xA20F
    FocalPlaneResolutionUnit = 0xA210
    SubjectLocation = 0xA214
    ExposureIndex = 0xA215
    SensingMethod = 0xA217
    FileSource = 0xA300
    SceneType = 0xA301
    CFAPattern = 0xA302
    CustomRendered = 0xA401
    ExposureMode = 0xA402
    WhiteBalance = 0xA403
    DigitalZoomRatio = 0xA404
    FocalLengthIn35mmFilm = 0xA405
    SceneCaptureType = 0xA406
    GainControl = 0xA407
    Contrast = 0xA408
    Saturation = 0xA409
    Sharpness = 0xA40A
    DeviceSettingDescription = 0xA40B
    SubjectDistanceRange = 0xA40C
    ImageUniqueID = 0xA420
    CameraOwnerName = 0xA430
    BodySerialNumber = 0xA431
    LensSpecification = 0xA432
    LensMake = 0xA433
    LensModel = 0xA434
    LensSerialNumber = 0xA435
    CompositeImage = 0xA460
    CompositeImageCount = 0xA461
    CompositeImageExposureTimes = 0xA462
    Gamma = 0xA500
    PrintImageMatching = 0xC4A5
    DNGVersion = 0xC612
    DNGBackwardVersion = 0xC613
    UniqueCameraModel = 0xC614
    LocalizedCameraModel = 0xC615
    CFAPlaneColor = 0xC616
    CFALayout = 0xC617
    LinearizationTable = 0xC618
    BlackLevelRepeatDim = 0xC619
    BlackLevel = 0xC61A
    BlackLevelDeltaH = 0xC61B
    BlackLevelDeltaV = 0xC61C
    WhiteLevel = 0xC61D
    DefaultScale = 0xC61E
    DefaultCropOrigin = 0xC61F
    DefaultCropSize = 0xC620
    ColorMatrix1 = 0xC621
    ColorMatrix2 = 0xC622
    CameraCalibration1 = 0xC623
    CameraCalibration2 = 0xC624
    ReductionMatrix1 = 0xC625
    ReductionMatrix2 = 0xC626
    AnalogBalance = 0xC627
    AsShotNeutral = 0xC628
    AsShotWhiteXY = 0xC629
    BaselineExposure = 0xC62A
    BaselineNoise = 0xC62B
    BaselineSharpness = 0xC62C
    BayerGreenSplit = 0xC62D
    LinearResponseLimit = 0xC62E
    CameraSerialNumber = 0xC62F
    LensInfo = 0xC630
    ChromaBlurRadius = 0xC631
    AntiAliasStrength = 0xC632
    ShadowScale = 0xC633
    DNGPrivateData = 0xC634
    MakerNoteSafety = 0xC635
    CalibrationIlluminant1 = 0xC65A
    CalibrationIlluminant2 = 0xC65B
    BestQualityScale = 0xC65C
    RawDataUniqueID = 0xC65D
    OriginalRawFileName = 0xC68B
    OriginalRawFileData = 0xC68C
    ActiveArea = 0xC68D
    MaskedAreas = 0xC68E
    AsShotICCProfile = 0xC68F
    AsShotPreProfileMatrix = 0xC690
    CurrentICCProfile = 0xC691
    CurrentPreProfileMatrix = 0xC692
    ColorimetricReference = 0xC6BF
    CameraCalibrationSignature = 0xC6F3
    ProfileCalibrationSignature = 0xC6F4
    AsShotProfileName = 0xC6F6
    NoiseReductionApplied = 0xC6F7
    ProfileName = 0xC6F8
    ProfileHueSatMapDims = 0xC6F9
    ProfileHueSatMapData1 = 0xC6FA
    ProfileHueSatMapData2 = 0xC6FB
    ProfileToneCurve = 0xC6FC
    ProfileEmbedPolicy = 0xC6FD
    ProfileCopyright = 0xC6FE
    ForwardMatrix1 = 0xC714
    ForwardMatrix2 = 0xC715
    PreviewApplicationName = 0xC716
    PreviewApplicationVersion = 0xC717
    PreviewSettingsName = 0xC718
    PreviewSettingsDigest = 0xC719
    PreviewColorSpace = 0xC71A
    PreviewDateTime = 0xC71B
    RawImageDigest = 0xC71C
    OriginalRawFileDigest = 0xC71D
    SubTileBlockSize = 0xC71E
    RowInterleaveFactor = 0xC71F
    ProfileLookTableDims = 0xC725
    ProfileLookTableData = 0xC726
    OpcodeList1 = 0xC740
    OpcodeList2 = 0xC741
    OpcodeList3 = 0xC74E
    NoiseProfile = 0xC761


"""Maps EXIF tags to tag names."""
TAGS = {
    **{i.value: i.name for i in Base},
    # extra tag ids that are not (or are differently) named in Base;
    # "TIFF/EPStandardID" overrides the Base entry for 0x9216.
    0x920C: "SpatialFrequencyResponse",
    0x9214: "SubjectLocation",
    0x9215: "ExposureIndex",
    0x828E: "CFAPattern",
    0x920B: "FlashEnergy",
    0x9216: "TIFF/EPStandardID",
}
303
+
304
+
305
class GPS(IntEnum):
    """Tag ids used inside the EXIF GPS IFD."""

    GPSVersionID = 0
    GPSLatitudeRef = 1
    GPSLatitude = 2
    GPSLongitudeRef = 3
    GPSLongitude = 4
    GPSAltitudeRef = 5
    GPSAltitude = 6
    GPSTimeStamp = 7
    GPSSatellites = 8
    GPSStatus = 9
    GPSMeasureMode = 10
    GPSDOP = 11
    GPSSpeedRef = 12
    GPSSpeed = 13
    GPSTrackRef = 14
    GPSTrack = 15
    GPSImgDirectionRef = 16
    GPSImgDirection = 17
    GPSMapDatum = 18
    GPSDestLatitudeRef = 19
    GPSDestLatitude = 20
    GPSDestLongitudeRef = 21
    GPSDestLongitude = 22
    GPSDestBearingRef = 23
    GPSDestBearing = 24
    GPSDestDistanceRef = 25
    GPSDestDistance = 26
    GPSProcessingMethod = 27
    GPSAreaInformation = 28
    GPSDateStamp = 29
    GPSDifferential = 30
    GPSHPositioningError = 31


"""Maps EXIF GPS tags to tag names."""
GPSTAGS = {i.value: i.name for i in GPS}
342
+
343
+
344
class Interop(IntEnum):
    """Tag ids found in the EXIF Interoperability IFD."""

    InteropIndex = 0x0001
    InteropVersion = 0x0002
    RelatedImageFileFormat = 0x1000
    RelatedImageWidth = 0x1001
    RelatedImageHeight = 0x1002
350
+
351
+
352
class IFD(IntEnum):
    """Ids of the sub-IFD pointer tags (IFD1 is a pseudo-id, not a tag)."""

    Exif = 0x8769
    GPSInfo = 0x8825
    Makernote = 0x927C
    Interop = 0xA005
    IFD1 = -1
358
+
359
+
360
class LightSource(IntEnum):
    """Values of the EXIF LightSource (0x9208) tag."""

    Unknown = 0
    Daylight = 1
    Fluorescent = 2
    Tungsten = 3
    Flash = 4
    Fine = 9
    Cloudy = 10
    Shade = 11
    DaylightFluorescent = 12
    DayWhiteFluorescent = 13
    CoolWhiteFluorescent = 14
    WhiteFluorescent = 15
    StandardLightA = 17
    StandardLightB = 18
    StandardLightC = 19
    D55 = 20
    D65 = 21
    D75 = 22
    D50 = 23
    ISO = 24
    Other = 255
myenv/Lib/site-packages/PIL/FitsImagePlugin.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # FITS file handling
6
+ #
7
+ # Copyright (c) 1998-2003 by Fredrik Lundh
8
+ #
9
+ # See the README file for information on usage and redistribution.
10
+ #
11
+ from __future__ import annotations
12
+
13
+ import gzip
14
+ import math
15
+
16
+ from . import Image, ImageFile
17
+
18
+
19
+ def _accept(prefix: bytes) -> bool:
20
+ return prefix[:6] == b"SIMPLE"
21
+
22
+
23
class FitsImageFile(ImageFile.ImageFile):
    """Reader for FITS (Flexible Image Transport System) files."""

    format = "FITS"
    format_description = "FITS"

    def _open(self) -> None:
        """Read 80-character header "cards" until the first image data unit."""
        assert self.fp is not None

        headers: dict[bytes, bytes] = {}
        header_in_progress = False
        decoder_name = ""
        while True:
            header = self.fp.read(80)
            if not header:
                msg = "Truncated FITS file"
                raise OSError(msg)
            keyword = header[:8].strip()
            if keyword in (b"SIMPLE", b"XTENSION"):
                header_in_progress = True
            elif headers and not header_in_progress:
                # This is now a data unit
                break
            elif keyword == b"END":
                # Seek to the end of the header unit (2880-byte alignment)
                self.fp.seek(math.ceil(self.fp.tell() / 2880) * 2880)
                if not decoder_name:
                    decoder_name, offset, args = self._parse_headers(headers)

                header_in_progress = False
                continue

            if decoder_name:
                # Keep going to read past the headers
                continue

            value = header[8:].split(b"/")[0].strip()
            if value.startswith(b"="):
                value = value[1:].strip()
            if not headers and (not _accept(keyword) or value != b"T"):
                msg = "Not a FITS file"
                raise SyntaxError(msg)
            headers[keyword] = value

        if not decoder_name:
            msg = "No image data"
            raise ValueError(msg)

        # rewind past the 80-byte card that terminated the loop
        offset += self.fp.tell() - 80
        self.tile = [(decoder_name, (0, 0) + self.size, offset, args)]

    def _get_size(
        self, headers: dict[bytes, bytes], prefix: bytes
    ) -> tuple[int, int] | None:
        """Return (width, height) from the NAXIS cards, or None if NAXIS == 0."""
        naxis = int(headers[prefix + b"NAXIS"])
        if naxis == 0:
            return None

        if naxis == 1:
            return 1, int(headers[prefix + b"NAXIS1"])
        else:
            return int(headers[prefix + b"NAXIS1"]), int(headers[prefix + b"NAXIS2"])

    def _parse_headers(
        self, headers: dict[bytes, bytes]
    ) -> tuple[str, int, tuple[str | int, ...]]:
        """Derive the decoder name, data offset and decoder args from *headers*.

        Returns ("", 0, ()) when the header unit carries no image data.
        Also sets ``self._size`` and ``self._mode`` as a side effect.
        """
        prefix = b""
        decoder_name = "raw"
        offset = 0
        if (
            headers.get(b"XTENSION") == b"'BINTABLE'"
            and headers.get(b"ZIMAGE") == b"T"
            and headers[b"ZCMPTYPE"] == b"'GZIP_1 '"
        ):
            # GZIP_1 tile-compressed image: the compressed stream starts after
            # the (uncompressed-size worth of) table data.
            no_prefix_size = self._get_size(headers, prefix) or (0, 0)
            number_of_bits = int(headers[b"BITPIX"])
            offset = no_prefix_size[0] * no_prefix_size[1] * (number_of_bits // 8)

            prefix = b"Z"
            decoder_name = "fits_gzip"

        size = self._get_size(headers, prefix)
        if not size:
            return "", 0, ()

        self._size = size

        number_of_bits = int(headers[prefix + b"BITPIX"])
        if number_of_bits == 8:
            self._mode = "L"
        elif number_of_bits == 16:
            self._mode = "I;16"
        elif number_of_bits == 32:
            self._mode = "I"
        elif number_of_bits in (-32, -64):
            self._mode = "F"

        args: tuple[str | int, ...]
        if decoder_name == "raw":
            args = (self.mode, 0, -1)
        else:
            args = (number_of_bits,)
        return decoder_name, offset, args
124
+
125
+
126
class FitsGzipDecoder(ImageFile.PyDecoder):
    """Decoder for GZIP_1 tile-compressed FITS image data."""

    _pulls_fd = True

    def decode(self, buffer: bytes) -> tuple[int, int]:
        """Decompress the gzip stream and repack it as raw pixel rows.

        The decompressed stream stores each pixel in a 4-byte slot; only the
        trailing ``bits // 8`` bytes (capped at 4) of each slot carry data.
        Rows are stored bottom-up, so they are reversed before being handed
        to the raw loader.

        :param buffer: unused; the decoder pulls directly from ``self.fd``
        :returns: ``(-1, 0)`` meaning "all data consumed, no error"
        """
        assert self.fd is not None
        value = gzip.decompress(self.fd.read())

        rows = []
        offset = 0
        # number of meaningful bytes per 4-byte pixel slot
        number_of_bits = min(self.args[0] // 8, 4)
        for _ in range(self.state.ysize):
            row = bytearray()
            for _ in range(self.state.xsize):
                row += value[offset + (4 - number_of_bits) : offset + 4]
                offset += 4
            rows.append(row)
        # join rows bottom-up in one C-level pass instead of flattening with a
        # per-pixel Python comprehension (same bytes, far less overhead)
        self.set_as_raw(b"".join(rows[::-1]))
        return -1, 0
144
+
145
+
146
+ # --------------------------------------------------------------------
147
+ # Registry
148
+
149
# Hook the FITS reader and its gzip tile decoder into PIL.
Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
Image.register_decoder("fits_gzip", FitsGzipDecoder)

Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
myenv/Lib/site-packages/PIL/FliImagePlugin.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # FLI/FLC file handling.
6
+ #
7
+ # History:
8
+ # 95-09-01 fl Created
9
+ # 97-01-03 fl Fixed parser, setup decoder tile
10
+ # 98-07-15 fl Renamed offset attribute to avoid name clash
11
+ #
12
+ # Copyright (c) Secret Labs AB 1997-98.
13
+ # Copyright (c) Fredrik Lundh 1995-97.
14
+ #
15
+ # See the README file for information on usage and redistribution.
16
+ #
17
+ from __future__ import annotations
18
+
19
+ import os
20
+
21
+ from . import Image, ImageFile, ImagePalette
22
+ from ._binary import i16le as i16
23
+ from ._binary import i32le as i32
24
+ from ._binary import o8
25
+
26
+ #
27
+ # decoder
28
+
29
+
30
+ def _accept(prefix: bytes) -> bool:
31
+ return (
32
+ len(prefix) >= 6
33
+ and i16(prefix, 4) in [0xAF11, 0xAF12]
34
+ and i16(prefix, 14) in [0, 3] # flags
35
+ )
36
+
37
+
38
+ ##
39
+ # Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
40
+ # method to load individual frames.
41
+
42
+
43
class FliImageFile(ImageFile.ImageFile):
    """Image plugin for Autodesk FLI/FLC animations; use seek() for frames."""

    format = "FLI"
    format_description = "Autodesk FLI/FLC Animation"
    # frames are decoded lazily, so keep the file pointer open after load()
    _close_exclusive_fp_after_loading = False

    def _open(self):
        # HEAD
        s = self.fp.read(128)
        if not (_accept(s) and s[20:22] == b"\x00\x00"):
            msg = "not an FLI/FLC file"
            raise SyntaxError(msg)

        # frames
        self.n_frames = i16(s, 6)
        self.is_animated = self.n_frames > 1

        # image characteristics
        self._mode = "P"
        self._size = i16(s, 8), i16(s, 10)

        # animation speed
        duration = i32(s, 16)
        magic = i16(s, 4)
        if magic == 0xAF11:
            # FLI stores the delay in 1/70ths of a second; convert to ms
            duration = (duration * 1000) // 70
        self.info["duration"] = duration

        # look for palette (default to a greyscale ramp)
        palette = [(a, a, a) for a in range(256)]

        s = self.fp.read(16)

        self.__offset = 128

        if i16(s, 4) == 0xF100:
            # prefix chunk; ignore it
            self.__offset = self.__offset + i32(s)
            self.fp.seek(self.__offset)
            s = self.fp.read(16)

        if i16(s, 4) == 0xF1FA:
            # look for palette chunk
            number_of_subchunks = i16(s, 6)
            chunk_size = None
            for _ in range(number_of_subchunks):
                if chunk_size is not None:
                    self.fp.seek(chunk_size - 6, os.SEEK_CUR)
                s = self.fp.read(6)
                chunk_type = i16(s, 4)
                if chunk_type in (4, 11):
                    # type-11 entries are shifted left by 2 (presumably
                    # 6-bit components) — see _palette()
                    self._palette(palette, 2 if chunk_type == 11 else 0)
                    break
                chunk_size = i32(s)
                if not chunk_size:
                    break

        palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette]
        self.palette = ImagePalette.raw("RGB", b"".join(palette))

        # set things up to decode first frame
        self.__frame = -1
        self._fp = self.fp
        self.__rewind = self.fp.tell()
        self.seek(0)

    def _palette(self, palette, shift):
        # load palette: a sequence of (skip, count, rgb-triples) packets

        i = 0
        for e in range(i16(self.fp.read(2))):
            s = self.fp.read(2)
            i = i + s[0]
            n = s[1]
            if n == 0:
                # a count byte of 0 means a full 256-entry packet
                n = 256
            s = self.fp.read(n * 3)
            for n in range(0, len(s), 3):
                r = s[n] << shift
                g = s[n + 1] << shift
                b = s[n + 2] << shift
                palette[i] = (r, g, b)
                i += 1

    def seek(self, frame: int) -> None:
        """Seek to *frame*, decoding forward from the current position."""
        if not self._seek_check(frame):
            return
        if frame < self.__frame:
            # frames are delta-encoded; restart from frame 0 to go backwards
            self._seek(0)

        for f in range(self.__frame + 1, frame + 1):
            self._seek(f)

    def _seek(self, frame: int) -> None:
        # Advance the internal state by exactly one frame.
        if frame == 0:
            self.__frame = -1
            self._fp.seek(self.__rewind)
            self.__offset = 128
        else:
            # ensure that the previous frame was loaded
            self.load()

        if frame != self.__frame + 1:
            msg = f"cannot seek to frame {frame}"
            raise ValueError(msg)
        self.__frame = frame

        # move to next frame
        self.fp = self._fp
        self.fp.seek(self.__offset)

        s = self.fp.read(4)
        if not s:
            msg = "missing frame size"
            raise EOFError(msg)

        framesize = i32(s)

        self.decodermaxblock = framesize
        self.tile = [("fli", (0, 0) + self.size, self.__offset, None)]

        self.__offset += framesize

    def tell(self) -> int:
        """Return the index of the current frame."""
        return self.__frame
167
+
168
+
169
+ #
170
+ # registry
171
+
172
+ Image.register_open(FliImageFile.format, FliImageFile, _accept)
173
+
174
+ Image.register_extensions(FliImageFile.format, [".fli", ".flc"])
myenv/Lib/site-packages/PIL/FontFile.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # base class for raster font file parsers
6
+ #
7
+ # history:
8
+ # 1997-06-05 fl created
9
+ # 1997-08-19 fl restrict image width
10
+ #
11
+ # Copyright (c) 1997-1998 by Secret Labs AB
12
+ # Copyright (c) 1997-1998 by Fredrik Lundh
13
+ #
14
+ # See the README file for information on usage and redistribution.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ import os
19
+ from typing import BinaryIO
20
+
21
+ from . import Image, _binary
22
+
23
+ WIDTH = 800
24
+
25
+
26
def puti16(
    fp: BinaryIO, values: tuple[int, int, int, int, int, int, int, int, int, int]
) -> None:
    """Write ``values`` to ``fp`` as network-order (big-endian) 16-bit words.

    Negative values are folded into the unsigned 16-bit two's-complement
    range before being written.
    """
    for value in values:
        if value < 0:
            value += 65536
        fp.write(_binary.o16be(value))
34
+
35
+
36
class FontFile:
    """Base class for raster font file handlers.

    Subclasses populate ``self.glyph`` with per-character records;
    :py:meth:`compile` then packs all glyphs into a single 1-bit bitmap
    and :py:meth:`save` writes the PIL font pair (``.pbm`` + ``.pil``).
    """

    # Composite bitmap holding every glyph; created lazily by compile().
    bitmap: Image.Image | None = None

    def __init__(self) -> None:
        # arbitrary font metadata collected by the parser
        self.info: dict[bytes, bytes | int] = {}
        # one slot per character code 0-255; each record is
        # (delta, destination bbox, source bbox, source image), or None
        # when the character is absent from the font
        self.glyph: list[
            tuple[
                tuple[int, int],
                tuple[int, int, int, int],
                tuple[int, int, int, int],
                Image.Image,
            ]
            | None
        ] = [None] * 256

    def __getitem__(self, ix: int) -> (
        tuple[
            tuple[int, int],
            tuple[int, int, int, int],
            tuple[int, int, int, int],
            Image.Image,
        ]
        | None
    ):
        """Return the glyph record for character code ``ix`` (or None)."""
        return self.glyph[ix]

    def compile(self) -> None:
        """Create metrics and bitmap"""

        if self.bitmap:
            return  # already compiled

        # create bitmap large enough to hold all data
        h = w = maxwidth = 0
        lines = 1
        for glyph in self.glyph:
            if glyph:
                d, dst, src, im = glyph
                h = max(h, src[3] - src[1])
                w = w + (src[2] - src[0])
                # wrap onto a new row once the running width exceeds WIDTH
                if w > WIDTH:
                    lines += 1
                    w = src[2] - src[0]
                maxwidth = max(maxwidth, w)

        xsize = maxwidth
        ysize = lines * h

        if xsize == 0 and ysize == 0:
            return  # font contains no glyphs

        self.ysize = h

        # paste glyphs into bitmap; this second pass must mirror the
        # row-wrapping decisions of the sizing pass above
        self.bitmap = Image.new("1", (xsize, ysize))
        self.metrics: list[
            tuple[tuple[int, int], tuple[int, int, int, int], tuple[int, int, int, int]]
            | None
        ] = [None] * 256
        x = y = 0
        for i in range(256):
            glyph = self[i]
            if glyph:
                d, dst, src, im = glyph
                xx = src[2] - src[0]
                x0, y0 = x, y
                x = x + xx
                if x > WIDTH:
                    x, y = 0, y + h
                    x0, y0 = x, y
                    x = xx
                # source bbox translated to its slot in the packed bitmap
                s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
                self.bitmap.paste(im.crop(src), s)
                self.metrics[i] = d, dst, s

    def save(self, filename: str) -> None:
        """Save font"""

        self.compile()

        # font data
        if not self.bitmap:
            msg = "No bitmap created"
            raise ValueError(msg)
        # NOTE: historical quirk — the bitmap is written in PNG format
        # but under a .pbm extension.
        self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")

        # font metrics
        with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp:
            fp.write(b"PILfont\n")
            fp.write(f";;;;;;{self.ysize};\n".encode("ascii"))  # HACK!!!
            fp.write(b"DATA\n")
            for id in range(256):
                m = self.metrics[id]
                if not m:
                    # absent glyph: ten zero metrics
                    puti16(fp, (0,) * 10)
                else:
                    puti16(fp, m[0] + m[1] + m[2])
myenv/Lib/site-packages/PIL/FpxImagePlugin.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # THIS IS WORK IN PROGRESS
3
+ #
4
+ # The Python Imaging Library.
5
+ # $Id$
6
+ #
7
+ # FlashPix support for PIL
8
+ #
9
+ # History:
10
+ # 97-01-25 fl Created (reads uncompressed RGB images only)
11
+ #
12
+ # Copyright (c) Secret Labs AB 1997.
13
+ # Copyright (c) Fredrik Lundh 1997.
14
+ #
15
+ # See the README file for information on usage and redistribution.
16
+ #
17
+ from __future__ import annotations
18
+
19
+ import olefile
20
+
21
+ from . import Image, ImageFile
22
+ from ._binary import i32le as i32
23
+
24
+ # we map from colour field tuples to (mode, rawmode) descriptors
25
+ MODES = {
26
+ # opacity
27
+ (0x00007FFE,): ("A", "L"),
28
+ # monochrome
29
+ (0x00010000,): ("L", "L"),
30
+ (0x00018000, 0x00017FFE): ("RGBA", "LA"),
31
+ # photo YCC
32
+ (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
33
+ (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"),
34
+ # standard RGB (NIFRGB)
35
+ (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
36
+ (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"),
37
+ }
38
+
39
+
40
+ #
41
+ # --------------------------------------------------------------------
42
+
43
+
44
+ def _accept(prefix: bytes) -> bool:
45
+ return prefix[:8] == olefile.MAGIC
46
+
47
+
48
+ ##
49
+ # Image plugin for the FlashPix images.
50
+
51
+
52
class FpxImageFile(ImageFile.ImageFile):
    """Image plugin for FlashPix (FPX) images, stored as OLE documents."""

    format = "FPX"
    format_description = "FlashPix"

    def _open(self):
        #
        # read the OLE directory and see if this is a likely
        # to be a FlashPix file

        try:
            self.ole = olefile.OleFileIO(self.fp)
        except OSError as e:
            msg = "not an FPX file; invalid OLE file"
            raise SyntaxError(msg) from e

        # root storage class id identifies a FlashPix document
        if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            msg = "not an FPX file; bad root CLSID"
            raise SyntaxError(msg)

        self._open_index(1)

    def _open_index(self, index: int = 1) -> None:
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties(
            [f"Data Object Store {index:06d}", "\005Image Contents"]
        )

        # size (highest resolution)

        self._size = prop[0x1000002], prop[0x1000003]

        # count resolution levels down to the 64-pixel base level
        size = max(self.size)
        i = 1
        while size > 64:
            size = size // 2
            i += 1
        self.maxid = i - 1

        # mode. instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match. for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002 | id]

        bands = i32(s, 4)
        if bands > 4:
            msg = "Invalid number of bands"
            raise OSError(msg)

        # note: for now, we ignore the "uncalibrated" flag
        colors = tuple(i32(s, 8 + i * 4) & 0x7FFFFFFF for i in range(bands))

        # map the colour-field tuple to a (mode, rawmode) pair
        self._mode, self.rawmode = MODES[colors]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001 | (i << 16)
            if id in prop:
                self.jpeg[i] = prop[id]

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index: int = 1, subimage: int = 0) -> None:
        #
        # setup tile descriptors for a given subimage

        stream = [
            f"Data Object Store {index:06d}",
            f"Resolution {subimage:04d}",
            "Subimage 0000 Header",
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        # tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        # channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        if size != self.size:
            msg = "subimage mismatch"
            raise OSError(msg)

        # get tile descriptors (i32(s, 12) is the tile count)
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        # one descriptor of ``length`` bytes per tile, row-major order
        for i in range(0, len(s), length):
            x1 = min(xsize, x + xtile)
            y1 = min(ysize, y + ytile)

            compression = i32(s, i + 8)

            if compression == 0:
                # uncompressed raw tile; i32(s, i) is its data offset
                self.tile.append(
                    (
                        "raw",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode,),
                    )
                )

            elif compression == 1:
                # FIXME: the fill decoder is not implemented
                self.tile.append(
                    (
                        "fill",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (self.rawmode, s[12:16]),
                    )
                )

            elif compression == 2:
                internal_color_conversion = s[14]
                jpeg_tables = s[15]
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem :
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None  # let the decoder decide

                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(
                    (
                        "jpeg",
                        (x, y, x1, y1),
                        i32(s, i) + 28,
                        (rawmode, jpegmode),
                    )
                )

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                msg = "unknown/invalid compression"
                raise OSError(msg)

            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break  # isn't really required

        self.stream = stream
        self._fp = self.fp
        self.fp = None

    def load(self):
        # reopen the subimage data stream lazily, on first pixel access
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])

        return ImageFile.ImageFile.load(self)

    def close(self) -> None:
        # release the underlying OLE container as well
        self.ole.close()
        super().close()

    def __exit__(self, *args: object) -> None:
        self.ole.close()
        super().__exit__()
+
248
+
249
+ #
250
+ # --------------------------------------------------------------------
251
+
252
+
253
+ Image.register_open(FpxImageFile.format, FpxImageFile, _accept)
254
+
255
+ Image.register_extension(FpxImageFile.format, ".fpx")
myenv/Lib/site-packages/PIL/FtexImagePlugin.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A Pillow loader for .ftc and .ftu files (FTEX)
3
+ Jerome Leclanche <jerome@leclan.ch>
4
+
5
+ The contents of this file are hereby released in the public domain (CC0)
6
+ Full text of the CC0 license:
7
+ https://creativecommons.org/publicdomain/zero/1.0/
8
+
9
+ Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001
10
+
11
+ The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
12
+ packed custom format called FTEX. This file format uses file extensions FTC
13
+ and FTU.
14
+ * FTC files are compressed textures (using standard texture compression).
15
+ * FTU files are not compressed.
16
+ Texture File Format
17
+ The FTC and FTU texture files both use the same format. This
18
+ has the following structure:
19
+ {header}
20
+ {format_directory}
21
+ {data}
22
+ Where:
23
+ {header} = {
24
+ u32:magic,
25
+ u32:version,
26
+ u32:width,
27
+ u32:height,
28
+ u32:mipmap_count,
29
+ u32:format_count
30
+ }
31
+
32
+ * The "magic" number is "FTEX".
33
+ * "width" and "height" are the dimensions of the texture.
34
+ * "mipmap_count" is the number of mipmaps in the texture.
35
+ * "format_count" is the number of texture formats (different versions of the
36
+ same texture) in this file.
37
+
38
+ {format_directory} = format_count * { u32:format, u32:where }
39
+
40
+ The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
41
+ uncompressed textures.
42
+ The texture data for a format starts at the position "where" in the file.
43
+
44
+ Each set of texture data in the file has the following structure:
45
+ {data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
46
+ * "mipmap_size" is the number of bytes in that mip level. For compressed
47
+ textures this is the size of the texture data compressed with DXT1. For 24 bit
48
+ uncompressed textures, this is 3 * width * height. Following this are the image
49
+ bytes for that mipmap level.
50
+
51
+ Note: All data is stored in little-Endian (Intel) byte order.
52
+ """
53
+
54
+ from __future__ import annotations
55
+
56
+ import struct
57
+ from enum import IntEnum
58
+ from io import BytesIO
59
+
60
+ from . import Image, ImageFile
61
+
62
+ MAGIC = b"FTEX"
63
+
64
+
65
class Format(IntEnum):
    """Texture data encodings listed in the FTEX format directory."""

    DXT1 = 0          # DXT1 block-compressed texture data
    UNCOMPRESSED = 1  # raw 24-bit RGB
68
+
69
+
70
class FtexImageFile(ImageFile.ImageFile):
    """Image plugin for FTEX textures (.ftc/.ftu) — see module docstring."""

    format = "FTEX"
    format_description = "Texture File Format (IW2:EOC)"

    def _open(self) -> None:
        # Parse the fixed little-endian header, then stage the first
        # (largest) mipmap level for decoding.
        if not _accept(self.fp.read(4)):
            msg = "not an FTEX file"
            raise SyntaxError(msg)
        struct.unpack("<i", self.fp.read(4))  # version
        self._size = struct.unpack("<2i", self.fp.read(8))
        mipmap_count, format_count = struct.unpack("<2i", self.fp.read(8))

        self._mode = "RGB"

        # Only support single-format files.
        # I don't know of any multi-format file.
        # NOTE(review): assert is stripped under -O; callers relying on this
        # validation should be aware.
        assert format_count == 1

        format, where = struct.unpack("<2i", self.fp.read(8))
        self.fp.seek(where)
        (mipmap_size,) = struct.unpack("<i", self.fp.read(4))

        # only the first mipmap level is read; the rest are ignored
        data = self.fp.read(mipmap_size)

        if format == Format.DXT1:
            self._mode = "RGBA"
            self.tile = [("bcn", (0, 0) + self.size, 0, 1)]
        elif format == Format.UNCOMPRESSED:
            self.tile = [("raw", (0, 0) + self.size, 0, ("RGB", 0, 1))]
        else:
            msg = f"Invalid texture compression format: {repr(format)}"
            raise ValueError(msg)

        # replace the file pointer with the buffered mipmap so that the
        # tile offsets above (0) are valid
        self.fp.close()
        self.fp = BytesIO(data)

    def load_seek(self, pos: int) -> None:
        # data is fully buffered in a BytesIO; no seeking is needed
        pass
108
+
109
+
110
def _accept(prefix: bytes) -> bool:
    """Match the 4-byte FTEX magic number."""
    return prefix.startswith(MAGIC)
112
+
113
+
114
+ Image.register_open(FtexImageFile.format, FtexImageFile, _accept)
115
+ Image.register_extensions(FtexImageFile.format, [".ftc", ".ftu"])
myenv/Lib/site-packages/PIL/GbrImagePlugin.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ #
4
+ # load a GIMP brush file
5
+ #
6
+ # History:
7
+ # 96-03-14 fl Created
8
+ # 16-01-08 es Version 2
9
+ #
10
+ # Copyright (c) Secret Labs AB 1997.
11
+ # Copyright (c) Fredrik Lundh 1996.
12
+ # Copyright (c) Eric Soroos 2016.
13
+ #
14
+ # See the README file for information on usage and redistribution.
15
+ #
16
+ #
17
+ # See https://github.com/GNOME/gimp/blob/mainline/devel-docs/gbr.txt for
18
+ # format documentation.
19
+ #
20
+ # This code Interprets version 1 and 2 .gbr files.
21
+ # Version 1 files are obsolete, and should not be used for new
22
+ # brushes.
23
+ # Version 2 files are saved by GIMP v2.8 (at least)
24
+ # Version 3 files have a format specifier of 18 for 16bit floats in
25
+ # the color depth field. This is currently unsupported by Pillow.
26
+ from __future__ import annotations
27
+
28
+ from . import Image, ImageFile
29
+ from ._binary import i32be as i32
30
+
31
+
32
def _accept(prefix: bytes) -> bool:
    """A GBR header is at least 20 bytes and declares version 1 or 2."""
    if len(prefix) < 8:
        return False
    header_size = i32(prefix, 0)
    version = i32(prefix, 4)
    return header_size >= 20 and version in (1, 2)
34
+
35
+
36
+ ##
37
+ # Image plugin for the GIMP brush format.
38
+
39
+
40
class GbrImageFile(ImageFile.ImageFile):
    """Image plugin for GIMP brush (.gbr) files, versions 1 and 2."""

    format = "GBR"
    format_description = "GIMP brush file"

    def _open(self) -> None:
        # Parse the big-endian header and record size/mode/comment;
        # pixel data is read later in load().
        header_size = i32(self.fp.read(4))
        if header_size < 20:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        version = i32(self.fp.read(4))
        if version not in (1, 2):
            msg = f"Unsupported GIMP brush version: {version}"
            raise SyntaxError(msg)

        width = i32(self.fp.read(4))
        height = i32(self.fp.read(4))
        color_depth = i32(self.fp.read(4))
        if width <= 0 or height <= 0:
            msg = "not a GIMP brush"
            raise SyntaxError(msg)
        if color_depth not in (1, 4):
            msg = f"Unsupported GIMP brush color depth: {color_depth}"
            raise SyntaxError(msg)

        if version == 1:
            # v1 header: 20 fixed bytes; the remainder is the comment
            comment_length = header_size - 20
        else:
            # v2 adds the "GIMP" magic and a spacing field (8 extra bytes)
            comment_length = header_size - 28
            magic_number = self.fp.read(4)
            if magic_number != b"GIMP":
                msg = "not a GIMP brush, bad magic number"
                raise SyntaxError(msg)
            self.info["spacing"] = i32(self.fp.read(4))

        # strip the trailing NUL terminator from the comment
        comment = self.fp.read(comment_length)[:-1]

        if color_depth == 1:
            self._mode = "L"
        else:
            self._mode = "RGBA"

        self._size = width, height

        self.info["comment"] = comment

        # Image might not be small
        Image._decompression_bomb_check(self.size)

        # Data is an uncompressed block of w * h * bytes/pixel
        self._data_size = width * height * color_depth

    def load(self):
        # Read the raw pixel block on first access only.
        if not self.im:
            self.im = Image.core.new(self.mode, self.size)
            self.frombytes(self.fp.read(self._data_size))
        return Image.Image.load(self)
96
+
97
+
98
+ #
99
+ # registry
100
+
101
+
102
+ Image.register_open(GbrImageFile.format, GbrImageFile, _accept)
103
+ Image.register_extension(GbrImageFile.format, ".gbr")
myenv/Lib/site-packages/PIL/GdImageFile.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # GD file handling
6
+ #
7
+ # History:
8
+ # 1996-04-12 fl Created
9
+ #
10
+ # Copyright (c) 1997 by Secret Labs AB.
11
+ # Copyright (c) 1996 by Fredrik Lundh.
12
+ #
13
+ # See the README file for information on usage and redistribution.
14
+ #
15
+
16
+
17
+ """
18
+ .. note::
19
+ This format cannot be automatically recognized, so the
20
+ class is not registered for use with :py:func:`PIL.Image.open()`. To open a
21
+ gd file, use the :py:func:`PIL.GdImageFile.open()` function instead.
22
+
23
+ .. warning::
24
+ THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
25
+ implementation is provided for convenience and demonstrational
26
+ purposes only.
27
+ """
28
+ from __future__ import annotations
29
+
30
+ from typing import IO
31
+
32
+ from . import ImageFile, ImagePalette, UnidentifiedImageError
33
+ from ._binary import i16be as i16
34
+ from ._binary import i32be as i32
35
+ from ._typing import StrOrBytesPath
36
+
37
+
38
class GdImageFile(ImageFile.ImageFile):
    """
    Image plugin for the GD uncompressed format. Note that this format
    is not supported by the standard :py:func:`PIL.Image.open()` function. To use
    this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and
    use the :py:func:`PIL.GdImageFile.open()` function.
    """

    format = "GD"
    format_description = "GD uncompressed images"

    def _open(self) -> None:
        # Header
        assert self.fp is not None

        s = self.fp.read(1037)

        # the two accepted GD 2.x signature values
        if i16(s) not in [65534, 65535]:
            msg = "Not a valid GD 2.x .gd file"
            raise SyntaxError(msg)

        self._mode = "L"  # FIXME: "P"
        self._size = i16(s, 2), i16(s, 4)

        true_color = s[6]
        # truecolour headers carry 2 extra bytes before the palette data
        # (offset adjustment only; field meaning not interpreted here)
        true_color_offset = 2 if true_color else 0

        # transparency index
        tindex = i32(s, 7 + true_color_offset)
        if tindex < 256:
            self.info["transparency"] = tindex

        self.palette = ImagePalette.raw(
            "XBGR", s[7 + true_color_offset + 4 : 7 + true_color_offset + 4 + 256 * 4]
        )

        # single raw tile starting right after the 256-entry palette
        self.tile = [
            (
                "raw",
                (0, 0) + self.size,
                7 + true_color_offset + 4 + 256 * 4,
                ("L", 0, 1),
            )
        ]
82
+
83
+
84
def open(fp: StrOrBytesPath | IO[bytes], mode: str = "r") -> GdImageFile:
    """
    Load texture from a GD image file.

    :param fp: GD file name, or an opened file handle.
    :param mode: Optional mode. In this version, if the mode argument
        is given, it must be "r".
    :returns: An image instance.
    :raises OSError: If the image could not be read.
    """
    if mode != "r":
        msg = "bad mode"
        raise ValueError(msg)

    try:
        return GdImageFile(fp)
    except SyntaxError as e:
        # translate the parser's SyntaxError into the public error type
        msg = "cannot identify this image file"
        raise UnidentifiedImageError(msg) from e
myenv/Lib/site-packages/PIL/GifImagePlugin.py ADDED
@@ -0,0 +1,1159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library.
3
+ # $Id$
4
+ #
5
+ # GIF file handling
6
+ #
7
+ # History:
8
+ # 1995-09-01 fl Created
9
+ # 1996-12-14 fl Added interlace support
10
+ # 1996-12-30 fl Added animation support
11
+ # 1997-01-05 fl Added write support, fixed local colour map bug
12
+ # 1997-02-23 fl Make sure to load raster data in getdata()
13
+ # 1997-07-05 fl Support external decoder (0.4)
14
+ # 1998-07-09 fl Handle all modes when saving (0.5)
15
+ # 1998-07-15 fl Renamed offset attribute to avoid name clash
16
+ # 2001-04-16 fl Added rewind support (seek to frame 0) (0.6)
17
+ # 2001-04-17 fl Added palette optimization (0.7)
18
+ # 2002-06-06 fl Added transparency support for save (0.8)
19
+ # 2004-02-24 fl Disable interlacing for small images
20
+ #
21
+ # Copyright (c) 1997-2004 by Secret Labs AB
22
+ # Copyright (c) 1995-2004 by Fredrik Lundh
23
+ #
24
+ # See the README file for information on usage and redistribution.
25
+ #
26
+ from __future__ import annotations
27
+
28
+ import itertools
29
+ import math
30
+ import os
31
+ import subprocess
32
+ import sys
33
+ from enum import IntEnum
34
+ from functools import cached_property
35
+ from typing import IO, TYPE_CHECKING, Any, List, Literal, NamedTuple, Union
36
+
37
+ from . import (
38
+ Image,
39
+ ImageChops,
40
+ ImageFile,
41
+ ImageMath,
42
+ ImageOps,
43
+ ImagePalette,
44
+ ImageSequence,
45
+ )
46
+ from ._binary import i16le as i16
47
+ from ._binary import o8
48
+ from ._binary import o16le as o16
49
+
50
+ if TYPE_CHECKING:
51
+ from . import _imaging
52
+
53
+
54
class LoadingStrategy(IntEnum):
    """.. versionadded:: 9.1.0"""

    # Promote P-mode frames to RGB after the first frame.
    RGB_AFTER_FIRST = 0
    # Promote only when a later frame brings a different palette.
    RGB_AFTER_DIFFERENT_PALETTE_ONLY = 1
    # Always decode frames as RGB/RGBA.
    RGB_ALWAYS = 2
60
+
61
+
62
+ #: .. versionadded:: 9.1.0
63
+ LOADING_STRATEGY = LoadingStrategy.RGB_AFTER_FIRST
64
+
65
+ # --------------------------------------------------------------------
66
+ # Identify/read GIF files
67
+
68
+
69
+ def _accept(prefix: bytes) -> bool:
70
+ return prefix[:6] in [b"GIF87a", b"GIF89a"]
71
+
72
+
73
+ ##
74
+ # Image plugin for GIF images. This plugin supports both GIF87 and
75
+ # GIF89 images.
76
+
77
+
78
+ class GifImageFile(ImageFile.ImageFile):
79
+ format = "GIF"
80
+ format_description = "Compuserve GIF"
81
+ _close_exclusive_fp_after_loading = False
82
+
83
+ global_palette = None
84
+
85
+ def data(self) -> bytes | None:
86
+ s = self.fp.read(1)
87
+ if s and s[0]:
88
+ return self.fp.read(s[0])
89
+ return None
90
+
91
+ def _is_palette_needed(self, p: bytes) -> bool:
92
+ for i in range(0, len(p), 3):
93
+ if not (i // 3 == p[i] == p[i + 1] == p[i + 2]):
94
+ return True
95
+ return False
96
+
97
    def _open(self) -> None:
        # Read the logical screen descriptor, the optional global colour
        # table, and position the stream on the first frame.

        # Screen
        s = self.fp.read(13)
        if not _accept(s):
            msg = "not a GIF file"
            raise SyntaxError(msg)

        self.info["version"] = s[:6]
        self._size = i16(s, 6), i16(s, 8)
        self.tile = []
        flags = s[10]
        bits = (flags & 7) + 1  # bits per entry in the global colour table

        if flags & 128:
            # get global palette
            self.info["background"] = s[11]
            # check if palette contains colour indices
            p = self.fp.read(3 << bits)
            if self._is_palette_needed(p):
                p = ImagePalette.raw("RGB", p)
                self.global_palette = self.palette = p

        self._fp = self.fp  # FIXME: hack
        self.__rewind = self.fp.tell()
        self._n_frames: int | None = None
        self._seek(0)  # get ready to read first frame
123
+
124
    @property
    def n_frames(self) -> int:
        # Total frame count; computed lazily by scanning to EOF without
        # decoding pixels, then restoring the current position.
        if self._n_frames is None:
            current = self.tell()
            try:
                while True:
                    self._seek(self.tell() + 1, False)
            except EOFError:
                self._n_frames = self.tell() + 1
            self.seek(current)
        return self._n_frames
135
+
136
    @cached_property
    def is_animated(self) -> bool:
        # True when the file holds more than one frame. Uses the cached
        # frame count when available; otherwise probes for a second frame
        # without decoding pixel data.
        if self._n_frames is not None:
            return self._n_frames != 1

        current = self.tell()
        if current:
            # already past frame 0, so there is more than one frame
            return True

        try:
            self._seek(1, False)
            is_animated = True
        except EOFError:
            is_animated = False

        self.seek(current)
        return is_animated
153
+
154
+ def seek(self, frame: int) -> None:
155
+ if not self._seek_check(frame):
156
+ return
157
+ if frame < self.__frame:
158
+ self.im = None
159
+ self._seek(0)
160
+
161
+ last_frame = self.__frame
162
+ for f in range(self.__frame + 1, frame + 1):
163
+ try:
164
+ self._seek(f)
165
+ except EOFError as e:
166
+ self.seek(last_frame)
167
+ msg = "no more images in GIF file"
168
+ raise EOFError(msg) from e
169
+
170
+ def _seek(self, frame: int, update_image: bool = True) -> None:
171
+ if frame == 0:
172
+ # rewind
173
+ self.__offset = 0
174
+ self.dispose: _imaging.ImagingCore | None = None
175
+ self.__frame = -1
176
+ self._fp.seek(self.__rewind)
177
+ self.disposal_method = 0
178
+ if "comment" in self.info:
179
+ del self.info["comment"]
180
+ else:
181
+ # ensure that the previous frame was loaded
182
+ if self.tile and update_image:
183
+ self.load()
184
+
185
+ if frame != self.__frame + 1:
186
+ msg = f"cannot seek to frame {frame}"
187
+ raise ValueError(msg)
188
+
189
+ self.fp = self._fp
190
+ if self.__offset:
191
+ # backup to last frame
192
+ self.fp.seek(self.__offset)
193
+ while self.data():
194
+ pass
195
+ self.__offset = 0
196
+
197
+ s = self.fp.read(1)
198
+ if not s or s == b";":
199
+ msg = "no more images in GIF file"
200
+ raise EOFError(msg)
201
+
202
+ palette: ImagePalette.ImagePalette | Literal[False] | None = None
203
+
204
+ info: dict[str, Any] = {}
205
+ frame_transparency = None
206
+ interlace = None
207
+ frame_dispose_extent = None
208
+ while True:
209
+ if not s:
210
+ s = self.fp.read(1)
211
+ if not s or s == b";":
212
+ break
213
+
214
+ elif s == b"!":
215
+ #
216
+ # extensions
217
+ #
218
+ s = self.fp.read(1)
219
+ block = self.data()
220
+ if s[0] == 249 and block is not None:
221
+ #
222
+ # graphic control extension
223
+ #
224
+ flags = block[0]
225
+ if flags & 1:
226
+ frame_transparency = block[3]
227
+ info["duration"] = i16(block, 1) * 10
228
+
229
+ # disposal method - find the value of bits 4 - 6
230
+ dispose_bits = 0b00011100 & flags
231
+ dispose_bits = dispose_bits >> 2
232
+ if dispose_bits:
233
+ # only set the dispose if it is not
234
+ # unspecified. I'm not sure if this is
235
+ # correct, but it seems to prevent the last
236
+ # frame from looking odd for some animations
237
+ self.disposal_method = dispose_bits
238
+ elif s[0] == 254:
239
+ #
240
+ # comment extension
241
+ #
242
+ comment = b""
243
+
244
+ # Read this comment block
245
+ while block:
246
+ comment += block
247
+ block = self.data()
248
+
249
+ if "comment" in info:
250
+ # If multiple comment blocks in frame, separate with \n
251
+ info["comment"] += b"\n" + comment
252
+ else:
253
+ info["comment"] = comment
254
+ s = None
255
+ continue
256
+ elif s[0] == 255 and frame == 0 and block is not None:
257
+ #
258
+ # application extension
259
+ #
260
+ info["extension"] = block, self.fp.tell()
261
+ if block[:11] == b"NETSCAPE2.0":
262
+ block = self.data()
263
+ if block and len(block) >= 3 and block[0] == 1:
264
+ self.info["loop"] = i16(block, 1)
265
+ while self.data():
266
+ pass
267
+
268
+ elif s == b",":
269
+ #
270
+ # local image
271
+ #
272
+ s = self.fp.read(9)
273
+
274
+ # extent
275
+ x0, y0 = i16(s, 0), i16(s, 2)
276
+ x1, y1 = x0 + i16(s, 4), y0 + i16(s, 6)
277
+ if (x1 > self.size[0] or y1 > self.size[1]) and update_image:
278
+ self._size = max(x1, self.size[0]), max(y1, self.size[1])
279
+ Image._decompression_bomb_check(self._size)
280
+ frame_dispose_extent = x0, y0, x1, y1
281
+ flags = s[8]
282
+
283
+ interlace = (flags & 64) != 0
284
+
285
+ if flags & 128:
286
+ bits = (flags & 7) + 1
287
+ p = self.fp.read(3 << bits)
288
+ if self._is_palette_needed(p):
289
+ palette = ImagePalette.raw("RGB", p)
290
+ else:
291
+ palette = False
292
+
293
+ # image data
294
+ bits = self.fp.read(1)[0]
295
+ self.__offset = self.fp.tell()
296
+ break
297
+ s = None
298
+
299
+ if interlace is None:
300
+ msg = "image not found in GIF frame"
301
+ raise EOFError(msg)
302
+
303
+ self.__frame = frame
304
+ if not update_image:
305
+ return
306
+
307
+ self.tile = []
308
+
309
+ if self.dispose:
310
+ self.im.paste(self.dispose, self.dispose_extent)
311
+
312
+ self._frame_palette = palette if palette is not None else self.global_palette
313
+ self._frame_transparency = frame_transparency
314
+ if frame == 0:
315
+ if self._frame_palette:
316
+ if LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
317
+ self._mode = "RGBA" if frame_transparency is not None else "RGB"
318
+ else:
319
+ self._mode = "P"
320
+ else:
321
+ self._mode = "L"
322
+
323
+ if not palette and self.global_palette:
324
+ from copy import copy
325
+
326
+ palette = copy(self.global_palette)
327
+ self.palette = palette
328
+ else:
329
+ if self.mode == "P":
330
+ if (
331
+ LOADING_STRATEGY != LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
332
+ or palette
333
+ ):
334
+ self.pyaccess = None
335
+ if "transparency" in self.info:
336
+ self.im.putpalettealpha(self.info["transparency"], 0)
337
+ self.im = self.im.convert("RGBA", Image.Dither.FLOYDSTEINBERG)
338
+ self._mode = "RGBA"
339
+ del self.info["transparency"]
340
+ else:
341
+ self._mode = "RGB"
342
+ self.im = self.im.convert("RGB", Image.Dither.FLOYDSTEINBERG)
343
+
344
+ def _rgb(color: int) -> tuple[int, int, int]:
345
+ if self._frame_palette:
346
+ if color * 3 + 3 > len(self._frame_palette.palette):
347
+ color = 0
348
+ return tuple(self._frame_palette.palette[color * 3 : color * 3 + 3])
349
+ else:
350
+ return (color, color, color)
351
+
352
+ self.dispose = None
353
+ self.dispose_extent = frame_dispose_extent
354
+ if self.dispose_extent and self.disposal_method >= 2:
355
+ try:
356
+ if self.disposal_method == 2:
357
+ # replace with background colour
358
+
359
+ # only dispose the extent in this frame
360
+ x0, y0, x1, y1 = self.dispose_extent
361
+ dispose_size = (x1 - x0, y1 - y0)
362
+
363
+ Image._decompression_bomb_check(dispose_size)
364
+
365
+ # by convention, attempt to use transparency first
366
+ dispose_mode = "P"
367
+ color = self.info.get("transparency", frame_transparency)
368
+ if color is not None:
369
+ if self.mode in ("RGB", "RGBA"):
370
+ dispose_mode = "RGBA"
371
+ color = _rgb(color) + (0,)
372
+ else:
373
+ color = self.info.get("background", 0)
374
+ if self.mode in ("RGB", "RGBA"):
375
+ dispose_mode = "RGB"
376
+ color = _rgb(color)
377
+ self.dispose = Image.core.fill(dispose_mode, dispose_size, color)
378
+ else:
379
+ # replace with previous contents
380
+ if self.im is not None:
381
+ # only dispose the extent in this frame
382
+ self.dispose = self._crop(self.im, self.dispose_extent)
383
+ elif frame_transparency is not None:
384
+ x0, y0, x1, y1 = self.dispose_extent
385
+ dispose_size = (x1 - x0, y1 - y0)
386
+
387
+ Image._decompression_bomb_check(dispose_size)
388
+ dispose_mode = "P"
389
+ color = frame_transparency
390
+ if self.mode in ("RGB", "RGBA"):
391
+ dispose_mode = "RGBA"
392
+ color = _rgb(frame_transparency) + (0,)
393
+ self.dispose = Image.core.fill(
394
+ dispose_mode, dispose_size, color
395
+ )
396
+ except AttributeError:
397
+ pass
398
+
399
+ if interlace is not None:
400
+ transparency = -1
401
+ if frame_transparency is not None:
402
+ if frame == 0:
403
+ if LOADING_STRATEGY != LoadingStrategy.RGB_ALWAYS:
404
+ self.info["transparency"] = frame_transparency
405
+ elif self.mode not in ("RGB", "RGBA"):
406
+ transparency = frame_transparency
407
+ self.tile = [
408
+ (
409
+ "gif",
410
+ (x0, y0, x1, y1),
411
+ self.__offset,
412
+ (bits, interlace, transparency),
413
+ )
414
+ ]
415
+
416
+ if info.get("comment"):
417
+ self.info["comment"] = info["comment"]
418
+ for k in ["duration", "extension"]:
419
+ if k in info:
420
+ self.info[k] = info[k]
421
+ elif k in self.info:
422
+ del self.info[k]
423
+
424
    def load_prepare(self) -> None:
        """Allocate/prepare the working image buffer before decoding a frame.

        Frames are decoded into palette ("P") mode when the frame has its own
        palette, otherwise into grayscale ("L"); ``load_end`` converts and
        composites afterwards.
        """
        # Decode mode for this frame: paletted if a frame palette exists.
        temp_mode = "P" if self._frame_palette else "L"
        self._prev_im = None
        if self.__frame == 0:
            if self._frame_transparency is not None:
                # Pre-fill the first frame with the transparent index so
                # undecoded pixels read as transparent.
                self.im = Image.core.fill(
                    temp_mode, self.size, self._frame_transparency
                )
        elif self.mode in ("RGB", "RGBA"):
            # Later frame while holding RGB(A) pixels: keep the composited
            # previous image so load_end can paste the new frame onto it.
            self._prev_im = self.im
            if self._frame_palette:
                self.im = Image.core.fill("P", self.size, self._frame_transparency or 0)
                self.im.putpalette("RGB", *self._frame_palette.getdata())
            else:
                self.im = None
        self._mode = temp_mode
        # Consumed; cleared so it is not reused for the next frame.
        self._frame_palette = None

        super().load_prepare()
443
+
444
    def load_end(self) -> None:
        """Finish decoding: convert the first frame or composite a later one.

        For frame 0 this may convert "P" to RGB(A) depending on the loading
        strategy.  For later frames, the decoded region is converted and
        pasted over the preserved previous image (``self._prev_im``).
        """
        if self.__frame == 0:
            if self.mode == "P" and LOADING_STRATEGY == LoadingStrategy.RGB_ALWAYS:
                if self._frame_transparency is not None:
                    # Mark the transparent palette index before converting so
                    # it becomes alpha 0 in RGBA.
                    self.im.putpalettealpha(self._frame_transparency, 0)
                    self._mode = "RGBA"
                else:
                    self._mode = "RGB"
                self.im = self.im.convert(self.mode, Image.Dither.FLOYDSTEINBERG)
            return
        if not self._prev_im:
            # Nothing to composite onto (still in P/L mode).
            return
        if self._frame_transparency is not None:
            self.im.putpalettealpha(self._frame_transparency, 0)
            frame_im = self.im.convert("RGBA")
        else:
            frame_im = self.im.convert("RGB")

        assert self.dispose_extent is not None
        # Only the frame's own extent is pasted onto the previous image.
        frame_im = self._crop(frame_im, self.dispose_extent)

        self.im = self._prev_im
        self._mode = self.im.mode
        if frame_im.mode == "RGBA":
            # Use the frame itself as the paste mask so transparent pixels
            # keep the previous frame's contents.
            self.im.paste(frame_im, self.dispose_extent, frame_im)
        else:
            self.im.paste(frame_im, self.dispose_extent)
471
+
472
    def tell(self) -> int:
        """Return the index of the current frame."""
        return self.__frame
474
+
475
+
476
+ # --------------------------------------------------------------------
477
+ # Write GIF files
478
+
479
+
480
+ RAWMODE = {"1": "L", "L": "L", "P": "P"}
481
+
482
+
483
def _normalize_mode(im: Image.Image) -> Image.Image:
    """
    Takes an image (or frame), returns an image in a mode that is appropriate
    for saving in a Gif.

    It may return the original image, or it may return an image converted to
    palette or 'L' mode.

    :param im: Image object
    :returns: Image object
    """
    if im.mode in RAWMODE:
        # Already directly writable; just make sure pixel data is loaded.
        im.load()
        return im
    if Image.getmodebase(im.mode) == "RGB":
        im = im.convert("P", palette=Image.Palette.ADAPTIVE)
        if im.palette.mode == "RGBA":
            # Record the first fully-transparent palette entry so the writer
            # can emit a transparency index.
            for rgba in im.palette.colors:
                if rgba[3] == 0:
                    im.info["transparency"] = im.palette.colors[rgba]
                    break
        return im
    # Anything else (e.g. "I", "F") is reduced to grayscale.
    return im.convert("L")
506
+
507
+
508
+ _Palette = Union[bytes, bytearray, List[int], ImagePalette.ImagePalette]
509
+
510
+
511
def _normalize_palette(
    im: Image.Image, palette: _Palette | None, info: dict[str, Any]
) -> Image.Image:
    """
    Normalizes the palette for image.
    - Sets the palette to the incoming palette, if provided.
    - Ensures that there's a palette for L mode images
    - Optimizes the palette if necessary/desired.

    :param im: Image object
    :param palette: bytes object containing the source palette, or ....
    :param info: encoderinfo
    :returns: Image object
    """
    source_palette = None
    if palette:
        # a bytes palette
        if isinstance(palette, (bytes, bytearray, list)):
            source_palette = bytearray(palette[:768])
        if isinstance(palette, ImagePalette.ImagePalette):
            source_palette = bytearray(palette.palette)

    if im.mode == "P":
        if not source_palette:
            # Fall back to the image's own palette.
            source_palette = im.im.getpalette("RGB")[:768]
    else:  # L-mode
        if not source_palette:
            # Synthesize a 256-entry grayscale ramp.
            source_palette = bytearray(i // 3 for i in range(768))
        im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette)

    used_palette_colors: list[int] | None
    if palette:
        # Remap the image so its indices match the caller-supplied palette.
        used_palette_colors = []
        assert source_palette is not None
        for i in range(0, len(source_palette), 3):
            source_color = tuple(source_palette[i : i + 3])
            index = im.palette.colors.get(source_color)
            if index in used_palette_colors:
                # Each target index may be claimed only once.
                index = None
            used_palette_colors.append(index)
        for i, index in enumerate(used_palette_colors):
            if index is None:
                # Fill unmatched slots with the first unused index.
                for j in range(len(used_palette_colors)):
                    if j not in used_palette_colors:
                        used_palette_colors[i] = j
                        break
        im = im.remap_palette(used_palette_colors)
    else:
        used_palette_colors = _get_optimize(im, info)
        if used_palette_colors is not None:
            im = im.remap_palette(used_palette_colors, source_palette)
            if "transparency" in info:
                try:
                    # Translate the transparency index into the remapped palette.
                    info["transparency"] = used_palette_colors.index(
                        info["transparency"]
                    )
                except ValueError:
                    # Transparent index was dropped by optimization.
                    del info["transparency"]
            return im

    im.palette.palette = source_palette
    return im
573
+
574
+
575
def _write_single_frame(
    im: Image.Image,
    fp: IO[bytes],
    palette: _Palette | None,
) -> None:
    """Write *im* to *fp* as a single-frame GIF (header + one image block).

    :param im: source image; its ``encoderinfo`` supplies save options
    :param fp: binary output stream
    :param palette: optional explicit palette (see ``_normalize_palette``)
    """
    im_out = _normalize_mode(im)
    # Propagate info discovered during normalization (e.g. transparency)
    # into the encoder options without overriding caller-set values.
    for k, v in im_out.info.items():
        im.encoderinfo.setdefault(k, v)
    im_out = _normalize_palette(im_out, palette, im.encoderinfo)

    for s in _get_global_header(im_out, im.encoderinfo):
        fp.write(s)

    # local image header
    flags = 0
    if get_interlace(im):
        flags = flags | 64
    _write_local_header(fp, im, (0, 0), flags)

    im_out.encoderconfig = (8, get_interlace(im))
    ImageFile._save(im_out, fp, [("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])])

    fp.write(b"\0")  # end of image data
598
+
599
+
600
def _getbbox(
    base_im: Image.Image, im_frame: Image.Image
) -> tuple[Image.Image, tuple[int, int, int, int] | None]:
    """Return (delta image, bbox of pixels that differ) between two frames.

    If the frames have different palettes, both are compared in RGBA so that
    equal indices pointing at different colors are not treated as equal.
    """
    if _get_palette_bytes(im_frame) != _get_palette_bytes(base_im):
        im_frame = im_frame.convert("RGBA")
        base_im = base_im.convert("RGBA")
    delta = ImageChops.subtract_modulo(im_frame, base_im)
    return delta, delta.getbbox(alpha_only=False)
608
+
609
+
610
class _Frame(NamedTuple):
    """A frame queued for writing: image, changed region, encoder options."""

    im: Image.Image
    bbox: tuple[int, int, int, int] | None
    encoderinfo: dict[str, Any]
614
+
615
+
616
def _write_multiple_frames(
    im: Image.Image, fp: IO[bytes], palette: _Palette | None
) -> bool:
    """Write *im* plus ``append_images`` to *fp* as an animated GIF.

    Collects and delta-encodes all frames first, then writes them out.
    Returns False (having written nothing) when only one distinct frame
    exists, so the caller can fall back to ``_write_single_frame``.
    """
    duration = im.encoderinfo.get("duration")
    disposal = im.encoderinfo.get("disposal", im.info.get("disposal"))

    im_frames: list[_Frame] = []
    previous_im: Image.Image | None = None
    frame_count = 0
    background_im = None
    for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])):
        for im_frame in ImageSequence.Iterator(imSequence):
            # a copy is required here since seek can still mutate the image
            im_frame = _normalize_mode(im_frame.copy())
            if frame_count == 0:
                # Seed global encoder options from the first frame's info.
                for k, v in im_frame.info.items():
                    if k == "transparency":
                        continue
                    im.encoderinfo.setdefault(k, v)

            encoderinfo = im.encoderinfo.copy()
            if "transparency" in im_frame.info:
                encoderinfo.setdefault("transparency", im_frame.info["transparency"])
            im_frame = _normalize_palette(im_frame, palette, encoderinfo)
            if isinstance(duration, (list, tuple)):
                encoderinfo["duration"] = duration[frame_count]
            elif duration is None and "duration" in im_frame.info:
                encoderinfo["duration"] = im_frame.info["duration"]
            if isinstance(disposal, (list, tuple)):
                encoderinfo["disposal"] = disposal[frame_count]
            frame_count += 1

            diff_frame = None
            if im_frames and previous_im:
                # delta frame
                delta, bbox = _getbbox(previous_im, im_frame)
                if not bbox:
                    # This frame is identical to the previous frame
                    if encoderinfo.get("duration"):
                        # Fold its duration into the previous frame instead.
                        im_frames[-1].encoderinfo["duration"] += encoderinfo["duration"]
                    continue
                if im_frames[-1].encoderinfo.get("disposal") == 2:
                    # Previous frame restores to background: diff against the
                    # background, not against the previous frame's pixels.
                    if background_im is None:
                        color = im.encoderinfo.get(
                            "transparency", im.info.get("transparency", (0, 0, 0))
                        )
                        background = _get_background(im_frame, color)
                        background_im = Image.new("P", im_frame.size, background)
                        background_im.putpalette(im_frames[0].im.palette)
                    bbox = _getbbox(background_im, im_frame)[1]
                elif encoderinfo.get("optimize") and im_frame.mode != "1":
                    if "transparency" not in encoderinfo:
                        try:
                            encoderinfo["transparency"] = (
                                im_frame.palette._new_color_index(im_frame)
                            )
                        except ValueError:
                            pass
                    if "transparency" in encoderinfo:
                        # When the delta is zero, fill the image with transparency
                        diff_frame = im_frame.copy()
                        fill = Image.new("P", delta.size, encoderinfo["transparency"])
                        if delta.mode == "RGBA":
                            r, g, b, a = delta.split()
                            # Mask = pixels where any channel differs.
                            mask = ImageMath.lambda_eval(
                                lambda args: args["convert"](
                                    args["max"](
                                        args["max"](
                                            args["max"](args["r"], args["g"]), args["b"]
                                        ),
                                        args["a"],
                                    )
                                    * 255,
                                    "1",
                                ),
                                r=r,
                                g=g,
                                b=b,
                                a=a,
                            )
                        else:
                            if delta.mode == "P":
                                # Convert to L without considering palette
                                delta_l = Image.new("L", delta.size)
                                delta_l.putdata(delta.getdata())
                                delta = delta_l
                            mask = ImageMath.lambda_eval(
                                lambda args: args["convert"](args["im"] * 255, "1"),
                                im=delta,
                            )
                        diff_frame.paste(fill, mask=ImageOps.invert(mask))
            else:
                bbox = None
            previous_im = im_frame
            im_frames.append(_Frame(diff_frame or im_frame, bbox, encoderinfo))

    if len(im_frames) == 1:
        if "duration" in im.encoderinfo:
            # Since multiple frames will not be written, use the combined duration
            im.encoderinfo["duration"] = im_frames[0].encoderinfo["duration"]
        return False

    for frame_data in im_frames:
        im_frame = frame_data.im
        if not frame_data.bbox:
            # global header
            for s in _get_global_header(im_frame, frame_data.encoderinfo):
                fp.write(s)
            offset = (0, 0)
        else:
            # compress difference
            if not palette:
                frame_data.encoderinfo["include_color_table"] = True

            im_frame = im_frame.crop(frame_data.bbox)
            offset = frame_data.bbox[:2]
        _write_frame_data(fp, im_frame, offset, frame_data.encoderinfo)
    return True
734
+
735
+
736
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save entry point registered for ``save_all=True`` (animated GIF)."""
    _save(im, fp, filename, save_all=True)
738
+
739
+
740
def _save(
    im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False
) -> None:
    """Save *im* to *fp* as GIF; writes all frames when *save_all* is true.

    :param im: image to save; ``encoderinfo``/``info`` provide options
    :param fp: binary output stream
    :param filename: destination name (unused here, part of the save API)
    :param save_all: attempt a multi-frame write before falling back
    """
    # header
    if "palette" in im.encoderinfo or "palette" in im.info:
        palette = im.encoderinfo.get("palette", im.info.get("palette"))
    else:
        palette = None
        # Only auto-optimize when no explicit palette was supplied.
        im.encoderinfo.setdefault("optimize", True)

    # _write_multiple_frames returns False when there is only one distinct
    # frame, in which case a single-frame file is written instead.
    if not save_all or not _write_multiple_frames(im, fp, palette):
        _write_single_frame(im, fp, palette)

    fp.write(b";")  # end of file

    if hasattr(fp, "flush"):
        fp.flush()
757
+
758
+
759
def get_interlace(im: Image.Image) -> int:
    """Return the interlace flag for *im* (default 1, i.e. interlaced).

    Interlacing is disabled for images smaller than 16 pixels on either
    side (workaround for @PIL153).
    """
    requested = im.encoderinfo.get("interlace", 1)
    return 0 if min(im.size) < 16 else requested
767
+
768
+
769
def _write_local_header(
    fp: IO[bytes], im: Image.Image, offset: tuple[int, int], flags: int
) -> None:
    """Write a Graphic Control Extension (if needed) and Image Descriptor.

    :param fp: binary output stream
    :param im: frame being written; ``encoderinfo`` supplies transparency,
        duration, disposal and local color table options
    :param offset: (x, y) placement of the frame on the logical screen
    :param flags: base Image Descriptor flags (e.g. 64 for interlace)
    """
    try:
        transparency = im.encoderinfo["transparency"]
    except KeyError:
        transparency = None

    if "duration" in im.encoderinfo:
        # GIF stores duration in hundredths of a second.
        duration = int(im.encoderinfo["duration"] / 10)
    else:
        duration = 0

    disposal = int(im.encoderinfo.get("disposal", 0))

    # The Graphic Control Extension is only emitted when it carries data.
    if transparency is not None or duration != 0 or disposal:
        packed_flag = 1 if transparency is not None else 0
        packed_flag |= disposal << 2

        fp.write(
            b"!"
            + o8(249)  # extension intro
            + o8(4)  # length
            + o8(packed_flag)  # packed fields
            + o16(duration)  # duration
            + o8(transparency or 0)  # transparency index
            + o8(0)
        )

    include_color_table = im.encoderinfo.get("include_color_table")
    if include_color_table:
        palette_bytes = _get_palette_bytes(im)
        color_table_size = _get_color_table_size(palette_bytes)
        if color_table_size:
            flags = flags | 128  # local color table flag
            flags = flags | color_table_size

    fp.write(
        b","
        + o16(offset[0])  # offset
        + o16(offset[1])
        + o16(im.size[0])  # size
        + o16(im.size[1])
        + o8(flags)  # flags
    )
    if include_color_table and color_table_size:
        fp.write(_get_header_palette(palette_bytes))
    fp.write(o8(8))  # bits
817
+
818
+
819
def _save_netpbm(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save via the external NETPBM/PBMPLUS tools (``ppmquant``/``ppmtogif``).

    Unused by default.
    To use, uncomment the register_save call at the end of the file.

    If you need real GIF compression and/or RGB quantization, you
    can use the external NETPBM/PBMPLUS utilities. See comments
    below for information on how to enable this.
    """
    # Dump raster data to a temporary PPM/PGM file for the external tools.
    tempfile = im._dump()

    try:
        with open(filename, "wb") as f:
            if im.mode != "RGB":
                subprocess.check_call(
                    ["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL
                )
            else:
                # Pipe ppmquant output into ppmtogif
                # "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename)
                quant_cmd = ["ppmquant", "256", tempfile]
                togif_cmd = ["ppmtogif"]
                quant_proc = subprocess.Popen(
                    quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
                )
                togif_proc = subprocess.Popen(
                    togif_cmd,
                    stdin=quant_proc.stdout,
                    stdout=f,
                    stderr=subprocess.DEVNULL,
                )

                # Allow ppmquant to receive SIGPIPE if ppmtogif exits
                assert quant_proc.stdout is not None
                quant_proc.stdout.close()

                retcode = quant_proc.wait()
                if retcode:
                    raise subprocess.CalledProcessError(retcode, quant_cmd)

                retcode = togif_proc.wait()
                if retcode:
                    raise subprocess.CalledProcessError(retcode, togif_cmd)
    finally:
        # Best-effort cleanup of the temporary dump file.
        try:
            os.unlink(tempfile)
        except OSError:
            pass
865
+
866
+
867
+ # Force optimization so that we can test performance against
868
+ # cases where it took lots of memory and time previously.
869
+ _FORCE_OPTIMIZE = False
870
+
871
+
872
def _get_optimize(im: Image.Image, info: dict[str, Any]) -> list[int] | None:
    """
    Palette optimization is a potentially expensive operation.

    This function determines if the palette should be optimized using
    some heuristics, then returns the list of palette entries in use.

    :param im: Image object
    :param info: encoderinfo
    :returns: list of indexes of palette entries in use, or None
    """
    if im.mode in ("P", "L") and info and info.get("optimize"):
        # Potentially expensive operation.

        # The palette saves 3 bytes per color not used, but palette
        # lengths are restricted to 3*(2**N) bytes. Max saving would
        # be 768 -> 6 bytes if we went all the way down to 2 colors.
        # * If we're over 128 colors, we can't save any space.
        # * If there aren't any holes, it's not worth collapsing.
        # * If we have a 'large' image, the palette is in the noise.

        # create the new palette if not every color is used
        optimise = _FORCE_OPTIMIZE or im.mode == "L"
        if optimise or im.width * im.height < 512 * 512:
            # check which colors are used
            used_palette_colors = []
            for i, count in enumerate(im.histogram()):
                if count:
                    used_palette_colors.append(i)

            # "holes" exist when the highest used index exceeds the count
            # of used colors; collapsing then shrinks the palette.
            if optimise or max(used_palette_colors) >= len(used_palette_colors):
                return used_palette_colors

            num_palette_colors = len(im.palette.palette) // Image.getmodebands(
                im.palette.mode
            )
            # Palettes are stored at power-of-two sizes.
            current_palette_size = 1 << (num_palette_colors - 1).bit_length()
            if (
                # check that the palette would become smaller when saved
                len(used_palette_colors) <= current_palette_size // 2
                # check that the palette is not already the smallest possible size
                and current_palette_size > 2
            ):
                return used_palette_colors
    return None
917
+
918
+
919
+ def _get_color_table_size(palette_bytes: bytes) -> int:
920
+ # calculate the palette size for the header
921
+ if not palette_bytes:
922
+ return 0
923
+ elif len(palette_bytes) < 9:
924
+ return 1
925
+ else:
926
+ return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1
927
+
928
+
929
def _get_header_palette(palette_bytes: bytes) -> bytes:
    """
    Returns the palette, null padded to the next power of 2 (*3) bytes
    suitable for direct inclusion in the GIF header

    :param palette_bytes: Unpadded palette bytes, in RGBRGB form
    :returns: Null padded palette
    """
    color_table_size = _get_color_table_size(palette_bytes)

    # add the missing amount of bytes
    # the palette has to be 2<<n in size
    actual_target_size_diff = (2 << color_table_size) - len(palette_bytes) // 3
    if actual_target_size_diff > 0:
        # Pad with black (0, 0, 0) entries.
        palette_bytes += o8(0) * 3 * actual_target_size_diff
    return palette_bytes
945
+
946
+
947
+ def _get_palette_bytes(im: Image.Image) -> bytes:
948
+ """
949
+ Gets the palette for inclusion in the gif header
950
+
951
+ :param im: Image object
952
+ :returns: Bytes, len<=768 suitable for inclusion in gif header
953
+ """
954
+ return im.palette.palette if im.palette else b""
955
+
956
+
957
+ def _get_background(
958
+ im: Image.Image,
959
+ info_background: int | tuple[int, int, int] | tuple[int, int, int, int] | None,
960
+ ) -> int:
961
+ background = 0
962
+ if info_background:
963
+ if isinstance(info_background, tuple):
964
+ # WebPImagePlugin stores an RGBA value in info["background"]
965
+ # So it must be converted to the same format as GifImagePlugin's
966
+ # info["background"] - a global color table index
967
+ try:
968
+ background = im.palette.getcolor(info_background, im)
969
+ except ValueError as e:
970
+ if str(e) not in (
971
+ # If all 256 colors are in use,
972
+ # then there is no need for the background color
973
+ "cannot allocate more than 256 colors",
974
+ # Ignore non-opaque WebP background
975
+ "cannot add non-opaque RGBA color to RGB palette",
976
+ ):
977
+ raise
978
+ else:
979
+ background = info_background
980
+ return background
981
+
982
+
983
def _get_global_header(im: Image.Image, info: dict[str, Any]) -> list[bytes]:
    """Return a list of strings representing a GIF header"""

    # Header Block
    # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp

    # 89a is required for any of the extension blocks; otherwise write 87a.
    version = b"87a"
    if im.info.get("version") == b"89a" or (
        info
        and (
            "transparency" in info
            or info.get("loop") is not None
            or info.get("duration")
            or info.get("comment")
        )
    ):
        version = b"89a"

    background = _get_background(im, info.get("background"))

    palette_bytes = _get_palette_bytes(im)
    color_table_size = _get_color_table_size(palette_bytes)

    header = [
        b"GIF"  # signature
        + version  # version
        + o16(im.size[0])  # canvas width
        + o16(im.size[1]),  # canvas height
        # Logical Screen Descriptor
        # size of global color table + global color table flag
        o8(color_table_size + 128),  # packed fields
        # background + reserved/aspect
        o8(background) + o8(0),
        # Global Color Table
        _get_header_palette(palette_bytes),
    ]
    if info.get("loop") is not None:
        # NETSCAPE2.0 application extension carries the loop count.
        header.append(
            b"!"
            + o8(255)  # extension intro
            + o8(11)
            + b"NETSCAPE2.0"
            + o8(3)
            + o8(1)
            + o16(info["loop"])  # number of loops
            + o8(0)
        )
    if info.get("comment"):
        comment_block = b"!" + o8(254)  # extension intro

        comment = info["comment"]
        if isinstance(comment, str):
            comment = comment.encode()
        # Comment data is split into sub-blocks of at most 255 bytes.
        for i in range(0, len(comment), 255):
            subblock = comment[i : i + 255]
            comment_block += o8(len(subblock)) + subblock

        comment_block += o8(0)
        header.append(comment_block)
    return header
1043
+
1044
+
1045
def _write_frame_data(
    fp: IO[bytes],
    im_frame: Image.Image,
    offset: tuple[int, int],
    params: dict[str, Any],
) -> None:
    """Write one frame (local header + encoded pixel data) to *fp*.

    :param fp: binary output stream
    :param im_frame: frame image to encode
    :param offset: (x, y) placement on the logical screen
    :param params: encoder options; temporarily attached as ``encoderinfo``
    """
    try:
        im_frame.encoderinfo = params

        # local image header
        _write_local_header(fp, im_frame, offset, 0)

        ImageFile._save(
            im_frame, fp, [("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])]
        )

        fp.write(b"\0")  # end of image data
    finally:
        # Always detach the options, even if encoding raised.
        del im_frame.encoderinfo
1064
+
1065
+
1066
+ # --------------------------------------------------------------------
1067
+ # Legacy GIF utilities
1068
+
1069
+
1070
def getheader(
    im: Image.Image, palette: _Palette | None = None, info: dict[str, Any] | None = None
) -> tuple[list[bytes], list[int] | None]:
    """
    Legacy Method to get Gif data from image.

    Warning:: May modify image data.

    :param im: Image object
    :param palette: bytes object containing the source palette, or ....
    :param info: encoderinfo
    :returns: tuple of (list of header items, optimized palette)

    """
    if info is None:
        info = {}

    used_palette_colors = _get_optimize(im, info)

    if "background" not in info and "background" in im.info:
        info["background"] = im.info["background"]

    # Note: mutates *im* to carry the normalized palette/raster.
    im_mod = _normalize_palette(im, palette, info)
    im.palette = im_mod.palette
    im.im = im_mod.im
    header = _get_global_header(im, info)

    return header, used_palette_colors
1098
+
1099
+
1100
def getdata(
    im: Image.Image, offset: tuple[int, int] = (0, 0), **params: Any
) -> list[bytes]:
    """
    Legacy Method

    Return a list of strings representing this image.
    The first string is a local image header, the rest contains
    encoded image data.

    To specify duration, add the time in milliseconds,
    e.g. ``getdata(im_frame, duration=1000)``

    :param im: Image object
    :param offset: Tuple of (x, y) pixels. Defaults to (0, 0)
    :param \\**params: e.g. duration or other encoder info parameters
    :returns: List of bytes containing GIF encoded frame data

    """
    from io import BytesIO

    # Collects the writes that _write_frame_data would send to a file,
    # so each header/data chunk can be returned as a separate item.
    class Collector(BytesIO):
        data = []

        if sys.version_info >= (3, 12):
            from collections.abc import Buffer

            def write(self, data: Buffer) -> int:
                self.data.append(data)
                return len(data)

        else:

            def write(self, data: Any) -> int:
                self.data.append(data)
                return len(data)

    im.load()  # make sure raster data is available

    fp = Collector()

    _write_frame_data(fp, im, offset, params)

    return fp.data
1144
+
1145
+
1146
+ # --------------------------------------------------------------------
1147
+ # Registry
1148
+
1149
+ Image.register_open(GifImageFile.format, GifImageFile, _accept)
1150
+ Image.register_save(GifImageFile.format, _save)
1151
+ Image.register_save_all(GifImageFile.format, _save_all)
1152
+ Image.register_extension(GifImageFile.format, ".gif")
1153
+ Image.register_mime(GifImageFile.format, "image/gif")
1154
+
1155
+ #
1156
+ # Uncomment the following line if you wish to use NETPBM/PBMPLUS
1157
+ # instead of the built-in "uncompressed" GIF encoder
1158
+
1159
+ # Image.register_save(GifImageFile.format, _save_netpbm)
myenv/Lib/site-packages/PIL/GimpGradientFile.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # stuff to read (and render) GIMP gradient files
6
+ #
7
+ # History:
8
+ # 97-08-23 fl Created
9
+ #
10
+ # Copyright (c) Secret Labs AB 1997.
11
+ # Copyright (c) Fredrik Lundh 1997.
12
+ #
13
+ # See the README file for information on usage and redistribution.
14
+ #
15
+
16
+ """
17
+ Stuff to translate curve segments to palette values (derived from
18
+ the corresponding code in GIMP, written by Federico Mena Quintero.
19
+ See the GIMP distribution for more information.)
20
+ """
21
+ from __future__ import annotations
22
+
23
+ from math import log, pi, sin, sqrt
24
+ from typing import IO, Callable
25
+
26
+ from ._binary import o8
27
+
28
+ EPSILON = 1e-10
29
+ """""" # Enable auto-doc for data member
30
+
31
+
32
def linear(middle: float, pos: float) -> float:
    """Piecewise-linear blend: 0 at pos 0, 0.5 at *middle*, 1 at pos 1.

    Segments shorter than EPSILON clamp to the nearer endpoint to avoid
    division by (near) zero.
    """
    if pos <= middle:
        if middle < EPSILON:
            return 0.0
        return 0.5 * pos / middle
    remainder = 1.0 - middle
    if remainder < EPSILON:
        return 1.0
    return 0.5 + 0.5 * (pos - middle) / remainder
45
+
46
+
47
def curved(middle: float, pos: float) -> float:
    """Power-curve blend with exponent chosen so that curved(m, m) == 0.5."""
    exponent = log(0.5) / log(max(middle, EPSILON))
    return pos**exponent
49
+
50
+
51
def sine(middle: float, pos: float) -> float:
    """Sinusoidal ease-in/out blend, derived from the linear blend."""
    t = linear(middle, pos)
    return (sin(-pi / 2.0 + pi * t) + 1.0) / 2.0
53
+
54
+
55
def sphere_increasing(middle: float, pos: float) -> float:
    """Upper quarter-circle blend of the linear value."""
    t = linear(middle, pos) - 1.0
    return sqrt(1.0 - t**2)
57
+
58
+
59
def sphere_decreasing(middle: float, pos: float) -> float:
    """Lower quarter-circle blend of the linear value."""
    t = linear(middle, pos)
    return 1.0 - sqrt(1.0 - t**2)
61
+
62
+
63
+ SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing]
64
+ """""" # Enable auto-doc for data member
65
+
66
+
67
class GradientFile:
    """Base class holding parsed gradient segments and rendering a palette."""

    # Parsed segments as (x0, x1, xm, rgb0, rgb1, blend_function) tuples,
    # where x0/x1 bound the segment, xm is its midpoint, rgb0/rgb1 are the
    # endpoint colors and blend_function maps (middle, pos) -> scale.
    gradient: (
        list[
            tuple[
                float,
                float,
                float,
                list[float],
                list[float],
                Callable[[float, float], float],
            ]
        ]
        | None
    ) = None

    def getpalette(self, entries: int = 256) -> tuple[bytes, str]:
        """Render the gradient into a palette.

        :param entries: number of palette entries to generate
        :returns: (palette bytes, "RGBA")
        """
        assert self.gradient is not None
        palette = []

        ix = 0
        x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

        for i in range(entries):
            # Sample position in [0, 1].
            x = i / (entries - 1)

            # Advance to the segment containing x.
            while x1 < x:
                ix += 1
                x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]

            w = x1 - x0

            if w < EPSILON:
                # Degenerate segment: use the midpoint value.
                scale = segment(0.5, 0.5)
            else:
                scale = segment((xm - x0) / w, (x - x0) / w)

            # expand to RGBA
            r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5))
            g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5))
            b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5))
            a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5))

            # add to palette
            palette.append(r + g + b + a)

        return b"".join(palette), "RGBA"
113
+
114
+
115
class GimpGradientFile(GradientFile):
    """File handler for GIMP's gradient format."""

    def __init__(self, fp: IO[bytes]) -> None:
        """Parse a GIMP gradient file from the binary stream *fp*.

        :raises SyntaxError: if the magic line is missing
        :raises OSError: if a segment uses a non-RGB (HSV) color space
        """
        if fp.readline()[:13] != b"GIMP Gradient":
            msg = "not a GIMP gradient file"
            raise SyntaxError(msg)

        line = fp.readline()

        # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do
        if line.startswith(b"Name: "):
            line = fp.readline().strip()

        # Number of segment lines that follow.
        count = int(line)

        self.gradient = []

        for i in range(count):
            s = fp.readline().split()
            # First 11 fields: x0 xm x1, then RGBA at x0 and RGBA at x1.
            w = [float(x) for x in s[:11]]

            x0, x1 = w[0], w[2]
            xm = w[1]
            rgb0 = w[3:7]
            rgb1 = w[7:11]

            # Field 12 selects the blend function, field 13 the color space.
            segment = SEGMENTS[int(s[11])]
            cspace = int(s[12])

            if cspace != 0:
                msg = "cannot handle HSV colour space"
                raise OSError(msg)

            self.gradient.append((x0, x1, xm, rgb0, rgb1, segment))
myenv/Lib/site-packages/PIL/GimpPaletteFile.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # stuff to read GIMP palette files
6
+ #
7
+ # History:
8
+ # 1997-08-23 fl Created
9
+ # 2004-09-07 fl Support GIMP 2.0 palette files.
10
+ #
11
+ # Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
12
+ # Copyright (c) Fredrik Lundh 1997-2004.
13
+ #
14
+ # See the README file for information on usage and redistribution.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ import re
19
+ from typing import IO
20
+
21
+ from ._binary import o8
22
+
23
+
24
class GimpPaletteFile:
    """File handler for GIMP's palette format."""

    rawmode = "RGB"

    def __init__(self, fp: IO[bytes]) -> None:
        # start from a greyscale ramp; entries read from the file
        # overwrite slots positionally
        entries = [o8(i) * 3 for i in range(256)]

        if fp.readline()[:12] != b"GIMP Palette":
            msg = "not a GIMP palette file"
            raise SyntaxError(msg)

        for slot in range(256):
            line = fp.readline()
            if not line:
                break

            # skip "Field:" headers and comment lines (slot keeps its
            # greyscale default, matching historical behaviour)
            if re.match(rb"\w+:|#", line):
                continue
            if len(line) > 100:
                msg = "bad palette file"
                raise SyntaxError(msg)

            channels = tuple(int(t) for t in line.split()[:3])
            if len(channels) != 3:
                msg = "bad palette entry"
                raise ValueError(msg)

            entries[slot] = b"".join(o8(c) for c in channels)

        self.palette = b"".join(entries)

    def getpalette(self) -> tuple[bytes, str]:
        """Return the raw palette bytes together with their rawmode."""
        return self.palette, self.rawmode
myenv/Lib/site-packages/PIL/GribStubImagePlugin.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # GRIB stub adapter
6
+ #
7
+ # Copyright (c) 1996-2003 by Fredrik Lundh
8
+ #
9
+ # See the README file for information on usage and redistribution.
10
+ #
11
+ from __future__ import annotations
12
+
13
+ from typing import IO
14
+
15
+ from . import Image, ImageFile
16
+
17
_handler = None  # application-installed GRIB handler, or None when absent


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install an application-specific GRIB image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
28
+
29
+
30
+ # --------------------------------------------------------------------
31
+ # Image adapter
32
+
33
+
34
def _accept(prefix: bytes) -> bool:
    # GRIB magic "GRIB" with the edition number at byte offset 7;
    # only edition 1 is accepted by this stub.
    return prefix.startswith(b"GRIB") and prefix[7] == 1
36
+
37
+
38
class GribStubImageFile(ImageFile.StubImageFile):
    """Stub image file for GRIB data; real decoding is delegated to an
    application handler installed via :func:`register_handler`."""

    format = "GRIB"
    format_description = "GRIB"

    def _open(self) -> None:
        start = self.fp.tell()

        if not _accept(self.fp.read(8)):
            msg = "Not a GRIB file"
            raise SyntaxError(msg)

        # rewind so an installed handler sees the whole stream
        self.fp.seek(start)

        # placeholder mode/size until a handler supplies real metadata
        self._mode = "F"
        self._size = 1, 1

        handler = self._load()
        if handler:
            handler.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        return _handler
61
+
62
+
63
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* through the registered GRIB handler.

    :raises OSError: If no handler with a ``save`` method is installed.
    """
    handler = _handler
    if handler is not None and hasattr(handler, "save"):
        handler.save(im, fp, filename)
        return
    raise OSError("GRIB save handler not installed")
68
+
69
+
70
# --------------------------------------------------------------------
# Registry

# Hook the stub into Image's open/save dispatch and claim the
# ".grib" extension for this format.
Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept)
Image.register_save(GribStubImageFile.format, _save)

Image.register_extension(GribStubImageFile.format, ".grib")
myenv/Lib/site-packages/PIL/Hdf5StubImagePlugin.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # The Python Imaging Library
3
+ # $Id$
4
+ #
5
+ # HDF5 stub adapter
6
+ #
7
+ # Copyright (c) 2000-2003 by Fredrik Lundh
8
+ #
9
+ # See the README file for information on usage and redistribution.
10
+ #
11
+ from __future__ import annotations
12
+
13
+ from typing import IO
14
+
15
+ from . import Image, ImageFile
16
+
17
_handler = None  # application-installed HDF5 handler, or None when absent


def register_handler(handler: ImageFile.StubHandler | None) -> None:
    """
    Install an application-specific HDF5 image handler.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
28
+
29
+
30
+ # --------------------------------------------------------------------
31
+ # Image adapter
32
+
33
+
34
def _accept(prefix: bytes) -> bool:
    # HDF5 superblock signature: \x89HDF\r\n\x1a\n (8 bytes)
    return prefix.startswith(b"\x89HDF\r\n\x1a\n")
36
+
37
+
38
class HDF5StubImageFile(ImageFile.StubImageFile):
    """Stub image file for HDF5 containers; real decoding is delegated to
    an application handler installed via :func:`register_handler`."""

    format = "HDF5"
    format_description = "HDF5"

    def _open(self) -> None:
        start = self.fp.tell()

        if not _accept(self.fp.read(8)):
            msg = "Not an HDF file"
            raise SyntaxError(msg)

        # rewind so an installed handler sees the whole stream
        self.fp.seek(start)

        # placeholder mode/size until a handler supplies real metadata
        self._mode = "F"
        self._size = 1, 1

        handler = self._load()
        if handler:
            handler.open(self)

    def _load(self) -> ImageFile.StubHandler | None:
        return _handler
61
+
62
+
63
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
    """Save *im* through the registered HDF5 handler.

    :raises OSError: If no handler with a ``save`` method is installed.
    """
    handler = _handler
    if handler is not None and hasattr(handler, "save"):
        handler.save(im, fp, filename)
        return
    raise OSError("HDF5 save handler not installed")
68
+
69
+
70
# --------------------------------------------------------------------
# Registry

# Hook the stub into Image's open/save dispatch and claim the
# common HDF5 extensions for this format.
Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept)
Image.register_save(HDF5StubImageFile.format, _save)

Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"])