Upload 61 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .codecov.yml +12 -0
- .dockerignore +14 -16
- .gitattributes +2 -34
- .gitignore +328 -15
- .gitmodules +9 -0
- .pre-commit-config.yaml +35 -0
- CHANGELOG.md +435 -0
- CITATION.cff +11 -0
- CODE_OF_CONDUCT.md +6 -129
- CONTRIBUTING.md +14 -78
- Dockerfile +17 -0
- LICENSE +21 -202
- LICENSE.txt +126 -0
- MANIFEST.in +4 -0
- MODEL_LICENSE +33 -0
- Makefile +3 -42
- NBSETUP.md +95 -0
- NOTICE +1648 -0
- Notice +1 -0
- README.md +32 -14
- Releases.md +182 -0
- SECURITY.md +32 -24
- USE_POLICY.md +50 -0
- __init__.py +4 -0
- cgmanifest.json +42 -0
- component-governance.yml +18 -0
- compute_metrics.py +395 -0
- conda_dependencies.yaml +29 -0
- config.json +2 -51
- configuration.ipynb +389 -0
- configuration.yml +4 -0
- configuration_chatglm.py +59 -0
- constants.py +232 -0
- download_dependencies.py +49 -0
- environment_dev.yml +59 -0
- error_definitions.py +316 -0
- evaluate_model.py +356 -0
- exceptions.py +331 -0
- generation_config.json +2 -3
- image_constants.py +53 -0
- image_dataset.py +300 -0
- index.md +151 -0
- license_header.txt +13 -0
- logging_utilities.py +292 -0
- model_prediction.py +293 -0
- modeling_chatglm.py +1197 -0
- mypy.ini +47 -0
- pyproject.toml +46 -0
- pytorch_model.bin.index.json +201 -195
- quantization.py +188 -0
.codecov.yml
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
comment:
|
2 |
+
require_changes: true
|
3 |
+
coverage:
|
4 |
+
status:
|
5 |
+
project:
|
6 |
+
default:
|
7 |
+
target: auto
|
8 |
+
threshold: 2%
|
9 |
+
patch:
|
10 |
+
default:
|
11 |
+
target: auto
|
12 |
+
threshold: 2%
|
.dockerignore
CHANGED
@@ -1,16 +1,14 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
**/.coverage
|
16 |
-
**/coverage.xml
|
|
|
1 |
+
# Ignore everything
|
2 |
+
**
|
3 |
+
|
4 |
+
# Distribution packages
|
5 |
+
!dist/*.whl
|
6 |
+
|
7 |
+
# Scripts
|
8 |
+
!docker/scripts/start_argilla_server.sh
|
9 |
+
!docker/scripts/wait-for-it.sh
|
10 |
+
!docker/scripts/start_quickstart_argilla.sh
|
11 |
+
!docker/scripts/load_data.py
|
12 |
+
|
13 |
+
# Package dependencies
|
14 |
+
!docker/quickstart.requirements.txt
|
|
|
|
.gitattributes
CHANGED
@@ -1,34 +1,2 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
1 |
+
evals/registry/data/**/*.jsonl filter=lfs diff=lfs merge=lfs -text
|
2 |
+
tokenizer.model filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
CHANGED
@@ -1,17 +1,330 @@
|
|
1 |
-
|
2 |
-
.
|
|
|
|
|
3 |
|
4 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
*.pyc
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
## Ignore Visual Studio temporary files, build results, and
|
2 |
+
## files generated by popular Visual Studio add-ons.
|
3 |
+
##
|
4 |
+
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
|
5 |
|
6 |
+
# User-specific files
|
7 |
+
*.suo
|
8 |
+
*.user
|
9 |
+
*.userosscache
|
10 |
+
*.sln.docstates
|
11 |
+
|
12 |
+
# User-specific files (MonoDevelop/Xamarin Studio)
|
13 |
+
*.userprefs
|
14 |
+
|
15 |
+
# Build results
|
16 |
+
[Dd]ebug/
|
17 |
+
[Dd]ebugPublic/
|
18 |
+
[Rr]elease/
|
19 |
+
[Rr]eleases/
|
20 |
+
x64/
|
21 |
+
x86/
|
22 |
+
bld/
|
23 |
+
[Bb]in/
|
24 |
+
[Oo]bj/
|
25 |
+
[Ll]og/
|
26 |
+
|
27 |
+
# Visual Studio 2015/2017 cache/options directory
|
28 |
+
.vs/
|
29 |
+
# Uncomment if you have tasks that create the project's static files in wwwroot
|
30 |
+
#wwwroot/
|
31 |
+
|
32 |
+
# Visual Studio 2017 auto generated files
|
33 |
+
Generated\ Files/
|
34 |
+
|
35 |
+
# MSTest test Results
|
36 |
+
[Tt]est[Rr]esult*/
|
37 |
+
[Bb]uild[Ll]og.*
|
38 |
+
|
39 |
+
# NUNIT
|
40 |
+
*.VisualState.xml
|
41 |
+
TestResult.xml
|
42 |
+
|
43 |
+
# Build Results of an ATL Project
|
44 |
+
[Dd]ebugPS/
|
45 |
+
[Rr]eleasePS/
|
46 |
+
dlldata.c
|
47 |
+
|
48 |
+
# Benchmark Results
|
49 |
+
BenchmarkDotNet.Artifacts/
|
50 |
+
|
51 |
+
# .NET Core
|
52 |
+
project.lock.json
|
53 |
+
project.fragment.lock.json
|
54 |
+
artifacts/
|
55 |
+
**/Properties/launchSettings.json
|
56 |
+
|
57 |
+
# StyleCop
|
58 |
+
StyleCopReport.xml
|
59 |
+
|
60 |
+
# Files built by Visual Studio
|
61 |
+
*_i.c
|
62 |
+
*_p.c
|
63 |
+
*_i.h
|
64 |
+
*.ilk
|
65 |
+
*.meta
|
66 |
+
*.obj
|
67 |
+
*.iobj
|
68 |
+
*.pch
|
69 |
+
*.pdb
|
70 |
+
*.ipdb
|
71 |
+
*.pgc
|
72 |
+
*.pgd
|
73 |
+
*.rsp
|
74 |
+
*.sbr
|
75 |
+
*.tlb
|
76 |
+
*.tli
|
77 |
+
*.tlh
|
78 |
+
*.tmp
|
79 |
+
*.tmp_proj
|
80 |
+
*.log
|
81 |
+
*.vspscc
|
82 |
+
*.vssscc
|
83 |
+
.builds
|
84 |
+
*.pidb
|
85 |
+
*.svclog
|
86 |
+
*.scc
|
87 |
+
|
88 |
+
# Chutzpah Test files
|
89 |
+
_Chutzpah*
|
90 |
+
|
91 |
+
# Visual C++ cache files
|
92 |
+
ipch/
|
93 |
+
*.aps
|
94 |
+
*.ncb
|
95 |
+
*.opendb
|
96 |
+
*.opensdf
|
97 |
+
*.sdf
|
98 |
+
*.cachefile
|
99 |
+
*.VC.db
|
100 |
+
*.VC.VC.opendb
|
101 |
+
|
102 |
+
# Visual Studio profiler
|
103 |
+
*.psess
|
104 |
+
*.vsp
|
105 |
+
*.vspx
|
106 |
+
*.sap
|
107 |
+
|
108 |
+
# Visual Studio Trace Files
|
109 |
+
*.e2e
|
110 |
+
|
111 |
+
# TFS 2012 Local Workspace
|
112 |
+
$tf/
|
113 |
+
|
114 |
+
# Guidance Automation Toolkit
|
115 |
+
*.gpState
|
116 |
+
|
117 |
+
# ReSharper is a .NET coding add-in
|
118 |
+
_ReSharper*/
|
119 |
+
*.[Rr]e[Ss]harper
|
120 |
+
*.DotSettings.user
|
121 |
+
|
122 |
+
# JustCode is a .NET coding add-in
|
123 |
+
.JustCode
|
124 |
+
|
125 |
+
# TeamCity is a build add-in
|
126 |
+
_TeamCity*
|
127 |
+
|
128 |
+
# DotCover is a Code Coverage Tool
|
129 |
+
*.dotCover
|
130 |
+
|
131 |
+
# AxoCover is a Code Coverage Tool
|
132 |
+
.axoCover/*
|
133 |
+
!.axoCover/settings.json
|
134 |
+
|
135 |
+
# Visual Studio code coverage results
|
136 |
+
*.coverage
|
137 |
+
*.coveragexml
|
138 |
+
|
139 |
+
# NCrunch
|
140 |
+
_NCrunch_*
|
141 |
+
.*crunch*.local.xml
|
142 |
+
nCrunchTemp_*
|
143 |
+
|
144 |
+
# MightyMoose
|
145 |
+
*.mm.*
|
146 |
+
AutoTest.Net/
|
147 |
+
|
148 |
+
# Web workbench (sass)
|
149 |
+
.sass-cache/
|
150 |
+
|
151 |
+
# Installshield output folder
|
152 |
+
[Ee]xpress/
|
153 |
+
|
154 |
+
# DocProject is a documentation generator add-in
|
155 |
+
DocProject/buildhelp/
|
156 |
+
DocProject/Help/*.HxT
|
157 |
+
DocProject/Help/*.HxC
|
158 |
+
DocProject/Help/*.hhc
|
159 |
+
DocProject/Help/*.hhk
|
160 |
+
DocProject/Help/*.hhp
|
161 |
+
DocProject/Help/Html2
|
162 |
+
DocProject/Help/html
|
163 |
+
|
164 |
+
# Click-Once directory
|
165 |
+
publish/
|
166 |
+
|
167 |
+
# Publish Web Output
|
168 |
+
*.[Pp]ublish.xml
|
169 |
+
*.azurePubxml
|
170 |
+
# Note: Comment the next line if you want to checkin your web deploy settings,
|
171 |
+
# but database connection strings (with potential passwords) will be unencrypted
|
172 |
+
*.pubxml
|
173 |
+
*.publishproj
|
174 |
+
|
175 |
+
# Microsoft Azure Web App publish settings. Comment the next line if you want to
|
176 |
+
# checkin your Azure Web App publish settings, but sensitive information contained
|
177 |
+
# in these scripts will be unencrypted
|
178 |
+
PublishScripts/
|
179 |
+
|
180 |
+
# NuGet Packages
|
181 |
+
*.nupkg
|
182 |
+
# The packages folder can be ignored because of Package Restore
|
183 |
+
**/[Pp]ackages/*
|
184 |
+
# except build/, which is used as an MSBuild target.
|
185 |
+
!**/[Pp]ackages/build/
|
186 |
+
# Uncomment if necessary however generally it will be regenerated when needed
|
187 |
+
#!**/[Pp]ackages/repositories.config
|
188 |
+
# NuGet v3's project.json files produces more ignorable files
|
189 |
+
*.nuget.props
|
190 |
+
*.nuget.targets
|
191 |
+
|
192 |
+
# Microsoft Azure Build Output
|
193 |
+
csx/
|
194 |
+
*.build.csdef
|
195 |
+
|
196 |
+
# Microsoft Azure Emulator
|
197 |
+
ecf/
|
198 |
+
rcf/
|
199 |
+
|
200 |
+
# Windows Store app package directories and files
|
201 |
+
AppPackages/
|
202 |
+
BundleArtifacts/
|
203 |
+
Package.StoreAssociation.xml
|
204 |
+
_pkginfo.txt
|
205 |
+
*.appx
|
206 |
+
|
207 |
+
# Visual Studio cache files
|
208 |
+
# files ending in .cache can be ignored
|
209 |
+
*.[Cc]ache
|
210 |
+
# but keep track of directories ending in .cache
|
211 |
+
!*.[Cc]ache/
|
212 |
+
|
213 |
+
# Others
|
214 |
+
ClientBin/
|
215 |
+
~$*
|
216 |
+
*~
|
217 |
+
*.dbmdl
|
218 |
+
*.dbproj.schemaview
|
219 |
+
*.jfm
|
220 |
+
*.pfx
|
221 |
+
*.publishsettings
|
222 |
+
orleans.codegen.cs
|
223 |
+
|
224 |
+
# Including strong name files can present a security risk
|
225 |
+
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
|
226 |
+
#*.snk
|
227 |
+
|
228 |
+
# Since there are multiple workflows, uncomment next line to ignore bower_components
|
229 |
+
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
|
230 |
+
#bower_components/
|
231 |
+
|
232 |
+
# RIA/Silverlight projects
|
233 |
+
Generated_Code/
|
234 |
+
|
235 |
+
# Backup & report files from converting an old project file
|
236 |
+
# to a newer Visual Studio version. Backup files are not needed,
|
237 |
+
# because we have git ;-)
|
238 |
+
_UpgradeReport_Files/
|
239 |
+
Backup*/
|
240 |
+
UpgradeLog*.XML
|
241 |
+
UpgradeLog*.htm
|
242 |
+
ServiceFabricBackup/
|
243 |
+
*.rptproj.bak
|
244 |
+
|
245 |
+
# SQL Server files
|
246 |
+
*.mdf
|
247 |
+
*.ldf
|
248 |
+
*.ndf
|
249 |
+
|
250 |
+
# Business Intelligence projects
|
251 |
+
*.rdl.data
|
252 |
+
*.bim.layout
|
253 |
+
*.bim_*.settings
|
254 |
+
*.rptproj.rsuser
|
255 |
+
|
256 |
+
# Microsoft Fakes
|
257 |
+
FakesAssemblies/
|
258 |
+
|
259 |
+
# GhostDoc plugin setting file
|
260 |
+
*.GhostDoc.xml
|
261 |
+
|
262 |
+
# Node.js Tools for Visual Studio
|
263 |
+
.ntvs_analysis.dat
|
264 |
+
node_modules/
|
265 |
+
|
266 |
+
# Visual Studio 6 build log
|
267 |
+
*.plg
|
268 |
+
|
269 |
+
# Visual Studio 6 workspace options file
|
270 |
+
*.opt
|
271 |
+
|
272 |
+
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
|
273 |
+
*.vbw
|
274 |
+
|
275 |
+
# Visual Studio LightSwitch build output
|
276 |
+
**/*.HTMLClient/GeneratedArtifacts
|
277 |
+
**/*.DesktopClient/GeneratedArtifacts
|
278 |
+
**/*.DesktopClient/ModelManifest.xml
|
279 |
+
**/*.Server/GeneratedArtifacts
|
280 |
+
**/*.Server/ModelManifest.xml
|
281 |
+
_Pvt_Extensions
|
282 |
+
|
283 |
+
# Paket dependency manager
|
284 |
+
.paket/paket.exe
|
285 |
+
paket-files/
|
286 |
+
|
287 |
+
# FAKE - F# Make
|
288 |
+
.fake/
|
289 |
+
|
290 |
+
# JetBrains Rider
|
291 |
+
.idea/
|
292 |
+
*.sln.iml
|
293 |
+
|
294 |
+
# CodeRush
|
295 |
+
.cr/
|
296 |
+
|
297 |
+
# Python Tools for Visual Studio (PTVS)
|
298 |
+
__pycache__/
|
299 |
*.pyc
|
300 |
+
|
301 |
+
# Cake - Uncomment if you are using it
|
302 |
+
# tools/**
|
303 |
+
# !tools/packages.config
|
304 |
+
|
305 |
+
# Tabs Studio
|
306 |
+
*.tss
|
307 |
+
|
308 |
+
# Telerik's JustMock configuration file
|
309 |
+
*.jmconfig
|
310 |
+
|
311 |
+
# BizTalk build output
|
312 |
+
*.btp.cs
|
313 |
+
*.btm.cs
|
314 |
+
*.odx.cs
|
315 |
+
*.xsd.cs
|
316 |
+
|
317 |
+
# OpenCover UI analysis results
|
318 |
+
OpenCover/
|
319 |
+
|
320 |
+
# Azure Stream Analytics local run output
|
321 |
+
ASALocalRun/
|
322 |
+
|
323 |
+
# MSBuild Binary and Structured Log
|
324 |
+
*.binlog
|
325 |
+
|
326 |
+
# NVidia Nsight GPU debugger configuration file
|
327 |
+
*.nvuser
|
328 |
+
|
329 |
+
# MFractors (Xamarin productivity tool) working folder
|
330 |
+
.mfractor/
|
.gitmodules
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[submodule "Python/pybind11"]
|
2 |
+
path = Python/pybind11
|
3 |
+
url = https://github.com/pybind/pybind11.git
|
4 |
+
branch = v2.10
|
5 |
+
|
6 |
+
[submodule "Python/gpgmm"]
|
7 |
+
path = Python/gpgmm
|
8 |
+
url = https://github.com/intel/gpgmm.git
|
9 |
+
branch = main
|
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
repos:
|
2 |
+
- repo: https://github.com/pre-commit/mirrors-mypy
|
3 |
+
rev: 'v1.3.0'
|
4 |
+
hooks:
|
5 |
+
- id: mypy
|
6 |
+
args: ["--config-file=mypy.ini", "--no-site-packages"]
|
7 |
+
|
8 |
+
- repo: https://github.com/psf/black
|
9 |
+
rev: 22.8.0
|
10 |
+
hooks:
|
11 |
+
- id: black
|
12 |
+
args: [--line-length=100, --exclude=""]
|
13 |
+
|
14 |
+
# this is not technically always safe but usually is
|
15 |
+
# use comments `# isort: off` and `# isort: on` to disable/re-enable isort
|
16 |
+
- repo: https://github.com/pycqa/isort
|
17 |
+
rev: 5.12.0
|
18 |
+
hooks:
|
19 |
+
- id: isort
|
20 |
+
args: [--line-length=100, --profile=black]
|
21 |
+
|
22 |
+
# this is slightly dangerous because python imports have side effects
|
23 |
+
# and this tool removes unused imports, which may be providing
|
24 |
+
# necessary side effects for the code to run
|
25 |
+
- repo: https://github.com/PyCQA/autoflake
|
26 |
+
rev: v1.6.1
|
27 |
+
hooks:
|
28 |
+
- id: autoflake
|
29 |
+
args:
|
30 |
+
- "--in-place"
|
31 |
+
- "--expand-star-imports"
|
32 |
+
- "--remove-duplicate-keys"
|
33 |
+
- "--remove-unused-variables"
|
34 |
+
- "--remove-all-unused-imports"
|
35 |
+
exclude: "evals/__init__.py"
|
CHANGELOG.md
ADDED
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Changelog
|
2 |
+
|
3 |
+
All notable changes to this project will be documented in this file.
|
4 |
+
|
5 |
+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
6 |
+
|
7 |
+
<!--
|
8 |
+
These are the section headers that we use:
|
9 |
+
* "Added" for new features.
|
10 |
+
* "Changed" for changes in existing functionality.
|
11 |
+
* "Deprecated" for soon-to-be removed features.
|
12 |
+
* "Removed" for now removed features.
|
13 |
+
* "Fixed" for any bug fixes.
|
14 |
+
* "Security" in case of vulnerabilities.
|
15 |
+
-->
|
16 |
+
|
17 |
+
## [Unreleased]()
|
18 |
+
|
19 |
+
## [1.14.1](https://github.com/argilla-io/argilla/compare/v1.14.0...v1.14.1)
|
20 |
+
|
21 |
+
### Fixed
|
22 |
+
|
23 |
+
- Fixed PostgreSQL database not being updated after `begin_nested` because of missing `commit` ([#3567](https://github.com/argilla-io/argilla/pull/3567)).
|
24 |
+
|
25 |
+
## [1.14.0](https://github.com/argilla-io/argilla/compare/v1.13.3...v1.14.0)
|
26 |
+
|
27 |
+
### Added
|
28 |
+
|
29 |
+
- Added `PATCH /api/v1/fields/{field_id}` endpoint to update the field title and markdown settings ([#3421](https://github.com/argilla-io/argilla/pull/3421)).
|
30 |
+
- Added `PATCH /api/v1/datasets/{dataset_id}` endpoint to update dataset name and guidelines ([#3402](https://github.com/argilla-io/argilla/pull/3402)).
|
31 |
+
- Added `PATCH /api/v1/questions/{question_id}` endpoint to update question title, description and some settings (depending on the type of question) ([#3477](https://github.com/argilla-io/argilla/pull/3477)).
|
32 |
+
- Added `DELETE /api/v1/records/{record_id}` endpoint to remove a record given its ID ([#3337](https://github.com/argilla-io/argilla/pull/3337)).
|
33 |
+
- Added `pull` method in `RemoteFeedbackDataset` (a `FeedbackDataset` pushed to Argilla) to pull all the records from it and return it as a local copy as a `FeedbackDataset` ([#3465](https://github.com/argilla-io/argilla/pull/3465)).
|
34 |
+
- Added `delete` method in `RemoteFeedbackDataset` (a `FeedbackDataset` pushed to Argilla) ([#3512](https://github.com/argilla-io/argilla/pull/3512)).
|
35 |
+
- Added `delete_records` method in `RemoteFeedbackDataset`, and `delete` method in `RemoteFeedbackRecord` to delete records from Argilla ([#3526](https://github.com/argilla-io/argilla/pull/3526)).
|
36 |
+
|
37 |
+
### Changed
|
38 |
+
|
39 |
+
- Improved efficiency of weak labeling when dataset contains vectors ([#3444](https://github.com/argilla-io/argilla/pull/3444)).
|
40 |
+
- Added `ArgillaDatasetMixin` to detach the Argilla-related functionality from the `FeedbackDataset` ([#3427](https://github.com/argilla-io/argilla/pull/3427))
|
41 |
+
- Moved `FeedbackDataset`-related `pydantic.BaseModel` schemas to `argilla.client.feedback.schemas` instead, to be better structured and more scalable and maintainable ([#3427](https://github.com/argilla-io/argilla/pull/3427))
|
42 |
+
- Update CLI to use database async connection ([#3450](https://github.com/argilla-io/argilla/pull/3450)).
|
43 |
+
- Limit rating questions values to the positive range [1, 10] ([#3451](https://github.com/argilla-io/argilla/issues/3451)).
|
44 |
+
- Updated `POST /api/users` endpoint to be able to provide a list of workspace names to which the user should be linked to ([#3462](https://github.com/argilla-io/argilla/pull/3462)).
|
45 |
+
- Updated Python client `User.create` method to be able to provide a list of workspace names to which the user should be linked to ([#3462](https://github.com/argilla-io/argilla/pull/3462)).
|
46 |
+
- Updated `GET /api/v1/me/datasets/{dataset_id}/records` endpoint to allow getting records matching one of the response statuses provided via query param ([#3359](https://github.com/argilla-io/argilla/pull/3359)).
|
47 |
+
- Updated `POST /api/v1/me/datasets/{dataset_id}/records` endpoint to allow searching records matching one of the response statuses provided via query param ([#3359](https://github.com/argilla-io/argilla/pull/3359)).
|
48 |
+
- Updated `SearchEngine.search` method to allow searching records matching one of the response statuses provided ([#3359](https://github.com/argilla-io/argilla/pull/3359)).
|
49 |
+
- After calling `FeedbackDataset.push_to_argilla`, the methods `FeedbackDataset.add_records` and `FeedbackRecord.set_suggestions` will automatically call Argilla with no need of calling `push_to_argilla` explicitly ([#3465](https://github.com/argilla-io/argilla/pull/3465)).
|
50 |
+
- Now calling `FeedbackDataset.push_to_huggingface` dumps the `responses` as a `List[Dict[str, Any]]` instead of `Sequence` to make it more readable via 🤗`datasets` ([#3539](https://github.com/argilla-io/argilla/pull/3539)).
|
51 |
+
|
52 |
+
### Fixed
|
53 |
+
|
54 |
+
- Fixed issue with `bool` values and `default` from Jinja2 while generating the HuggingFace `DatasetCard` from `argilla_template.md` ([#3499](https://github.com/argilla-io/argilla/pull/3499)).
|
55 |
+
- Fixed `DatasetConfig.from_yaml` which was failing when calling `FeedbackDataset.from_huggingface` as the UUIDs cannot be deserialized automatically by `PyYAML`, so UUIDs are neither dumped nor loaded anymore ([#3502](https://github.com/argilla-io/argilla/pull/3502)).
|
56 |
+
- Fixed an issue that didn't allow the Argilla server to work behind a proxy ([#3543](https://github.com/argilla-io/argilla/pull/3543)).
|
57 |
+
- `TextClassificationSettings` and `TokenClassificationSettings` labels are properly parsed to strings both in the Python client and in the backend endpoint ([#3495](https://github.com/argilla-io/argilla/issues/3495)).
|
58 |
+
- Fixed `PUT /api/v1/datasets/{dataset_id}/publish` to check whether at least one field and question has `required=True` ([#3511](https://github.com/argilla-io/argilla/pull/3511)).
|
59 |
+
- Fixed `FeedbackDataset.from_huggingface` as `suggestions` were being lost when there were no `responses` ([#3539](https://github.com/argilla-io/argilla/pull/3539)).
|
60 |
+
- Fixed `QuestionSchema` and `FieldSchema` not validating `name` attribute ([#3550](https://github.com/argilla-io/argilla/pull/3550)).
|
61 |
+
|
62 |
+
### Deprecated
|
63 |
+
|
64 |
+
- After calling `FeedbackDataset.push_to_argilla`, calling `push_to_argilla` again won't do anything since the dataset is already pushed to Argilla ([#3465](https://github.com/argilla-io/argilla/pull/3465)).
|
65 |
+
- After calling `FeedbackDataset.push_to_argilla`, calling `fetch_records` won't do anything since the records are lazily fetched from Argilla ([#3465](https://github.com/argilla-io/argilla/pull/3465)).
|
66 |
+
- After calling `FeedbackDataset.push_to_argilla`, the Argilla ID is no longer stored in the attribute/property `argilla_id` but in `id` instead ([#3465](https://github.com/argilla-io/argilla/pull/3465)).
|
67 |
+
|
68 |
+
## [1.13.3](https://github.com/argilla-io/argilla/compare/v1.13.2...v1.13.3)
|
69 |
+
|
70 |
+
### Fixed
|
71 |
+
|
72 |
+
- Fixed `ModuleNotFoundError` caused because the `argilla.utils.telemetry` module used in the `ArgillaTrainer` was importing an optional dependency not installed by default ([#3471](https://github.com/argilla-io/argilla/pull/3471)).
|
73 |
+
- Fixed `ImportError` caused because the `argilla.client.feedback.config` module was importing `pyyaml` optional dependency not installed by default ([#3471](https://github.com/argilla-io/argilla/pull/3471)).
|
74 |
+
|
75 |
+
## [1.13.2](https://github.com/argilla-io/argilla/compare/v1.13.1...v1.13.2)
|
76 |
+
|
77 |
+
### Fixed
|
78 |
+
|
79 |
+
- The `suggestion_type_enum` ENUM data type created in PostgreSQL didn't have any value ([#3445](https://github.com/argilla-io/argilla/pull/3445)).
|
80 |
+
|
81 |
+
## [1.13.1](https://github.com/argilla-io/argilla/compare/v1.13.0...v1.13.1)
|
82 |
+
|
83 |
+
### Fixed
|
84 |
+
|
85 |
+
- Fix database migration for PostgreSQL (See [#3438](https://github.com/argilla-io/argilla/pull/3438))
|
86 |
+
|
87 |
+
## [1.13.0](https://github.com/argilla-io/argilla/compare/v1.12.1...v1.13.0)
|
88 |
+
|
89 |
+
### Added
|
90 |
+
|
91 |
+
- Added `GET /api/v1/users/{user_id}/workspaces` endpoint to list the workspaces to which a user belongs ([#3308](https://github.com/argilla-io/argilla/pull/3308) and [#3343](https://github.com/argilla-io/argilla/pull/3343)).
|
92 |
+
- Added `HuggingFaceDatasetMixin` for internal usage, to detach the `FeedbackDataset` integrations from the class itself, and use Mixins instead ([#3326](https://github.com/argilla-io/argilla/pull/3326)).
|
93 |
+
- Added `GET /api/v1/records/{record_id}/suggestions` API endpoint to get the list of suggestions for the responses associated to a record ([#3304](https://github.com/argilla-io/argilla/pull/3304)).
|
94 |
+
- Added `POST /api/v1/records/{record_id}/suggestions` API endpoint to create a suggestion for a response associated to a record ([#3304](https://github.com/argilla-io/argilla/pull/3304)).
|
95 |
+
- Added support for `RankingQuestionStrategy`, `RankingQuestionUnification` and the `.for_text_classification` method for the `TrainingTaskMapping` ([#3364](https://github.com/argilla-io/argilla/pull/3364))
|
96 |
+
- Added `PUT /api/v1/records/{record_id}/suggestions` API endpoint to create or update a suggestion for a response associated to a record ([#3304](https://github.com/argilla-io/argilla/pull/3304) & [3391](https://github.com/argilla-io/argilla/pull/3391)).
|
97 |
+
- Added `suggestions` attribute to `FeedbackRecord`, and allow adding and retrieving suggestions from the Python client ([#3370](https://github.com/argilla-io/argilla/pull/3370))
|
98 |
+
- Added `allowed_for_roles` Python decorator to check whether the current user has the required role to access the decorated function/method for `User` and `Workspace` ([#3383](https://github.com/argilla-io/argilla/pull/3383))
|
99 |
+
- Added API and Python Client support for workspace deletion (Closes [#3260](https://github.com/argilla-io/argilla/issues/3260))
|
100 |
+
- Added `GET /api/v1/me/workspaces` endpoint to list the workspaces of the current active user ([#3390](https://github.com/argilla-io/argilla/pull/3390))
|
101 |
+
|
102 |
+
### Changed
|
103 |
+
|
104 |
+
- Updated output payload for `GET /api/v1/datasets/{dataset_id}/records`, `GET /api/v1/me/datasets/{dataset_id}/records`, `POST /api/v1/me/datasets/{dataset_id}/records/search` endpoints to include the suggestions of the records based on the value of the `include` query parameter ([#3304](https://github.com/argilla-io/argilla/pull/3304)).
|
105 |
+
- Updated `POST /api/v1/datasets/{dataset_id}/records` input payload to add suggestions ([#3304](https://github.com/argilla-io/argilla/pull/3304)).
|
106 |
+
- The `POST /api/datasets/:dataset-id/:task/bulk` endpoints don't create the dataset if does not exists (Closes [#3244](https://github.com/argilla-io/argilla/issues/3244))
|
107 |
+
- Added Telemetry support for `ArgillaTrainer` (closes [#3325](https://github.com/argilla-io/argilla/issues/3325))
|
108 |
+
- `User.workspaces` is no longer an attribute but a property, and is calling `list_user_workspaces` to list all the workspace names for a given user ID ([#3334](https://github.com/argilla-io/argilla/pull/3334))
|
109 |
+
- Renamed `FeedbackDatasetConfig` to `DatasetConfig` and export/import from YAML as default instead of JSON (just used internally on `push_to_huggingface` and `from_huggingface` methods of `FeedbackDataset`) ([#3326](https://github.com/argilla-io/argilla/pull/3326)).
|
110 |
+
- The protected metadata fields support other than textual info - existing datasets must be reindex. See [docs](https://docs.argilla.io/en/latest/getting_started/installation/configurations/database_migrations.html#elasticsearch) for more detail (Closes [#3332](https://github.com/argilla-io/argilla/issues/3332)).
|
111 |
+
- Updated `Dockerfile` parent image from `python:3.9.16-slim` to `python:3.10.12-slim` ([#3425](https://github.com/argilla-io/argilla/pull/3425)).
|
112 |
+
- Updated `quickstart.Dockerfile` parent image from `elasticsearch:8.5.3` to `argilla/argilla-server:${ARGILLA_VERSION}` ([#3425](https://github.com/argilla-io/argilla/pull/3425)).
|
113 |
+
|
114 |
+
### Removed
|
115 |
+
|
116 |
+
- Removed support to non-prefixed environment variables. All valid env vars start with `ARGILLA_` (See [#3392](https://github.com/argilla-io/argilla/pull/3392)).
|
117 |
+
|
118 |
+
### Fixed
|
119 |
+
|
120 |
+
- Fixed `GET /api/v1/me/datasets/{dataset_id}/records` endpoint returning always the responses for the records even if `responses` was not provided via the `include` query parameter ([#3304](https://github.com/argilla-io/argilla/pull/3304)).
|
121 |
+
- Values for protected metadata fields are not truncated (Closes [#3331](https://github.com/argilla-io/argilla/issues/3331)).
|
122 |
+
- Big number ids are properly rendered in UI (Closes [#3265](https://github.com/argilla-io/argilla/issues/3265))
|
123 |
+
- Fixed `ArgillaDatasetCard` to include the values/labels for all the existing questions ([#3366](https://github.com/argilla-io/argilla/pull/3265))
|
124 |
+
|
125 |
+
### Deprecated
|
126 |
+
|
127 |
+
- Integer support for record id in text classification, token classification and text2text datasets.
|
128 |
+
|
129 |
+
## [1.12.1](https://github.com/argilla-io/argilla/compare/v1.12.0...v1.12.1)
|
130 |
+
|
131 |
+
### Fixed
|
132 |
+
|
133 |
+
- Using `rg.init` with default `argilla` user skips setting the default workspace if not available. (Closes [#3340](https://github.com/argilla-io/argilla/issues/3340))
|
134 |
+
- Resolved wrong import structure for `ArgillaTrainer` and `TrainingTaskMapping` (Closes [#3345](https://github.com/argilla-io/argilla/issues/3345))
|
135 |
+
- Pin pydantic dependency to version < 2 (Closes [3348](https://github.com/argilla-io/argilla/issues/3348))
|
136 |
+
|
137 |
+
## [1.12.0](https://github.com/argilla-io/argilla/compare/v1.11.0...v1.12.0)
|
138 |
+
|
139 |
+
### Added
|
140 |
+
|
141 |
+
- Added `RankingQuestionSettings` class allowing to create ranking questions in the API using `POST /api/v1/datasets/{dataset_id}/questions` endpoint ([#3232](https://github.com/argilla-io/argilla/pull/3232))
|
142 |
+
- Added `RankingQuestion` in the Python client to create ranking questions ([#3275](https://github.com/argilla-io/argilla/issues/3275)).
|
143 |
+
- Added `Ranking` component in feedback task question form ([#3177](https://github.com/argilla-io/argilla/pull/3177) & [#3246](https://github.com/argilla-io/argilla/pull/3246)).
|
144 |
+
- Added `FeedbackDataset.prepare_for_training` method for generating a framework-specific dataset with the responses provided for `RatingQuestion`, `LabelQuestion` and `MultiLabelQuestion` ([#3151](https://github.com/argilla-io/argilla/pull/3151)).
|
145 |
+
- Added `ArgillaSpaCyTransformersTrainer` class for supporting the training with `spacy-transformers` ([#3256](https://github.com/argilla-io/argilla/pull/3256)).
|
146 |
+
|
147 |
+
#### Docs
|
148 |
+
|
149 |
+
- Added instructions for how to run the Argilla frontend in the developer docs ([#3314](https://github.com/argilla-io/argilla/pull/3314)).
|
150 |
+
|
151 |
+
### Changed
|
152 |
+
|
153 |
+
- All docker related files have been moved into the `docker` folder ([#3053](https://github.com/argilla-io/argilla/pull/3053)).
|
154 |
+
- `release.Dockerfile` have been renamed to `Dockerfile` ([#3133](https://github.com/argilla-io/argilla/pull/3133)).
|
155 |
+
- Updated `rg.load` function to raise a `ValueError` with an explanatory message for the cases in which the user tries to use the function to load a `FeedbackDataset` ([#3289](https://github.com/argilla-io/argilla/pull/3289)).
|
156 |
+
- Updated `ArgillaSpaCyTrainer` to allow re-using `tok2vec` ([#3256](https://github.com/argilla-io/argilla/pull/3256)).
|
157 |
+
|
158 |
+
### Fixed
|
159 |
+
|
160 |
+
- Check available workspaces on Argilla on `rg.set_workspace` (Closes [#3262](https://github.com/argilla-io/argilla/issues/3262))
|
161 |
+
|
162 |
+
## [1.11.0](https://github.com/argilla-io/argilla/compare/v1.10.0...v1.11.0)
|
163 |
+
|
164 |
+
### Fixed
|
165 |
+
|
166 |
+
- Replaced `np.float` alias by `float` to avoid `AttributeError` when using `find_label_errors` function with `numpy>=1.24.0` ([#3214](https://github.com/argilla-io/argilla/pull/3214)).
|
167 |
+
- Fixed `format_as("datasets")` when no responses or optional responses in `FeedbackRecord`, to set their value to what 🤗 Datasets expects instead of just `None` ([#3224](https://github.com/argilla-io/argilla/pull/3224)).
|
168 |
+
- Fixed `push_to_huggingface()` when `generate_card=True` (default behaviour), as we were passing a sample record to the `ArgillaDatasetCard` class, and `UUID`s introduced in 1.10.0 ([#3192](https://github.com/argilla-io/argilla/pull/3192)), are not JSON-serializable ([#3231](https://github.com/argilla-io/argilla/pull/3231)).
|
169 |
+
- Fixed `from_argilla` and `push_to_argilla` to ensure consistency on both field and question re-construction, and to ensure `UUID`s are properly serialized as `str`, respectively ([#3234](https://github.com/argilla-io/argilla/pull/3234)).
|
170 |
+
- Refactored usage of `import argilla as rg` to clarify package navigation ([#3279](https://github.com/argilla-io/argilla/pull/3279)).
|
171 |
+
|
172 |
+
#### Docs
|
173 |
+
|
174 |
+
- Fixed URLs in Weak Supervision with Sentence Transformers tutorial [#3243](https://github.com/argilla-io/argilla/pull/3243).
|
175 |
+
- Fixed library buttons' formatting on Tutorials page ([#3255](https://github.com/argilla-io/argilla/pull/3255)).
|
176 |
+
- Modified styling of error code outputs in notebooks ([#3270](https://github.com/argilla-io/argilla/pull/3270)).
|
177 |
+
- Added ElasticSearch and OpenSearch versions ([#3280](https://github.com/argilla-io/argilla/pull/3280)).
|
178 |
+
- Removed template notebook from table of contents ([#3271](https://github.com/argilla-io/argilla/pull/3271)).
|
179 |
+
- Fixed tutorials with `pip install argilla` to not use older versions of the package ([#3282](https://github.com/argilla-io/argilla/pull/3282)).
|
180 |
+
|
181 |
+
### Added
|
182 |
+
|
183 |
+
- Added `metadata` attribute to the `Record` of the `FeedbackDataset` ([#3194](https://github.com/argilla-io/argilla/pull/3194))
|
184 |
+
- New `users update` command to update the role for an existing user ([#3188](https://github.com/argilla-io/argilla/pull/3188))
|
185 |
+
- New `Workspace` class to allow users manage their Argilla workspaces and the users assigned to those workspaces via the Python client ([#3180](https://github.com/argilla-io/argilla/pull/3180))
|
186 |
+
- Added `User` class to let users manage their Argilla users via the Python client ([#3169](https://github.com/argilla-io/argilla/pull/3169)).
|
187 |
+
- Added an option to display `tqdm` progress bar to `FeedbackDataset.push_to_argilla` when looping over the records to upload ([#3233](https://github.com/argilla-io/argilla/pull/3233)).
|
188 |
+
|
189 |
+
### Changed
|
190 |
+
|
191 |
+
- The role system now support three different roles `owner`, `admin` and `annotator` ([#3104](https://github.com/argilla-io/argilla/pull/3104))
|
192 |
+
- `admin` role is scoped to workspace-level operations ([#3115](https://github.com/argilla-io/argilla/pull/3115))
|
193 |
+
- The `owner` user is created among the default pool of users in the quickstart, and the default user in the server has now `owner` role ([#3248](https://github.com/argilla-io/argilla/pull/3248)), reverting ([#3188](https://github.com/argilla-io/argilla/pull/3188)).
|
194 |
+
|
195 |
+
### Deprecated
|
196 |
+
|
197 |
+
- As of Python 3.7 end-of-life (EOL) on 2023-06-27, Argilla will no longer support Python 3.7 ([#3188](https://github.com/argilla-io/argilla/pull/3188)). More information at https://peps.python.org/pep-0537/
|
198 |
+
|
199 |
+
## [1.10.0](https://github.com/argilla-io/argilla/compare/v1.9.0...v1.10.0)
|
200 |
+
|
201 |
+
### Added
|
202 |
+
|
203 |
+
- Added search component for feedback datasets ([#3138](https://github.com/argilla-io/argilla/pull/3138))
|
204 |
+
- Added markdown support for feedback dataset guidelines ([#3153](https://github.com/argilla-io/argilla/pull/3153))
|
205 |
+
- Added Train button for feedback datasets ([#3170](https://github.com/argilla-io/argilla/pull/3170))
|
206 |
+
|
207 |
+
### Changed
|
208 |
+
|
209 |
+
- Updated `SearchEngine` and `POST /api/v1/me/datasets/{dataset_id}/records/search` to return the `total` number of records matching the search query ([#3166](https://github.com/argilla-io/argilla/pull/3166))
|
210 |
+
|
211 |
+
### Fixed
|
212 |
+
|
213 |
+
- Replaced Enum for string value in URLs for client API calls (Closes [#3149](https://github.com/argilla-io/argilla/issues/3149))
|
214 |
+
- Resolve breaking issue with `ArgillaSpanMarkerTrainer` for Named Entity Recognition with `span_marker` v1.1.x onwards.
|
215 |
+
- Move `ArgillaDatasetCard` import under `@requires_version` decorator, so that the `ImportError` on `huggingface_hub` is handled properly ([#3174](https://github.com/argilla-io/argilla/pull/3174))
|
216 |
+
- Allow flow `FeedbackDataset.from_argilla` -> `FeedbackDataset.push_to_argilla` under different dataset names and/or workspaces ([#3192](https://github.com/argilla-io/argilla/issues/3192))
|
217 |
+
|
218 |
+
#### Docs
|
219 |
+
|
220 |
+
- Resolved typos in the docs ([#3240](https://github.com/argilla-io/argilla/pull/3240)).
|
221 |
+
- Fixed mention of master branch ([#3254](https://github.com/argilla-io/argilla/pull/3254)).
|
222 |
+
|
223 |
+
|
224 |
+
## [1.9.0](https://github.com/argilla-io/argilla/compare/v1.8.0...v1.9.0)
|
225 |
+
|
226 |
+
### Added
|
227 |
+
|
228 |
+
- Added boolean `use_markdown` property to `TextFieldSettings` model.
|
229 |
+
- Added boolean `use_markdown` property to `TextQuestionSettings` model.
|
230 |
+
- Added new status `draft` for the `Response` model.
|
231 |
+
- Added `LabelSelectionQuestionSettings` class allowing to create label selection (single-choice) questions in the API ([#3005](https://github.com/argilla-io/argilla/pull/3005))
|
232 |
+
- Added `MultiLabelSelectionQuestionSettings` class allowing to create multi-label selection (multi-choice) questions in the API ([#3010](https://github.com/argilla-io/argilla/pull/3010)).
|
233 |
+
- Added `POST /api/v1/me/datasets/{dataset_id}/records/search` endpoint ([#3068](https://github.com/argilla-io/argilla/pull/3068)).
|
234 |
+
- Added new components in feedback task Question form: MultiLabel ([#3064](https://github.com/argilla-io/argilla/pull/3064)) and SingleLabel ([#3016](https://github.com/argilla-io/argilla/pull/3016)).
|
235 |
+
- Added docstrings to the `pydantic.BaseModel`s defined at `argilla/client/feedback/schemas.py` ([#3137](https://github.com/argilla-io/argilla/pull/3137))
|
236 |
+
- Added the information about executing tests in the developer documentation ([#3143]).
|
237 |
+
|
238 |
+
### Changed
|
239 |
+
|
240 |
+
- Updated `GET /api/v1/me/datasets/:dataset_id/metrics` output payload to include the count of responses with `draft` status.
|
241 |
+
- Added `LabelSelectionQuestionSettings` class allowing to create label selection (single-choice) questions in the API.
|
242 |
+
- Added `MultiLabelSelectionQuestionSettings` class allowing to create multi-label selection (multi-choice) questions in the API.
|
243 |
+
- Database setup for unit tests. Now the unit tests use a different database than the one used by the local Argilla server (Closes [#2987](https://github.com/argilla-io/argilla/issues/2987)).
|
244 |
+
- Updated `alembic` setup to be able to autogenerate revision/migration scripts using SQLAlchemy metadata from Argilla server models ([#3044](https://github.com/argilla-io/argilla/pull/3044))
|
245 |
+
- Improved `DatasetCard` generation on `FeedbackDataset.push_to_huggingface` when `generate_card=True`, following the official HuggingFace Hub template, but suited to `FeedbackDataset`s from Argilla ([#3110](https://github.com/argilla-io/argilla/pull/3100))
|
246 |
+
|
247 |
+
### Fixed
|
248 |
+
|
249 |
+
- Disallow `fields` and `questions` in `FeedbackDataset` with the same name ([#3126]).
|
250 |
+
- Fixed broken links in the documentation and updated the development branch name from `development` to `develop` ([#3145]).
|
251 |
+
|
252 |
+
[#3126]: https://github.com/argilla-io/argilla/pull/3126
|
253 |
+
|
254 |
+
## [1.8.0](https://github.com/argilla-io/argilla/compare/v1.7.0...v1.8.0)
|
255 |
+
|
256 |
+
### Added
|
257 |
+
|
258 |
+
- `/api/v1/datasets` new endpoint to list and create datasets ([#2615]).
|
259 |
+
- `/api/v1/datasets/{dataset_id}` new endpoint to get and delete datasets ([#2615]).
|
260 |
+
- `/api/v1/datasets/{dataset_id}/publish` new endpoint to publish a dataset ([#2615]).
|
261 |
+
- `/api/v1/datasets/{dataset_id}/questions` new endpoint to list and create dataset questions ([#2615])
|
262 |
+
- `/api/v1/datasets/{dataset_id}/fields` new endpoint to list and create dataset fields ([#2615])
|
263 |
+
- `/api/v1/datasets/{dataset_id}/questions/{question_id}` new endpoint to delete a dataset questions ([#2615])
|
264 |
+
- `/api/v1/datasets/{dataset_id}/fields/{field_id}` new endpoint to delete a dataset field ([#2615])
|
265 |
+
- `/api/v1/workspaces/{workspace_id}` new endpoint to get workspaces by id ([#2615])
|
266 |
+
- `/api/v1/responses/{response_id}` new endpoint to update and delete a response ([#2615])
|
267 |
+
- `/api/v1/datasets/{dataset_id}/records` new endpoint to create and list dataset records ([#2615])
|
268 |
+
- `/api/v1/me/datasets` new endpoint to list user visible datasets ([#2615])
|
269 |
+
- `/api/v1/me/dataset/{dataset_id}/records` new endpoint to list dataset records with user responses ([#2615])
|
270 |
+
- `/api/v1/me/datasets/{dataset_id}/metrics` new endpoint to get the dataset user metrics ([#2615])
|
271 |
+
- `/api/v1/me/records/{record_id}/responses` new endpoint to create record user responses ([#2615])
|
272 |
+
- showing new feedback task datasets in datasets list ([#2719])
|
273 |
+
- new page for feedback task ([#2680])
|
274 |
+
- show feedback task metrics ([#2822])
|
275 |
+
- user can delete dataset in dataset settings page ([#2792])
|
276 |
+
- Support for `FeedbackDataset` in Python client (parent PR [#2615], and nested PRs: [#2949], [#2827], [#2943], [#2945], [#2962], and [#3003])
|
277 |
+
- Integration with the HuggingFace Hub ([#2949])
|
278 |
+
- Added `ArgillaPeftTrainer` for text and token classification [#2854](https://github.com/argilla-io/argilla/issues/2854)
|
279 |
+
- Added `predict_proba()` method to `ArgillaSetFitTrainer`
|
280 |
+
- Added `ArgillaAutoTrainTrainer` for Text Classification [#2664](https://github.com/argilla-io/argilla/issues/2664)
|
281 |
+
- New `database revisions` command showing database revisions info
|
282 |
+
|
283 |
+
[#2615]: https://github.com/argilla-io/argilla/issues/2615
|
284 |
+
|
285 |
+
### Fixes
|
286 |
+
|
287 |
+
- Avoid rendering html for invalid html strings in Text2text ([#2911](https://github.com/argilla-io/argilla/issues/2911))
|
288 |
+
|
289 |
+
### Changed
|
290 |
+
|
291 |
+
- The `database migrate` command accepts a `--revision` param to provide specific revision id
|
292 |
+
- `tokens_length` metrics function returns empty data ([#3045])
|
293 |
+
- `token_length` metrics function returns empty data ([#3045])
|
294 |
+
- `mention_length` metrics function returns empty data ([#3045])
|
295 |
+
- `entity_density` metrics function returns empty data ([#3045])
|
296 |
+
|
297 |
+
### Deprecated
|
298 |
+
|
299 |
+
- Using Argilla with Python 3.7 runtime is deprecated and support will be removed from version 1.11.0 ([#2902](https://github.com/argilla-io/argilla/issues/2902))
|
300 |
+
- `tokens_length` metrics function has been deprecated and will be removed in 1.10.0 ([#3045])
|
301 |
+
- `token_length` metrics function has been deprecated and will be removed in 1.10.0 ([#3045])
|
302 |
+
- `mention_length` metrics function has been deprecated and will be removed in 1.10.0 ([#3045])
|
303 |
+
- `entity_density` metrics function has been deprecated and will be removed in 1.10.0 ([#3045])
|
304 |
+
|
305 |
+
### Removed
|
306 |
+
|
307 |
+
- Removed mention `density`, `tokens_length` and `chars_length` metrics from token classification metrics storage ([#3045])
|
308 |
+
- Removed token `char_start`, `char_end`, `tag`, and `score` metrics from token classification metrics storage ([#3045])
|
309 |
+
- Removed tags-related metrics from token classification metrics storage ([#3045])
|
310 |
+
|
311 |
+
[#3045]: https://github.com/argilla-io/argilla/pull/3045
|
312 |
+
|
313 |
+
## [1.7.0](https://github.com/argilla-io/argilla/compare/v1.6.0...v1.7.0)
|
314 |
+
|
315 |
+
### Added
|
316 |
+
|
317 |
+
- add `max_retries` and `num_threads` parameters to `rg.log` to run data logging requests concurrently with backoff retry policy. See [#2458](https://github.com/argilla-io/argilla/issues/2458) and [#2533](https://github.com/argilla-io/argilla/issues/2533)
|
318 |
+
- `rg.load` accepts `include_vectors` and `include_metrics` when loading data. Closes [#2398](https://github.com/argilla-io/argilla/issues/2398)
|
319 |
+
- Added `settings` param to `prepare_for_training` ([#2689](https://github.com/argilla-io/argilla/issues/2689))
|
320 |
+
- Added `prepare_for_training` for `openai` ([#2658](https://github.com/argilla-io/argilla/issues/2658))
|
321 |
+
- Added `ArgillaOpenAITrainer` ([#2659](https://github.com/argilla-io/argilla/issues/2659))
|
322 |
+
- Added `ArgillaSpanMarkerTrainer` for Named Entity Recognition ([#2693](https://github.com/argilla-io/argilla/pull/2693))
|
323 |
+
- Added `ArgillaTrainer` CLI support. Closes ([#2809](https://github.com/argilla-io/argilla/issues/2809))
|
324 |
+
|
325 |
+
### Fixes
|
326 |
+
|
327 |
+
- fix image alignment on token classification
|
328 |
+
|
329 |
+
### Changed
|
330 |
+
|
331 |
+
- Argilla quickstart image dependencies are externalized into `quickstart.requirements.txt`. See [#2666](https://github.com/argilla-io/argilla/pull/2666)
|
332 |
+
- bulk endpoints will upsert data when record `id` is present. Closes [#2535](https://github.com/argilla-io/argilla/issues/2535)
|
333 |
+
- moved from `click` to `typer` CLI support. Closes ([#2815](https://github.com/argilla-io/argilla/issues/2815))
|
334 |
+
- Argilla server docker image is built with PostgreSQL support. Closes [#2686](https://github.com/argilla-io/argilla/issues/2686)
|
335 |
+
- The `rg.log` computes all batches and raises an error for all failed batches.
|
336 |
+
- The default batch size for `rg.log` is now 100.
|
337 |
+
|
338 |
+
### Fixed
|
339 |
+
|
340 |
+
- `argilla.training` bugfixes and unification ([#2665](https://github.com/argilla-io/argilla/issues/2665))
|
341 |
+
- Resolved several small bugs in the `ArgillaTrainer`.
|
342 |
+
|
343 |
+
### Deprecated
|
344 |
+
|
345 |
+
- The `rg.log_async` function is deprecated and will be removed in next minor release.
|
346 |
+
|
347 |
+
## [1.6.0](https://github.com/argilla-io/argilla/compare/v1.5.1...v1.6.0)
|
348 |
+
|
349 |
+
### Added
|
350 |
+
|
351 |
+
- `ARGILLA_HOME_PATH` new environment variable ([#2564]).
|
352 |
+
- `ARGILLA_DATABASE_URL` new environment variable ([#2564]).
|
353 |
+
- Basic support for user roles with `admin` and `annotator` ([#2564]).
|
354 |
+
- `id`, `first_name`, `last_name`, `role`, `inserted_at` and `updated_at` new user fields ([#2564]).
|
355 |
+
- `/api/users` new endpoint to list and create users ([#2564]).
|
356 |
+
- `/api/users/{user_id}` new endpoint to delete users ([#2564]).
|
357 |
+
- `/api/workspaces` new endpoint to list and create workspaces ([#2564]).
|
358 |
+
- `/api/workspaces/{workspace_id}/users` new endpoint to list workspace users ([#2564]).
|
359 |
+
- `/api/workspaces/{workspace_id}/users/{user_id}` new endpoint to create and delete workspace users ([#2564]).
|
360 |
+
- `argilla.tasks.users.migrate` new task to migrate users from old YAML file to database ([#2564]).
|
361 |
+
- `argilla.tasks.users.create` new task to create a user ([#2564]).
|
362 |
+
- `argilla.tasks.users.create_default` new task to create a user with default credentials ([#2564]).
|
363 |
+
- `argilla.tasks.database.migrate` new task to execute database migrations ([#2564]).
|
364 |
+
- `release.Dockerfile` and `quickstart.Dockerfile` now creates a default `argilladata` volume to persist data ([#2564]).
|
365 |
+
- Add user settings page. Closes [#2496](https://github.com/argilla-io/argilla/issues/2496)
|
366 |
+
- Added `Argilla.training` module with support for `spacy`, `setfit`, and `transformers`. Closes [#2504](https://github.com/argilla-io/argilla/issues/2504)
|
367 |
+
|
368 |
+
### Fixes
|
369 |
+
|
370 |
+
- Now the `prepare_for_training` method is working when `multi_label=True`. Closes [#2606](https://github.com/argilla-io/argilla/issues/2606)
|
371 |
+
|
372 |
+
### Changed
|
373 |
+
|
374 |
+
- `ARGILLA_USERS_DB_FILE` environment variable now it's only used to migrate users from YAML file to database ([#2564]).
|
375 |
+
- `full_name` user field is now deprecated and `first_name` and `last_name` should be used instead ([#2564]).
|
376 |
+
- `password` user field now requires a minimum of `8` and a maximum of `100` characters in size ([#2564]).
|
377 |
+
- `quickstart.Dockerfile` image default users from `team` and `argilla` to `admin` and `annotator` including new passwords and API keys ([#2564]).
|
378 |
+
- Datasets to be managed only by users with `admin` role ([#2564]).
|
379 |
+
- The list of rules is now accessible while metrics are computed. Closes [#2117](https://github.com/argilla-io/argilla/issues/2117)
|
380 |
+
- Style updates for weak labeling and adding feedback toast when delete rules. See [#2626](https://github.com/argilla-io/argilla/pull/2626) and [#2648](https://github.com/argilla-io/argilla/pull/2648)
|
381 |
+
|
382 |
+
### Removed
|
383 |
+
|
384 |
+
- `email` user field ([#2564]).
|
385 |
+
- `disabled` user field ([#2564]).
|
386 |
+
- Support for private workspaces ([#2564]).
|
387 |
+
- `ARGILLA_LOCAL_AUTH_DEFAULT_APIKEY` and `ARGILLA_LOCAL_AUTH_DEFAULT_PASSWORD` environment variables. Use `python -m argilla.tasks.users.create_default` instead ([#2564]).
|
388 |
+
- The old headers for `API Key` and `workspace` from python client
|
389 |
+
- The default value for old `API Key` constant. Closes [#2251](https://github.com/argilla-io/argilla/issues/2251)
|
390 |
+
|
391 |
+
[#2564]: https://github.com/argilla-io/argilla/issues/2564
|
392 |
+
|
393 |
+
## [1.5.1](https://github.com/argilla-io/argilla/compare/v1.5.0...v1.5.1) - 2023-03-30
|
394 |
+
|
395 |
+
### Fixes
|
396 |
+
|
397 |
+
- Copying datasets between workspaces with proper owner/workspace info. Closes [#2562](https://github.com/argilla-io/argilla/issues/2562)
|
398 |
+
- Copy dataset with empty workspace to the default user workspace [905d4de](https://github.com/recognai/argilla/commit/905d4deaa769bfc9bbc022cd2dc75c7435cfe865)
|
399 |
+
- Using elasticsearch config to request backend version. Closes [#2311](https://github.com/argilla-io/argilla/issues/2311)
|
400 |
+
- Remove sorting by score in labels. Closes [#2622](https://github.com/argilla-io/argilla/issues/2622)
|
401 |
+
|
402 |
+
### Changed
|
403 |
+
|
404 |
+
- Update field name in metadata for image url. See [#2609](https://github.com/argilla-io/argilla/pull/2609)
|
405 |
+
- Improvements in tutorial doc cards. Closes [#2216](https://github.com/argilla-io/argilla/issues/2216)
|
406 |
+
|
407 |
+
## [1.5.0](https://github.com/argilla-io/argilla/compare/v1.4.0...v1.5.0) - 2023-03-21
|
408 |
+
|
409 |
+
### Added
|
410 |
+
|
411 |
+
- Add the fields to retrieve when loading the data from argilla. `rg.load` takes too long because of the vector field, even when users don't need it. Closes [#2398](https://github.com/argilla-io/argilla/issues/2398)
|
412 |
+
- Add new page and components for dataset settings. Closes [#2442](https://github.com/argilla-io/argilla/issues/2003)
|
413 |
+
- Add ability to show image in records (for TokenClassification and TextClassification) if a URL is passed in metadata with the key \_image_url
|
414 |
+
- Non-searchable fields support in metadata. [#2570](https://github.com/argilla-io/argilla/pull/2570)
|
415 |
+
- Add record ID references to the prepare for training methods. Closes [#2483](https://github.com/argilla-io/argilla/issues/2483)
|
416 |
+
- Add tutorial on Image Classification. [#2420](https://github.com/argilla-io/argilla/pull/2420)
|
417 |
+
- Add Train button, visible for "admin" role, with code snippets from a selection of libraries. Closes [#2591](https://github.com/argilla-io/argilla/pull/2591)
|
418 |
+
|
419 |
+
### Changed
|
420 |
+
|
421 |
+
- Labels are now centralized in a specific vuex ORM called GlobalLabel Model, see https://github.com/argilla-io/argilla/issues/2210. This model is the same for TokenClassification and TextClassification (so both tasks have labels with color_id and shortcuts parameters in the vuex ORM)
|
422 |
+
- The shortcuts improvement for labels [#2339](https://github.com/argilla-io/argilla/pull/2339) have been moved to the vuex ORM in dataset settings feature [#2444](https://github.com/argilla-io/argilla/commit/eb37c3bcff3ad253481d6a10f8abb093384f2dcb)
|
423 |
+
- Update "Define a labeling schema" section in docs.
|
424 |
+
- The record inputs are sorted alphabetically in UI by default. [#2581](https://github.com/argilla-io/argilla/pull/2581)
|
425 |
+
- The record inputs are fully visible when pagination size is one and the height of collapsed area size is bigger for laptop screen. [#2587](https://github.com/argilla-io/argilla/pull/2587/files)
|
426 |
+
|
427 |
+
### Fixes
|
428 |
+
|
429 |
+
- Allow URL to be clickable in Jupyter notebook again. Closes [#2527](https://github.com/argilla-io/argilla/issues/2527)
|
430 |
+
|
431 |
+
### Removed
|
432 |
+
|
433 |
+
- Removing some data scan deprecated endpoints used by old clients. This change will break compatibility with client `<v1.3.0`
|
434 |
+
- Stop using old scan deprecated endpoints in python client. This logic will break client compatibility with server version `<1.3.0`
|
435 |
+
- Remove the previous way to add labels through the dataset page. Now labels can be added only through dataset settings page.
|
CITATION.cff
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
cff-version: 1.2.0
|
2 |
+
message: "If you use this software, please cite it as below."
|
3 |
+
authors:
|
4 |
+
- family-names: "Vila-Suero"
|
5 |
+
given-names: "Daniel"
|
6 |
+
- family-names: "Aranda"
|
7 |
+
given-names: "Francisco"
|
8 |
+
title: "Argilla - Open-source framework for data-centric NLP"
|
9 |
+
version: 1.2.0
|
10 |
+
date-released: 2023-01-12
|
11 |
+
url: "https://github.com/argilla-io/argilla"
|
CODE_OF_CONDUCT.md
CHANGED
@@ -1,132 +1,9 @@
|
|
1 |
-
#
|
2 |
|
3 |
-
|
4 |
|
5 |
-
|
6 |
-
community a harassment-free experience for everyone, regardless of age, body
|
7 |
-
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
8 |
-
identity and expression, level of experience, education, socio-economic status,
|
9 |
-
nationality, personal appearance, race, caste, color, religion, or sexual identity
|
10 |
-
and orientation.
|
11 |
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
## Our Standards
|
16 |
-
|
17 |
-
Examples of behavior that contributes to a positive environment for our
|
18 |
-
community include:
|
19 |
-
|
20 |
-
* Demonstrating empathy and kindness toward other people
|
21 |
-
* Being respectful of differing opinions, viewpoints, and experiences
|
22 |
-
* Giving and gracefully accepting constructive feedback
|
23 |
-
* Accepting responsibility and apologizing to those affected by our mistakes,
|
24 |
-
and learning from the experience
|
25 |
-
* Focusing on what is best not just for us as individuals, but for the
|
26 |
-
overall community
|
27 |
-
|
28 |
-
Examples of unacceptable behavior include:
|
29 |
-
|
30 |
-
* The use of sexualized language or imagery, and sexual attention or
|
31 |
-
advances of any kind
|
32 |
-
* Trolling, insulting or derogatory comments, and personal or political attacks
|
33 |
-
* Public or private harassment
|
34 |
-
* Publishing others' private information, such as a physical or email
|
35 |
-
address, without their explicit permission
|
36 |
-
* Other conduct which could reasonably be considered inappropriate in a
|
37 |
-
professional setting
|
38 |
-
|
39 |
-
## Enforcement Responsibilities
|
40 |
-
|
41 |
-
Community leaders are responsible for clarifying and enforcing our standards of
|
42 |
-
acceptable behavior and will take appropriate and fair corrective action in
|
43 |
-
response to any behavior that they deem inappropriate, threatening, offensive,
|
44 |
-
or harmful.
|
45 |
-
|
46 |
-
Community leaders have the right and responsibility to remove, edit, or reject
|
47 |
-
comments, commits, code, wiki edits, issues, and other contributions that are
|
48 |
-
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
49 |
-
decisions when appropriate.
|
50 |
-
|
51 |
-
## Scope
|
52 |
-
|
53 |
-
This Code of Conduct applies within all community spaces, and also applies when
|
54 |
-
an individual is officially representing the community in public spaces.
|
55 |
-
Examples of representing our community include using an official e-mail address,
|
56 |
-
posting via an official social media account, or acting as an appointed
|
57 |
-
representative at an online or offline event.
|
58 |
-
|
59 |
-
## Enforcement
|
60 |
-
|
61 |
-
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
62 |
-
reported to the community leaders responsible for enforcement at
|
63 |
-
feedback@huggingface.co.
|
64 |
-
All complaints will be reviewed and investigated promptly and fairly.
|
65 |
-
|
66 |
-
All community leaders are obligated to respect the privacy and security of the
|
67 |
-
reporter of any incident.
|
68 |
-
|
69 |
-
## Enforcement Guidelines
|
70 |
-
|
71 |
-
Community leaders will follow these Community Impact Guidelines in determining
|
72 |
-
the consequences for any action they deem in violation of this Code of Conduct:
|
73 |
-
|
74 |
-
### 1. Correction
|
75 |
-
|
76 |
-
**Community Impact**: Use of inappropriate language or other behavior deemed
|
77 |
-
unprofessional or unwelcome in the community.
|
78 |
-
|
79 |
-
**Consequence**: A private, written warning from community leaders, providing
|
80 |
-
clarity around the nature of the violation and an explanation of why the
|
81 |
-
behavior was inappropriate. A public apology may be requested.
|
82 |
-
|
83 |
-
### 2. Warning
|
84 |
-
|
85 |
-
**Community Impact**: A violation through a single incident or series
|
86 |
-
of actions.
|
87 |
-
|
88 |
-
**Consequence**: A warning with consequences for continued behavior. No
|
89 |
-
interaction with the people involved, including unsolicited interaction with
|
90 |
-
those enforcing the Code of Conduct, for a specified period of time. This
|
91 |
-
includes avoiding interactions in community spaces as well as external channels
|
92 |
-
like social media. Violating these terms may lead to a temporary or
|
93 |
-
permanent ban.
|
94 |
-
|
95 |
-
### 3. Temporary Ban
|
96 |
-
|
97 |
-
**Community Impact**: A serious violation of community standards, including
|
98 |
-
sustained inappropriate behavior.
|
99 |
-
|
100 |
-
**Consequence**: A temporary ban from any sort of interaction or public
|
101 |
-
communication with the community for a specified period of time. No public or
|
102 |
-
private interaction with the people involved, including unsolicited interaction
|
103 |
-
with those enforcing the Code of Conduct, is allowed during this period.
|
104 |
-
Violating these terms may lead to a permanent ban.
|
105 |
-
|
106 |
-
### 4. Permanent Ban
|
107 |
-
|
108 |
-
**Community Impact**: Demonstrating a pattern of violation of community
|
109 |
-
standards, including sustained inappropriate behavior, harassment of an
|
110 |
-
individual, or aggression toward or disparagement of classes of individuals.
|
111 |
-
|
112 |
-
**Consequence**: A permanent ban from any sort of public interaction within
|
113 |
-
the community.
|
114 |
-
|
115 |
-
## Attribution
|
116 |
-
|
117 |
-
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
118 |
-
version 2.0, available at
|
119 |
-
[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0].
|
120 |
-
|
121 |
-
Community Impact Guidelines were inspired by
|
122 |
-
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
|
123 |
-
|
124 |
-
For answers to common questions about this code of conduct, see the FAQ at
|
125 |
-
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
|
126 |
-
at [https://www.contributor-covenant.org/translations][translations].
|
127 |
-
|
128 |
-
[homepage]: https://www.contributor-covenant.org
|
129 |
-
[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html
|
130 |
-
[Mozilla CoC]: https://github.com/mozilla/diversity
|
131 |
-
[FAQ]: https://www.contributor-covenant.org/faq
|
132 |
-
[translations]: https://www.contributor-covenant.org/translations
|
|
|
1 |
+
# Microsoft Open Source Code of Conduct
|
2 |
|
3 |
+
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
4 |
|
5 |
+
Resources:
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
+
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
|
8 |
+
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
|
9 |
+
- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
CONTRIBUTING.md
CHANGED
@@ -1,87 +1,23 @@
|
|
1 |
-
#
|
2 |
|
3 |
-
|
|
|
|
|
4 |
|
5 |
-
|
|
|
|
|
6 |
|
7 |
-
|
8 |
-
|
9 |
|
10 |
-
|
11 |
|
12 |
-
|
13 |
-
our [code of conduct](CODE_OF_CONDUCT.md).
|
14 |
|
15 |
-
|
16 |
|
17 |
-
|
18 |
|
19 |
-
|
20 |
|
21 |
-
If you would like to work on any of the open Issues:
|
22 |
-
|
23 |
-
1. Make sure it is not already assigned to someone else. You have the assignee (if any) on the top of the right column of the Issue page.
|
24 |
-
|
25 |
-
2. You can self-assign it by commenting on the Issue page with one of the keywords: `#take` or `#self-assign`.
|
26 |
-
|
27 |
-
3. Work on your self-assigned issue and eventually create a Pull Request.
|
28 |
-
|
29 |
-
## How to create a Pull Request?
|
30 |
-
|
31 |
-
1. Fork the [repository](https://github.com/huggingface/datasets-server) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.
|
32 |
-
|
33 |
-
2. Clone your fork to your local disk, and add the base repository as a remote:
|
34 |
-
|
35 |
-
```bash
|
36 |
-
git clone git@github.com:<your Github handle>/datasets-server.git
|
37 |
-
cd datasets-server
|
38 |
-
git remote add upstream https://github.com/huggingface/datasets-server.git
|
39 |
-
```
|
40 |
-
|
41 |
-
3. Create a new branch to hold your development changes:
|
42 |
-
|
43 |
-
```bash
|
44 |
-
git checkout -b a-descriptive-name-for-my-changes
|
45 |
-
```
|
46 |
-
|
47 |
-
**do not** work on the `main` branch.
|
48 |
-
|
49 |
-
4. Set up a development environment by following the [developer guide](./DEVELOPER_GUIDE.md)
|
50 |
-
|
51 |
-
5. Develop the features on your branch.
|
52 |
-
|
53 |
-
6. Format your code. Run black and isort so that your newly added files look nice with the following command:
|
54 |
-
|
55 |
-
```bash
|
56 |
-
make style
|
57 |
-
```
|
58 |
-
|
59 |
-
7. Once you're happy with your code, add your changes and make a commit to record your changes locally:
|
60 |
-
|
61 |
-
```bash
|
62 |
-
git add -p
|
63 |
-
git commit
|
64 |
-
```
|
65 |
-
|
66 |
-
It is a good idea to sync your copy of the code with the original
|
67 |
-
repository regularly. This way you can quickly account for changes:
|
68 |
-
|
69 |
-
```bash
|
70 |
-
git fetch upstream
|
71 |
-
git rebase upstream/main
|
72 |
-
```
|
73 |
-
|
74 |
-
Push the changes to your account using:
|
75 |
-
|
76 |
-
```bash
|
77 |
-
git push -u origin a-descriptive-name-for-my-changes
|
78 |
-
```
|
79 |
-
|
80 |
-
8. Once you are satisfied, go the webpage of your fork on GitHub. Click on "Pull request" to send your to the project maintainers for review.
|
81 |
-
|
82 |
-
Thank you for your contribution!
|
83 |
-
|
84 |
-
## Code of conduct
|
85 |
-
|
86 |
-
This project adheres to the HuggingFace [code of conduct](CODE_OF_CONDUCT.md).
|
87 |
-
By participating, you are expected to uphold this code.
|
|
|
1 |
+
# Contribute to Argilla
|
2 |
|
3 |
+
Everyone is welcome to contribute, and we value everybody's contribution. Code
|
4 |
+
contributions are not the only way to help the community. Answering questions, helping
|
5 |
+
others, and improving the documentation are also immensely valuable.
|
6 |
|
7 |
+
It also helps us if you spread the word! Reference the library in blog posts
|
8 |
+
about the awesome projects it made possible, shout out on Twitter every time it has
|
9 |
+
helped you, or simply ⭐️ the repository to say thank you.
|
10 |
|
11 |
+
However you choose to contribute, please be mindful and respect our
|
12 |
+
[code of conduct](https://github.com/argilla-io/argilla/blob/main/CODE_OF_CONDUCT.md).
|
13 |
|
14 |
+
For contributions, we work together to regreen the earth with [JustDiggit](https://justdiggit.org/) via our [Argilla Community Growers](https://argilla.io/blog/introducing-argilla-community-growers/) initiative.
|
15 |
|
16 |
+
## Need some help?
|
|
|
17 |
|
18 |
+
We understand that getting started might be scary and difficult, therefore, we provide a guided contributor mentorship program. Feel free to schedule a meeting [here](https://calendly.com/argilla-office-hours/30min) to get you started. Alternatively, you can start with [joining our Slack](https://join.slack.com/t/rubrixworkspace/shared_invite/zt-whigkyjn-a3IUJLD7gDbTZ0rKlvcJ5g) and reaching out in our [contributor channel](https://rubrixworkspace.slack.com/archives/C05BCGM277B).
|
19 |
|
20 |
+
## Want to work on your own?
|
21 |
|
22 |
+
For more seasoned contributors, we recommend taking a look at the [contributor section](https://docs.argilla.io/en/latest/community/contributing.html) in our docs.
|
23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Dockerfile
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04:20230727.v1
|
2 |
+
|
3 |
+
ENV AZUREML_CONDA_ENVIRONMENT_PATH /azureml-envs/auto-prompt
|
4 |
+
|
5 |
+
# Prepend path to AzureML conda environment
|
6 |
+
ENV PATH $AZUREML_CONDA_ENVIRONMENT_PATH/bin:$PATH
|
7 |
+
|
8 |
+
# Create conda environment
|
9 |
+
COPY conda_dependencies.yaml .
|
10 |
+
RUN conda env create -p $AZUREML_CONDA_ENVIRONMENT_PATH -f conda_dependencies.yaml -q && \
|
11 |
+
rm conda_dependencies.yaml && \
|
12 |
+
conda run -p $AZUREML_CONDA_ENVIRONMENT_PATH pip cache purge && \
|
13 |
+
conda clean -a -y
|
14 |
+
|
15 |
+
RUN pip install azureml-metrics==0.0.21.post1
|
16 |
+
|
17 |
+
ENV LD_LIBRARY_PATH $AZUREML_CONDA_ENVIRONMENT_PATH/lib:$LD_LIBRARY_PATH
|
LICENSE
CHANGED
@@ -1,202 +1,21 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
outstanding shares, or (iii) beneficial ownership of such entity.
|
23 |
-
|
24 |
-
"You" (or "Your") shall mean an individual or Legal Entity
|
25 |
-
exercising permissions granted by this License.
|
26 |
-
|
27 |
-
"Source" form shall mean the preferred form for making modifications,
|
28 |
-
including but not limited to software source code, documentation
|
29 |
-
source, and configuration files.
|
30 |
-
|
31 |
-
"Object" form shall mean any form resulting from mechanical
|
32 |
-
transformation or translation of a Source form, including but
|
33 |
-
not limited to compiled object code, generated documentation,
|
34 |
-
and conversions to other media types.
|
35 |
-
|
36 |
-
"Work" shall mean the work of authorship, whether in Source or
|
37 |
-
Object form, made available under the License, as indicated by a
|
38 |
-
copyright notice that is included in or attached to the work
|
39 |
-
(an example is provided in the Appendix below).
|
40 |
-
|
41 |
-
"Derivative Works" shall mean any work, whether in Source or Object
|
42 |
-
form, that is based on (or derived from) the Work and for which the
|
43 |
-
editorial revisions, annotations, elaborations, or other modifications
|
44 |
-
represent, as a whole, an original work of authorship. For the purposes
|
45 |
-
of this License, Derivative Works shall not include works that remain
|
46 |
-
separable from, or merely link (or bind by name) to the interfaces of,
|
47 |
-
the Work and Derivative Works thereof.
|
48 |
-
|
49 |
-
"Contribution" shall mean any work of authorship, including
|
50 |
-
the original version of the Work and any modifications or additions
|
51 |
-
to that Work or Derivative Works thereof, that is intentionally
|
52 |
-
submitted to Licensor for inclusion in the Work by the copyright owner
|
53 |
-
or by an individual or Legal Entity authorized to submit on behalf of
|
54 |
-
the copyright owner. For the purposes of this definition, "submitted"
|
55 |
-
means any form of electronic, verbal, or written communication sent
|
56 |
-
to the Licensor or its representatives, including but not limited to
|
57 |
-
communication on electronic mailing lists, source code control systems,
|
58 |
-
and issue tracking systems that are managed by, or on behalf of, the
|
59 |
-
Licensor for the purpose of discussing and improving the Work, but
|
60 |
-
excluding communication that is conspicuously marked or otherwise
|
61 |
-
designated in writing by the copyright owner as "Not a Contribution."
|
62 |
-
|
63 |
-
"Contributor" shall mean Licensor and any individual or Legal Entity
|
64 |
-
on behalf of whom a Contribution has been received by Licensor and
|
65 |
-
subsequently incorporated within the Work.
|
66 |
-
|
67 |
-
2. Grant of Copyright License. Subject to the terms and conditions of
|
68 |
-
this License, each Contributor hereby grants to You a perpetual,
|
69 |
-
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
70 |
-
copyright license to reproduce, prepare Derivative Works of,
|
71 |
-
publicly display, publicly perform, sublicense, and distribute the
|
72 |
-
Work and such Derivative Works in Source or Object form.
|
73 |
-
|
74 |
-
3. Grant of Patent License. Subject to the terms and conditions of
|
75 |
-
this License, each Contributor hereby grants to You a perpetual,
|
76 |
-
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
77 |
-
(except as stated in this section) patent license to make, have made,
|
78 |
-
use, offer to sell, sell, import, and otherwise transfer the Work,
|
79 |
-
where such license applies only to those patent claims licensable
|
80 |
-
by such Contributor that are necessarily infringed by their
|
81 |
-
Contribution(s) alone or by combination of their Contribution(s)
|
82 |
-
with the Work to which such Contribution(s) was submitted. If You
|
83 |
-
institute patent litigation against any entity (including a
|
84 |
-
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
85 |
-
or a Contribution incorporated within the Work constitutes direct
|
86 |
-
or contributory patent infringement, then any patent licenses
|
87 |
-
granted to You under this License for that Work shall terminate
|
88 |
-
as of the date such litigation is filed.
|
89 |
-
|
90 |
-
4. Redistribution. You may reproduce and distribute copies of the
|
91 |
-
Work or Derivative Works thereof in any medium, with or without
|
92 |
-
modifications, and in Source or Object form, provided that You
|
93 |
-
meet the following conditions:
|
94 |
-
|
95 |
-
(a) You must give any other recipients of the Work or
|
96 |
-
Derivative Works a copy of this License; and
|
97 |
-
|
98 |
-
(b) You must cause any modified files to carry prominent notices
|
99 |
-
stating that You changed the files; and
|
100 |
-
|
101 |
-
(c) You must retain, in the Source form of any Derivative Works
|
102 |
-
that You distribute, all copyright, patent, trademark, and
|
103 |
-
attribution notices from the Source form of the Work,
|
104 |
-
excluding those notices that do not pertain to any part of
|
105 |
-
the Derivative Works; and
|
106 |
-
|
107 |
-
(d) If the Work includes a "NOTICE" text file as part of its
|
108 |
-
distribution, then any Derivative Works that You distribute must
|
109 |
-
include a readable copy of the attribution notices contained
|
110 |
-
within such NOTICE file, excluding those notices that do not
|
111 |
-
pertain to any part of the Derivative Works, in at least one
|
112 |
-
of the following places: within a NOTICE text file distributed
|
113 |
-
as part of the Derivative Works; within the Source form or
|
114 |
-
documentation, if provided along with the Derivative Works; or,
|
115 |
-
within a display generated by the Derivative Works, if and
|
116 |
-
wherever such third-party notices normally appear. The contents
|
117 |
-
of the NOTICE file are for informational purposes only and
|
118 |
-
do not modify the License. You may add Your own attribution
|
119 |
-
notices within Derivative Works that You distribute, alongside
|
120 |
-
or as an addendum to the NOTICE text from the Work, provided
|
121 |
-
that such additional attribution notices cannot be construed
|
122 |
-
as modifying the License.
|
123 |
-
|
124 |
-
You may add Your own copyright statement to Your modifications and
|
125 |
-
may provide additional or different license terms and conditions
|
126 |
-
for use, reproduction, or distribution of Your modifications, or
|
127 |
-
for any such Derivative Works as a whole, provided Your use,
|
128 |
-
reproduction, and distribution of the Work otherwise complies with
|
129 |
-
the conditions stated in this License.
|
130 |
-
|
131 |
-
5. Submission of Contributions. Unless You explicitly state otherwise,
|
132 |
-
any Contribution intentionally submitted for inclusion in the Work
|
133 |
-
by You to the Licensor shall be under the terms and conditions of
|
134 |
-
this License, without any additional terms or conditions.
|
135 |
-
Notwithstanding the above, nothing herein shall supersede or modify
|
136 |
-
the terms of any separate license agreement you may have executed
|
137 |
-
with Licensor regarding such Contributions.
|
138 |
-
|
139 |
-
6. Trademarks. This License does not grant permission to use the trade
|
140 |
-
names, trademarks, service marks, or product names of the Licensor,
|
141 |
-
except as required for reasonable and customary use in describing the
|
142 |
-
origin of the Work and reproducing the content of the NOTICE file.
|
143 |
-
|
144 |
-
7. Disclaimer of Warranty. Unless required by applicable law or
|
145 |
-
agreed to in writing, Licensor provides the Work (and each
|
146 |
-
Contributor provides its Contributions) on an "AS IS" BASIS,
|
147 |
-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
148 |
-
implied, including, without limitation, any warranties or conditions
|
149 |
-
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
150 |
-
PARTICULAR PURPOSE. You are solely responsible for determining the
|
151 |
-
appropriateness of using or redistributing the Work and assume any
|
152 |
-
risks associated with Your exercise of permissions under this License.
|
153 |
-
|
154 |
-
8. Limitation of Liability. In no event and under no legal theory,
|
155 |
-
whether in tort (including negligence), contract, or otherwise,
|
156 |
-
unless required by applicable law (such as deliberate and grossly
|
157 |
-
negligent acts) or agreed to in writing, shall any Contributor be
|
158 |
-
liable to You for damages, including any direct, indirect, special,
|
159 |
-
incidental, or consequential damages of any character arising as a
|
160 |
-
result of this License or out of the use or inability to use the
|
161 |
-
Work (including but not limited to damages for loss of goodwill,
|
162 |
-
work stoppage, computer failure or malfunction, or any and all
|
163 |
-
other commercial damages or losses), even if such Contributor
|
164 |
-
has been advised of the possibility of such damages.
|
165 |
-
|
166 |
-
9. Accepting Warranty or Additional Liability. While redistributing
|
167 |
-
the Work or Derivative Works thereof, You may choose to offer,
|
168 |
-
and charge a fee for, acceptance of support, warranty, indemnity,
|
169 |
-
or other liability obligations and/or rights consistent with this
|
170 |
-
License. However, in accepting such obligations, You may act only
|
171 |
-
on Your own behalf and on Your sole responsibility, not on behalf
|
172 |
-
of any other Contributor, and only if You agree to indemnify,
|
173 |
-
defend, and hold each Contributor harmless for any liability
|
174 |
-
incurred by, or claims asserted against, such Contributor by reason
|
175 |
-
of your accepting any such warranty or additional liability.
|
176 |
-
|
177 |
-
END OF TERMS AND CONDITIONS
|
178 |
-
|
179 |
-
APPENDIX: How to apply the Apache License to your work.
|
180 |
-
|
181 |
-
To apply the Apache License to your work, attach the following
|
182 |
-
boilerplate notice, with the fields enclosed by brackets "[]"
|
183 |
-
replaced with your own identifying information. (Don't include
|
184 |
-
the brackets!) The text should be enclosed in the appropriate
|
185 |
-
comment syntax for the file format. We also recommend that a
|
186 |
-
file or class name and description of purpose be included on the
|
187 |
-
same "printed page" as the copyright notice for easier
|
188 |
-
identification within third-party archives.
|
189 |
-
|
190 |
-
Copyright [yyyy] [name of copyright owner]
|
191 |
-
|
192 |
-
Licensed under the Apache License, Version 2.0 (the "License");
|
193 |
-
you may not use this file except in compliance with the License.
|
194 |
-
You may obtain a copy of the License at
|
195 |
-
|
196 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
197 |
-
|
198 |
-
Unless required by applicable law or agreed to in writing, software
|
199 |
-
distributed under the License is distributed on an "AS IS" BASIS,
|
200 |
-
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
201 |
-
See the License for the specific language governing permissions and
|
202 |
-
limitations under the License.
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) Microsoft Corporation. All rights reserved.
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
LICENSE.txt
ADDED
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
LLAMA 2 COMMUNITY LICENSE AGREEMENT
|
2 |
+
Llama 2 Version Release Date: July 18, 2023
|
3 |
+
|
4 |
+
"Agreement" means the terms and conditions for use, reproduction, distribution and
|
5 |
+
modification of the Llama Materials set forth herein.
|
6 |
+
|
7 |
+
"Documentation" means the specifications, manuals and documentation
|
8 |
+
accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-
|
9 |
+
libraries/llama-downloads/.
|
10 |
+
|
11 |
+
"Licensee" or "you" means you, or your employer or any other person or entity (if
|
12 |
+
you are entering into this Agreement on such person or entity's behalf), of the age
|
13 |
+
required under applicable laws, rules or regulations to provide legal consent and that
|
14 |
+
has legal authority to bind your employer or such other person or entity if you are
|
15 |
+
entering in this Agreement on their behalf.
|
16 |
+
|
17 |
+
"Llama 2" means the foundational large language models and software and
|
18 |
+
algorithms, including machine-learning model code, trained model weights,
|
19 |
+
inference-enabling code, training-enabling code, fine-tuning enabling code and other
|
20 |
+
elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-
|
21 |
+
libraries/llama-downloads/.
|
22 |
+
|
23 |
+
"Llama Materials" means, collectively, Meta's proprietary Llama 2 and
|
24 |
+
Documentation (and any portion thereof) made available under this Agreement.
|
25 |
+
|
26 |
+
"Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you
|
27 |
+
are an entity, your principal place of business is in the EEA or Switzerland) and Meta
|
28 |
+
Platforms, Inc. (if you are located outside of the EEA or Switzerland).
|
29 |
+
|
30 |
+
By clicking "I Accept" below or by using or distributing any portion or element of the
|
31 |
+
Llama Materials, you agree to be bound by this Agreement.
|
32 |
+
|
33 |
+
1. License Rights and Redistribution.
|
34 |
+
|
35 |
+
a. Grant of Rights. You are granted a non-exclusive, worldwide, non-
|
36 |
+
transferable and royalty-free limited license under Meta's intellectual property or
|
37 |
+
other rights owned by Meta embodied in the Llama Materials to use, reproduce,
|
38 |
+
distribute, copy, create derivative works of, and make modifications to the Llama
|
39 |
+
Materials.
|
40 |
+
|
41 |
+
b. Redistribution and Use.
|
42 |
+
|
43 |
+
i. If you distribute or make the Llama Materials, or any derivative works
|
44 |
+
thereof, available to a third party, you shall provide a copy of this Agreement to such
|
45 |
+
third party.
|
46 |
+
ii. If you receive Llama Materials, or any derivative works thereof, from
|
47 |
+
a Licensee as part of an integrated end user product, then Section 2 of this
|
48 |
+
Agreement will not apply to you.
|
49 |
+
|
50 |
+
iii. You must retain in all copies of the Llama Materials that you
|
51 |
+
distribute the following attribution notice within a "Notice" text file distributed as a
|
52 |
+
part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License,
|
53 |
+
Copyright (c) Meta Platforms, Inc. All Rights Reserved."
|
54 |
+
|
55 |
+
iv. Your use of the Llama Materials must comply with applicable laws
|
56 |
+
and regulations (including trade compliance laws and regulations) and adhere to the
|
57 |
+
Acceptable Use Policy for the Llama Materials (available at
|
58 |
+
https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into
|
59 |
+
this Agreement.
|
60 |
+
|
61 |
+
v. You will not use the Llama Materials or any output or results of the
|
62 |
+
Llama Materials to improve any other large language model (excluding Llama 2 or
|
63 |
+
derivative works thereof).
|
64 |
+
|
65 |
+
2. Additional Commercial Terms. If, on the Llama 2 version release date, the
|
66 |
+
monthly active users of the products or services made available by or for Licensee,
|
67 |
+
or Licensee's affiliates, is greater than 700 million monthly active users in the
|
68 |
+
preceding calendar month, you must request a license from Meta, which Meta may
|
69 |
+
grant to you in its sole discretion, and you are not authorized to exercise any of the
|
70 |
+
rights under this Agreement unless or until Meta otherwise expressly grants you
|
71 |
+
such rights.
|
72 |
+
|
73 |
+
3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE
|
74 |
+
LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE
|
75 |
+
PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
76 |
+
EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY
|
77 |
+
WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR
|
78 |
+
FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE
|
79 |
+
FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING
|
80 |
+
THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR
|
81 |
+
USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
|
82 |
+
|
83 |
+
4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE
|
84 |
+
LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT,
|
85 |
+
NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS
|
86 |
+
AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL,
|
87 |
+
CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN
|
88 |
+
IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF
|
89 |
+
ANY OF THE FOREGOING.
|
90 |
+
|
91 |
+
5. Intellectual Property.
|
92 |
+
|
93 |
+
a. No trademark licenses are granted under this Agreement, and in
|
94 |
+
connection with the Llama Materials, neither Meta nor Licensee may use any name
|
95 |
+
or mark owned by or associated with the other or any of its affiliates, except as
|
96 |
+
required for reasonable and customary use in describing and redistributing the
|
97 |
+
Llama Materials.
|
98 |
+
|
99 |
+
b. Subject to Meta's ownership of Llama Materials and derivatives made by or
|
100 |
+
for Meta, with respect to any derivative works and modifications of the Llama
|
101 |
+
Materials that are made by you, as between you and Meta, you are and will be the
|
102 |
+
owner of such derivative works and modifications.
|
103 |
+
|
104 |
+
c. If you institute litigation or other proceedings against Meta or any entity
|
105 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that the Llama
|
106 |
+
Materials or Llama 2 outputs or results, or any portion of any of the foregoing,
|
107 |
+
constitutes infringement of intellectual property or other rights owned or licensable
|
108 |
+
by you, then any licenses granted to you under this Agreement shall terminate as of
|
109 |
+
the date such litigation or claim is filed or instituted. You will indemnify and hold
|
110 |
+
harmless Meta from and against any claim by any third party arising out of or related
|
111 |
+
to your use or distribution of the Llama Materials.
|
112 |
+
|
113 |
+
6. Term and Termination. The term of this Agreement will commence upon your
|
114 |
+
acceptance of this Agreement or access to the Llama Materials and will continue in
|
115 |
+
full force and effect until terminated in accordance with the terms and conditions
|
116 |
+
herein. Meta may terminate this Agreement if you are in breach of any term or
|
117 |
+
condition of this Agreement. Upon termination of this Agreement, you shall delete
|
118 |
+
and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the
|
119 |
+
termination of this Agreement.
|
120 |
+
|
121 |
+
7. Governing Law and Jurisdiction. This Agreement will be governed and
|
122 |
+
construed under the laws of the State of California without regard to choice of law
|
123 |
+
principles, and the UN Convention on Contracts for the International Sale of Goods
|
124 |
+
does not apply to this Agreement. The courts of California shall have exclusive
|
125 |
+
jurisdiction of any dispute arising out of this Agreement.
|
126 |
+
|
MANIFEST.in
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
recursive-include evals *.py
|
2 |
+
recursive-include evals *.yaml
|
3 |
+
recursive-include evals *.sql
|
4 |
+
recursive-include evals/registry/data *.jsonl
|
MODEL_LICENSE
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
The CodeGeeX License
|
2 |
+
|
3 |
+
1. Definitions
|
4 |
+
|
5 |
+
“Licensor” means the CodeGeeX Model Team that distributes its Software.
|
6 |
+
|
7 |
+
“Software” means the CodeGeeX model parameters made available under this license.
|
8 |
+
|
9 |
+
2. License Grant
|
10 |
+
|
11 |
+
Subject to the terms and conditions of this License, the Licensor hereby grants to you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty-free copyright license to use the Software solely for your non-commercial research purposes.
|
12 |
+
|
13 |
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
3. Restriction
|
16 |
+
|
17 |
+
You will not use, copy, modify, merge, publish, distribute, reproduce, or create derivative works of the Software, in whole or in part, for any commercial, military, or illegal purposes.
|
18 |
+
|
19 |
+
You will not use the Software for any act that may undermine China's national security and national unity, harm the public interest of society, or infringe upon the rights and interests of human beings.
|
20 |
+
|
21 |
+
4. Disclaimer
|
22 |
+
|
23 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
24 |
+
|
25 |
+
5. Limitation of Liability
|
26 |
+
|
27 |
+
EXCEPT TO THE EXTENT PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER BASED IN TORT, NEGLIGENCE, CONTRACT, LIABILITY, OR OTHERWISE WILL ANY LICENSOR BE LIABLE TO YOU FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES, OR ANY OTHER COMMERCIAL LOSSES, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
28 |
+
|
29 |
+
6. Dispute Resolution
|
30 |
+
|
31 |
+
This license shall be governed and construed in accordance with the laws of People’s Republic of China. Any dispute arising from or in connection with this License shall be submitted to Haidian District People's Court in Beijing.
|
32 |
+
|
33 |
+
Note that the license is subject to update to a more comprehensive version. For any questions related to the license and copyright, please contact us at report@aminer.cn.
|
Makefile
CHANGED
@@ -1,42 +1,3 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
export PORT_API := 8180
|
5 |
-
export PORT_ROWS := 8182
|
6 |
-
export PORT_REVERSE_PROXY := 8100
|
7 |
-
|
8 |
-
# environment variables per target
|
9 |
-
start: export COMPOSE_PROJECT_NAME := datasets-server
|
10 |
-
stop: export COMPOSE_PROJECT_NAME := datasets-server
|
11 |
-
dev-start: export COMPOSE_PROJECT_NAME := dev-datasets-server
|
12 |
-
dev-stop: export COMPOSE_PROJECT_NAME := dev-datasets-server
|
13 |
-
|
14 |
-
# makefile variables per target
|
15 |
-
start: DOCKER_COMPOSE := ./tools/docker-compose-datasets-server.yml
|
16 |
-
stop: DOCKER_COMPOSE := ./tools/docker-compose-datasets-server.yml
|
17 |
-
dev-start: DOCKER_COMPOSE := ./tools/docker-compose-dev-datasets-server.yml
|
18 |
-
dev-stop: DOCKER_COMPOSE := ./tools/docker-compose-dev-datasets-server.yml
|
19 |
-
|
20 |
-
include tools/Docker.mk
|
21 |
-
|
22 |
-
.PHONY: start
|
23 |
-
start:
|
24 |
-
MONGO_PORT=${MONGO_PORT} ADMIN_UVICORN_PORT=${PORT_ADMIN} API_UVICORN_PORT=${PORT_API} ROWS_UVICORN_PORT=${PORT_ROWS} PORT_REVERSE_PROXY=${PORT_REVERSE_PROXY} DOCKER_COMPOSE=${DOCKER_COMPOSE} $(MAKE) up
|
25 |
-
|
26 |
-
.PHONY: stop
|
27 |
-
stop:
|
28 |
-
MONGO_PORT=${MONGO_PORT} ADMIN_UVICORN_PORT=${PORT_ADMIN} API_UVICORN_PORT=${PORT_API} ROWS_UVICORN_PORT=${PORT_ROWS} PORT_REVERSE_PROXY=${PORT_REVERSE_PROXY} DOCKER_COMPOSE=${DOCKER_COMPOSE} $(MAKE) down
|
29 |
-
|
30 |
-
.PHONY: dev-start
|
31 |
-
dev-start:
|
32 |
-
MONGO_PORT=${MONGO_PORT} ADMIN_UVICORN_PORT=${PORT_ADMIN} API_UVICORN_PORT=${PORT_API} ROWS_UVICORN_PORT=${PORT_ROWS} PORT_REVERSE_PROXY=${PORT_REVERSE_PROXY} DOCKER_COMPOSE=${DOCKER_COMPOSE} $(MAKE) up
|
33 |
-
|
34 |
-
.PHONY: dev-stop
|
35 |
-
dev-stop:
|
36 |
-
MONGO_PORT=${MONGO_PORT} ADMIN_UVICORN_PORT=${PORT_ADMIN} API_UVICORN_PORT=${PORT_API} ROWS_UVICORN_PORT=${PORT_ROWS} PORT_REVERSE_PROXY=${PORT_REVERSE_PROXY} DOCKER_COMPOSE=${DOCKER_COMPOSE} $(MAKE) down
|
37 |
-
|
38 |
-
.PHONY: e2e
|
39 |
-
e2e:
|
40 |
-
$(MAKE) -C e2e/ e2e
|
41 |
-
|
42 |
-
# for install, quality checks and tests of every job, lib, service or worker, see the Makefile in the corresponding folder
|
|
|
1 |
+
.PHONY: mypy
|
2 |
+
mypy:
|
3 |
+
mypy --config-file=mypy.ini --no-site-packages .
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
NBSETUP.md
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Set up your notebook environment for Azure Machine Learning
|
2 |
+
|
3 |
+
To run the notebooks in this repository use one of following options.
|
4 |
+
|
5 |
+
## **Option 1: Use Azure Notebooks**
|
6 |
+
Azure Notebooks is a hosted Jupyter-based notebook service in the Azure cloud. Azure Machine Learning Python SDK is already pre-installed in the Azure Notebooks `Python 3.6` kernel.
|
7 |
+
|
8 |
+
1. [![Azure Notebooks](https://notebooks.azure.com/launch.png)](https://aka.ms/aml-clone-azure-notebooks)
|
9 |
+
[Import sample notebooks ](https://aka.ms/aml-clone-azure-notebooks) into Azure Notebooks
|
10 |
+
1. Follow the instructions in the [Configuration](configuration.ipynb) notebook to create and connect to a workspace
|
11 |
+
1. Open one of the sample notebooks
|
12 |
+
|
13 |
+
**Make sure the Azure Notebook kernel is set to `Python 3.6`** when you open a notebook by choosing Kernel > Change Kernel > Python 3.6 from the menus.
|
14 |
+
|
15 |
+
## **Option 2: Use your own notebook server**
|
16 |
+
|
17 |
+
### Quick installation
|
18 |
+
We recommend you create a Python virtual environment ([Miniconda](https://conda.io/miniconda.html) preferred but [virtualenv](https://virtualenv.pypa.io/en/latest/) works too) and install the SDK in it.
|
19 |
+
```sh
|
20 |
+
# install just the base SDK
|
21 |
+
pip install azureml-sdk
|
22 |
+
|
23 |
+
# clone the sample repository
|
24 |
+
git clone https://github.com/Azure/MachineLearningNotebooks.git
|
25 |
+
|
26 |
+
# below steps are optional
|
27 |
+
# install the base SDK, Jupyter notebook server and tensorboard
|
28 |
+
pip install azureml-sdk[notebooks,tensorboard]
|
29 |
+
|
30 |
+
# install model explainability component
|
31 |
+
pip install azureml-sdk[interpret]
|
32 |
+
|
33 |
+
# install automated ml components
|
34 |
+
pip install azureml-sdk[automl]
|
35 |
+
|
36 |
+
# install experimental features (not ready for production use)
|
37 |
+
pip install azureml-sdk[contrib]
|
38 |
+
```
|
39 |
+
|
40 |
+
Note the _extras_ (the keywords inside the square brackets) can be combined. For example:
|
41 |
+
```sh
|
42 |
+
# install base SDK, Jupyter notebook and automated ml components
|
43 |
+
pip install azureml-sdk[notebooks,automl]
|
44 |
+
```
|
45 |
+
|
46 |
+
### Full instructions
|
47 |
+
[Install the Azure Machine Learning SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
|
48 |
+
|
49 |
+
Please make sure you start with the [Configuration](configuration.ipynb) notebook to create and connect to a workspace.
|
50 |
+
|
51 |
+
|
52 |
+
### Video walkthrough:
|
53 |
+
|
54 |
+
[!VIDEO https://youtu.be/VIsXeTuW3FU]
|
55 |
+
|
56 |
+
## **Option 3: Use Docker**
|
57 |
+
|
58 |
+
You need to have Docker engine installed locally and running. Open a command line window and type the following command.
|
59 |
+
|
60 |
+
__Note:__ We use version `1.0.10` below as an example, but you can replace that with any available version number you like.
|
61 |
+
|
62 |
+
```sh
|
63 |
+
# clone the sample repository
|
64 |
+
git clone https://github.com/Azure/MachineLearningNotebooks.git
|
65 |
+
|
66 |
+
# change current directory to the folder
|
67 |
+
# where Dockerfile of the specific SDK version is located.
|
68 |
+
cd MachineLearningNotebooks/Dockerfiles/1.0.10
|
69 |
+
|
70 |
+
# build a Docker image with the a name (azuremlsdk for example)
|
71 |
+
# and a version number tag (1.0.10 for example).
|
72 |
+
# this can take several minutes depending on your computer speed and network bandwidth.
|
73 |
+
docker build . -t azuremlsdk:1.0.10
|
74 |
+
|
75 |
+
# launch the built Docker container which also automatically starts
|
76 |
+
# a Jupyter server instance listening on port 8887 of the host machine
|
77 |
+
docker run -it -p 8887:8887 azuremlsdk:1.0.10
|
78 |
+
```
|
79 |
+
|
80 |
+
Now you can point your browser to http://localhost:8887. We recommend that you start from the `configuration.ipynb` notebook at the root directory.
|
81 |
+
|
82 |
+
If you need additional Azure ML SDK components, you can either modify the Docker files before you build the Docker images to add additional steps, or install them through command line in the live container after you build the Docker image. For example:
|
83 |
+
|
84 |
+
```sh
|
85 |
+
# install the core SDK and automated ml components
|
86 |
+
pip install azureml-sdk[automl]
|
87 |
+
|
88 |
+
# install the core SDK and model explainability component
|
89 |
+
pip install azureml-sdk[interpret]
|
90 |
+
|
91 |
+
# install the core SDK and experimental components
|
92 |
+
pip install azureml-sdk[contrib]
|
93 |
+
```
|
94 |
+
Drag and Drop
|
95 |
+
The image will be downloaded by Fatkun
|
NOTICE
ADDED
@@ -0,0 +1,1648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
NOTICES AND INFORMATION
|
2 |
+
Do Not Translate or Localize
|
3 |
+
|
4 |
+
This software incorporates material from third parties.
|
5 |
+
Microsoft makes certain open source code available at https://3rdpartysource.microsoft.com,
|
6 |
+
or you may send a check or money order for US $5.00, including the product name,
|
7 |
+
the open source component name, platform, and version number, to:
|
8 |
+
|
9 |
+
Source Code Compliance Team
|
10 |
+
Microsoft Corporation
|
11 |
+
One Microsoft Way
|
12 |
+
Redmond, WA 98052
|
13 |
+
USA
|
14 |
+
|
15 |
+
Notwithstanding any other terms, you may reverse engineer this software to the extent
|
16 |
+
required to debug changes to any libraries licensed under the GNU Lesser General Public License.
|
17 |
+
|
18 |
+
---------------------------------------------------------
|
19 |
+
|
20 |
+
vonclites/squeezenet f13555462e835409480829f8fdacac96835161e5 - MIT
|
21 |
+
|
22 |
+
|
23 |
+
Copyright (c) 2018 Domenick Poster
|
24 |
+
|
25 |
+
MIT License
|
26 |
+
|
27 |
+
Copyright (c) 2018 Domenick Poster
|
28 |
+
|
29 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
30 |
+
of this software and associated documentation files (the "Software"), to deal
|
31 |
+
in the Software without restriction, including without limitation the rights
|
32 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
33 |
+
copies of the Software, and to permit persons to whom the Software is
|
34 |
+
furnished to do so, subject to the following conditions:
|
35 |
+
|
36 |
+
The above copyright notice and this permission notice shall be included in all
|
37 |
+
copies or substantial portions of the Software.
|
38 |
+
|
39 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
40 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
41 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
42 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
43 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
44 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
45 |
+
SOFTWARE.
|
46 |
+
|
47 |
+
|
48 |
+
---------------------------------------------------------
|
49 |
+
|
50 |
+
---------------------------------------------------------
|
51 |
+
|
52 |
+
zzh8829/yolov3-tf2 65294d5dc1794b325db5a37b2ed02773ca5bf839 - MIT
|
53 |
+
|
54 |
+
|
55 |
+
Copyright (c) 2019 Zihao Zhang
|
56 |
+
|
57 |
+
MIT License
|
58 |
+
|
59 |
+
Copyright (c) 2019 Zihao Zhang
|
60 |
+
|
61 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
62 |
+
of this software and associated documentation files (the "Software"), to deal
|
63 |
+
in the Software without restriction, including without limitation the rights
|
64 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
65 |
+
copies of the Software, and to permit persons to whom the Software is
|
66 |
+
furnished to do so, subject to the following conditions:
|
67 |
+
|
68 |
+
The above copyright notice and this permission notice shall be included in all
|
69 |
+
copies or substantial portions of the Software.
|
70 |
+
|
71 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
72 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
73 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
74 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
75 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
76 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
77 |
+
SOFTWARE.
|
78 |
+
|
79 |
+
|
80 |
+
---------------------------------------------------------
|
81 |
+
|
82 |
+
---------------------------------------------------------
|
83 |
+
|
84 |
+
tensorflow/models 5f296bbef998e75721818d6b336264ae10f4a77d
|
85 |
+
|
86 |
+
Copyright 2016 The TensorFlow Authors. All rights reserved.
|
87 |
+
|
88 |
+
Apache License
|
89 |
+
Version 2.0, January 2004
|
90 |
+
http://www.apache.org/licenses/
|
91 |
+
|
92 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
93 |
+
|
94 |
+
1. Definitions.
|
95 |
+
|
96 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
97 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
98 |
+
|
99 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
100 |
+
the copyright owner that is granting the License.
|
101 |
+
|
102 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
103 |
+
other entities that control, are controlled by, or are under common
|
104 |
+
control with that entity. For the purposes of this definition,
|
105 |
+
"control" means (i) the power, direct or indirect, to cause the
|
106 |
+
direction or management of such entity, whether by contract or
|
107 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
108 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
109 |
+
|
110 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
111 |
+
exercising permissions granted by this License.
|
112 |
+
|
113 |
+
"Source" form shall mean the preferred form for making modifications,
|
114 |
+
including but not limited to software source code, documentation
|
115 |
+
source, and configuration files.
|
116 |
+
|
117 |
+
"Object" form shall mean any form resulting from mechanical
|
118 |
+
transformation or translation of a Source form, including but
|
119 |
+
not limited to compiled object code, generated documentation,
|
120 |
+
and conversions to other media types.
|
121 |
+
|
122 |
+
"Work" shall mean the work of authorship, whether in Source or
|
123 |
+
Object form, made available under the License, as indicated by a
|
124 |
+
copyright notice that is included in or attached to the work
|
125 |
+
(an example is provided in the Appendix below).
|
126 |
+
|
127 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
128 |
+
form, that is based on (or derived from) the Work and for which the
|
129 |
+
editorial revisions, annotations, elaborations, or other modifications
|
130 |
+
represent, as a whole, an original work of authorship. For the purposes
|
131 |
+
of this License, Derivative Works shall not include works that remain
|
132 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
133 |
+
the Work and Derivative Works thereof.
|
134 |
+
|
135 |
+
"Contribution" shall mean any work of authorship, including
|
136 |
+
the original version of the Work and any modifications or additions
|
137 |
+
to that Work or Derivative Works thereof, that is intentionally
|
138 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
139 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
140 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
141 |
+
means any form of electronic, verbal, or written communication sent
|
142 |
+
to the Licensor or its representatives, including but not limited to
|
143 |
+
communication on electronic mailing lists, source code control systems,
|
144 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
145 |
+
Licensor for the purpose of discussing and improving the Work, but
|
146 |
+
excluding communication that is conspicuously marked or otherwise
|
147 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
148 |
+
|
149 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
150 |
+
on behalf of whom a Contribution has been received by Licensor and
|
151 |
+
subsequently incorporated within the Work.
|
152 |
+
|
153 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
154 |
+
this License, each Contributor hereby grants to You a perpetual,
|
155 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
156 |
+
copyright license to reproduce, prepare Derivative Works of,
|
157 |
+
publicly display, publicly perform, sublicense, and distribute the
|
158 |
+
Work and such Derivative Works in Source or Object form.
|
159 |
+
|
160 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
161 |
+
this License, each Contributor hereby grants to You a perpetual,
|
162 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
163 |
+
(except as stated in this section) patent license to make, have made,
|
164 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
165 |
+
where such license applies only to those patent claims licensable
|
166 |
+
by such Contributor that are necessarily infringed by their
|
167 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
168 |
+
with the Work to which such Contribution(s) was submitted. If You
|
169 |
+
institute patent litigation against any entity (including a
|
170 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
171 |
+
or a Contribution incorporated within the Work constitutes direct
|
172 |
+
or contributory patent infringement, then any patent licenses
|
173 |
+
granted to You under this License for that Work shall terminate
|
174 |
+
as of the date such litigation is filed.
|
175 |
+
|
176 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
177 |
+
Work or Derivative Works thereof in any medium, with or without
|
178 |
+
modifications, and in Source or Object form, provided that You
|
179 |
+
meet the following conditions:
|
180 |
+
|
181 |
+
(a) You must give any other recipients of the Work or
|
182 |
+
Derivative Works a copy of this License; and
|
183 |
+
|
184 |
+
(b) You must cause any modified files to carry prominent notices
|
185 |
+
stating that You changed the files; and
|
186 |
+
|
187 |
+
(c) You must retain, in the Source form of any Derivative Works
|
188 |
+
that You distribute, all copyright, patent, trademark, and
|
189 |
+
attribution notices from the Source form of the Work,
|
190 |
+
excluding those notices that do not pertain to any part of
|
191 |
+
the Derivative Works; and
|
192 |
+
|
193 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
194 |
+
distribution, then any Derivative Works that You distribute must
|
195 |
+
include a readable copy of the attribution notices contained
|
196 |
+
within such NOTICE file, excluding those notices that do not
|
197 |
+
pertain to any part of the Derivative Works, in at least one
|
198 |
+
of the following places: within a NOTICE text file distributed
|
199 |
+
as part of the Derivative Works; within the Source form or
|
200 |
+
documentation, if provided along with the Derivative Works; or,
|
201 |
+
within a display generated by the Derivative Works, if and
|
202 |
+
wherever such third-party notices normally appear. The contents
|
203 |
+
of the NOTICE file are for informational purposes only and
|
204 |
+
do not modify the License. You may add Your own attribution
|
205 |
+
notices within Derivative Works that You distribute, alongside
|
206 |
+
or as an addendum to the NOTICE text from the Work, provided
|
207 |
+
that such additional attribution notices cannot be construed
|
208 |
+
as modifying the License.
|
209 |
+
|
210 |
+
You may add Your own copyright statement to Your modifications and
|
211 |
+
may provide additional or different license terms and conditions
|
212 |
+
for use, reproduction, or distribution of Your modifications, or
|
213 |
+
for any such Derivative Works as a whole, provided Your use,
|
214 |
+
reproduction, and distribution of the Work otherwise complies with
|
215 |
+
the conditions stated in this License.
|
216 |
+
|
217 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
218 |
+
any Contribution intentionally submitted for inclusion in the Work
|
219 |
+
by You to the Licensor shall be under the terms and conditions of
|
220 |
+
this License, without any additional terms or conditions.
|
221 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
222 |
+
the terms of any separate license agreement you may have executed
|
223 |
+
with Licensor regarding such Contributions.
|
224 |
+
|
225 |
+
6. Trademarks. This License does not grant permission to use the trade
|
226 |
+
names, trademarks, service marks, or product names of the Licensor,
|
227 |
+
except as required for reasonable and customary use in describing the
|
228 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
229 |
+
|
230 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
231 |
+
agreed to in writing, Licensor provides the Work (and each
|
232 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
233 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
234 |
+
implied, including, without limitation, any warranties or conditions
|
235 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
236 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
237 |
+
appropriateness of using or redistributing the Work and assume any
|
238 |
+
risks associated with Your exercise of permissions under this License.
|
239 |
+
|
240 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
241 |
+
whether in tort (including negligence), contract, or otherwise,
|
242 |
+
unless required by applicable law (such as deliberate and grossly
|
243 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
244 |
+
liable to You for damages, including any direct, indirect, special,
|
245 |
+
incidental, or consequential damages of any character arising as a
|
246 |
+
result of this License or out of the use or inability to use the
|
247 |
+
Work (including but not limited to damages for loss of goodwill,
|
248 |
+
work stoppage, computer failure or malfunction, or any and all
|
249 |
+
other commercial damages or losses), even if such Contributor
|
250 |
+
has been advised of the possibility of such damages.
|
251 |
+
|
252 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
253 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
254 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
255 |
+
or other liability obligations and/or rights consistent with this
|
256 |
+
License. However, in accepting such obligations, You may act only
|
257 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
258 |
+
of any other Contributor, and only if You agree to indemnify,
|
259 |
+
defend, and hold each Contributor harmless for any liability
|
260 |
+
incurred by, or claims asserted against, such Contributor by reason
|
261 |
+
of your accepting any such warranty or additional liability.
|
262 |
+
|
263 |
+
END OF TERMS AND CONDITIONS
|
264 |
+
|
265 |
+
APPENDIX: How to apply the Apache License to your work.
|
266 |
+
|
267 |
+
To apply the Apache License to your work, attach the following
|
268 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
269 |
+
replaced with your own identifying information. (Don't include
|
270 |
+
the brackets!) The text should be enclosed in the appropriate
|
271 |
+
comment syntax for the file format. We also recommend that a
|
272 |
+
file or class name and description of purpose be included on the
|
273 |
+
same "printed page" as the copyright notice for easier
|
274 |
+
identification within third-party archives.
|
275 |
+
|
276 |
+
Copyright 2016, The Authors.
|
277 |
+
|
278 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
279 |
+
you may not use this file except in compliance with the License.
|
280 |
+
You may obtain a copy of the License at
|
281 |
+
|
282 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
283 |
+
|
284 |
+
Unless required by applicable law or agreed to in writing, software
|
285 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
286 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
287 |
+
See the License for the specific language governing permissions and
|
288 |
+
limitations under the License.
|
289 |
+
|
290 |
+
---------------------------------------------------------
|
291 |
+
|
292 |
+
---------------------------------------------------------
|
293 |
+
|
294 |
+
Derivative Works of FNS Style Transfer model
|
295 |
+
(github.com/microsoft/windows-machine-learning)
|
296 |
+
|
297 |
+
MIT License
|
298 |
+
|
299 |
+
Copyright (c) Microsoft Corporation. All rights reserved.
|
300 |
+
|
301 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
302 |
+
of this software and associated documentation files (the "Software"), to deal
|
303 |
+
in the Software without restriction, including without limitation the rights
|
304 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
305 |
+
copies of the Software, and to permit persons to whom the Software is
|
306 |
+
furnished to do so, subject to the following conditions:
|
307 |
+
|
308 |
+
The above copyright notice and this permission notice shall be included in all
|
309 |
+
copies or substantial portions of the Software.
|
310 |
+
|
311 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
312 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
313 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
314 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
315 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
316 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
317 |
+
SOFTWARE
|
318 |
+
|
319 |
+
---------------------------------------------------------
|
320 |
+
|
321 |
+
Derivative Works of ONNX MNIST and Super Resolution model
|
322 |
+
(github.com/onnx/models)
|
323 |
+
|
324 |
+
MIT License
|
325 |
+
|
326 |
+
Copyright (c) ONNX Project Contributors
|
327 |
+
|
328 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
329 |
+
of this software and associated documentation files (the "Software"), to deal
|
330 |
+
in the Software without restriction, including without limitation the rights
|
331 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
332 |
+
copies of the Software, and to permit persons to whom the Software is
|
333 |
+
furnished to do so, subject to the following conditions:
|
334 |
+
|
335 |
+
The above copyright notice and this permission notice shall be included in all
|
336 |
+
copies or substantial portions of the Software.
|
337 |
+
|
338 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
339 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
340 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
341 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
342 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
343 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
344 |
+
SOFTWARE.
|
345 |
+
|
346 |
+
---------------------------------------------------------
|
347 |
+
|
348 |
+
---------------------------------------------------------
|
349 |
+
|
350 |
+
Derivative Works of ONNX MobileNet and SqueezeNet model
|
351 |
+
(github.com/onnx/models)
|
352 |
+
|
353 |
+
Copyright Ankit Khedia, Abhinav Sharma. All rights reserved.
|
354 |
+
|
355 |
+
Apache License
|
356 |
+
Version 2.0, January 2004
|
357 |
+
http://www.apache.org/licenses/
|
358 |
+
|
359 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
360 |
+
|
361 |
+
1. Definitions.
|
362 |
+
|
363 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
364 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
365 |
+
|
366 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
367 |
+
the copyright owner that is granting the License.
|
368 |
+
|
369 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
370 |
+
other entities that control, are controlled by, or are under common
|
371 |
+
control with that entity. For the purposes of this definition,
|
372 |
+
"control" means (i) the power, direct or indirect, to cause the
|
373 |
+
direction or management of such entity, whether by contract or
|
374 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
375 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
376 |
+
|
377 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
378 |
+
exercising permissions granted by this License.
|
379 |
+
|
380 |
+
"Source" form shall mean the preferred form for making modifications,
|
381 |
+
including but not limited to software source code, documentation
|
382 |
+
source, and configuration files.
|
383 |
+
|
384 |
+
"Object" form shall mean any form resulting from mechanical
|
385 |
+
transformation or translation of a Source form, including but
|
386 |
+
not limited to compiled object code, generated documentation,
|
387 |
+
and conversions to other media types.
|
388 |
+
|
389 |
+
"Work" shall mean the work of authorship, whether in Source or
|
390 |
+
Object form, made available under the License, as indicated by a
|
391 |
+
copyright notice that is included in or attached to the work
|
392 |
+
(an example is provided in the Appendix below).
|
393 |
+
|
394 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
395 |
+
form, that is based on (or derived from) the Work and for which the
|
396 |
+
editorial revisions, annotations, elaborations, or other modifications
|
397 |
+
represent, as a whole, an original work of authorship. For the purposes
|
398 |
+
of this License, Derivative Works shall not include works that remain
|
399 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
400 |
+
the Work and Derivative Works thereof.
|
401 |
+
|
402 |
+
"Contribution" shall mean any work of authorship, including
|
403 |
+
the original version of the Work and any modifications or additions
|
404 |
+
to that Work or Derivative Works thereof, that is intentionally
|
405 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
406 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
407 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
408 |
+
means any form of electronic, verbal, or written communication sent
|
409 |
+
to the Licensor or its representatives, including but not limited to
|
410 |
+
communication on electronic mailing lists, source code control systems,
|
411 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
412 |
+
Licensor for the purpose of discussing and improving the Work, but
|
413 |
+
excluding communication that is conspicuously marked or otherwise
|
414 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
415 |
+
|
416 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
417 |
+
on behalf of whom a Contribution has been received by Licensor and
|
418 |
+
subsequently incorporated within the Work.
|
419 |
+
|
420 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
421 |
+
this License, each Contributor hereby grants to You a perpetual,
|
422 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
423 |
+
copyright license to reproduce, prepare Derivative Works of,
|
424 |
+
publicly display, publicly perform, sublicense, and distribute the
|
425 |
+
Work and such Derivative Works in Source or Object form.
|
426 |
+
|
427 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
428 |
+
this License, each Contributor hereby grants to You a perpetual,
|
429 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
430 |
+
(except as stated in this section) patent license to make, have made,
|
431 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
432 |
+
where such license applies only to those patent claims licensable
|
433 |
+
by such Contributor that are necessarily infringed by their
|
434 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
435 |
+
with the Work to which such Contribution(s) was submitted. If You
|
436 |
+
institute patent litigation against any entity (including a
|
437 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
438 |
+
or a Contribution incorporated within the Work constitutes direct
|
439 |
+
or contributory patent infringement, then any patent licenses
|
440 |
+
granted to You under this License for that Work shall terminate
|
441 |
+
as of the date such litigation is filed.
|
442 |
+
|
443 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
444 |
+
Work or Derivative Works thereof in any medium, with or without
|
445 |
+
modifications, and in Source or Object form, provided that You
|
446 |
+
meet the following conditions:
|
447 |
+
|
448 |
+
(a) You must give any other recipients of the Work or
|
449 |
+
Derivative Works a copy of this License; and
|
450 |
+
|
451 |
+
(b) You must cause any modified files to carry prominent notices
|
452 |
+
stating that You changed the files; and
|
453 |
+
|
454 |
+
(c) You must retain, in the Source form of any Derivative Works
|
455 |
+
that You distribute, all copyright, patent, trademark, and
|
456 |
+
attribution notices from the Source form of the Work,
|
457 |
+
excluding those notices that do not pertain to any part of
|
458 |
+
the Derivative Works; and
|
459 |
+
|
460 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
461 |
+
distribution, then any Derivative Works that You distribute must
|
462 |
+
include a readable copy of the attribution notices contained
|
463 |
+
within such NOTICE file, excluding those notices that do not
|
464 |
+
pertain to any part of the Derivative Works, in at least one
|
465 |
+
of the following places: within a NOTICE text file distributed
|
466 |
+
as part of the Derivative Works; within the Source form or
|
467 |
+
documentation, if provided along with the Derivative Works; or,
|
468 |
+
within a display generated by the Derivative Works, if and
|
469 |
+
wherever such third-party notices normally appear. The contents
|
470 |
+
of the NOTICE file are for informational purposes only and
|
471 |
+
do not modify the License. You may add Your own attribution
|
472 |
+
notices within Derivative Works that You distribute, alongside
|
473 |
+
or as an addendum to the NOTICE text from the Work, provided
|
474 |
+
that such additional attribution notices cannot be construed
|
475 |
+
as modifying the License.
|
476 |
+
|
477 |
+
You may add Your own copyright statement to Your modifications and
|
478 |
+
may provide additional or different license terms and conditions
|
479 |
+
for use, reproduction, or distribution of Your modifications, or
|
480 |
+
for any such Derivative Works as a whole, provided Your use,
|
481 |
+
reproduction, and distribution of the Work otherwise complies with
|
482 |
+
the conditions stated in this License.
|
483 |
+
|
484 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
485 |
+
any Contribution intentionally submitted for inclusion in the Work
|
486 |
+
by You to the Licensor shall be under the terms and conditions of
|
487 |
+
this License, without any additional terms or conditions.
|
488 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
489 |
+
the terms of any separate license agreement you may have executed
|
490 |
+
with Licensor regarding such Contributions.
|
491 |
+
|
492 |
+
6. Trademarks. This License does not grant permission to use the trade
|
493 |
+
names, trademarks, service marks, or product names of the Licensor,
|
494 |
+
except as required for reasonable and customary use in describing the
|
495 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
496 |
+
|
497 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
498 |
+
agreed to in writing, Licensor provides the Work (and each
|
499 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
500 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
501 |
+
implied, including, without limitation, any warranties or conditions
|
502 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
503 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
504 |
+
appropriateness of using or redistributing the Work and assume any
|
505 |
+
risks associated with Your exercise of permissions under this License.
|
506 |
+
|
507 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
508 |
+
whether in tort (including negligence), contract, or otherwise,
|
509 |
+
unless required by applicable law (such as deliberate and grossly
|
510 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
511 |
+
liable to You for damages, including any direct, indirect, special,
|
512 |
+
incidental, or consequential damages of any character arising as a
|
513 |
+
result of this License or out of the use or inability to use the
|
514 |
+
Work (including but not limited to damages for loss of goodwill,
|
515 |
+
work stoppage, computer failure or malfunction, or any and all
|
516 |
+
other commercial damages or losses), even if such Contributor
|
517 |
+
has been advised of the possibility of such damages.
|
518 |
+
|
519 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
520 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
521 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
522 |
+
or other liability obligations and/or rights consistent with this
|
523 |
+
License. However, in accepting such obligations, You may act only
|
524 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
525 |
+
of any other Contributor, and only if You agree to indemnify,
|
526 |
+
defend, and hold each Contributor harmless for any liability
|
527 |
+
incurred by, or claims asserted against, such Contributor by reason
|
528 |
+
of your accepting any such warranty or additional liability.
|
529 |
+
|
530 |
+
END OF TERMS AND CONDITIONS
|
531 |
+
|
532 |
+
APPENDIX: How to apply the Apache License to your work.
|
533 |
+
|
534 |
+
To apply the Apache License to your work, attach the following
|
535 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
536 |
+
replaced with your own identifying information. (Don't include
|
537 |
+
the brackets!) The text should be enclosed in the appropriate
|
538 |
+
comment syntax for the file format. We also recommend that a
|
539 |
+
file or class name and description of purpose be included on the
|
540 |
+
same "printed page" as the copyright notice for easier
|
541 |
+
identification within third-party archives.
|
542 |
+
|
543 |
+
Copyright 2016, The Authors.
|
544 |
+
|
545 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
546 |
+
you may not use this file except in compliance with the License.
|
547 |
+
You may obtain a copy of the License at
|
548 |
+
|
549 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
550 |
+
|
551 |
+
Unless required by applicable law or agreed to in writing, software
|
552 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
553 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
554 |
+
See the License for the specific language governing permissions and
|
555 |
+
limitations under the License.
|
556 |
+
|
557 |
+
---------------------------------------------------------
|
558 |
+
|
559 |
+
keras-team/keras-io 251351d750aef44599dc22f44a6c4edf4a612317
|
560 |
+
|
561 |
+
Apache License
|
562 |
+
Version 2.0, January 2004
|
563 |
+
http://www.apache.org/licenses/
|
564 |
+
|
565 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
566 |
+
|
567 |
+
1. Definitions.
|
568 |
+
|
569 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
570 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
571 |
+
|
572 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
573 |
+
the copyright owner that is granting the License.
|
574 |
+
|
575 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
576 |
+
other entities that control, are controlled by, or are under common
|
577 |
+
control with that entity. For the purposes of this definition,
|
578 |
+
"control" means (i) the power, direct or indirect, to cause the
|
579 |
+
direction or management of such entity, whether by contract or
|
580 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
581 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
582 |
+
|
583 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
584 |
+
exercising permissions granted by this License.
|
585 |
+
|
586 |
+
"Source" form shall mean the preferred form for making modifications,
|
587 |
+
including but not limited to software source code, documentation
|
588 |
+
source, and configuration files.
|
589 |
+
|
590 |
+
"Object" form shall mean any form resulting from mechanical
|
591 |
+
transformation or translation of a Source form, including but
|
592 |
+
not limited to compiled object code, generated documentation,
|
593 |
+
and conversions to other media types.
|
594 |
+
|
595 |
+
"Work" shall mean the work of authorship, whether in Source or
|
596 |
+
Object form, made available under the License, as indicated by a
|
597 |
+
copyright notice that is included in or attached to the work
|
598 |
+
(an example is provided in the Appendix below).
|
599 |
+
|
600 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
601 |
+
form, that is based on (or derived from) the Work and for which the
|
602 |
+
editorial revisions, annotations, elaborations, or other modifications
|
603 |
+
represent, as a whole, an original work of authorship. For the purposes
|
604 |
+
of this License, Derivative Works shall not include works that remain
|
605 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
606 |
+
the Work and Derivative Works thereof.
|
607 |
+
|
608 |
+
"Contribution" shall mean any work of authorship, including
|
609 |
+
the original version of the Work and any modifications or additions
|
610 |
+
to that Work or Derivative Works thereof, that is intentionally
|
611 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
612 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
613 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
614 |
+
means any form of electronic, verbal, or written communication sent
|
615 |
+
to the Licensor or its representatives, including but not limited to
|
616 |
+
communication on electronic mailing lists, source code control systems,
|
617 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
618 |
+
Licensor for the purpose of discussing and improving the Work, but
|
619 |
+
excluding communication that is conspicuously marked or otherwise
|
620 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
621 |
+
|
622 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
623 |
+
on behalf of whom a Contribution has been received by Licensor and
|
624 |
+
subsequently incorporated within the Work.
|
625 |
+
|
626 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
627 |
+
this License, each Contributor hereby grants to You a perpetual,
|
628 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
629 |
+
copyright license to reproduce, prepare Derivative Works of,
|
630 |
+
publicly display, publicly perform, sublicense, and distribute the
|
631 |
+
Work and such Derivative Works in Source or Object form.
|
632 |
+
|
633 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
634 |
+
this License, each Contributor hereby grants to You a perpetual,
|
635 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
636 |
+
(except as stated in this section) patent license to make, have made,
|
637 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
638 |
+
where such license applies only to those patent claims licensable
|
639 |
+
by such Contributor that are necessarily infringed by their
|
640 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
641 |
+
with the Work to which such Contribution(s) was submitted. If You
|
642 |
+
institute patent litigation against any entity (including a
|
643 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
644 |
+
or a Contribution incorporated within the Work constitutes direct
|
645 |
+
or contributory patent infringement, then any patent licenses
|
646 |
+
granted to You under this License for that Work shall terminate
|
647 |
+
as of the date such litigation is filed.
|
648 |
+
|
649 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
650 |
+
Work or Derivative Works thereof in any medium, with or without
|
651 |
+
modifications, and in Source or Object form, provided that You
|
652 |
+
meet the following conditions:
|
653 |
+
|
654 |
+
(a) You must give any other recipients of the Work or
|
655 |
+
Derivative Works a copy of this License; and
|
656 |
+
|
657 |
+
(b) You must cause any modified files to carry prominent notices
|
658 |
+
stating that You changed the files; and
|
659 |
+
|
660 |
+
(c) You must retain, in the Source form of any Derivative Works
|
661 |
+
that You distribute, all copyright, patent, trademark, and
|
662 |
+
attribution notices from the Source form of the Work,
|
663 |
+
excluding those notices that do not pertain to any part of
|
664 |
+
the Derivative Works; and
|
665 |
+
|
666 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
667 |
+
distribution, then any Derivative Works that You distribute must
|
668 |
+
include a readable copy of the attribution notices contained
|
669 |
+
within such NOTICE file, excluding those notices that do not
|
670 |
+
pertain to any part of the Derivative Works, in at least one
|
671 |
+
of the following places: within a NOTICE text file distributed
|
672 |
+
as part of the Derivative Works; within the Source form or
|
673 |
+
documentation, if provided along with the Derivative Works; or,
|
674 |
+
within a display generated by the Derivative Works, if and
|
675 |
+
wherever such third-party notices normally appear. The contents
|
676 |
+
of the NOTICE file are for informational purposes only and
|
677 |
+
do not modify the License. You may add Your own attribution
|
678 |
+
notices within Derivative Works that You distribute, alongside
|
679 |
+
or as an addendum to the NOTICE text from the Work, provided
|
680 |
+
that such additional attribution notices cannot be construed
|
681 |
+
as modifying the License.
|
682 |
+
|
683 |
+
You may add Your own copyright statement to Your modifications and
|
684 |
+
may provide additional or different license terms and conditions
|
685 |
+
for use, reproduction, or distribution of Your modifications, or
|
686 |
+
for any such Derivative Works as a whole, provided Your use,
|
687 |
+
reproduction, and distribution of the Work otherwise complies with
|
688 |
+
the conditions stated in this License.
|
689 |
+
|
690 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
691 |
+
any Contribution intentionally submitted for inclusion in the Work
|
692 |
+
by You to the Licensor shall be under the terms and conditions of
|
693 |
+
this License, without any additional terms or conditions.
|
694 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
695 |
+
the terms of any separate license agreement you may have executed
|
696 |
+
with Licensor regarding such Contributions.
|
697 |
+
|
698 |
+
6. Trademarks. This License does not grant permission to use the trade
|
699 |
+
names, trademarks, service marks, or product names of the Licensor,
|
700 |
+
except as required for reasonable and customary use in describing the
|
701 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
702 |
+
|
703 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
704 |
+
agreed to in writing, Licensor provides the Work (and each
|
705 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
706 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
707 |
+
implied, including, without limitation, any warranties or conditions
|
708 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
709 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
710 |
+
appropriateness of using or redistributing the Work and assume any
|
711 |
+
risks associated with Your exercise of permissions under this License.
|
712 |
+
|
713 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
714 |
+
whether in tort (including negligence), contract, or otherwise,
|
715 |
+
unless required by applicable law (such as deliberate and grossly
|
716 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
717 |
+
liable to You for damages, including any direct, indirect, special,
|
718 |
+
incidental, or consequential damages of any character arising as a
|
719 |
+
result of this License or out of the use or inability to use the
|
720 |
+
Work (including but not limited to damages for loss of goodwill,
|
721 |
+
work stoppage, computer failure or malfunction, or any and all
|
722 |
+
other commercial damages or losses), even if such Contributor
|
723 |
+
has been advised of the possibility of such damages.
|
724 |
+
|
725 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
726 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
727 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
728 |
+
or other liability obligations and/or rights consistent with this
|
729 |
+
License. However, in accepting such obligations, You may act only
|
730 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
731 |
+
of any other Contributor, and only if You agree to indemnify,
|
732 |
+
defend, and hold each Contributor harmless for any liability
|
733 |
+
incurred by, or claims asserted against, such Contributor by reason
|
734 |
+
of your accepting any such warranty or additional liability.
|
735 |
+
|
736 |
+
END OF TERMS AND CONDITIONS
|
737 |
+
|
738 |
+
APPENDIX: How to apply the Apache License to your work.
|
739 |
+
|
740 |
+
To apply the Apache License to your work, attach the following
|
741 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
742 |
+
replaced with your own identifying information. (Don't include
|
743 |
+
the brackets!) The text should be enclosed in the appropriate
|
744 |
+
comment syntax for the file format. We also recommend that a
|
745 |
+
file or class name and description of purpose be included on the
|
746 |
+
same "printed page" as the copyright notice for easier
|
747 |
+
identification within third-party archives.
|
748 |
+
|
749 |
+
Copyright [yyyy] [name of copyright owner]
|
750 |
+
|
751 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
752 |
+
you may not use this file except in compliance with the License.
|
753 |
+
You may obtain a copy of the License at
|
754 |
+
|
755 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
756 |
+
|
757 |
+
Unless required by applicable law or agreed to in writing, software
|
758 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
759 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
760 |
+
See the License for the specific language governing permissions and
|
761 |
+
limitations under the License.
|
762 |
+
|
763 |
+
---------------------------------------------------------
|
764 |
+
|
765 |
+
tensorflow/text bc675d55d49b0c189be55104cfc0132ae489735d
|
766 |
+
|
767 |
+
Copyright 2018 The TensorFlow Authors. All rights reserved.
|
768 |
+
|
769 |
+
Apache License
|
770 |
+
Version 2.0, January 2004
|
771 |
+
http://www.apache.org/licenses/
|
772 |
+
|
773 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
774 |
+
|
775 |
+
1. Definitions.
|
776 |
+
|
777 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
778 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
779 |
+
|
780 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
781 |
+
the copyright owner that is granting the License.
|
782 |
+
|
783 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
784 |
+
other entities that control, are controlled by, or are under common
|
785 |
+
control with that entity. For the purposes of this definition,
|
786 |
+
"control" means (i) the power, direct or indirect, to cause the
|
787 |
+
direction or management of such entity, whether by contract or
|
788 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
789 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
790 |
+
|
791 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
792 |
+
exercising permissions granted by this License.
|
793 |
+
|
794 |
+
"Source" form shall mean the preferred form for making modifications,
|
795 |
+
including but not limited to software source code, documentation
|
796 |
+
source, and configuration files.
|
797 |
+
|
798 |
+
"Object" form shall mean any form resulting from mechanical
|
799 |
+
transformation or translation of a Source form, including but
|
800 |
+
not limited to compiled object code, generated documentation,
|
801 |
+
and conversions to other media types.
|
802 |
+
|
803 |
+
"Work" shall mean the work of authorship, whether in Source or
|
804 |
+
Object form, made available under the License, as indicated by a
|
805 |
+
copyright notice that is included in or attached to the work
|
806 |
+
(an example is provided in the Appendix below).
|
807 |
+
|
808 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
809 |
+
form, that is based on (or derived from) the Work and for which the
|
810 |
+
editorial revisions, annotations, elaborations, or other modifications
|
811 |
+
represent, as a whole, an original work of authorship. For the purposes
|
812 |
+
of this License, Derivative Works shall not include works that remain
|
813 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
814 |
+
the Work and Derivative Works thereof.
|
815 |
+
|
816 |
+
"Contribution" shall mean any work of authorship, including
|
817 |
+
the original version of the Work and any modifications or additions
|
818 |
+
to that Work or Derivative Works thereof, that is intentionally
|
819 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
820 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
821 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
822 |
+
means any form of electronic, verbal, or written communication sent
|
823 |
+
to the Licensor or its representatives, including but not limited to
|
824 |
+
communication on electronic mailing lists, source code control systems,
|
825 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
826 |
+
Licensor for the purpose of discussing and improving the Work, but
|
827 |
+
excluding communication that is conspicuously marked or otherwise
|
828 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
829 |
+
|
830 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
831 |
+
on behalf of whom a Contribution has been received by Licensor and
|
832 |
+
subsequently incorporated within the Work.
|
833 |
+
|
834 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
835 |
+
this License, each Contributor hereby grants to You a perpetual,
|
836 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
837 |
+
copyright license to reproduce, prepare Derivative Works of,
|
838 |
+
publicly display, publicly perform, sublicense, and distribute the
|
839 |
+
Work and such Derivative Works in Source or Object form.
|
840 |
+
|
841 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
842 |
+
this License, each Contributor hereby grants to You a perpetual,
|
843 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
844 |
+
(except as stated in this section) patent license to make, have made,
|
845 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
846 |
+
where such license applies only to those patent claims licensable
|
847 |
+
by such Contributor that are necessarily infringed by their
|
848 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
849 |
+
with the Work to which such Contribution(s) was submitted. If You
|
850 |
+
institute patent litigation against any entity (including a
|
851 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
852 |
+
or a Contribution incorporated within the Work constitutes direct
|
853 |
+
or contributory patent infringement, then any patent licenses
|
854 |
+
granted to You under this License for that Work shall terminate
|
855 |
+
as of the date such litigation is filed.
|
856 |
+
|
857 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
858 |
+
Work or Derivative Works thereof in any medium, with or without
|
859 |
+
modifications, and in Source or Object form, provided that You
|
860 |
+
meet the following conditions:
|
861 |
+
|
862 |
+
(a) You must give any other recipients of the Work or
|
863 |
+
Derivative Works a copy of this License; and
|
864 |
+
|
865 |
+
(b) You must cause any modified files to carry prominent notices
|
866 |
+
stating that You changed the files; and
|
867 |
+
|
868 |
+
(c) You must retain, in the Source form of any Derivative Works
|
869 |
+
that You distribute, all copyright, patent, trademark, and
|
870 |
+
attribution notices from the Source form of the Work,
|
871 |
+
excluding those notices that do not pertain to any part of
|
872 |
+
the Derivative Works; and
|
873 |
+
|
874 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
875 |
+
distribution, then any Derivative Works that You distribute must
|
876 |
+
include a readable copy of the attribution notices contained
|
877 |
+
within such NOTICE file, excluding those notices that do not
|
878 |
+
pertain to any part of the Derivative Works, in at least one
|
879 |
+
of the following places: within a NOTICE text file distributed
|
880 |
+
as part of the Derivative Works; within the Source form or
|
881 |
+
documentation, if provided along with the Derivative Works; or,
|
882 |
+
within a display generated by the Derivative Works, if and
|
883 |
+
wherever such third-party notices normally appear. The contents
|
884 |
+
of the NOTICE file are for informational purposes only and
|
885 |
+
do not modify the License. You may add Your own attribution
|
886 |
+
notices within Derivative Works that You distribute, alongside
|
887 |
+
or as an addendum to the NOTICE text from the Work, provided
|
888 |
+
that such additional attribution notices cannot be construed
|
889 |
+
as modifying the License.
|
890 |
+
|
891 |
+
You may add Your own copyright statement to Your modifications and
|
892 |
+
may provide additional or different license terms and conditions
|
893 |
+
for use, reproduction, or distribution of Your modifications, or
|
894 |
+
for any such Derivative Works as a whole, provided Your use,
|
895 |
+
reproduction, and distribution of the Work otherwise complies with
|
896 |
+
the conditions stated in this License.
|
897 |
+
|
898 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
899 |
+
any Contribution intentionally submitted for inclusion in the Work
|
900 |
+
by You to the Licensor shall be under the terms and conditions of
|
901 |
+
this License, without any additional terms or conditions.
|
902 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
903 |
+
the terms of any separate license agreement you may have executed
|
904 |
+
with Licensor regarding such Contributions.
|
905 |
+
|
906 |
+
6. Trademarks. This License does not grant permission to use the trade
|
907 |
+
names, trademarks, service marks, or product names of the Licensor,
|
908 |
+
except as required for reasonable and customary use in describing the
|
909 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
910 |
+
|
911 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
912 |
+
agreed to in writing, Licensor provides the Work (and each
|
913 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
914 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
915 |
+
implied, including, without limitation, any warranties or conditions
|
916 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
917 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
918 |
+
appropriateness of using or redistributing the Work and assume any
|
919 |
+
risks associated with Your exercise of permissions under this License.
|
920 |
+
|
921 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
922 |
+
whether in tort (including negligence), contract, or otherwise,
|
923 |
+
unless required by applicable law (such as deliberate and grossly
|
924 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
925 |
+
liable to You for damages, including any direct, indirect, special,
|
926 |
+
incidental, or consequential damages of any character arising as a
|
927 |
+
result of this License or out of the use or inability to use the
|
928 |
+
Work (including but not limited to damages for loss of goodwill,
|
929 |
+
work stoppage, computer failure or malfunction, or any and all
|
930 |
+
other commercial damages or losses), even if such Contributor
|
931 |
+
has been advised of the possibility of such damages.
|
932 |
+
|
933 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
934 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
935 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
936 |
+
or other liability obligations and/or rights consistent with this
|
937 |
+
License. However, in accepting such obligations, You may act only
|
938 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
939 |
+
of any other Contributor, and only if You agree to indemnify,
|
940 |
+
defend, and hold each Contributor harmless for any liability
|
941 |
+
incurred by, or claims asserted against, such Contributor by reason
|
942 |
+
of your accepting any such warranty or additional liability.
|
943 |
+
|
944 |
+
END OF TERMS AND CONDITIONS
|
945 |
+
|
946 |
+
APPENDIX: How to apply the Apache License to your work.
|
947 |
+
|
948 |
+
To apply the Apache License to your work, attach the following
|
949 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
950 |
+
replaced with your own identifying information. (Don't include
|
951 |
+
the brackets!) The text should be enclosed in the appropriate
|
952 |
+
comment syntax for the file format. We also recommend that a
|
953 |
+
file or class name and description of purpose be included on the
|
954 |
+
same "printed page" as the copyright notice for easier
|
955 |
+
identification within third-party archives.
|
956 |
+
|
957 |
+
Copyright 2017, The TensorFlow Authors.
|
958 |
+
|
959 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
960 |
+
you may not use this file except in compliance with the License.
|
961 |
+
You may obtain a copy of the License at
|
962 |
+
|
963 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
964 |
+
|
965 |
+
Unless required by applicable law or agreed to in writing, software
|
966 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
967 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
968 |
+
See the License for the specific language governing permissions and
|
969 |
+
limitations under the License.
|
970 |
+
|
971 |
+
---------------------------------------------------------
|
972 |
+
ultralytics/yolov3 1be31704c9c690929e4f6e6d950f40755ef2dcdc
|
973 |
+
|
974 |
+
GNU GENERAL PUBLIC LICENSE
|
975 |
+
Version 3, 29 June 2007
|
976 |
+
|
977 |
+
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
978 |
+
Everyone is permitted to copy and distribute verbatim copies
|
979 |
+
of this license document, but changing it is not allowed.
|
980 |
+
|
981 |
+
Preamble
|
982 |
+
|
983 |
+
The GNU General Public License is a free, copyleft license for
|
984 |
+
software and other kinds of works.
|
985 |
+
|
986 |
+
The licenses for most software and other practical works are designed
|
987 |
+
to take away your freedom to share and change the works. By contrast,
|
988 |
+
the GNU General Public License is intended to guarantee your freedom to
|
989 |
+
share and change all versions of a program--to make sure it remains free
|
990 |
+
software for all its users. We, the Free Software Foundation, use the
|
991 |
+
GNU General Public License for most of our software; it applies also to
|
992 |
+
any other work released this way by its authors. You can apply it to
|
993 |
+
your programs, too.
|
994 |
+
|
995 |
+
When we speak of free software, we are referring to freedom, not
|
996 |
+
price. Our General Public Licenses are designed to make sure that you
|
997 |
+
have the freedom to distribute copies of free software (and charge for
|
998 |
+
them if you wish), that you receive source code or can get it if you
|
999 |
+
want it, that you can change the software or use pieces of it in new
|
1000 |
+
free programs, and that you know you can do these things.
|
1001 |
+
|
1002 |
+
To protect your rights, we need to prevent others from denying you
|
1003 |
+
these rights or asking you to surrender the rights. Therefore, you have
|
1004 |
+
certain responsibilities if you distribute copies of the software, or if
|
1005 |
+
you modify it: responsibilities to respect the freedom of others.
|
1006 |
+
|
1007 |
+
For example, if you distribute copies of such a program, whether
|
1008 |
+
gratis or for a fee, you must pass on to the recipients the same
|
1009 |
+
freedoms that you received. You must make sure that they, too, receive
|
1010 |
+
or can get the source code. And you must show them these terms so they
|
1011 |
+
know their rights.
|
1012 |
+
|
1013 |
+
Developers that use the GNU GPL protect your rights with two steps:
|
1014 |
+
(1) assert copyright on the software, and (2) offer you this License
|
1015 |
+
giving you legal permission to copy, distribute and/or modify it.
|
1016 |
+
|
1017 |
+
For the developers' and authors' protection, the GPL clearly explains
|
1018 |
+
that there is no warranty for this free software. For both users' and
|
1019 |
+
authors' sake, the GPL requires that modified versions be marked as
|
1020 |
+
changed, so that their problems will not be attributed erroneously to
|
1021 |
+
authors of previous versions.
|
1022 |
+
|
1023 |
+
Some devices are designed to deny users access to install or run
|
1024 |
+
modified versions of the software inside them, although the manufacturer
|
1025 |
+
can do so. This is fundamentally incompatible with the aim of
|
1026 |
+
protecting users' freedom to change the software. The systematic
|
1027 |
+
pattern of such abuse occurs in the area of products for individuals to
|
1028 |
+
use, which is precisely where it is most unacceptable. Therefore, we
|
1029 |
+
have designed this version of the GPL to prohibit the practice for those
|
1030 |
+
products. If such problems arise substantially in other domains, we
|
1031 |
+
stand ready to extend this provision to those domains in future versions
|
1032 |
+
of the GPL, as needed to protect the freedom of users.
|
1033 |
+
|
1034 |
+
Finally, every program is threatened constantly by software patents.
|
1035 |
+
States should not allow patents to restrict development and use of
|
1036 |
+
software on general-purpose computers, but in those that do, we wish to
|
1037 |
+
avoid the special danger that patents applied to a free program could
|
1038 |
+
make it effectively proprietary. To prevent this, the GPL assures that
|
1039 |
+
patents cannot be used to render the program non-free.
|
1040 |
+
|
1041 |
+
The precise terms and conditions for copying, distribution and
|
1042 |
+
modification follow.
|
1043 |
+
|
1044 |
+
TERMS AND CONDITIONS
|
1045 |
+
|
1046 |
+
0. Definitions.
|
1047 |
+
|
1048 |
+
"This License" refers to version 3 of the GNU General Public License.
|
1049 |
+
|
1050 |
+
"Copyright" also means copyright-like laws that apply to other kinds of
|
1051 |
+
works, such as semiconductor masks.
|
1052 |
+
|
1053 |
+
"The Program" refers to any copyrightable work licensed under this
|
1054 |
+
License. Each licensee is addressed as "you". "Licensees" and
|
1055 |
+
"recipients" may be individuals or organizations.
|
1056 |
+
|
1057 |
+
To "modify" a work means to copy from or adapt all or part of the work
|
1058 |
+
in a fashion requiring copyright permission, other than the making of an
|
1059 |
+
exact copy. The resulting work is called a "modified version" of the
|
1060 |
+
earlier work or a work "based on" the earlier work.
|
1061 |
+
|
1062 |
+
A "covered work" means either the unmodified Program or a work based
|
1063 |
+
on the Program.
|
1064 |
+
|
1065 |
+
To "propagate" a work means to do anything with it that, without
|
1066 |
+
permission, would make you directly or secondarily liable for
|
1067 |
+
infringement under applicable copyright law, except executing it on a
|
1068 |
+
computer or modifying a private copy. Propagation includes copying,
|
1069 |
+
distribution (with or without modification), making available to the
|
1070 |
+
public, and in some countries other activities as well.
|
1071 |
+
|
1072 |
+
To "convey" a work means any kind of propagation that enables other
|
1073 |
+
parties to make or receive copies. Mere interaction with a user through
|
1074 |
+
a computer network, with no transfer of a copy, is not conveying.
|
1075 |
+
|
1076 |
+
An interactive user interface displays "Appropriate Legal Notices"
|
1077 |
+
to the extent that it includes a convenient and prominently visible
|
1078 |
+
feature that (1) displays an appropriate copyright notice, and (2)
|
1079 |
+
tells the user that there is no warranty for the work (except to the
|
1080 |
+
extent that warranties are provided), that licensees may convey the
|
1081 |
+
work under this License, and how to view a copy of this License. If
|
1082 |
+
the interface presents a list of user commands or options, such as a
|
1083 |
+
menu, a prominent item in the list meets this criterion.
|
1084 |
+
|
1085 |
+
1. Source Code.
|
1086 |
+
|
1087 |
+
The "source code" for a work means the preferred form of the work
|
1088 |
+
for making modifications to it. "Object code" means any non-source
|
1089 |
+
form of a work.
|
1090 |
+
|
1091 |
+
A "Standard Interface" means an interface that either is an official
|
1092 |
+
standard defined by a recognized standards body, or, in the case of
|
1093 |
+
interfaces specified for a particular programming language, one that
|
1094 |
+
is widely used among developers working in that language.
|
1095 |
+
|
1096 |
+
The "System Libraries" of an executable work include anything, other
|
1097 |
+
than the work as a whole, that (a) is included in the normal form of
|
1098 |
+
packaging a Major Component, but which is not part of that Major
|
1099 |
+
Component, and (b) serves only to enable use of the work with that
|
1100 |
+
Major Component, or to implement a Standard Interface for which an
|
1101 |
+
implementation is available to the public in source code form. A
|
1102 |
+
"Major Component", in this context, means a major essential component
|
1103 |
+
(kernel, window system, and so on) of the specific operating system
|
1104 |
+
(if any) on which the executable work runs, or a compiler used to
|
1105 |
+
produce the work, or an object code interpreter used to run it.
|
1106 |
+
|
1107 |
+
The "Corresponding Source" for a work in object code form means all
|
1108 |
+
the source code needed to generate, install, and (for an executable
|
1109 |
+
work) run the object code and to modify the work, including scripts to
|
1110 |
+
control those activities. However, it does not include the work's
|
1111 |
+
System Libraries, or general-purpose tools or generally available free
|
1112 |
+
programs which are used unmodified in performing those activities but
|
1113 |
+
which are not part of the work. For example, Corresponding Source
|
1114 |
+
includes interface definition files associated with source files for
|
1115 |
+
the work, and the source code for shared libraries and dynamically
|
1116 |
+
linked subprograms that the work is specifically designed to require,
|
1117 |
+
such as by intimate data communication or control flow between those
|
1118 |
+
subprograms and other parts of the work.
|
1119 |
+
|
1120 |
+
The Corresponding Source need not include anything that users
|
1121 |
+
can regenerate automatically from other parts of the Corresponding
|
1122 |
+
Source.
|
1123 |
+
|
1124 |
+
The Corresponding Source for a work in source code form is that
|
1125 |
+
same work.
|
1126 |
+
|
1127 |
+
2. Basic Permissions.
|
1128 |
+
|
1129 |
+
All rights granted under this License are granted for the term of
|
1130 |
+
copyright on the Program, and are irrevocable provided the stated
|
1131 |
+
conditions are met. This License explicitly affirms your unlimited
|
1132 |
+
permission to run the unmodified Program. The output from running a
|
1133 |
+
covered work is covered by this License only if the output, given its
|
1134 |
+
content, constitutes a covered work. This License acknowledges your
|
1135 |
+
rights of fair use or other equivalent, as provided by copyright law.
|
1136 |
+
|
1137 |
+
You may make, run and propagate covered works that you do not
|
1138 |
+
convey, without conditions so long as your license otherwise remains
|
1139 |
+
in force. You may convey covered works to others for the sole purpose
|
1140 |
+
of having them make modifications exclusively for you, or provide you
|
1141 |
+
with facilities for running those works, provided that you comply with
|
1142 |
+
the terms of this License in conveying all material for which you do
|
1143 |
+
not control copyright. Those thus making or running the covered works
|
1144 |
+
for you must do so exclusively on your behalf, under your direction
|
1145 |
+
and control, on terms that prohibit them from making any copies of
|
1146 |
+
your copyrighted material outside their relationship with you.
|
1147 |
+
|
1148 |
+
Conveying under any other circumstances is permitted solely under
|
1149 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
1150 |
+
makes it unnecessary.
|
1151 |
+
|
1152 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
1153 |
+
|
1154 |
+
No covered work shall be deemed part of an effective technological
|
1155 |
+
measure under any applicable law fulfilling obligations under article
|
1156 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
1157 |
+
similar laws prohibiting or restricting circumvention of such
|
1158 |
+
measures.
|
1159 |
+
|
1160 |
+
When you convey a covered work, you waive any legal power to forbid
|
1161 |
+
circumvention of technological measures to the extent such circumvention
|
1162 |
+
is effected by exercising rights under this License with respect to
|
1163 |
+
the covered work, and you disclaim any intention to limit operation or
|
1164 |
+
modification of the work as a means of enforcing, against the work's
|
1165 |
+
users, your or third parties' legal rights to forbid circumvention of
|
1166 |
+
technological measures.
|
1167 |
+
|
1168 |
+
4. Conveying Verbatim Copies.
|
1169 |
+
|
1170 |
+
You may convey verbatim copies of the Program's source code as you
|
1171 |
+
receive it, in any medium, provided that you conspicuously and
|
1172 |
+
appropriately publish on each copy an appropriate copyright notice;
|
1173 |
+
keep intact all notices stating that this License and any
|
1174 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
1175 |
+
keep intact all notices of the absence of any warranty; and give all
|
1176 |
+
recipients a copy of this License along with the Program.
|
1177 |
+
|
1178 |
+
You may charge any price or no price for each copy that you convey,
|
1179 |
+
and you may offer support or warranty protection for a fee.
|
1180 |
+
|
1181 |
+
5. Conveying Modified Source Versions.
|
1182 |
+
|
1183 |
+
You may convey a work based on the Program, or the modifications to
|
1184 |
+
produce it from the Program, in the form of source code under the
|
1185 |
+
terms of section 4, provided that you also meet all of these conditions:
|
1186 |
+
|
1187 |
+
a) The work must carry prominent notices stating that you modified
|
1188 |
+
it, and giving a relevant date.
|
1189 |
+
|
1190 |
+
b) The work must carry prominent notices stating that it is
|
1191 |
+
released under this License and any conditions added under section
|
1192 |
+
7. This requirement modifies the requirement in section 4 to
|
1193 |
+
"keep intact all notices".
|
1194 |
+
|
1195 |
+
c) You must license the entire work, as a whole, under this
|
1196 |
+
License to anyone who comes into possession of a copy. This
|
1197 |
+
License will therefore apply, along with any applicable section 7
|
1198 |
+
additional terms, to the whole of the work, and all its parts,
|
1199 |
+
regardless of how they are packaged. This License gives no
|
1200 |
+
permission to license the work in any other way, but it does not
|
1201 |
+
invalidate such permission if you have separately received it.
|
1202 |
+
|
1203 |
+
d) If the work has interactive user interfaces, each must display
|
1204 |
+
Appropriate Legal Notices; however, if the Program has interactive
|
1205 |
+
interfaces that do not display Appropriate Legal Notices, your
|
1206 |
+
work need not make them do so.
|
1207 |
+
|
1208 |
+
A compilation of a covered work with other separate and independent
|
1209 |
+
works, which are not by their nature extensions of the covered work,
|
1210 |
+
and which are not combined with it such as to form a larger program,
|
1211 |
+
in or on a volume of a storage or distribution medium, is called an
|
1212 |
+
"aggregate" if the compilation and its resulting copyright are not
|
1213 |
+
used to limit the access or legal rights of the compilation's users
|
1214 |
+
beyond what the individual works permit. Inclusion of a covered work
|
1215 |
+
in an aggregate does not cause this License to apply to the other
|
1216 |
+
parts of the aggregate.
|
1217 |
+
|
1218 |
+
6. Conveying Non-Source Forms.
|
1219 |
+
|
1220 |
+
You may convey a covered work in object code form under the terms
|
1221 |
+
of sections 4 and 5, provided that you also convey the
|
1222 |
+
machine-readable Corresponding Source under the terms of this License,
|
1223 |
+
in one of these ways:
|
1224 |
+
|
1225 |
+
a) Convey the object code in, or embodied in, a physical product
|
1226 |
+
(including a physical distribution medium), accompanied by the
|
1227 |
+
Corresponding Source fixed on a durable physical medium
|
1228 |
+
customarily used for software interchange.
|
1229 |
+
|
1230 |
+
b) Convey the object code in, or embodied in, a physical product
|
1231 |
+
(including a physical distribution medium), accompanied by a
|
1232 |
+
written offer, valid for at least three years and valid for as
|
1233 |
+
long as you offer spare parts or customer support for that product
|
1234 |
+
model, to give anyone who possesses the object code either (1) a
|
1235 |
+
copy of the Corresponding Source for all the software in the
|
1236 |
+
product that is covered by this License, on a durable physical
|
1237 |
+
medium customarily used for software interchange, for a price no
|
1238 |
+
more than your reasonable cost of physically performing this
|
1239 |
+
conveying of source, or (2) access to copy the
|
1240 |
+
Corresponding Source from a network server at no charge.
|
1241 |
+
|
1242 |
+
c) Convey individual copies of the object code with a copy of the
|
1243 |
+
written offer to provide the Corresponding Source. This
|
1244 |
+
alternative is allowed only occasionally and noncommercially, and
|
1245 |
+
only if you received the object code with such an offer, in accord
|
1246 |
+
with subsection 6b.
|
1247 |
+
|
1248 |
+
d) Convey the object code by offering access from a designated
|
1249 |
+
place (gratis or for a charge), and offer equivalent access to the
|
1250 |
+
Corresponding Source in the same way through the same place at no
|
1251 |
+
further charge. You need not require recipients to copy the
|
1252 |
+
Corresponding Source along with the object code. If the place to
|
1253 |
+
copy the object code is a network server, the Corresponding Source
|
1254 |
+
may be on a different server (operated by you or a third party)
|
1255 |
+
that supports equivalent copying facilities, provided you maintain
|
1256 |
+
clear directions next to the object code saying where to find the
|
1257 |
+
Corresponding Source. Regardless of what server hosts the
|
1258 |
+
Corresponding Source, you remain obligated to ensure that it is
|
1259 |
+
available for as long as needed to satisfy these requirements.
|
1260 |
+
|
1261 |
+
e) Convey the object code using peer-to-peer transmission, provided
|
1262 |
+
you inform other peers where the object code and Corresponding
|
1263 |
+
Source of the work are being offered to the general public at no
|
1264 |
+
charge under subsection 6d.
|
1265 |
+
|
1266 |
+
A separable portion of the object code, whose source code is excluded
|
1267 |
+
from the Corresponding Source as a System Library, need not be
|
1268 |
+
included in conveying the object code work.
|
1269 |
+
|
1270 |
+
A "User Product" is either (1) a "consumer product", which means any
|
1271 |
+
tangible personal property which is normally used for personal, family,
|
1272 |
+
or household purposes, or (2) anything designed or sold for incorporation
|
1273 |
+
into a dwelling. In determining whether a product is a consumer product,
|
1274 |
+
doubtful cases shall be resolved in favor of coverage. For a particular
|
1275 |
+
product received by a particular user, "normally used" refers to a
|
1276 |
+
typical or common use of that class of product, regardless of the status
|
1277 |
+
of the particular user or of the way in which the particular user
|
1278 |
+
actually uses, or expects or is expected to use, the product. A product
|
1279 |
+
is a consumer product regardless of whether the product has substantial
|
1280 |
+
commercial, industrial or non-consumer uses, unless such uses represent
|
1281 |
+
the only significant mode of use of the product.
|
1282 |
+
|
1283 |
+
"Installation Information" for a User Product means any methods,
|
1284 |
+
procedures, authorization keys, or other information required to install
|
1285 |
+
and execute modified versions of a covered work in that User Product from
|
1286 |
+
a modified version of its Corresponding Source. The information must
|
1287 |
+
suffice to ensure that the continued functioning of the modified object
|
1288 |
+
code is in no case prevented or interfered with solely because
|
1289 |
+
modification has been made.
|
1290 |
+
|
1291 |
+
If you convey an object code work under this section in, or with, or
|
1292 |
+
specifically for use in, a User Product, and the conveying occurs as
|
1293 |
+
part of a transaction in which the right of possession and use of the
|
1294 |
+
User Product is transferred to the recipient in perpetuity or for a
|
1295 |
+
fixed term (regardless of how the transaction is characterized), the
|
1296 |
+
Corresponding Source conveyed under this section must be accompanied
|
1297 |
+
by the Installation Information. But this requirement does not apply
|
1298 |
+
if neither you nor any third party retains the ability to install
|
1299 |
+
modified object code on the User Product (for example, the work has
|
1300 |
+
been installed in ROM).
|
1301 |
+
|
1302 |
+
The requirement to provide Installation Information does not include a
|
1303 |
+
requirement to continue to provide support service, warranty, or updates
|
1304 |
+
for a work that has been modified or installed by the recipient, or for
|
1305 |
+
the User Product in which it has been modified or installed. Access to a
|
1306 |
+
network may be denied when the modification itself materially and
|
1307 |
+
adversely affects the operation of the network or violates the rules and
|
1308 |
+
protocols for communication across the network.
|
1309 |
+
|
1310 |
+
Corresponding Source conveyed, and Installation Information provided,
|
1311 |
+
in accord with this section must be in a format that is publicly
|
1312 |
+
documented (and with an implementation available to the public in
|
1313 |
+
source code form), and must require no special password or key for
|
1314 |
+
unpacking, reading or copying.
|
1315 |
+
|
1316 |
+
7. Additional Terms.
|
1317 |
+
|
1318 |
+
"Additional permissions" are terms that supplement the terms of this
|
1319 |
+
License by making exceptions from one or more of its conditions.
|
1320 |
+
Additional permissions that are applicable to the entire Program shall
|
1321 |
+
be treated as though they were included in this License, to the extent
|
1322 |
+
that they are valid under applicable law. If additional permissions
|
1323 |
+
apply only to part of the Program, that part may be used separately
|
1324 |
+
under those permissions, but the entire Program remains governed by
|
1325 |
+
this License without regard to the additional permissions.
|
1326 |
+
|
1327 |
+
When you convey a copy of a covered work, you may at your option
|
1328 |
+
remove any additional permissions from that copy, or from any part of
|
1329 |
+
it. (Additional permissions may be written to require their own
|
1330 |
+
removal in certain cases when you modify the work.) You may place
|
1331 |
+
additional permissions on material, added by you to a covered work,
|
1332 |
+
for which you have or can give appropriate copyright permission.
|
1333 |
+
|
1334 |
+
Notwithstanding any other provision of this License, for material you
|
1335 |
+
add to a covered work, you may (if authorized by the copyright holders of
|
1336 |
+
that material) supplement the terms of this License with terms:
|
1337 |
+
|
1338 |
+
a) Disclaiming warranty or limiting liability differently from the
|
1339 |
+
terms of sections 15 and 16 of this License; or
|
1340 |
+
|
1341 |
+
b) Requiring preservation of specified reasonable legal notices or
|
1342 |
+
author attributions in that material or in the Appropriate Legal
|
1343 |
+
Notices displayed by works containing it; or
|
1344 |
+
|
1345 |
+
c) Prohibiting misrepresentation of the origin of that material, or
|
1346 |
+
requiring that modified versions of such material be marked in
|
1347 |
+
reasonable ways as different from the original version; or
|
1348 |
+
|
1349 |
+
d) Limiting the use for publicity purposes of names of licensors or
|
1350 |
+
authors of the material; or
|
1351 |
+
|
1352 |
+
e) Declining to grant rights under trademark law for use of some
|
1353 |
+
trade names, trademarks, or service marks; or
|
1354 |
+
|
1355 |
+
f) Requiring indemnification of licensors and authors of that
|
1356 |
+
material by anyone who conveys the material (or modified versions of
|
1357 |
+
it) with contractual assumptions of liability to the recipient, for
|
1358 |
+
any liability that these contractual assumptions directly impose on
|
1359 |
+
those licensors and authors.
|
1360 |
+
|
1361 |
+
All other non-permissive additional terms are considered "further
|
1362 |
+
restrictions" within the meaning of section 10. If the Program as you
|
1363 |
+
received it, or any part of it, contains a notice stating that it is
|
1364 |
+
governed by this License along with a term that is a further
|
1365 |
+
restriction, you may remove that term. If a license document contains
|
1366 |
+
a further restriction but permits relicensing or conveying under this
|
1367 |
+
License, you may add to a covered work material governed by the terms
|
1368 |
+
of that license document, provided that the further restriction does
|
1369 |
+
not survive such relicensing or conveying.
|
1370 |
+
|
1371 |
+
If you add terms to a covered work in accord with this section, you
|
1372 |
+
must place, in the relevant source files, a statement of the
|
1373 |
+
additional terms that apply to those files, or a notice indicating
|
1374 |
+
where to find the applicable terms.
|
1375 |
+
|
1376 |
+
Additional terms, permissive or non-permissive, may be stated in the
|
1377 |
+
form of a separately written license, or stated as exceptions;
|
1378 |
+
the above requirements apply either way.
|
1379 |
+
|
1380 |
+
8. Termination.
|
1381 |
+
|
1382 |
+
You may not propagate or modify a covered work except as expressly
|
1383 |
+
provided under this License. Any attempt otherwise to propagate or
|
1384 |
+
modify it is void, and will automatically terminate your rights under
|
1385 |
+
this License (including any patent licenses granted under the third
|
1386 |
+
paragraph of section 11).
|
1387 |
+
|
1388 |
+
However, if you cease all violation of this License, then your
|
1389 |
+
license from a particular copyright holder is reinstated (a)
|
1390 |
+
provisionally, unless and until the copyright holder explicitly and
|
1391 |
+
finally terminates your license, and (b) permanently, if the copyright
|
1392 |
+
holder fails to notify you of the violation by some reasonable means
|
1393 |
+
prior to 60 days after the cessation.
|
1394 |
+
|
1395 |
+
Moreover, your license from a particular copyright holder is
|
1396 |
+
reinstated permanently if the copyright holder notifies you of the
|
1397 |
+
violation by some reasonable means, this is the first time you have
|
1398 |
+
received notice of violation of this License (for any work) from that
|
1399 |
+
copyright holder, and you cure the violation prior to 30 days after
|
1400 |
+
your receipt of the notice.
|
1401 |
+
|
1402 |
+
Termination of your rights under this section does not terminate the
|
1403 |
+
licenses of parties who have received copies or rights from you under
|
1404 |
+
this License. If your rights have been terminated and not permanently
|
1405 |
+
reinstated, you do not qualify to receive new licenses for the same
|
1406 |
+
material under section 10.
|
1407 |
+
|
1408 |
+
9. Acceptance Not Required for Having Copies.
|
1409 |
+
|
1410 |
+
You are not required to accept this License in order to receive or
|
1411 |
+
run a copy of the Program. Ancillary propagation of a covered work
|
1412 |
+
occurring solely as a consequence of using peer-to-peer transmission
|
1413 |
+
to receive a copy likewise does not require acceptance. However,
|
1414 |
+
nothing other than this License grants you permission to propagate or
|
1415 |
+
modify any covered work. These actions infringe copyright if you do
|
1416 |
+
not accept this License. Therefore, by modifying or propagating a
|
1417 |
+
covered work, you indicate your acceptance of this License to do so.
|
1418 |
+
|
1419 |
+
10. Automatic Licensing of Downstream Recipients.
|
1420 |
+
|
1421 |
+
Each time you convey a covered work, the recipient automatically
|
1422 |
+
receives a license from the original licensors, to run, modify and
|
1423 |
+
propagate that work, subject to this License. You are not responsible
|
1424 |
+
for enforcing compliance by third parties with this License.
|
1425 |
+
|
1426 |
+
An "entity transaction" is a transaction transferring control of an
|
1427 |
+
organization, or substantially all assets of one, or subdividing an
|
1428 |
+
organization, or merging organizations. If propagation of a covered
|
1429 |
+
work results from an entity transaction, each party to that
|
1430 |
+
transaction who receives a copy of the work also receives whatever
|
1431 |
+
licenses to the work the party's predecessor in interest had or could
|
1432 |
+
give under the previous paragraph, plus a right to possession of the
|
1433 |
+
Corresponding Source of the work from the predecessor in interest, if
|
1434 |
+
the predecessor has it or can get it with reasonable efforts.
|
1435 |
+
|
1436 |
+
You may not impose any further restrictions on the exercise of the
|
1437 |
+
rights granted or affirmed under this License. For example, you may
|
1438 |
+
not impose a license fee, royalty, or other charge for exercise of
|
1439 |
+
rights granted under this License, and you may not initiate litigation
|
1440 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
1441 |
+
any patent claim is infringed by making, using, selling, offering for
|
1442 |
+
sale, or importing the Program or any portion of it.
|
1443 |
+
|
1444 |
+
11. Patents.
|
1445 |
+
|
1446 |
+
A "contributor" is a copyright holder who authorizes use under this
|
1447 |
+
License of the Program or a work on which the Program is based. The
|
1448 |
+
work thus licensed is called the contributor's "contributor version".
|
1449 |
+
|
1450 |
+
A contributor's "essential patent claims" are all patent claims
|
1451 |
+
owned or controlled by the contributor, whether already acquired or
|
1452 |
+
hereafter acquired, that would be infringed by some manner, permitted
|
1453 |
+
by this License, of making, using, or selling its contributor version,
|
1454 |
+
but do not include claims that would be infringed only as a
|
1455 |
+
consequence of further modification of the contributor version. For
|
1456 |
+
purposes of this definition, "control" includes the right to grant
|
1457 |
+
patent sublicenses in a manner consistent with the requirements of
|
1458 |
+
this License.
|
1459 |
+
|
1460 |
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
1461 |
+
patent license under the contributor's essential patent claims, to
|
1462 |
+
make, use, sell, offer for sale, import and otherwise run, modify and
|
1463 |
+
propagate the contents of its contributor version.
|
1464 |
+
|
1465 |
+
In the following three paragraphs, a "patent license" is any express
|
1466 |
+
agreement or commitment, however denominated, not to enforce a patent
|
1467 |
+
(such as an express permission to practice a patent or covenant not to
|
1468 |
+
sue for patent infringement). To "grant" such a patent license to a
|
1469 |
+
party means to make such an agreement or commitment not to enforce a
|
1470 |
+
patent against the party.
|
1471 |
+
|
1472 |
+
If you convey a covered work, knowingly relying on a patent license,
|
1473 |
+
and the Corresponding Source of the work is not available for anyone
|
1474 |
+
to copy, free of charge and under the terms of this License, through a
|
1475 |
+
publicly available network server or other readily accessible means,
|
1476 |
+
then you must either (1) cause the Corresponding Source to be so
|
1477 |
+
available, or (2) arrange to deprive yourself of the benefit of the
|
1478 |
+
patent license for this particular work, or (3) arrange, in a manner
|
1479 |
+
consistent with the requirements of this License, to extend the patent
|
1480 |
+
license to downstream recipients. "Knowingly relying" means you have
|
1481 |
+
actual knowledge that, but for the patent license, your conveying the
|
1482 |
+
covered work in a country, or your recipient's use of the covered work
|
1483 |
+
in a country, would infringe one or more identifiable patents in that
|
1484 |
+
country that you have reason to believe are valid.
|
1485 |
+
|
1486 |
+
If, pursuant to or in connection with a single transaction or
|
1487 |
+
arrangement, you convey, or propagate by procuring conveyance of, a
|
1488 |
+
covered work, and grant a patent license to some of the parties
|
1489 |
+
receiving the covered work authorizing them to use, propagate, modify
|
1490 |
+
or convey a specific copy of the covered work, then the patent license
|
1491 |
+
you grant is automatically extended to all recipients of the covered
|
1492 |
+
work and works based on it.
|
1493 |
+
|
1494 |
+
A patent license is "discriminatory" if it does not include within
|
1495 |
+
the scope of its coverage, prohibits the exercise of, or is
|
1496 |
+
conditioned on the non-exercise of one or more of the rights that are
|
1497 |
+
specifically granted under this License. You may not convey a covered
|
1498 |
+
work if you are a party to an arrangement with a third party that is
|
1499 |
+
in the business of distributing software, under which you make payment
|
1500 |
+
to the third party based on the extent of your activity of conveying
|
1501 |
+
the work, and under which the third party grants, to any of the
|
1502 |
+
parties who would receive the covered work from you, a discriminatory
|
1503 |
+
patent license (a) in connection with copies of the covered work
|
1504 |
+
conveyed by you (or copies made from those copies), or (b) primarily
|
1505 |
+
for and in connection with specific products or compilations that
|
1506 |
+
contain the covered work, unless you entered into that arrangement,
|
1507 |
+
or that patent license was granted, prior to 28 March 2007.
|
1508 |
+
|
1509 |
+
Nothing in this License shall be construed as excluding or limiting
|
1510 |
+
any implied license or other defenses to infringement that may
|
1511 |
+
otherwise be available to you under applicable patent law.
|
1512 |
+
|
1513 |
+
12. No Surrender of Others' Freedom.
|
1514 |
+
|
1515 |
+
If conditions are imposed on you (whether by court order, agreement or
|
1516 |
+
otherwise) that contradict the conditions of this License, they do not
|
1517 |
+
excuse you from the conditions of this License. If you cannot convey a
|
1518 |
+
covered work so as to satisfy simultaneously your obligations under this
|
1519 |
+
License and any other pertinent obligations, then as a consequence you may
|
1520 |
+
not convey it at all. For example, if you agree to terms that obligate you
|
1521 |
+
to collect a royalty for further conveying from those to whom you convey
|
1522 |
+
the Program, the only way you could satisfy both those terms and this
|
1523 |
+
License would be to refrain entirely from conveying the Program.
|
1524 |
+
|
1525 |
+
13. Use with the GNU Affero General Public License.
|
1526 |
+
|
1527 |
+
Notwithstanding any other provision of this License, you have
|
1528 |
+
permission to link or combine any covered work with a work licensed
|
1529 |
+
under version 3 of the GNU Affero General Public License into a single
|
1530 |
+
combined work, and to convey the resulting work. The terms of this
|
1531 |
+
License will continue to apply to the part which is the covered work,
|
1532 |
+
but the special requirements of the GNU Affero General Public License,
|
1533 |
+
section 13, concerning interaction through a network will apply to the
|
1534 |
+
combination as such.
|
1535 |
+
|
1536 |
+
14. Revised Versions of this License.
|
1537 |
+
|
1538 |
+
The Free Software Foundation may publish revised and/or new versions of
|
1539 |
+
the GNU General Public License from time to time. Such new versions will
|
1540 |
+
be similar in spirit to the present version, but may differ in detail to
|
1541 |
+
address new problems or concerns.
|
1542 |
+
|
1543 |
+
Each version is given a distinguishing version number. If the
|
1544 |
+
Program specifies that a certain numbered version of the GNU General
|
1545 |
+
Public License "or any later version" applies to it, you have the
|
1546 |
+
option of following the terms and conditions either of that numbered
|
1547 |
+
version or of any later version published by the Free Software
|
1548 |
+
Foundation. If the Program does not specify a version number of the
|
1549 |
+
GNU General Public License, you may choose any version ever published
|
1550 |
+
by the Free Software Foundation.
|
1551 |
+
|
1552 |
+
If the Program specifies that a proxy can decide which future
|
1553 |
+
versions of the GNU General Public License can be used, that proxy's
|
1554 |
+
public statement of acceptance of a version permanently authorizes you
|
1555 |
+
to choose that version for the Program.
|
1556 |
+
|
1557 |
+
Later license versions may give you additional or different
|
1558 |
+
permissions. However, no additional obligations are imposed on any
|
1559 |
+
author or copyright holder as a result of your choosing to follow a
|
1560 |
+
later version.
|
1561 |
+
|
1562 |
+
15. Disclaimer of Warranty.
|
1563 |
+
|
1564 |
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
1565 |
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
1566 |
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
1567 |
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
1568 |
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
1569 |
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
1570 |
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
1571 |
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
1572 |
+
|
1573 |
+
16. Limitation of Liability.
|
1574 |
+
|
1575 |
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
1576 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
1577 |
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
1578 |
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
1579 |
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
1580 |
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
1581 |
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
1582 |
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
1583 |
+
SUCH DAMAGES.
|
1584 |
+
|
1585 |
+
17. Interpretation of Sections 15 and 16.
|
1586 |
+
|
1587 |
+
If the disclaimer of warranty and limitation of liability provided
|
1588 |
+
above cannot be given local legal effect according to their terms,
|
1589 |
+
reviewing courts shall apply local law that most closely approximates
|
1590 |
+
an absolute waiver of all civil liability in connection with the
|
1591 |
+
Program, unless a warranty or assumption of liability accompanies a
|
1592 |
+
copy of the Program in return for a fee.
|
1593 |
+
|
1594 |
+
END OF TERMS AND CONDITIONS
|
1595 |
+
|
1596 |
+
How to Apply These Terms to Your New Programs
|
1597 |
+
|
1598 |
+
If you develop a new program, and you want it to be of the greatest
|
1599 |
+
possible use to the public, the best way to achieve this is to make it
|
1600 |
+
free software which everyone can redistribute and change under these terms.
|
1601 |
+
|
1602 |
+
To do so, attach the following notices to the program. It is safest
|
1603 |
+
to attach them to the start of each source file to most effectively
|
1604 |
+
state the exclusion of warranty; and each file should have at least
|
1605 |
+
the "copyright" line and a pointer to where the full notice is found.
|
1606 |
+
|
1607 |
+
<one line to give the program's name and a brief idea of what it does.>
|
1608 |
+
Copyright (C) <year> <name of author>
|
1609 |
+
|
1610 |
+
This program is free software: you can redistribute it and/or modify
|
1611 |
+
it under the terms of the GNU General Public License as published by
|
1612 |
+
the Free Software Foundation, either version 3 of the License, or
|
1613 |
+
(at your option) any later version.
|
1614 |
+
|
1615 |
+
This program is distributed in the hope that it will be useful,
|
1616 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
1617 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
1618 |
+
GNU General Public License for more details.
|
1619 |
+
|
1620 |
+
You should have received a copy of the GNU General Public License
|
1621 |
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
1622 |
+
|
1623 |
+
Also add information on how to contact you by electronic and paper mail.
|
1624 |
+
|
1625 |
+
If the program does terminal interaction, make it output a short
|
1626 |
+
notice like this when it starts in an interactive mode:
|
1627 |
+
|
1628 |
+
<program> Copyright (C) <year> <name of author>
|
1629 |
+
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
1630 |
+
This is free software, and you are welcome to redistribute it
|
1631 |
+
under certain conditions; type `show c' for details.
|
1632 |
+
|
1633 |
+
The hypothetical commands `show w' and `show c' should show the appropriate
|
1634 |
+
parts of the General Public License. Of course, your program's commands
|
1635 |
+
might be different; for a GUI interface, you would use an "about box".
|
1636 |
+
|
1637 |
+
You should also get your employer (if you work as a programmer) or school,
|
1638 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
1639 |
+
For more information on this, and how to apply and follow the GNU GPL, see
|
1640 |
+
<http://www.gnu.org/licenses/>.
|
1641 |
+
|
1642 |
+
The GNU General Public License does not permit incorporating your program
|
1643 |
+
into proprietary programs. If your program is a subroutine library, you
|
1644 |
+
may consider it more useful to permit linking proprietary applications with
|
1645 |
+
the library. If this is what you want to do, use the GNU Lesser General
|
1646 |
+
Public License instead of this License. But first, please read
|
1647 |
+
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
1648 |
+
---------------------------------------------------------
|
Notice
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
Llama 2 is licensed under the LLAMA 2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.
|
README.md
CHANGED
@@ -1,27 +1,45 @@
|
|
1 |
-
#
|
2 |
|
3 |
-
> Integrate into your apps over 10,000 datasets via simple HTTP requests, with pre-processed responses and scalability built-in.
|
4 |
|
5 |
-
|
6 |
|
7 |
-
## Ask for a new feature 🎁
|
8 |
|
9 |
-
|
10 |
|
11 |
-
|
12 |
|
13 |
-
|
14 |
|
15 |
-
|
16 |
|
17 |
-
|
18 |
|
19 |
-
|
|
|
|
|
20 |
|
21 |
-
|
22 |
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
24 |
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
26 |
|
27 |
-
You can also report bugs and propose enhancements on the code, or the documentation, in the [GitHub issues](https://github.com/huggingface/datasets-server/issues).
|
|
|
1 |
+
# Azure Machine Learning Python SDK notebooks
|
2 |
|
|
|
3 |
|
4 |
+
** **With the introduction of AzureML SDK v2, this samples repository for the v1 SDK is now deprecated and will not be monitored or updated. Users are encouraged to visit the [v2 SDK samples repository](https://github.com/Azure/azureml-examples) instead for up-to-date and enhanced examples of how to build, train, and deploy machine learning models with AzureML's newest features.** **
|
5 |
|
|
|
6 |
|
7 |
+
Welcome to the Azure Machine Learning Python SDK notebooks repository!
|
8 |
|
9 |
+
## Getting started
|
10 |
|
11 |
+
These notebooks are recommended for use in an Azure Machine Learning [Compute Instance](https://docs.microsoft.com/azure/machine-learning/concept-compute-instance), where you can run them without any additional set up.
|
12 |
|
13 |
+
However, the notebooks can be run in any development environment with the correct `azureml` packages installed.
|
14 |
|
15 |
+
Install the `azureml.core` Python package:
|
16 |
|
17 |
+
```sh
|
18 |
+
pip install azureml-core
|
19 |
+
```
|
20 |
|
21 |
+
Install additional packages as needed:
|
22 |
|
23 |
+
```sh
|
24 |
+
pip install azureml-mlflow
|
25 |
+
pip install azureml-dataset-runtime
|
26 |
+
pip install azureml-automl-runtime
|
27 |
+
pip install azureml-pipeline
|
28 |
+
pip install azureml-pipeline-steps
|
29 |
+
...
|
30 |
+
```
|
31 |
|
32 |
+
We recommend starting with one of the [quickstarts](tutorials/compute-instance-quickstarts).
|
33 |
+
|
34 |
+
## Contributing
|
35 |
+
|
36 |
+
This repository is a push-only mirror. Pull requests are ignored.
|
37 |
+
|
38 |
+
## Code of Conduct
|
39 |
+
|
40 |
+
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](CODE_OF_CONDUCT.md) for details.
|
41 |
+
|
42 |
+
## Reference
|
43 |
+
|
44 |
+
- [Documentation](https://docs.microsoft.com/azure/machine-learning)
|
45 |
|
|
Releases.md
ADDED
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# DirectML Release History <!-- omit in toc -->
|
2 |
+
|
3 |
+
See [DirectML version history on MSDN](https://docs.microsoft.com/windows/win32/direct3d12/dml-version-history) for more detailed notes.
|
4 |
+
|
5 |
+
| Version | Feature level | First available in OS | Redistributable |
|
6 |
+
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------- |
|
7 |
+
| [1.12.0](#directml-1120) | [DML_FEATURE_LEVEL_6_1](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_6_1) | TBD | [Microsoft.AI.DirectML.1.12.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.12.0) |
|
8 |
+
| [1.11.0](#directml-1110) | [DML_FEATURE_LEVEL_6_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_6_0) | TBD | [Microsoft.AI.DirectML.1.11.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.11.0) |
|
9 |
+
| [1.10.1](#directml-1101) | [DML_FEATURE_LEVEL_5_2](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_2) | TBD | [Microsoft.AI.DirectML.1.10.1](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.10.1) |
|
10 |
+
| [1.10.0](#directml-1100) | [DML_FEATURE_LEVEL_5_2](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_2) | TBD | [Microsoft.AI.DirectML.1.10.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.10.0) |
|
11 |
+
| [1.9.1](#directml-191) | [DML_FEATURE_LEVEL_5_1](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_1) | TBD | [Microsoft.AI.DirectML.1.9.1](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.9.1) |
|
12 |
+
| [1.9.0](#directml-190) | [DML_FEATURE_LEVEL_5_1](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_1) | TBD | [Microsoft.AI.DirectML.1.9.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.9.0) |
|
13 |
+
| [1.8.2](#directml-182) | [DML_FEATURE_LEVEL_5_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_0) | TBD | [Microsoft.AI.DirectML.1.8.2](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.8.2) |
|
14 |
+
| [1.8.1](#directml-181) | [DML_FEATURE_LEVEL_5_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_0) | TBD | [Microsoft.AI.DirectML.1.8.1](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.8.1) |
|
15 |
+
| [1.8.0](#directml-180) | [DML_FEATURE_LEVEL_5_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_5_0) | Windows 11 2022 Update (Build 22621, 22H2) | [Microsoft.AI.DirectML.1.8.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.8.0) |
|
16 |
+
| [1.7.0](#directml-170) | [DML_FEATURE_LEVEL_4_1](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_4_1) | Redistributable only | [Microsoft.AI.DirectML.1.7.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.7.0) |
|
17 |
+
| [1.6.0](#directml-160) | [DML_FEATURE_LEVEL_4_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_4_0) | Windows 11 (Build 10.0.22000; 21H2) | [Microsoft.AI.DirectML.1.6.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.6.0) |
|
18 |
+
| [1.5.1](#directml-151) | [DML_FEATURE_LEVEL_3_1](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_3_1) | Redistributable only | [Microsoft.AI.DirectML.1.5.1](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.5.1) |
|
19 |
+
| [1.5.0](#directml-150) | [DML_FEATURE_LEVEL_3_1](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_3_1) | Redistributable only | [Microsoft.AI.DirectML.1.5.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.5.0) |
|
20 |
+
| [1.4.3](#directml-143) | [DML_FEATURE_LEVEL_3_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_3_0) | Redistributable only | [Microsoft.AI.DirectML.1.4.3](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.4.3) |
|
21 |
+
| [1.4.2](#directml-142) | [DML_FEATURE_LEVEL_3_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_3_0) | Redistributable only | [Microsoft.AI.DirectML.1.4.2](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.4.2) |
|
22 |
+
| [1.4.1](#directml-141) | [DML_FEATURE_LEVEL_3_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_3_0) | Redistributable only | [Microsoft.AI.DirectML.1.4.1](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.4.1) |
|
23 |
+
| [1.4.0](#directml-140) | [DML_FEATURE_LEVEL_3_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_3_0) | Redistributable only | [Microsoft.AI.DirectML.1.4.0](https://www.nuget.org/packages/Microsoft.AI.DirectML/1.4.0) |
|
24 |
+
| [1.1.0](#directml-110) | [DML_FEATURE_LEVEL_2_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_2_0) | Windows 10 May 2020 Update, Version 2004 (Build 10.0.19041, 20H1) | - |
|
25 |
+
| [1.0.0](#directml-100) | [DML_FEATURE_LEVEL_1_0](https://learn.microsoft.com/windows/ai/directml/dml-feature-level-history#dml_feature_level_1_0) | Windows 10 May 2019 Update, Version 1903 (Build 10.0.18362; 19H1) | - |
|
26 |
+
|
27 |
+
# DirectML 1.12.0
|
28 |
+
|
29 |
+
- Introduced DML_FEATURE_LEVEL 6.1
|
30 |
+
- Added DML_OPERATOR_MULTIHEAD_ATTENTION.
|
31 |
+
- Added DML_OPERATOR_ACTIVATION_SOFTMAX and DML_OPERATOR_ACTIVATION_SOFTMAX1 to the list of fusable activations for DML_OPERATOR_GEMM.
|
32 |
+
|
33 |
+
# DirectML 1.11.0
|
34 |
+
|
35 |
+
- Introduced DML_FEATURE_LEVEL 6.0
|
36 |
+
- Added UINT64 and INT64 data type support for DML_OPERATOR_ELEMENT_WISE_DIVIDE, DML_OPERATOR_ELEMENT_WISE_MODULUS_FLOOR, and DML_OPERATOR_ELEMENT_WISE_MODULUS_TRUNCATE.
|
37 |
+
- Added FLOAT16 data type support in ScaleTensor for DML_OPERATOR_ELEMENT_WISE_QUANTIZE_LINEAR
|
38 |
+
- Added FLOAT16 data type support in ScaleTensor and OutputTensor for DML_OPERATOR_ELEMENT_WISE_DEQUANTIZE_LINEAR
|
39 |
+
- Added DML_OPERATOR_ELEMENT_WISE_CLIP operator to the supported fused activation list.
|
40 |
+
- Improved performance of DML_OPERATOR_ACTIVATION_SOFTMAX/1.
|
41 |
+
- Improved performance of DML_OPERATOR_GEMM on certain GPU hardware.
|
42 |
+
- Improved performance of DML_OPERATOR_CONVOLUTION_INTEGER and DML_OPERATOR_QUANTIZED_LINEAR_CONVOLUTION on INT8 tensors.
|
43 |
+
- Fixed bug in DML_OPERATOR_CONVOLUTION_INTEGER and DML_OPERATOR_QUANTIZED_LINEAR_CONVOLUTION on certain GPU hardware.
|
44 |
+
- Fixed crash in DML graph compilation when DML_OPERATOR_ROI_ALIGN_GRAD is used.
|
45 |
+
- Fixed Windows App Certification Kit failure in windows app using DirectML.
|
46 |
+
|
47 |
+
# DirectML 1.10.1
|
48 |
+
|
49 |
+
- Fixed bug in DMLCreateDevice1 that would cause it to incorrectly fail when a minimumFeatureLevel greater than DML_FEATURE_LEVEL_5_0 is supplied.
|
50 |
+
|
51 |
+
# DirectML 1.10.0
|
52 |
+
|
53 |
+
- Introduced DML_FEATURE_LEVEL 5.2:
|
54 |
+
- Expanded supported rank count across certain DML operators.
|
55 |
+
- Enabled DML_OPERATOR_MEAN_VARIANCE_NORMALIZATION/1 to allow optional ScaleTensor regardless of the value of the BiasTensor and vice-versa.
|
56 |
+
- Enabled DMLCreateDevice/1 to use DML_CREATE_DEVICE_FLAG_DEBUG flag even if Direct3D 12 debug layer is not enabled.
|
57 |
+
- Improved DML_OPERATOR_RESAMPLE/1/2 APIs performance significantly.
|
58 |
+
- Improved graph-level layout transformation logic performance significantly.
|
59 |
+
- Fixed DML_OPERATOR_ELEMENT_WISE_MODULUS_TRUNCATE/FLOOR precision issue with non-power of 2 on specific GPUs.
|
60 |
+
- Fixed DML_OPERATOR_CAST casting between 16-bit and 64-bit data types on specific GPUs.
|
61 |
+
- Fixed DML_OPERATOR_ACTIVATION_HARDMAX/1 result for certain OutputTensor stride.
|
62 |
+
|
63 |
+
# DirectML 1.9.1
|
64 |
+
|
65 |
+
- Fixed bug in DML_OPERATOR_ONE_HOT operator when using large uint64 indices.
|
66 |
+
- Improved FP32 convolution performance.
|
67 |
+
- Improved DML_OPERATOR_JOIN operator performance.
|
68 |
+
- Fixed bug with unconnected split nodes when executing in a DML graph.
|
69 |
+
- Fixed identity node optimization when near the end of a DML graph.
|
70 |
+
|
71 |
+
# DirectML 1.9.0
|
72 |
+
|
73 |
+
- Introduces [DML_FEATURE_LEVEL_5_1](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_5_1)
|
74 |
+
- Adds 7 new operators:
|
75 |
+
- DML_OPERATOR_ACTIVATION_GELU
|
76 |
+
- DML_OPERATOR_ACTIVATION_SOFTMAX1
|
77 |
+
- DML_OPERATOR_ACTIVATION_LOG_SOFTMAX1
|
78 |
+
- DML_OPERATOR_ACTIVATION_HARDMAX1
|
79 |
+
- DML_OPERATOR_RESAMPLE2
|
80 |
+
- DML_OPERATOR_RESAMPLE_GRAD1
|
81 |
+
- DML_OPERATOR_DIAGONAL_MATRIX1
|
82 |
+
- GRU significant performance boost.
|
83 |
+
- INT8 convolution performance improvement using DP4A HLSL intrinsics.
|
84 |
+
|
85 |
+
# DirectML 1.8.2
|
86 |
+
|
87 |
+
- Fix Linux-specific execution failure in a TensorFlow graph due to bad alignment related to bitscan forward instruction.
|
88 |
+
- Fix incorrect results in 2D convolution with certain combinations of parameters where group count > 1 ([issue](https://github.com/microsoft/DirectML/issues/234)).
|
89 |
+
|
90 |
+
# DirectML 1.8.1
|
91 |
+
|
92 |
+
- Fix telemetry bug that caused slower CPU execution over time with repeated operator creation.
|
93 |
+
|
94 |
+
# DirectML 1.8.0
|
95 |
+
|
96 |
+
- Introduces [DML_FEATURE_LEVEL_5_0](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_5_0)
|
97 |
+
- Adds 4 new operators:
|
98 |
+
- DML_OPERATOR_ELEMENT_WISE_CLIP1
|
99 |
+
- DML_OPERATOR_ELEMENT_WISE_CLIP_GRAD1
|
100 |
+
- DML_OPERATOR_PADDING1
|
101 |
+
- DML_OPERATOR_ELEMENT_WISE_NEGATE
|
102 |
+
- Supports 64-bit data type for operators: CLIP, CLIP_GRAD, CUMULATIVE_SUMMATION, CUMULATIVE_PRODUCT, ELEMENT_WISE_MAX, ELEMENT_WISE_MIN, REDUCE+REDUCE_FUNCTION_MIN, REDUCE+REDUCE_FUNCTION_MAX, REDUCE+REDUCE_FUNCTION_SUM, REDUCE+REDUCE_FUNCTION_MULTIPLY, REDUCE+REDUCE_FUNCTION_SUM_SQUARE, REDUCE+REDUCE_FUNCTION_L1, PADDING, SPACE_TO_DEPTH, DEPTH_TO_SPACE, TOP_K, ELEMENT_WISE_NEGATE, ELEMENT_WISE_IF, MAX_POOLING, MAX_UNPOOLING, FILL_VALUE_SEQUENCE, REVERSE_SUBSEQUENCES, ROI_ALIGN BatchIndicesTensor.
|
103 |
+
- Bug fixes.
|
104 |
+
|
105 |
+
# DirectML 1.7.0
|
106 |
+
|
107 |
+
- Introduces [DML_FEATURE_LEVEL_4_1](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_4_1)
|
108 |
+
- Adds 3 new operators:
|
109 |
+
- DML_OPERATOR_ROI_ALIGN_GRAD
|
110 |
+
- DML_OPERATOR_BATCH_NORMALIZATION_TRAINING
|
111 |
+
- DML_OPERATOR_BATCH_NORMALIZATION_TRAINING_GRAD
|
112 |
+
- Supports 64-bit data type for operators: ELEMENT_WISE_IDENTITY, ELEMENT_WISE_ADD, ELEMENT_WISE_SUBTRACT, ELEMENT_WISE_MULTIPLY, ELEMENT_WISE_ABS, ELEMENT_WISE_SIGN, ELEMENT_WISE_LOGICAL_EQUALS, ELEMENT_WISE_LOGICAL_GREATER_THAN, ELEMENT_WISE_LOGICAL_LESS_THAN, ELEMENT_WISE_LOGICAL_GREATER_THAN_OR_EQUAL, ELEMENT_WISE_LOGICAL_LESS_THAN_OR_EQUAL, ELEMENT_WISE_BIT_SHIFT_LEFT, ELEMENT_WISE_BIT_SHIFT_RIGHT, ELEMENT_WISE_BIT_AND, ELEMENT_WISE_BIT_OR, ELEMENT_WISE_BIT_NOT, ELEMENT_WISE_BIT_XOR, ELEMENT_WISE_BIT_COUNT, ARGMIN, ARGMAX, CAST, SLICE, SLICE1, SLICE_GRAD, SPLIT, JOIN, GATHER, GATHER_ELEMENTS, GATHER_ND, GATHER_ND1, SCATTER, SCATTER_ND, FILL_VALUE_CONSTANT, TILE, ONE_HOT
|
113 |
+
- Substantial performance improvements for several operators (especially in training scenarios).
|
114 |
+
- Bug fixes.
|
115 |
+
|
116 |
+
# DirectML 1.6.0
|
117 |
+
|
118 |
+
- Introduces [DML_FEATURE_LEVEL_4_0](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_4_0)
|
119 |
+
- Adds 3 new operators:
|
120 |
+
- DML_OPERATOR_ELEMENT_WISE_QUANTIZED_LINEAR_ADD
|
121 |
+
- DML_OPERATOR_DYNAMIC_QUANTIZE_LINEAR
|
122 |
+
- DML_OPERATOR_ROI_ALIGN1
|
123 |
+
- Supports 8D tensors for operators: FILL_VALUE_CONSTANT, FILL_VALUE_SEQUENCE, CUMULATIVE_SUMMATION, CUMULATIVE_PRODUCT, REVERSE_SUBSEQUENCES, ACTIVATION_RELU_GRAD, RANDOM_GENERATOR, NONZERO_COORDINATES, ADAM_OPTIMIZER, DYNAMIC_QUANTIZE_LINEAR, ELEMENT_WISE_QUANTIZED_LINEAR_ADD
|
124 |
+
- Substantial performance improvements for several operators.
|
125 |
+
- Bug fixes.
|
126 |
+
|
127 |
+
# DirectML 1.5.1
|
128 |
+
|
129 |
+
- Adds a workaround for a driver issue that affects some Intel devices. For the best performance it is recommended to use the latest drivers.
|
130 |
+
|
131 |
+
# DirectML 1.5.0
|
132 |
+
|
133 |
+
- Introduces a new feature level: [DML_FEATURE_LEVEL_3_1](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_3_1)
|
134 |
+
- Adds 6 new operators:
|
135 |
+
- DML_OPERATOR_ELEMENT_WISE_ATAN_YX,
|
136 |
+
- DML_OPERATOR_ELEMENT_WISE_CLIP_GRAD,
|
137 |
+
- DML_OPERATOR_ELEMENT_WISE_DIFFERENCE_SQUARE,
|
138 |
+
- DML_OPERATOR_LOCAL_RESPONSE_NORMALIZATION_GRAD,
|
139 |
+
- DML_OPERATOR_CUMULATIVE_PRODUCT,
|
140 |
+
- DML_OPERATOR_BATCH_NORMALIZATION_GRAD,
|
141 |
+
- Supports 8D tensors for operators: ELEMENT_WISE_CLIP_GRAD, ELEMENT_WISE_DIFFERENCE_SQUARE, ELEMENT_WISE_ATAN_YX, CAST, JOIN, PADDING, TILE, TOP_K, BATCH_NORMALIZATION, BATCH_NORMALIZATION_GRAD, LP_NORMALIZATION, TOP_K1, MEAN_VARIANCE_NORMALIZATION1, SLICE_GRAD
|
142 |
+
- Initial support for ARM/ARM64 builds of DirectML.
|
143 |
+
- Substantial performance improvements for several operators.
|
144 |
+
- Bug fixes.
|
145 |
+
|
146 |
+
# DirectML 1.4.3
|
147 |
+
|
148 |
+
- Fix perf issue for NHWC layouts of fused activation with Convolution/GEMM/Normalization.
|
149 |
+
|
150 |
+
# DirectML 1.4.2
|
151 |
+
|
152 |
+
- Add PIX markers support to redist to enable profiling graph at operator level.
|
153 |
+
|
154 |
+
# DirectML 1.4.1
|
155 |
+
|
156 |
+
- Bug fixes related to metacommands:
|
157 |
+
- Fix DML_OPERATOR_BATCH_NORMALIZATION crash when the operator is created with DimensionCount > 5.
|
158 |
+
- Fix DML_OPERATOR_MAX_POOLING1/2 binding order for optional output indices tensor. This did not affect the output, but when running with GPU validation enabled, an error would happen "Supplied parameters size doesn't match enumerated size".
|
159 |
+
|
160 |
+
# DirectML 1.4.0
|
161 |
+
|
162 |
+
- First release of DirectML as a redistributable NuGet package, [Microsoft.AI.DirectML](https://www.nuget.org/packages/Microsoft.AI.DirectML).
|
163 |
+
- Introduces two new feature levels since DirectML 1.1.0: [DML_FEATURE_LEVEL_3_0](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_3_0) and [DML_FEATURE_LEVEL_2_1](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_2_1).
|
164 |
+
- Adds 44 new operators.
|
165 |
+
- The maximum number of tensor dimensions has been increased from 5 to 8 for operators: ELEMENT_WISE_IDENTITY, ELEMENT_WISE_ABS, ELEMENT_WISE_ACOS, ELEMENT_WISE_ADD, ELEMENT_WISE_ASIN, ELEMENT_WISE_ATAN, ELEMENT_WISE_CEIL, ELEMENT_WISE_CLIP, ELEMENT_WISE_COS, ELEMENT_WISE_DIVIDE, ELEMENT_WISE_EXP, ELEMENT_WISE_FLOOR, ELEMENT_WISE_LOG, ELEMENT_WISE_LOGICAL_AND, ELEMENT_WISE_LOGICAL_EQUALS, ELEMENT_WISE_LOGICAL_GREATER_THAN, ELEMENT_WISE_LOGICAL_LESS_THAN, ELEMENT_WISE_LOGICAL_GREATER_THAN_OR_EQUAL, ELEMENT_WISE_LOGICAL_LESS_THAN_OR_EQUAL, ELEMENT_WISE_LOGICAL_NOT, ELEMENT_WISE_LOGICAL_OR, ELEMENT_WISE_LOGICAL_XOR, ELEMENT_WISE_MAX, ELEMENT_WISE_MEAN, ELEMENT_WISE_MIN, ELEMENT_WISE_MULTIPLY, ELEMENT_WISE_POW, ELEMENT_WISE_CONSTANT_POW, ELEMENT_WISE_RECIP, ELEMENT_WISE_SIN, ELEMENT_WISE_SQRT, ELEMENT_WISE_SUBTRACT, ELEMENT_WISE_TAN, ELEMENT_WISE_THRESHOLD, ELEMENT_WISE_QUANTIZE_LINEAR, ELEMENT_WISE_DEQUANTIZE_LINEAR, ARGMIN, ARGMAX, SLICE, SPLIT, GATHER, ELEMENT_WISE_SIGN, ELEMENT_WISE_IS_NAN, ELEMENT_WISE_ERF, ELEMENT_WISE_SINH, ELEMENT_WISE_COSH, ELEMENT_WISE_TANH, ELEMENT_WISE_ASINH, ELEMENT_WISE_ACOSH, ELEMENT_WISE_ATANH, ELEMENT_WISE_IF, ELEMENT_WISE_ADD1, SCATTER, ONE_HOT, ELEMENT_WISE_BIT_SHIFT_LEFT, ELEMENT_WISE_BIT_SHIFT_RIGHT, ELEMENT_WISE_ROUND, ELEMENT_WISE_IS_INFINITY, ELEMENT_WISE_MODULUS_TRUNCATE, ELEMENT_WISE_MODULUS_FLOOR, GATHER_ELEMENTS, GATHER_ND, SCATTER_ND, SLICE1, ELEMENT_WISE_BIT_AND, ELEMENT_WISE_BIT_OR, ELEMENT_WISE_BIT_XOR, ELEMENT_WISE_BIT_NOT, ELEMENT_WISE_BIT_COUNT, GATHER_ND1
|
166 |
+
- Select operators support additional tensor data types.
|
167 |
+
- Substantial performance improvements for several operators.
|
168 |
+
- Bug fixes.
|
169 |
+
|
170 |
+
# DirectML 1.1.0
|
171 |
+
|
172 |
+
- Introduces a new feature level: [DML_FEATURE_LEVEL_2_0](https://docs.microsoft.com/windows/win32/direct3d12/dml-feature-level-history#dml_feature_level_2_0).
|
173 |
+
- Adds 19 new operators.
|
174 |
+
- When binding an input resource for dispatch of an IDMLOperatorInitializer, it is now legal to provide a resource with D3D12_HEAP_TYPE_CUSTOM (in addition to D3D12_HEAP_TYPE_DEFAULT), as long as appropriate heap properties are also set.
|
175 |
+
- Select operators support 8-bit integer tensors.
|
176 |
+
- 5D activation functions now support the use of strides on their input and output tensors.
|
177 |
+
- Substantial performance improvements for several operators.
|
178 |
+
- Bug fixes.
|
179 |
+
|
180 |
+
# DirectML 1.0.0
|
181 |
+
|
182 |
+
- First release of DirectML
|
SECURITY.md
CHANGED
@@ -1,33 +1,41 @@
|
|
1 |
-
|
2 |
|
3 |
-
##
|
4 |
|
5 |
-
|
6 |
-
Use this section to tell people about which versions of your project are
|
7 |
-
currently being supported with security updates.
|
8 |
|
9 |
-
|
10 |
-
| ------- | ------------------ |
|
11 |
-
| 5.1.x | :white_check_mark: |
|
12 |
-
| 5.0.x | :x: |
|
13 |
-
| 4.0.x | :white_check_mark: |
|
14 |
-
| < 4.0 | :x: |
|
15 |
-
-->
|
16 |
|
17 |
-
|
18 |
|
19 |
-
|
20 |
-
| ------- | ------------------ |
|
21 |
-
| 1.x.x | :white_check_mark: |
|
22 |
|
23 |
-
|
24 |
|
25 |
-
|
26 |
-
Use this section to tell people how to report a vulnerability.
|
27 |
|
28 |
-
|
29 |
-
reported vulnerability, what to expect if the vulnerability is accepted or
|
30 |
-
declined, etc.
|
31 |
-
-->
|
32 |
|
33 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.7 BLOCK -->
|
2 |
|
3 |
+
## Security
|
4 |
|
5 |
+
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
|
|
|
|
6 |
|
7 |
+
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
|
9 |
+
## Reporting Security Issues
|
10 |
|
11 |
+
**Please do not report security vulnerabilities through public GitHub issues.**
|
|
|
|
|
12 |
|
13 |
+
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
14 |
|
15 |
+
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
|
|
16 |
|
17 |
+
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
|
|
|
|
|
|
18 |
|
19 |
+
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
20 |
+
|
21 |
+
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
22 |
+
* Full paths of source file(s) related to the manifestation of the issue
|
23 |
+
* The location of the affected source code (tag/branch/commit or direct URL)
|
24 |
+
* Any special configuration required to reproduce the issue
|
25 |
+
* Step-by-step instructions to reproduce the issue
|
26 |
+
* Proof-of-concept or exploit code (if possible)
|
27 |
+
* Impact of the issue, including how an attacker might exploit the issue
|
28 |
+
|
29 |
+
This information will help us triage your report more quickly.
|
30 |
+
|
31 |
+
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
32 |
+
|
33 |
+
## Preferred Languages
|
34 |
+
|
35 |
+
We prefer all communications to be in English.
|
36 |
+
|
37 |
+
## Policy
|
38 |
+
|
39 |
+
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
40 |
+
|
41 |
+
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
USE_POLICY.md
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Llama 2 Acceptable Use Policy
|
2 |
+
|
3 |
+
Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).
|
4 |
+
|
5 |
+
## Prohibited Uses
|
6 |
+
We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
|
7 |
+
|
8 |
+
1. Violate the law or others’ rights, including to:
|
9 |
+
1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
|
10 |
+
1. Violence or terrorism
|
11 |
+
2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
|
12 |
+
3. Human trafficking, exploitation, and sexual violence
|
13 |
+
4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
|
14 |
+
5. Sexual solicitation
|
15 |
+
6. Any other criminal activity
|
16 |
+
2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
|
17 |
+
3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
|
18 |
+
4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
|
19 |
+
5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
|
20 |
+
6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
|
21 |
+
7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
|
22 |
+
|
23 |
+
|
24 |
+
|
25 |
+
2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
|
26 |
+
1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
|
27 |
+
2. Guns and illegal weapons (including weapon development)
|
28 |
+
3. Illegal drugs and regulated/controlled substances
|
29 |
+
4. Operation of critical infrastructure, transportation technologies, or heavy machinery
|
30 |
+
5. Self-harm or harm to others, including suicide, cutting, and eating disorders
|
31 |
+
6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
|
32 |
+
|
33 |
+
|
34 |
+
|
35 |
+
3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
|
36 |
+
1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
|
37 |
+
2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
|
38 |
+
3. Generating, promoting, or further distributing spam
|
39 |
+
4. Impersonating another individual without consent, authorization, or legal right
|
40 |
+
5. Representing that the use of Llama 2 or outputs are human-generated
|
41 |
+
6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
|
42 |
+
4. Fail to appropriately disclose to end users any known dangers of your AI system
|
43 |
+
|
44 |
+
Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
|
45 |
+
|
46 |
+
* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
|
47 |
+
* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
|
48 |
+
* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
|
49 |
+
* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [LlamaUseReport@meta.com](mailto:LlamaUseReport@meta.com)
|
50 |
+
|
__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""__init__."""
|
cgmanifest.json
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"$schema": "https://json.schemastore.org/component-detection-manifest.json",
|
3 |
+
"Registrations": [
|
4 |
+
{
|
5 |
+
"component": {
|
6 |
+
"type": "git",
|
7 |
+
"git": {
|
8 |
+
"commitHash": "5f296bbef998e75721818d6b336264ae10f4a77d",
|
9 |
+
"repositoryUrl": "https://github.com/tensorflow/models"
|
10 |
+
}
|
11 |
+
}
|
12 |
+
},
|
13 |
+
{
|
14 |
+
"component": {
|
15 |
+
"type": "git",
|
16 |
+
"git": {
|
17 |
+
"commitHash": "f13555462e835409480829f8fdacac96835161e5",
|
18 |
+
"repositoryUrl": "https://github.com/vonclites/squeezenet"
|
19 |
+
}
|
20 |
+
}
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"component": {
|
24 |
+
"type": "git",
|
25 |
+
"git": {
|
26 |
+
"commitHash": "65294d5dc1794b325db5a37b2ed02773ca5bf839",
|
27 |
+
"repositoryUrl": "https://github.com/zzh8829/yolov3-tf2"
|
28 |
+
}
|
29 |
+
}
|
30 |
+
},
|
31 |
+
{
|
32 |
+
"component": {
|
33 |
+
"type": "git",
|
34 |
+
"git": {
|
35 |
+
"commitHash": "1be31704c9c690929e4f6e6d950f40755ef2dcdc",
|
36 |
+
"repositoryUrl": "https://github.com/ultralytics/yolov3"
|
37 |
+
}
|
38 |
+
}
|
39 |
+
}
|
40 |
+
],
|
41 |
+
"Version": 1
|
42 |
+
}
|
component-governance.yml
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
resources:
|
2 |
+
- repo: self
|
3 |
+
clean: true
|
4 |
+
|
5 |
+
pool:
|
6 |
+
vmImage: windows-latest
|
7 |
+
|
8 |
+
trigger:
|
9 |
+
batch: true
|
10 |
+
branches:
|
11 |
+
include:
|
12 |
+
- master
|
13 |
+
|
14 |
+
variables:
|
15 |
+
ComponentDetection.Timeout: 1200
|
16 |
+
|
17 |
+
steps:
|
18 |
+
- task: ComponentGovernanceComponentDetection@0
|
compute_metrics.py
ADDED
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""Main script for compute metrics."""
|
5 |
+
|
6 |
+
import azureml.evaluate.mlflow as aml_mlflow
|
7 |
+
import os
|
8 |
+
import glob
|
9 |
+
import pandas as pd
|
10 |
+
import numpy as np
|
11 |
+
from azureml.telemetry.activity import log_activity
|
12 |
+
|
13 |
+
import constants
|
14 |
+
from exceptions import (
|
15 |
+
DataLoaderException,
|
16 |
+
DataValidationException,
|
17 |
+
ComputeMetricsException,
|
18 |
+
swallow_all_exceptions,
|
19 |
+
)
|
20 |
+
from error_definitions import (
|
21 |
+
ComputeMetricsInternalError,
|
22 |
+
BadForecastData,
|
23 |
+
InvalidGroundTruthColumnName,
|
24 |
+
InvalidGroundTruthColumnNameData,
|
25 |
+
InvalidPredictionColumnNameData,
|
26 |
+
BadInputData,
|
27 |
+
)
|
28 |
+
from logging_utilities import custom_dimensions, current_run, get_logger, log_traceback
|
29 |
+
from utils import (
|
30 |
+
ArgumentParser,
|
31 |
+
fetch_compute_metrics_args,
|
32 |
+
check_and_return_if_mltable,
|
33 |
+
read_data,
|
34 |
+
evaluate_predictions
|
35 |
+
)
|
36 |
+
from validation import validate_compute_metrics_args
|
37 |
+
from mlflow.models.evaluation.artifacts import JsonEvaluationArtifact
|
38 |
+
from azureml._common._error_definition.azureml_error import AzureMLError
|
39 |
+
|
40 |
+
# Module-level run/telemetry context, initialized once at import time.
logger = get_logger(name=__name__)
# Tag all telemetry emitted by this component with its name.
custom_dimensions.app_name = constants.TelemetryConstants.COMPUTE_METRICS_NAME
# current_run = TestRun()
# Handles to the current AzureML run, its root pipeline run, and the workspace.
test_run = current_run.run
root_run = current_run.root_run
ws = current_run.workspace
# Point MLflow at the workspace tracking server so metric logging lands there.
aml_mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
# Plain-dict view of the custom dimensions, as required by log_activity().
custom_dims_dict = vars(custom_dimensions)
|
48 |
+
|
49 |
+
|
50 |
+
class ComputeMetricsRunner:
    """Load ground truths / predictions and compute evaluation metrics.

    The runner reads the three inputs (ground truth, predictions and optional
    prediction probabilities) from files, folders of JSON Lines files or
    MLTables, computes metrics via ``evaluate_predictions`` and saves the
    result under ``output``.
    """

    def __init__(self,
                 task: str,
                 ground_truth: str,
                 predictions: str,
                 prediction_probabilities: str,
                 output: str,
                 config: dict = None,
                 is_ground_truth_mltable: str = None,
                 is_predictions_mltable: str = None,
                 is_prediction_probabilities_mltable: str = None,
                 ground_truths_column_name: str = None,
                 predictions_column_name: str = constants.PREDICTIONS_COLUMN_NAME):
        """Initialize the runner.

        Args:
            task (str): Task type (one of ``constants.ALL_TASKS``).
            ground_truth (str): Path/URI of the ground truth data.
            predictions (str): Path/URI of the predictions data.
            prediction_probabilities (str): Path/URI of prediction
                probabilities; an empty string disables probabilities.
            output (str): Output directory for the evaluation results.
            config (dict, optional): Extra metrics configuration. Defaults to None.
            is_ground_truth_mltable (str, optional): Truthy when the ground truth input is an MLTable.
            is_predictions_mltable (str, optional): Truthy when the predictions input is an MLTable.
            is_prediction_probabilities_mltable (str, optional): Truthy when the probabilities input is an MLTable.
            ground_truths_column_name (str, optional): Column holding the ground truth values.
            predictions_column_name (str, optional): Column holding the predicted values.
        """
        self.task = task
        self.ground_truth = ground_truth
        self.predictions = predictions
        # An empty string means "no probabilities provided".
        self.predictions_probabilities = prediction_probabilities if prediction_probabilities != '' else None
        self.output = output
        self.is_ground_truth_mltable = is_ground_truth_mltable
        self.is_predictions_mltable = is_predictions_mltable
        self.is_predictions_probabilities_mltable = is_prediction_probabilities_mltable
        self.ground_truths_column_name = ground_truths_column_name
        self.predictions_column_name = predictions_column_name

        self.label_column_name, self.prediction_column_name = None, None
        # Guard against config=None so the .get() calls below cannot raise
        # AttributeError (previously the documented default of None crashed here).
        self.config = config if config is not None else {}
        self._is_multilabel = self.config.get("multilabel", False)
        self._has_multiple_output = self._is_multilabel or self.task == constants.TASK.NER

    def read_multiple_files(self, path):
        """Read and concatenate all JSON Lines files found under *path*.

        Args:
            path (str): Folder searched recursively for ``*.jsonl`` files.

        Raises:
            DataLoaderException: If no JSON Lines file is found under *path*.

        Returns:
            iterator: A single-element iterator over the concatenated DataFrame,
            matching the shape returned by ``read_data``.
        """
        dfs = []
        for file_path in glob.glob(os.path.join(path, "**", "*.jsonl"), recursive=True):
            df = read_data(file_path=file_path, is_mltable=False)
            # read_data yields an iterator of DataFrames; take the first chunk.
            df = list(df)[0]
            dfs.append(df)
        if not dfs:
            exception = DataLoaderException._with_error(
                AzureMLError.create(BadInputData, error="No JSON Lines files found in folder.")
            )
            log_traceback(exception, logger)
            raise exception
        data = pd.concat(dfs, ignore_index=True)
        return iter([data])

    def load_data(self):
        """Load ground truth, predictions and optional probabilities.

        Populates ``self.ground_truth``, ``self.predictions`` and
        ``self.predictions_probabilities`` with DataFrames filtered down to
        the relevant columns.
        """
        # A plain directory (not an MLTable) is treated as a folder of JSONL files.
        if os.path.isdir(self.ground_truth) and not self.is_ground_truth_mltable:
            ground_truth = self.read_multiple_files(self.ground_truth)
        else:
            ground_truth = read_data(self.ground_truth, is_mltable=self.is_ground_truth_mltable)
        ground_truth = list(ground_truth)[0]
        ground_truth = filter_ground_truths(ground_truth, self.task, self.ground_truths_column_name)

        if os.path.isdir(self.predictions) and not self.is_predictions_mltable:
            predictions = self.read_multiple_files(path=self.predictions)
        else:
            predictions = read_data(self.predictions, is_mltable=self.is_predictions_mltable)
        predictions = list(predictions)[0]
        predictions = filter_predictions(predictions, self.task, self.predictions_column_name)

        predictions_probabilities = None
        if self.predictions_probabilities is not None:
            if os.path.isdir(self.predictions_probabilities) and not self.is_predictions_probabilities_mltable:
                predictions_probabilities = self.read_multiple_files(path=self.predictions_probabilities)
            else:
                predictions_probabilities = read_data(self.predictions_probabilities,
                                                      is_mltable=self.is_predictions_probabilities_mltable)
            predictions_probabilities = list(predictions_probabilities)[0]
        self.ground_truth, self.predictions, self.predictions_probabilities = \
            ground_truth, predictions, predictions_probabilities

    def compute_metrics(self):
        """Compute metrics and return the evaluation result.

        Raises:
            ComputeMetricsException: Wraps any failure raised while computing metrics.
        """
        try:
            ground_true_regressors = None
            self.rename_columns = {}
            if self.task == constants.TASK.FORECASTING:
                # Split the ground truth frame into the target column and the
                # remaining regressor columns.
                ground_truth = self.ground_truth.pop(self.ground_truths_column_name).values
                ground_true_regressors = self.ground_truth
                self.ground_truth = ground_truth
                forecast_origin_column = self.config.get(
                    constants.ForecastingConfigContract.FORECAST_ORIGIN_COLUMN_NAME,
                    constants.ForecastColumns._FORECAST_ORIGIN_COLUMN_DEFAULT)
                if isinstance(self.predictions, pd.DataFrame):
                    # In rolling forecast scenarios we will need to convert the horizon origins to datetime
                    # and give it a default name. Later we will rename this column back.
                    # NOTE(review): origins are read from self.predictions but written into
                    # the regressors frame — presumably both share row order; confirm.
                    if forecast_origin_column in ground_true_regressors:
                        ground_true_regressors[forecast_origin_column] = pd.to_datetime(
                            self.predictions[forecast_origin_column], unit='ms')
                        if forecast_origin_column != constants.ForecastColumns._FORECAST_ORIGIN_COLUMN_DEFAULT:
                            ground_true_regressors.rename({
                                forecast_origin_column: constants.ForecastColumns._FORECAST_ORIGIN_COLUMN_DEFAULT},
                                inplace=True, axis=1)
                            # Remember how to rename the column back in the output artifacts.
                            self.rename_columns = {
                                constants.ForecastColumns._FORECAST_ORIGIN_COLUMN_DEFAULT: forecast_origin_column}
                if self.predictions_column_name:
                    self.predictions = self.predictions.pop(self.predictions_column_name)
            return evaluate_predictions(self.ground_truth, self.predictions, self.predictions_probabilities,
                                        self.task, self.config, ground_true_regressors)
        except Exception as e:
            exception = ComputeMetricsException._with_error(
                AzureMLError.create(ComputeMetricsInternalError, error=repr(e)),
                inner_exception=e
            )
            log_traceback(exception, logger)
            raise exception

    def log_and_write_outputs(self, result):
        """Log scalar metrics and save the evaluation result to ``self.output``."""
        if result:
            scalar_metrics = result.metrics
            logger.info("Computed metrics:")
            for metrics, value in scalar_metrics.items():
                formatted = f"{metrics}: {value}"
                logger.info(formatted)
            # For forecasting, rename the forecast-origin column in the
            # time-series-id distribution artifact back to its user-facing name.
            if self.task == constants.TASK.FORECASTING and self.rename_columns \
                    and 'forecast_time_series_id_distribution_table' in result.artifacts:
                artifact_content = result.artifacts['forecast_time_series_id_distribution_table'].content
                ts_id_table = pd.DataFrame(artifact_content['data'])
                ts_id_table.rename(self.rename_columns, axis=1, inplace=True)
                # Bug fix: orient was misspelled 'recrds', which makes pandas
                # raise ValueError; 'records' is the intended orientation.
                artifact_content['data'] = ts_id_table.to_dict(orient='records')
                new_table = JsonEvaluationArtifact(
                    uri=result.artifacts['forecast_time_series_id_distribution_table'].uri,
                    content=artifact_content)
                result.artifacts['forecast_time_series_id_distribution_table'] = new_table
            result.save(os.path.join(self.output, constants.EVALUATION_RESULTS_PATH))
|
198 |
+
|
199 |
+
|
200 |
+
def filter_ground_truths(data, task_type, column_name=None):
    """Reduce the ground truth DataFrame to a single ground-truth column.

    For Question-Answering, ground truths may arrive as dicts or lists holding
    several answers; only the first answer is kept, since multiple ground
    truths are not supported yet.

    Args:
        data (pd.DataFrame): Loaded ground truth data.
        task_type (str): Task type (see ``constants.TASK``).
        column_name (str, optional): Name of the ground truth column. Defaults to None.

    Raises:
        DataValidationException: When the ground truth column is missing or
            its content cannot be reduced to a single value per row.

    Returns:
        pd.DataFrame: DataFrame containing one ground truth value per row.
    """
    # for Question-Answering checking for multiple columns in ground truth
    if task_type == constants.TASK.QnA and column_name:
        # Bug fix: use positional .iloc[0] instead of label-based [0], which
        # raises KeyError whenever the DataFrame index is not 0-based
        # (consistent with the .iloc[0] access further below).
        first_cell = data[data.columns[0]].iloc[0]
        if isinstance(first_cell, dict) and len(first_cell.keys()) > 1:
            try:
                if isinstance(data, pd.DataFrame):
                    logger.warning("Multiple ground truths are not supported for the \
                        Question and Answering currently.\
                        Considering only the first ground truth in case of multiple values.")
                    data[data.columns[0]] = data[data.columns[0]].apply(
                        lambda x: x[column_name][0] if len(x[column_name]) > 0 else ""
                    )
            except Exception as e:
                exception = DataValidationException._with_error(
                    AzureMLError.create(InvalidGroundTruthColumnNameData),
                    inner_exception=e
                )
                log_traceback(exception, logger)
                raise exception
        if column_name in data.columns:
            if isinstance(data[column_name].iloc[0], (list, np.ndarray)):
                logger.warning("Multiple ground truths are not supported for the Question and Answering currently.\
                    Considering only the first ground truth in case of multiple values.")
                data[column_name] = data[column_name].apply(lambda x: x[0])
    if len(data.columns) > 1:
        if column_name:
            if column_name in data.columns:
                data = data[[column_name]]
            else:
                # The requested column does not exist in the data.
                exception = DataValidationException._with_error(
                    AzureMLError.create(InvalidGroundTruthColumnNameData)
                )
                log_traceback(exception, logger)
                raise exception
        else:
            # Several columns but no column name to disambiguate them.
            exception = DataValidationException._with_error(
                AzureMLError.create(InvalidGroundTruthColumnName)
            )
            log_traceback(exception, logger)
            raise exception

    return data
|
251 |
+
|
252 |
+
|
253 |
+
def filter_predictions(data, task_type, column_name):
    """Reduce the predictions DataFrame to the single prediction column.

    For Question-Answering, predictions may arrive as dicts or lists holding
    several answers; only the first answer is kept, since multiple predictions
    are not supported yet.

    Args:
        data (pd.DataFrame): Loaded predictions data.
        task_type (str): Task type (see ``constants.TASK``).
        column_name (str): Name of the prediction column.

    Raises:
        DataValidationException: When the prediction column is missing or its
            content cannot be reduced to a single value per row.

    Returns:
        pd.DataFrame: DataFrame containing one prediction per row.
    """
    # for Question-Answering checking for multiple columns in ground truth
    if task_type == constants.TASK.QnA:
        # Bug fix: use positional .iloc[0] instead of label-based [0], which
        # raises KeyError whenever the DataFrame index is not 0-based
        # (consistent with the .iloc[0] access further below).
        first_cell = data[data.columns[0]].iloc[0]
        if isinstance(first_cell, dict) and len(first_cell.keys()) > 1:
            try:
                if isinstance(data, pd.DataFrame):
                    logger.warning("Multiple predictions are not supported for the \
                        Question and Answering currently.\
                        Considering only the first prediction in case of multiple values.")
                    data[data.columns[0]] = data[data.columns[0]].apply(
                        lambda x: x[column_name][0] if len(x[column_name]) > 0 else ""
                    )
            except Exception as e:
                exception = DataValidationException._with_error(
                    AzureMLError.create(InvalidPredictionColumnNameData),
                    inner_exception=e
                )
                log_traceback(exception, logger)
                raise exception
        if column_name in data.columns:
            if isinstance(data[column_name].iloc[0], (list, np.ndarray)):
                logger.warning("Multiple predictions are not supported for the Question and Answering currently.\
                    Considering only the first prediction in case of multiple values.")
                data[column_name] = data[column_name].apply(lambda x: x[0])
    if len(data.columns) > 1:
        logger.info("Multiple columns found. Picking only prediction column.")
        if column_name in data.columns:
            data = data[[column_name]]
        else:
            # The requested prediction column does not exist in the data.
            exception = DataValidationException._with_error(
                AzureMLError.create(InvalidPredictionColumnNameData)
            )
            log_traceback(exception, logger)
            raise exception

    return data
|
298 |
+
|
299 |
+
|
300 |
+
@swallow_all_exceptions(logger)
def run():
    """Entry point for compute metrics component.

    Parses CLI arguments, validates them, loads ground truth / prediction
    data, computes metrics and writes the evaluation results to the output
    folder. All exceptions are handled by ``swallow_all_exceptions``.
    """
    parser = ArgumentParser()
    # Inputs
    parser.add_argument("--task", type=str, dest="task", choices=constants.ALL_TASKS)
    parser.add_argument("--ground_truths", type=str, dest="ground_truths", required=True)
    parser.add_argument("--ground_truths_column_name", type=str,
                        dest="ground_truths_column_name", required=False, default=None)
    parser.add_argument("--predictions", type=str, dest="predictions", required=True)
    parser.add_argument("--predictions_column_name", type=str,
                        dest="predictions_column_name", required=False, default=None)
    parser.add_argument("--prediction_probabilities", type=str, dest="prediction_probabilities",
                        required=False, default="")
    parser.add_argument("--config_str", type=str, dest="config_str", required=False, default=None)
    parser.add_argument("--config-file-name", type=str, dest="config_file_name", required=False, default="")

    # Outputs
    parser.add_argument("--output", type=str, dest="output")

    # parse_known_args: tolerate extra args injected by the pipeline runtime.
    args, unknown_args_ = parser.parse_known_args()

    with log_activity(logger, constants.TelemetryConstants.VALIDATION_NAME,
                      custom_dimensions=custom_dims_dict):
        logger.info("Validating arguments: " + repr(args.__dict__))
        validate_compute_metrics_args(args)
        # NOTE(review): only config_str/config_file_name dests are declared above;
        # args.config is presumably synthesized by the custom ArgumentParser
        # (from utils) or by validate_compute_metrics_args — confirm.
        config = fetch_compute_metrics_args(args.config, args.task)

    ground_truths = args.ground_truths
    is_ground_truths_mltable = check_and_return_if_mltable(ground_truths)
    predictions = args.predictions
    is_predictions_mltable = check_and_return_if_mltable(predictions)
    prediction_probabilities = args.prediction_probabilities
    is_prediction_probabilities_mltable = check_and_return_if_mltable(
        args.prediction_probabilities
    )

    if args.task == constants.TASK.FORECASTING:
        if not args.ground_truths_column_name:
            # If the ground true column name was not provided, we will try to take it from the config.
            args.ground_truths_column_name = config.get('ground_truths_column_name', '')
        if not args.predictions_column_name:
            args.predictions_column_name = config.get('predictions_column_name', '')
        # Forecasting cannot proceed without a ground truth column and data.
        if not args.ground_truths_column_name or (not is_ground_truths_mltable and not args.ground_truths):
            exception = DataValidationException._with_error(
                AzureMLError.create(BadForecastData)
            )
            log_traceback(exception, logger)
            raise exception

    runner = ComputeMetricsRunner(
        task=args.task,
        ground_truth=ground_truths,
        predictions=predictions,
        prediction_probabilities=prediction_probabilities,
        output=args.output,
        config=config,
        is_ground_truth_mltable=is_ground_truths_mltable,
        is_predictions_mltable=is_predictions_mltable,
        is_prediction_probabilities_mltable=is_prediction_probabilities_mltable,
        ground_truths_column_name=args.ground_truths_column_name,
        predictions_column_name=args.predictions_column_name
    )

    # Data loading failures are wrapped as DataLoaderException for telemetry.
    with log_activity(logger, activity_name=constants.TelemetryConstants.DATA_LOADING,
                      custom_dimensions=custom_dims_dict):
        try:
            logger.info("Loading Data.")
            runner.load_data()
        except Exception as e:
            exception = DataLoaderException._with_error(
                AzureMLError.create(BadInputData, error=repr(e)),
                inner_exception=e
            )
            log_traceback(exception, logger)
            raise exception

    with log_activity(logger, activity_name=constants.TelemetryConstants.COMPUTE_METRICS_NAME,
                      custom_dimensions=custom_dims_dict):
        logger.info("Computing metrics.")
        result = runner.compute_metrics()

    logger.info("Logging and Saving outputs.")
    runner.log_and_write_outputs(result)

    # Record component properties on the current run and (best-effort) the root run.
    test_run.add_properties(properties=constants.RUN_PROPERTIES)
    try:
        root_run.add_properties(properties=constants.ROOT_RUN_PROPERTIES)
    except Exception:
        # Adding an existing property raises; this is expected on reruns.
        logger.info("PipelineType is already a property at Root Pipeline Run.")
    test_run.complete()
    return
|
392 |
+
|
393 |
+
|
394 |
+
# Script entry point: parse CLI args, compute metrics and write outputs.
if __name__ == "__main__":
    run()
|
conda_dependencies.yaml
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: llm-auto-prompt
|
2 |
+
channels:
|
3 |
+
- conda-forge
|
4 |
+
dependencies:
|
5 |
+
- python=3.8
|
6 |
+
- pip=21.2.4
|
7 |
+
- pip:
|
8 |
+
- azureml-rag[cognitive_search,data_generation]>=0.1.16
|
9 |
+
- matplotlib>=3.3,<3.4
|
10 |
+
- psutil>=5.8,<5.9
|
11 |
+
- tqdm>=4.59,<4.60
|
12 |
+
- pandas>=1.1,<1.2
|
13 |
+
- scipy>=1.5,<1.6
|
14 |
+
- numpy>=1.22
|
15 |
+
- ipykernel~=6.0
|
16 |
+
- azureml-core==1.52.0
|
17 |
+
- azureml-telemetry==1.52.0
|
18 |
+
- azureml-train-core==1.52.0
|
19 |
+
- azureml-mlflow==1.52.0
|
20 |
+
- scikit-learn==0.24.1
|
21 |
+
- mltable
|
22 |
+
- transformers==4.21.1
|
23 |
+
- torch~=1.11
|
24 |
+
- openai~=0.27.4
|
25 |
+
- azure-ai-ml~=1.8.0
|
26 |
+
- bert_score
|
27 |
+
- seaborn
|
28 |
+
- sentence-transformers
|
29 |
+
- tenacity==8.2.2
|
config.json
CHANGED
@@ -1,52 +1,3 @@
|
|
1 |
{
|
2 |
-
|
3 |
-
|
4 |
-
],
|
5 |
-
"attn_config": {
|
6 |
-
"alibi": true,
|
7 |
-
"alibi_bias_max": 8,
|
8 |
-
"attn_impl": "torch",
|
9 |
-
"attn_pdrop": 0,
|
10 |
-
"attn_type": "multihead_attention",
|
11 |
-
"attn_uses_sequence_id": false,
|
12 |
-
"clip_qkv": null,
|
13 |
-
"prefix_lm": false,
|
14 |
-
"qk_ln": false,
|
15 |
-
"softmax_scale": null
|
16 |
-
},
|
17 |
-
"auto_map": {
|
18 |
-
"AutoConfig": "configuration_mpt.MPTConfig",
|
19 |
-
"AutoModelForCausalLM": "modeling_mpt.MPTForCausalLM"
|
20 |
-
},
|
21 |
-
"d_model": 4096,
|
22 |
-
"emb_pdrop": 0,
|
23 |
-
"embedding_fraction": 1.0,
|
24 |
-
"expansion_ratio": 4,
|
25 |
-
"init_config": {
|
26 |
-
"emb_init_std": null,
|
27 |
-
"emb_init_uniform_lim": null,
|
28 |
-
"fan_mode": "fan_in",
|
29 |
-
"init_div_is_residual": true,
|
30 |
-
"init_gain": 0,
|
31 |
-
"init_nonlinearity": "relu",
|
32 |
-
"init_std": 0.02,
|
33 |
-
"name": "kaiming_normal_",
|
34 |
-
"verbose": 0
|
35 |
-
},
|
36 |
-
"init_device": "cpu",
|
37 |
-
"learned_pos_emb": true,
|
38 |
-
"logit_scale": null,
|
39 |
-
"max_seq_len": 2048,
|
40 |
-
"model_type": "mpt",
|
41 |
-
"n_heads": 32,
|
42 |
-
"n_layers": 32,
|
43 |
-
"no_bias": true,
|
44 |
-
"norm_type": "low_precision_layernorm",
|
45 |
-
"resid_pdrop": 0,
|
46 |
-
"tokenizer_name": "EleutherAI/gpt-neox-20b",
|
47 |
-
"torch_dtype": "bfloat16",
|
48 |
-
"transformers_version": "4.28.1",
|
49 |
-
"use_cache": false,
|
50 |
-
"verbose": 0,
|
51 |
-
"vocab_size": 50432
|
52 |
-
}
|
|
|
1 |
{
|
2 |
+
"model_type": "llama"
|
3 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
configuration.ipynb
ADDED
@@ -0,0 +1,389 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "markdown",
|
5 |
+
"metadata": {},
|
6 |
+
"source": [
|
7 |
+
"Copyright (c) Microsoft Corporation. All rights reserved.\n",
|
8 |
+
"\n",
|
9 |
+
"Licensed under the MIT License."
|
10 |
+
]
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"cell_type": "markdown",
|
14 |
+
"metadata": {},
|
15 |
+
"source": [
|
16 |
+
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/configuration.png)"
|
17 |
+
]
|
18 |
+
},
|
19 |
+
{
|
20 |
+
"cell_type": "markdown",
|
21 |
+
"metadata": {},
|
22 |
+
"source": [
|
23 |
+
"# Configuration\n",
|
24 |
+
"\n",
|
25 |
+
"_**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_\n",
|
26 |
+
"\n",
|
27 |
+
"---\n",
|
28 |
+
"---\n",
|
29 |
+
"\n",
|
30 |
+
"## Table of Contents\n",
|
31 |
+
"\n",
|
32 |
+
"1. [Introduction](#Introduction)\n",
|
33 |
+
" 1. What is an Azure Machine Learning workspace\n",
|
34 |
+
"1. [Setup](#Setup)\n",
|
35 |
+
" 1. Azure subscription\n",
|
36 |
+
" 1. Azure ML SDK and other library installation\n",
|
37 |
+
" 1. Azure Container Instance registration\n",
|
38 |
+
"1. [Configure your Azure ML Workspace](#Configure%20your%20Azure%20ML%20workspace)\n",
|
39 |
+
" 1. Workspace parameters\n",
|
40 |
+
" 1. Access your workspace\n",
|
41 |
+
" 1. Create a new workspace\n",
|
42 |
+
" 1. Create compute resources\n",
|
43 |
+
"1. [Next steps](#Next%20steps)\n",
|
44 |
+
"\n",
|
45 |
+
"---\n",
|
46 |
+
"\n",
|
47 |
+
"## Introduction\n",
|
48 |
+
"\n",
|
49 |
+
"This notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.\n",
|
50 |
+
"\n",
|
51 |
+
"Typically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.\n",
|
52 |
+
"\n",
|
53 |
+
"In this notebook you will\n",
|
54 |
+
"* Learn about getting an Azure subscription\n",
|
55 |
+
"* Specify your workspace parameters\n",
|
56 |
+
"* Access or create your workspace\n",
|
57 |
+
"* Add a default compute cluster for your workspace\n",
|
58 |
+
"\n",
|
59 |
+
"### What is an Azure Machine Learning workspace\n",
|
60 |
+
"\n",
|
61 |
+
"An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models."
|
62 |
+
]
|
63 |
+
},
|
64 |
+
{
|
65 |
+
"cell_type": "markdown",
|
66 |
+
"metadata": {},
|
67 |
+
"source": [
|
68 |
+
"## Setup\n",
|
69 |
+
"\n",
|
70 |
+
"This section describes activities required before you can access any Azure ML services functionality."
|
71 |
+
]
|
72 |
+
},
|
73 |
+
{
|
74 |
+
"cell_type": "markdown",
|
75 |
+
"metadata": {},
|
76 |
+
"source": [
|
77 |
+
"### 1. Azure Subscription\n",
|
78 |
+
"\n",
|
79 |
+
"In order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces.\n",
|
80 |
+
"\n",
|
81 |
+
"### 2. Azure ML SDK and other library installation\n",
|
82 |
+
"\n",
|
83 |
+
"If you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.\n",
|
84 |
+
"\n",
|
85 |
+
"Also install following libraries to your environment. Many of the example notebooks depend on them\n",
|
86 |
+
"\n",
|
87 |
+
"```\n",
|
88 |
+
"(myenv) $ conda install -y matplotlib tqdm scikit-learn\n",
|
89 |
+
"```\n",
|
90 |
+
"\n",
|
91 |
+
"Once installation is complete, the following cell checks the Azure ML SDK version:"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"cell_type": "code",
|
96 |
+
"execution_count": null,
|
97 |
+
"metadata": {
|
98 |
+
"tags": [
|
99 |
+
"install"
|
100 |
+
]
|
101 |
+
},
|
102 |
+
"outputs": [],
|
103 |
+
"source": [
|
104 |
+
"import azureml.core\n",
|
105 |
+
"\n",
|
106 |
+
"print(\"This notebook was created using version 1.51.0 of the Azure ML SDK\")\n",
|
107 |
+
"print(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")"
|
108 |
+
]
|
109 |
+
},
|
110 |
+
{
|
111 |
+
"cell_type": "markdown",
|
112 |
+
"metadata": {},
|
113 |
+
"source": [
|
114 |
+
"If you are using an older version of the SDK then this notebook was created using, you should upgrade your SDK.\n",
|
115 |
+
"\n",
|
116 |
+
"### 3. Azure Container Instance registration\n",
|
117 |
+
"Azure Machine Learning uses of [Azure Container Instance (ACI)](https://azure.microsoft.com/services/container-instances) to deploy dev/test web services. An Azure subscription needs to be registered to use ACI. If you or the subscription owner have not yet registered ACI on your subscription, you will need to use the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and execute the following commands. Note that if you ran through the AML [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) you have already registered ACI. \n",
|
118 |
+
"\n",
|
119 |
+
"```shell\n",
|
120 |
+
"# check to see if ACI is already registered\n",
|
121 |
+
"(myenv) $ az provider show -n Microsoft.ContainerInstance -o table\n",
|
122 |
+
"\n",
|
123 |
+
"# if ACI is not registered, run this command.\n",
|
124 |
+
"# note you need to be the subscription owner in order to execute this command successfully.\n",
|
125 |
+
"(myenv) $ az provider register -n Microsoft.ContainerInstance\n",
|
126 |
+
"```\n",
|
127 |
+
"\n",
|
128 |
+
"---"
|
129 |
+
]
|
130 |
+
},
|
131 |
+
{
|
132 |
+
"cell_type": "markdown",
|
133 |
+
"metadata": {},
|
134 |
+
"source": [
|
135 |
+
"## Configure your Azure ML workspace\n",
|
136 |
+
"\n",
|
137 |
+
"### Workspace parameters\n",
|
138 |
+
"\n",
|
139 |
+
"To use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:\n",
|
140 |
+
"* Your subscription id\n",
|
141 |
+
"* A resource group name\n",
|
142 |
+
"* (optional) The region that will host your workspace\n",
|
143 |
+
"* A name for your workspace\n",
|
144 |
+
"\n",
|
145 |
+
"You can get your subscription ID from the [Azure portal](https://portal.azure.com).\n",
|
146 |
+
"\n",
|
147 |
+
"You will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see what resource groups to which you have access, or create a new one in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.\n",
|
148 |
+
"\n",
|
149 |
+
"The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.\n",
|
150 |
+
"\n",
|
151 |
+
"The name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.\n",
|
152 |
+
"\n",
|
153 |
+
"The following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values. \n",
|
154 |
+
"\n",
|
155 |
+
"If you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.\n",
|
156 |
+
"\n",
|
157 |
+
"Replace the default values in the cell below with your workspace parameters"
|
158 |
+
]
|
159 |
+
},
|
160 |
+
{
|
161 |
+
"cell_type": "code",
|
162 |
+
"execution_count": null,
|
163 |
+
"metadata": {},
|
164 |
+
"outputs": [],
|
165 |
+
"source": [
|
166 |
+
"import os\n",
|
167 |
+
"\n",
|
168 |
+
"subscription_id = os.getenv(\"SUBSCRIPTION_ID\", default=\"<my-subscription-id>\")\n",
|
169 |
+
"resource_group = os.getenv(\"RESOURCE_GROUP\", default=\"<my-resource-group>\")\n",
|
170 |
+
"workspace_name = os.getenv(\"WORKSPACE_NAME\", default=\"<my-workspace-name>\")\n",
|
171 |
+
"workspace_region = os.getenv(\"WORKSPACE_REGION\", default=\"eastus2\")"
|
172 |
+
]
|
173 |
+
},
|
174 |
+
{
|
175 |
+
"cell_type": "markdown",
|
176 |
+
"metadata": {},
|
177 |
+
"source": [
|
178 |
+
"### Access your workspace\n",
|
179 |
+
"\n",
|
180 |
+
"The following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it. "
|
181 |
+
]
|
182 |
+
},
|
183 |
+
{
|
184 |
+
"cell_type": "code",
|
185 |
+
"execution_count": null,
|
186 |
+
"metadata": {},
|
187 |
+
"outputs": [],
|
188 |
+
"source": [
|
189 |
+
"from azureml.core import Workspace\n",
|
190 |
+
"\n",
|
191 |
+
"try:\n",
|
192 |
+
" ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)\n",
|
193 |
+
" # write the details of the workspace to a configuration file to the notebook library\n",
|
194 |
+
" ws.write_config()\n",
|
195 |
+
" print(\"Workspace configuration succeeded. Skip the workspace creation steps below\")\n",
|
196 |
+
"except:\n",
|
197 |
+
" print(\"Workspace not accessible. Change your parameters or create a new workspace below\")"
|
198 |
+
]
|
199 |
+
},
|
200 |
+
{
|
201 |
+
"cell_type": "markdown",
|
202 |
+
"metadata": {},
|
203 |
+
"source": [
|
204 |
+
"### Create a new workspace\n",
|
205 |
+
"\n",
|
206 |
+
"If you don't have an existing workspace and are the owner of the subscription or resource group, you can create a new workspace. If you don't have a resource group, the create workspace command will create one for you using the name you provide.\n",
|
207 |
+
"\n",
|
208 |
+
"**Note**: As with other Azure services, there are limits on certain resources (for example AmlCompute quota) associated with the Azure ML service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.\n",
|
209 |
+
"\n",
|
210 |
+
"This cell will create an Azure ML workspace for you in a subscription provided you have the correct permissions.\n",
|
211 |
+
"\n",
|
212 |
+
"This will fail if:\n",
|
213 |
+
"* You do not have permission to create a workspace in the resource group\n",
|
214 |
+
"* You do not have permission to create a resource group if it's non-existing.\n",
|
215 |
+
"* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n",
|
216 |
+
"\n",
|
217 |
+
"If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.\n",
|
218 |
+
"\n",
|
219 |
+
"**Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.\n",
|
220 |
+
"Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.\n"
|
221 |
+
]
|
222 |
+
},
|
223 |
+
{
|
224 |
+
"cell_type": "code",
|
225 |
+
"execution_count": null,
|
226 |
+
"metadata": {
|
227 |
+
"tags": [
|
228 |
+
"create workspace"
|
229 |
+
]
|
230 |
+
},
|
231 |
+
"outputs": [],
|
232 |
+
"source": [
|
233 |
+
"from azureml.core import Workspace\n",
|
234 |
+
"\n",
|
235 |
+
"# Create the workspace using the specified parameters\n",
|
236 |
+
"ws = Workspace.create(name = workspace_name,\n",
|
237 |
+
" subscription_id = subscription_id,\n",
|
238 |
+
" resource_group = resource_group, \n",
|
239 |
+
" location = workspace_region,\n",
|
240 |
+
" create_resource_group = True,\n",
|
241 |
+
" sku = 'basic',\n",
|
242 |
+
" exist_ok = True)\n",
|
243 |
+
"ws.get_details()\n",
|
244 |
+
"\n",
|
245 |
+
"# write the details of the workspace to a configuration file to the notebook library\n",
|
246 |
+
"ws.write_config()"
|
247 |
+
]
|
248 |
+
},
|
249 |
+
{
|
250 |
+
"cell_type": "markdown",
|
251 |
+
"metadata": {},
|
252 |
+
"source": [
|
253 |
+
"### Create compute resources for your training experiments\n",
|
254 |
+
"\n",
|
255 |
+
"Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n",
|
256 |
+
"\n",
|
257 |
+
"> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n",
|
258 |
+
"\n",
|
259 |
+
"To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n",
|
260 |
+
"\n",
|
261 |
+
"The cluster parameters are:\n",
|
262 |
+
"* vm_size - this describes the virtual machine type and size used in the cluster. All machines in the cluster are the same type. You can get the list of vm sizes available in your region by using the CLI command\n",
|
263 |
+
"\n",
|
264 |
+
"```shell\n",
|
265 |
+
"az vm list-skus -o tsv\n",
|
266 |
+
"```\n",
|
267 |
+
"* min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while not in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.\n",
|
268 |
+
"* max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.\n",
|
269 |
+
"\n",
|
270 |
+
"\n",
|
271 |
+
"To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy."
|
272 |
+
]
|
273 |
+
},
|
274 |
+
{
|
275 |
+
"cell_type": "code",
|
276 |
+
"execution_count": null,
|
277 |
+
"metadata": {},
|
278 |
+
"outputs": [],
|
279 |
+
"source": [
|
280 |
+
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
281 |
+
"from azureml.core.compute_target import ComputeTargetException\n",
|
282 |
+
"\n",
|
283 |
+
"# Choose a name for your CPU cluster\n",
|
284 |
+
"cpu_cluster_name = \"cpu-cluster\"\n",
|
285 |
+
"\n",
|
286 |
+
"# Verify that cluster does not exist already\n",
|
287 |
+
"try:\n",
|
288 |
+
" cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n",
|
289 |
+
" print(\"Found existing cpu-cluster\")\n",
|
290 |
+
"except ComputeTargetException:\n",
|
291 |
+
" print(\"Creating new cpu-cluster\")\n",
|
292 |
+
" \n",
|
293 |
+
" # Specify the configuration for the new cluster\n",
|
294 |
+
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n",
|
295 |
+
" min_nodes=0,\n",
|
296 |
+
" max_nodes=4)\n",
|
297 |
+
"\n",
|
298 |
+
" # Create the cluster with the specified name and configuration\n",
|
299 |
+
" cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n",
|
300 |
+
" \n",
|
301 |
+
" # Wait for the cluster to complete, show the output log\n",
|
302 |
+
" cpu_cluster.wait_for_completion(show_output=True)"
|
303 |
+
]
|
304 |
+
},
|
305 |
+
{
|
306 |
+
"cell_type": "markdown",
|
307 |
+
"metadata": {},
|
308 |
+
"source": [
|
309 |
+
"To create a **GPU** cluster, run the cell below. Note that your subscription must have sufficient quota for GPU VMs or the command will fail. To increase quota, see [these instructions](https://docs.microsoft.com/en-us/azure/azure-supportability/resource-manager-core-quotas-request). "
|
310 |
+
]
|
311 |
+
},
|
312 |
+
{
|
313 |
+
"cell_type": "code",
|
314 |
+
"execution_count": null,
|
315 |
+
"metadata": {},
|
316 |
+
"outputs": [],
|
317 |
+
"source": [
|
318 |
+
"from azureml.core.compute import ComputeTarget, AmlCompute\n",
|
319 |
+
"from azureml.core.compute_target import ComputeTargetException\n",
|
320 |
+
"\n",
|
321 |
+
"# Choose a name for your GPU cluster\n",
|
322 |
+
"gpu_cluster_name = \"gpu-cluster\"\n",
|
323 |
+
"\n",
|
324 |
+
"# Verify that cluster does not exist already\n",
|
325 |
+
"try:\n",
|
326 |
+
" gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n",
|
327 |
+
" print(\"Found existing gpu cluster\")\n",
|
328 |
+
"except ComputeTargetException:\n",
|
329 |
+
" print(\"Creating new gpu-cluster\")\n",
|
330 |
+
" \n",
|
331 |
+
" # Specify the configuration for the new cluster\n",
|
332 |
+
" compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n",
|
333 |
+
" min_nodes=0,\n",
|
334 |
+
" max_nodes=4)\n",
|
335 |
+
" # Create the cluster with the specified name and configuration\n",
|
336 |
+
" gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n",
|
337 |
+
"\n",
|
338 |
+
" # Wait for the cluster to complete, show the output log\n",
|
339 |
+
" gpu_cluster.wait_for_completion(show_output=True)"
|
340 |
+
]
|
341 |
+
},
|
342 |
+
{
|
343 |
+
"cell_type": "markdown",
|
344 |
+
"metadata": {},
|
345 |
+
"source": [
|
346 |
+
"---\n",
|
347 |
+
"\n",
|
348 |
+
"## Next steps\n",
|
349 |
+
"\n",
|
350 |
+
"In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to your workspace, or use it to bootstrap new workspaces completely.\n",
|
351 |
+
"\n",
|
352 |
+
"If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process."
|
353 |
+
]
|
354 |
+
},
|
355 |
+
{
|
356 |
+
"cell_type": "code",
|
357 |
+
"execution_count": null,
|
358 |
+
"metadata": {},
|
359 |
+
"outputs": [],
|
360 |
+
"source": []
|
361 |
+
}
|
362 |
+
],
|
363 |
+
"metadata": {
|
364 |
+
"authors": [
|
365 |
+
{
|
366 |
+
"name": "ninhu"
|
367 |
+
}
|
368 |
+
],
|
369 |
+
"kernelspec": {
|
370 |
+
"display_name": "Python 3.8 - AzureML",
|
371 |
+
"language": "python",
|
372 |
+
"name": "python38-azureml"
|
373 |
+
},
|
374 |
+
"language_info": {
|
375 |
+
"codemirror_mode": {
|
376 |
+
"name": "ipython",
|
377 |
+
"version": 3
|
378 |
+
},
|
379 |
+
"file_extension": ".py",
|
380 |
+
"mimetype": "text/x-python",
|
381 |
+
"name": "python",
|
382 |
+
"nbconvert_exporter": "python",
|
383 |
+
"pygments_lexer": "ipython3",
|
384 |
+
"version": "3.6.5"
|
385 |
+
}
|
386 |
+
},
|
387 |
+
"nbformat": 4,
|
388 |
+
"nbformat_minor": 2
|
389 |
+
}
|
configuration.yml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: configuration
|
2 |
+
dependencies:
|
3 |
+
- pip:
|
4 |
+
- azureml-sdk
|
configuration_chatglm.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import PretrainedConfig
|
2 |
+
|
3 |
+
|
4 |
+
class ChatGLMConfig(PretrainedConfig):
    """Configuration class for ChatGLM models.

    Stores the architecture hyper-parameters consumed by the ChatGLM
    modeling code. Inherits serialization behaviour (to/from ``config.json``)
    from ``transformers.PretrainedConfig``; in practice the defaults below
    are overridden by the values shipped with a checkpoint.
    """

    # Registers this configuration under the "chatglm" model type.
    model_type = "chatglm"

    def __init__(
        self,
        num_layers=28,
        padded_vocab_size=65024,
        hidden_size=4096,
        ffn_hidden_size=13696,
        kv_channels=128,
        num_attention_heads=32,
        seq_length=2048,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        layernorm_epsilon=1e-5,
        rmsnorm=True,
        apply_residual_connection_post_layernorm=False,
        post_layer_norm=True,
        add_bias_linear=False,
        add_qkv_bias=False,
        bias_dropout_fusion=True,
        multi_query_attention=False,
        multi_query_group_num=1,
        apply_query_key_layer_scaling=True,
        attention_softmax_in_fp32=True,
        fp32_residual_connection=False,
        quantization_bit=0,
        pre_seq_len=None,
        prefix_projection=False,
        **kwargs
    ):
        """Initialize the configuration.

        Each keyword argument is stored verbatim as an attribute of the same
        name (see inline comments for the exceptions). Extra keyword
        arguments are forwarded to ``PretrainedConfig.__init__``.
        """
        self.num_layers = num_layers
        # ``vocab_size`` is kept as an alias of ``padded_vocab_size`` so code
        # expecting the conventional HF attribute name still works.
        self.vocab_size = padded_vocab_size
        self.padded_vocab_size = padded_vocab_size
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.kv_channels = kv_channels
        self.num_attention_heads = num_attention_heads
        self.seq_length = seq_length
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.layernorm_epsilon = layernorm_epsilon
        self.rmsnorm = rmsnorm
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.post_layer_norm = post_layer_norm
        self.add_bias_linear = add_bias_linear
        self.add_qkv_bias = add_qkv_bias
        self.bias_dropout_fusion = bias_dropout_fusion
        self.multi_query_attention = multi_query_attention
        self.multi_query_group_num = multi_query_group_num
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.fp32_residual_connection = fp32_residual_connection
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection
        # Parent handles the generic HF config fields passed via **kwargs.
        super().__init__(**kwargs)
|
constants.py
ADDED
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""File containing constants for model evaluation script."""
|
5 |
+
|
6 |
+
# Column name under which model outputs are stored.
PREDICTIONS_COLUMN_NAME = "predictions"
# Key used to look up the label ("y") transformer.
TRANSFORMER_KEY = "y_transformer"
# Output directory/path name for evaluation artifacts.
EVALUATION_RESULTS_PATH = "evaluationResult"

# File/key names for locating test data produced by LLM fine-tuning
# preprocessing (see preprocess_args.json).
MLTABLE_FILE_NAME = "MLTable"
LLM_FT_PREPROCESS_FILENAME = "preprocess_args.json"
LLM_FT_TEST_DATA_KEY = "raw_test_data_fname"

# Properties attached to the current run.
RUN_PROPERTIES = {
    "showMetricsAtRoot": "true"
}

# Properties attached to the root (pipeline) run.
ROOT_RUN_PROPERTIES = {
    "PipelineType": "Evaluate"
}
|
21 |
+
|
22 |
+
|
23 |
+
class DEVICE:
    """Device list.

    String identifiers for the compute device selection option.
    """

    AUTO = "auto"
    CPU = "cpu"
    GPU = "gpu"


# All supported device identifiers.
ALL_DEVICES = [DEVICE.AUTO, DEVICE.CPU, DEVICE.GPU]
|
32 |
+
|
33 |
+
|
34 |
+
class TASK:
    """TASK list.

    Canonical string identifiers for every task type supported by the
    model-evaluation component.
    """

    # Tabular tasks
    CLASSIFICATION = "tabular-classification"
    CLASSIFICATION_MULTILABEL = "tabular-classification-multilabel"
    REGRESSION = "tabular-regression"
    # Text tasks
    TEXT_CLASSIFICATION = "text-classification"
    TEXT_CLASSIFICATION_MULTILABEL = "text-classification-multilabel"
    NER = "text-named-entity-recognition"
    SUMMARIZATION = "text-summarization"
    QnA = "question-answering"
    TRANSLATION = "text-translation"
    TEXT_GENERATION = "text-generation"
    FILL_MASK = "fill-mask"
    # Image tasks
    IMAGE_CLASSIFICATION = "image-classification"
    IMAGE_CLASSIFICATION_MULTILABEL = "image-classification-multilabel"
    IMAGE_OBJECT_DETECTION = "image-object-detection"
    IMAGE_INSTANCE_SEGMENTATION = "image-instance-segmentation"
    FORECASTING = "tabular-forecasting"
|
53 |
+
|
54 |
+
|
55 |
+
# Every task identifier accepted by the component.
ALL_TASKS = [
    TASK.CLASSIFICATION,
    TASK.CLASSIFICATION_MULTILABEL,
    TASK.REGRESSION,
    TASK.TEXT_CLASSIFICATION,
    TASK.TEXT_CLASSIFICATION_MULTILABEL,
    TASK.NER,
    TASK.FORECASTING,
    TASK.SUMMARIZATION,
    TASK.QnA,
    TASK.TRANSLATION,
    TASK.FILL_MASK,
    TASK.TEXT_GENERATION,
    TASK.IMAGE_CLASSIFICATION,
    TASK.IMAGE_CLASSIFICATION_MULTILABEL,
    TASK.IMAGE_OBJECT_DETECTION,
    TASK.IMAGE_INSTANCE_SEGMENTATION
]

# Tasks where a sample may carry multiple labels simultaneously.
MULTILABEL_SET = [
    TASK.CLASSIFICATION_MULTILABEL,
    TASK.TEXT_CLASSIFICATION_MULTILABEL,
    TASK.IMAGE_CLASSIFICATION_MULTILABEL
]

# Tasks that are classification problems (single- or multi-label).
CLASSIFICATION_SET = [
    TASK.CLASSIFICATION,
    TASK.CLASSIFICATION_MULTILABEL,
    TASK.TEXT_CLASSIFICATION,
    TASK.TEXT_CLASSIFICATION_MULTILABEL,
    TASK.IMAGE_CLASSIFICATION,
    TASK.IMAGE_CLASSIFICATION_MULTILABEL
]

# Tasks whose per-sample output is a collection rather than a scalar.
MULTIPLE_OUTPUTS_SET = [
    TASK.CLASSIFICATION_MULTILABEL,
    TASK.NER,
    TASK.TEXT_CLASSIFICATION_MULTILABEL,
    TASK.IMAGE_CLASSIFICATION_MULTILABEL
]

# Maps the component's task identifiers to the model-type names used by
# the MLflow evaluation flavor.
MLFLOW_MODEL_TYPE_MAP = {
    TASK.CLASSIFICATION: "classifier",
    TASK.CLASSIFICATION_MULTILABEL: "classifier-multilabel",
    TASK.REGRESSION: "regressor",
    TASK.TEXT_CLASSIFICATION: "text-classifier",
    TASK.TEXT_CLASSIFICATION_MULTILABEL: "classifier-multilabel",
    TASK.NER: "text-ner",
    TASK.FORECASTING: "forecasting",
    TASK.TRANSLATION: "translation",
    TASK.QnA: "question-answering",
    TASK.SUMMARIZATION: "summarization",
    TASK.TEXT_GENERATION: "text-generation",
    TASK.FILL_MASK: "fill-mask",
    TASK.IMAGE_CLASSIFICATION: "image-classifier",
    TASK.IMAGE_CLASSIFICATION_MULTILABEL: "image-classifier-multilabel",
    TASK.IMAGE_OBJECT_DETECTION: "image-object-detection",
    TASK.IMAGE_INSTANCE_SEGMENTATION: "image-instance-segmentation"
}

# Tasks operating on image inputs.
IMAGE_TASKS = [
    TASK.IMAGE_CLASSIFICATION,
    TASK.IMAGE_CLASSIFICATION_MULTILABEL,
    TASK.IMAGE_OBJECT_DETECTION,
    TASK.IMAGE_INSTANCE_SEGMENTATION
]
|
121 |
+
|
122 |
+
|
123 |
+
class TelemetryConstants:
    """Telemetry Constants.

    Identifiers used when emitting telemetry: the component name, the names
    of the individual tracked operations, and logger/handler names.
    """

    COMPONENT_NAME = "model_evaluation"

    # Names of tracked operations / activities.
    VALIDATION_NAME = "argument_validation"
    DATA_LOADING = "loading_data"
    LOG_AND_SAVE_OUTPUT = "log_and_save_output"

    LOAD_MODEL = "load_model"

    PREDICT_NAME = "predict"
    MODEL_PREDICTION_NAME = "model_prediction"
    COMPUTE_METRICS_NAME = "compute_metrics"
    SCORE_NAME = "score"
    EVALUATE_MODEL_NAME = "evaluate_model"

    MLFLOW_NAME = "mlflow_evaluate"

    # Logger / handler identifiers.
    MODEL_EVALUATION_HANDLER_NAME = "ModelEvaluationHandler"
    LOGGER_NAME = "model_evaluation_component"
    APP_INSIGHT_HANDLER_NAME = "AppInsightsHandler"
|
145 |
+
|
146 |
+
|
147 |
+
class ExceptionLiterals:
    """Exception Constants.

    Target strings identifying which part of the component an exception
    originated from.
    """

    MODEL_EVALUATION_TARGET = "AzureML Model Evaluation"
    DATA_TARGET = "AzureML Model Evaluation Data Validation"
    DATA_LOADING_TARGET = "AzureML Model Evaluation Data Loading"
    ARGS_TARGET = "AzureML Model Evaluation Arguments Validation"
    MODEL_LOADER_TARGET = "AzureML Model Evaluation Model Loading"


class ExceptionTypes:
    """AzureML Exception Types.

    Classification buckets for exceptions; ``All`` is the set of every
    recognized type.
    """

    User = "User"
    System = "System"
    Service = "Service"
    Unclassified = "Unclassified"
    All = {User, System, Service, Unclassified}
|
165 |
+
|
166 |
+
|
167 |
+
class ErrorStrings:
    """Error Strings.

    Message templates for every error raised by the component. Placeholders
    in ``{braces}`` (e.g. ``{error}``) are filled in when the error is
    constructed.
    """

    GenericModelEvaluationError = "Model Evaluation failed due to [{error}]"
    GenericModelPredictionError = "Model Prediction failed due to [{error}]"
    GenericComputeMetricsError = "Compute metrics failed due to [{error}]"

    # Arguments related
    ArgumentParsingError = "Failed to parse input arguments."
    InvalidTaskType = "Given Task Type [{TaskName}] is not supported. " + \
                      "Please see the list of supported task types:\n" + \
                      "\n".join(ALL_TASKS)
    InvalidModel = "Either correct Model URI or Mlflow Model should be passed.\n" \
                   "If you have passed Model URI, your Model URI is incorrect."
    BadModelData = "Model load failed due to error: [{error}]"
    InvalidTestData = "Test data should be passed."
    InvalidPredictionsData = "Predictions should be passed."
    InvalidGroundTruthData = "Ground truth should be passed."
    InvalidGroundTruthColumnName = "Ground truth column name should be passed since columns in data are > 0."
    InvalidGroundTruthColumnNameData = "Ground truth column name not found in input data."
    InvalidPredictionColumnNameData = "Prediction Column name not found in input data."

    # Data Asset related
    BadLabelColumnName = "No label column found in test data."
    BadFeatureColumnNames = "input_column_names is not a subset of input test dataset columns.\
 input_column_names include [{keep_columns}] whereas data has [{data_columns}]"
    BadInputData = "Failed to load data with error: [{error}]"
    BadEvaluationConfigFile = "Evaluation Config file failed to load due to [{error}]"
    BadEvaluationConfigParam = "Evaluation Config Params failed to load due to [{error}]"
    BadEvaluationConfig = "Evaluation Config failed to load due to [{error}]"

    BadForecastGroundTruthData = "For forecasting tasks, the table needs to be provided " \
                                 "in the ground_truths parameter." \
                                 "The table must contain time, prediction " \
                                 "ground truth and time series IDs columns."
    BadRegressionColumnType = "Expected target columns of type float found [{y_test_dtype}] instead"

    # Logging Related
    MetricLoggingError = "Failed to log metric {metric_name} due to [{error}]"
|
206 |
+
|
207 |
+
|
208 |
+
class ForecastingConfigContract:
    """Forecasting data contract on forecasting metrics config.

    Keys expected in the forecasting metrics configuration.
    """

    TIME_COLUMN_NAME = 'time_column_name'
    TIME_SERIES_ID_COLUMN_NAMES = 'time_series_id_column_names'
    FORECAST_ORIGIN_COLUMN_NAME = 'forecast_origin_column_name'


class ForecastColumns:
    """The columns, returned in the forecast data frame."""

    _ACTUAL_COLUMN_NAME = '_automl_actual'
    _FORECAST_COLUMN_NAME = '_automl_forecast'
    _FORECAST_ORIGIN_COLUMN_DEFAULT = '_automl_forecast_origin'


# Names of the parameters that may be forwarded to the inference pipeline.
ALLOWED_PIPELINE_PARAMS = {
    "tokenizer_config",
    "generator_config",
    "model_kwargs",
    "pipeline_init_args",
    "trust_remote_code",
    "source_lang",
    "target_lang"
}
|
download_dependencies.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""Script for downloading and installing mlflow model dependencies."""
|
5 |
+
|
6 |
+
from mlflow.pyfunc import _get_model_dependencies
|
7 |
+
from argparse import ArgumentParser
|
8 |
+
import traceback
|
9 |
+
|
10 |
+
try:
    # ``pip.main`` is not available on newer pip releases; fall back to the
    # internal location when the public import fails.
    from pip import main as pipmain
except ImportError:
    from pip._internal import main as pipmain
# NOTE(review): this constant appears unused in this script — confirm before removing.
REQUIREMENTS_FILE = "./requirements.txt"
|
15 |
+
|
16 |
+
|
17 |
+
# Requirement names that must never be (re)installed from the model's
# requirement file; presumably these are already provided by the component
# environment (they were skipped by design in the original script).
_SKIP_SUBSTRINGS = (
    "azureml_evaluate_mlflow",
    "azureml-evaluate-mlflow",
    "azureml_metrics",
    "azureml-metrics",
)


def main():
    """Download and Install mlflow model dependencies.

    Reads ``--model-uri`` / ``--mlflow-model`` from the command line
    (``--mlflow-model`` takes precedence when provided), resolves the
    model's pip requirements via mlflow, and installs each requirement
    individually so that one failing package does not abort the rest.
    """
    parser = ArgumentParser()
    parser.add_argument("--model-uri", type=str, dest="model_uri", required=False, default="")
    parser.add_argument("--mlflow-model", type=str, dest="mlflow_model", required=False, default=None)

    args = parser.parse_args()

    model_uri = args.model_uri.strip()
    if args.mlflow_model:
        # An explicit mlflow model path overrides the raw URI.
        model_uri = args.mlflow_model

    # Returns the path of a pip-style requirements file for the model.
    reqs_file = _get_model_dependencies(model_uri, "pip")

    with open(reqs_file, "r") as f:
        for line in f:
            req = line.strip()
            # Skip blank lines and comments: passing them to pip would
            # produce spurious installation errors.
            if not req or req.startswith("#"):
                continue
            # mlflow itself is deliberately excluded from installation.
            if req == "mlflow":
                continue
            if any(marker in req for marker in _SKIP_SUBSTRINGS):
                continue
            try:
                # One requirement per pip invocation so a failure is isolated.
                pipmain(["install", req])
            except Exception:
                print("Failed to install package", req)
                print("Traceback:")
                traceback.print_exc()


if __name__ == "__main__":
    main()
|
environment_dev.yml
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: argilla
|
2 |
+
|
3 |
+
channels:
|
4 |
+
- conda-forge
|
5 |
+
|
6 |
+
dependencies:
|
7 |
+
- python~=3.8.0
|
8 |
+
- pip>=2.22.0
|
9 |
+
- openjdk=11
|
10 |
+
# pyparsing 3.0.5 seems to be buggy
|
11 |
+
- pyparsing!=3.0.5
|
12 |
+
# tests
|
13 |
+
- pytest
|
14 |
+
- pytest-cov
|
15 |
+
- pytest-mock
|
16 |
+
- pytest-asyncio
|
17 |
+
- factory_boy~=3.2.1
|
18 |
+
# docs, pandoc needs conda ...
|
19 |
+
- pandoc==2.12
|
20 |
+
# we need this to ensure syntax highlighting in the notebook code cells for the docs
|
21 |
+
- ipython<8.0.0
|
22 |
+
# for building the frontend
|
23 |
+
- nodejs~=18.16.0
|
24 |
+
- pip:
|
25 |
+
# docs
|
26 |
+
- sphinx==4.5.0
|
27 |
+
- sphinx-design==0.2.0
|
28 |
+
- furo==2022.6.4.1
|
29 |
+
- myst-parser==0.16.1
|
30 |
+
- nbsphinx==0.8.9
|
31 |
+
- sphinxext.opengraph==0.6.3
|
32 |
+
- sphinx-copybutton==0.5.0
|
33 |
+
- sphinx-notfound-page==0.8.3
|
34 |
+
# code formatting
|
35 |
+
- pre-commit~=2.15.0
|
36 |
+
# extra test dependencies
|
37 |
+
- cleanlab~=2.0.0 # With this version, tests are failing
|
38 |
+
- datasets>1.17.0,!= 2.3.2 # TODO: push_to_hub fails up to 2.3.2, check patches when they come out eventually
|
39 |
+
- huggingface_hub
|
40 |
+
- flair>=0.12.2
|
41 |
+
- faiss-cpu
|
42 |
+
- flyingsquid
|
43 |
+
- pgmpy
|
44 |
+
- plotly>=4.1.0
|
45 |
+
- snorkel>=0.9.7
|
46 |
+
- spacy==3.5.3
|
47 |
+
- https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0.tar.gz
|
48 |
+
- spacy-transformers>=1.2.5
|
49 |
+
- transformers[torch]>=4.19.0
|
50 |
+
- evaluate
|
51 |
+
- seqeval
|
52 |
+
- setfit
|
53 |
+
- span_marker
|
54 |
+
- openai
|
55 |
+
- peft
|
56 |
+
- autotrain-advanced==0.5.2
|
57 |
+
- rich!=13.1.0
|
58 |
+
# install Argilla in editable mode
|
59 |
+
- -e .[server,listeners]
|
error_definitions.py
ADDED
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""Error Definitions."""
|
5 |
+
|
6 |
+
from azureml._common._error_definition import error_decorator
|
7 |
+
from azureml._common._error_definition.user_error import (
|
8 |
+
BadArgument,
|
9 |
+
BadData
|
10 |
+
)
|
11 |
+
from azureml._common._error_definition.system_error import ClientError
|
12 |
+
from constants import ErrorStrings
|
13 |
+
|
14 |
+
|
15 |
+
class ModelEvaluationInternalError(ClientError):
    """Internal (system) error for otherwise-unclassified model evaluation failures."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error."""
        return ErrorStrings.GenericModelEvaluationError


class ModelPredictionInternalError(ClientError):
    """Internal (system) error raised when model prediction fails."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error."""
        return ErrorStrings.GenericModelPredictionError


class ComputeMetricsInternalError(ClientError):
    """Internal (system) error raised when metric computation fails."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error."""
        return ErrorStrings.GenericComputeMetricsError
|
52 |
+
|
53 |
+
|
54 |
+
@error_decorator(use_parent_error_code=True)
class InvalidTaskType(BadArgument):
    """User error raised when the given task type is not supported."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error."""
        return ErrorStrings.InvalidTaskType


@error_decorator(use_parent_error_code=True)
class InvalidModel(BadArgument):
    """User error raised when neither a valid model URI nor an mlflow model is supplied."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error."""
        return ErrorStrings.InvalidModel


@error_decorator(use_parent_error_code=True)
class BadModel(BadData):
    """User error raised when the model fails to load."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error."""
        return ErrorStrings.BadModelData
|
94 |
+
|
95 |
+
|
96 |
+
@error_decorator(use_parent_error_code=True)
|
97 |
+
class InvalidTestData(BadArgument):
|
98 |
+
"""Invalid Test Data error."""
|
99 |
+
|
100 |
+
@property
|
101 |
+
def message_format(self) -> str:
|
102 |
+
"""Message Format.
|
103 |
+
|
104 |
+
Returns:
|
105 |
+
str: _description_
|
106 |
+
"""
|
107 |
+
return ErrorStrings.InvalidTestData
|
108 |
+
|
109 |
+
|
110 |
+
@error_decorator(use_parent_error_code=True)
|
111 |
+
class InvalidPredictionsData(BadArgument):
|
112 |
+
"""Invalid Predictions file error."""
|
113 |
+
|
114 |
+
@property
|
115 |
+
def message_format(self) -> str:
|
116 |
+
"""Message Format.
|
117 |
+
|
118 |
+
Returns:
|
119 |
+
str: _description_
|
120 |
+
"""
|
121 |
+
return ErrorStrings.InvalidPredictionsData
|
122 |
+
|
123 |
+
|
124 |
+
@error_decorator(use_parent_error_code=True)
class InvalidPredictionColumnNameData(BadArgument):
    """Raised when the prediction column name argument is invalid for the given data."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.InvalidPredictionColumnNameData
|
136 |
+
|
137 |
+
|
138 |
+
@error_decorator(use_parent_error_code=True)
class InvalidGroundTruthData(BadArgument):
    """Raised when the ground truth data argument fails validation."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.InvalidGroundTruthData
|
150 |
+
|
151 |
+
|
152 |
+
@error_decorator(use_parent_error_code=True)
class ArgumentParsingError(BadArgument):
    """Raised when command-line argument parsing fails."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.ArgumentParsingError
|
164 |
+
|
165 |
+
|
166 |
+
@error_decorator(use_parent_error_code=True)
class InvalidGroundTruthColumnNameData(BadArgument):
    """Raised when the ground truth column name argument is invalid for the given data."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.InvalidGroundTruthColumnNameData
|
178 |
+
|
179 |
+
|
180 |
+
@error_decorator(use_parent_error_code=True)
class InvalidGroundTruthColumnName(BadArgument):
    """Raised when a required ground truth column name was not passed."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.InvalidGroundTruthColumnName
|
192 |
+
|
193 |
+
|
194 |
+
@error_decorator(use_parent_error_code=True)
class BadInputData(BadData):
    """Raised when input data cannot be read or used."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.BadInputData
|
206 |
+
|
207 |
+
|
208 |
+
@error_decorator(use_parent_error_code=True)
class BadLabelColumnData(BadData):
    """Raised when the label column data is unusable."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings (BadLabelColumnName).
        """
        return ErrorStrings.BadLabelColumnName
|
220 |
+
|
221 |
+
|
222 |
+
@error_decorator(use_parent_error_code=True)
class BadFeatureColumnData(BadData):
    """Raised when feature column data is unusable."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings (BadFeatureColumnNames).
        """
        return ErrorStrings.BadFeatureColumnNames
|
234 |
+
|
235 |
+
|
236 |
+
@error_decorator(use_parent_error_code=True)
class BadEvaluationConfigFile(BadData):
    """Raised when the evaluation config file cannot be parsed or used."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.BadEvaluationConfigFile
|
248 |
+
|
249 |
+
|
250 |
+
@error_decorator(use_parent_error_code=True)
class BadEvaluationConfigParam(BadData):
    """Raised when a specific evaluation config parameter is invalid."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.BadEvaluationConfigParam
|
262 |
+
|
263 |
+
|
264 |
+
@error_decorator(use_parent_error_code=True)
class BadEvaluationConfig(BadData):
    """Raised when the evaluation config as a whole is invalid."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings.
        """
        return ErrorStrings.BadEvaluationConfig
|
276 |
+
|
277 |
+
|
278 |
+
@error_decorator(use_parent_error_code=True)
class BadForecastData(BadInputData):
    """Raised when forecasting data passed to the component is unusable."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings (BadForecastGroundTruthData).
        """
        return ErrorStrings.BadForecastGroundTruthData
|
290 |
+
|
291 |
+
|
292 |
+
@error_decorator(use_parent_error_code=True)
class BadRegressionData(BadInputData):
    """Raised when a regression column has an unusable type."""

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings (BadRegressionColumnType).
        """
        return ErrorStrings.BadRegressionColumnType
|
304 |
+
|
305 |
+
|
306 |
+
class MetricsLoggingError(ClientError):
    """Raised when logging computed metrics fails.

    NOTE(review): unlike the classes above, this is a ClientError and is not
    wrapped with @error_decorator — presumably intentional; confirm.
    """

    @property
    def message_format(self) -> str:
        """Return the unformatted message template for this error.

        Returns:
            str: Message template defined in ErrorStrings (MetricLoggingError).
        """
        return ErrorStrings.MetricLoggingError
|
evaluate_model.py
ADDED
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""Entry script for Model Evaluation Component."""
|
5 |
+
|
6 |
+
import azureml.evaluate.mlflow as aml_mlflow
|
7 |
+
import os
|
8 |
+
import constants
|
9 |
+
import torch
|
10 |
+
from mlflow.models import Model
|
11 |
+
from itertools import repeat
|
12 |
+
|
13 |
+
from image_constants import ImageDataFrameParams, SettingLiterals as ImageSettingLiterals
|
14 |
+
from exceptions import (
|
15 |
+
ScoringException,
|
16 |
+
DataLoaderException,
|
17 |
+
ModelValidationException,
|
18 |
+
swallow_all_exceptions
|
19 |
+
)
|
20 |
+
from error_definitions import (
|
21 |
+
ModelEvaluationInternalError,
|
22 |
+
BadInputData,
|
23 |
+
BadModel
|
24 |
+
)
|
25 |
+
from azureml._common._error_definition.azureml_error import AzureMLError
|
26 |
+
from logging_utilities import custom_dimensions, current_run, get_logger, log_traceback
|
27 |
+
from azureml.telemetry.activity import log_activity
|
28 |
+
from utils import (
|
29 |
+
ArgumentParser,
|
30 |
+
check_and_return_if_mltable,
|
31 |
+
read_data,
|
32 |
+
prepare_data,
|
33 |
+
get_predictor,
|
34 |
+
filter_pipeline_params,
|
35 |
+
fetch_compute_metrics_args
|
36 |
+
)
|
37 |
+
from validation import _validate, validate_args, validate_Xy
|
38 |
+
|
39 |
+
logger = get_logger(name=__name__)
custom_dimensions.app_name = constants.TelemetryConstants.EVALUATE_MODEL_NAME
# current_run = TestRun()
test_run = current_run.run  # this component's own run
root_run = current_run.root_run  # root pipeline run (used later for pipeline-level properties)
ws = current_run.workspace
# Point mlflow tracking at the AzureML workspace so aml_mlflow.evaluate() logs there.
aml_mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
custom_dims_dict = vars(custom_dimensions)  # telemetry custom dimensions as a plain dict
|
47 |
+
|
48 |
+
|
49 |
+
class EvaluateModel:
    """Run model evaluation for a single task/model/dataset via azureml-evaluate-mlflow.

    Holds the task, model URI, output path, device selection and metrics config,
    loads the test data, validates it, and drives aml_mlflow.evaluate().
    """

    def __init__(self,
                 task: str,
                 model_uri: str,
                 output: str,
                 device: str = constants.DEVICE.AUTO,
                 config: dict = None,
                 batch_size: int = 1) -> None:
        """
        Initialize the evaluator.

        Args:
            task: Evaluation task type (one of constants.ALL_TASKS).
            model_uri: URI of the mlflow model to evaluate.
            output: Output directory for evaluation results.
            device: Requested device (constants.DEVICE.*); AUTO/CPU/GPU.
            config: Metrics/evaluator configuration dict (mutated later by score()).
            batch_size: Prediction batch size forwarded to the evaluator config.
        """
        self.task = task
        self.model_uri = model_uri
        self.output = output
        # Task-category flags derived from constants sets.
        self.multilabel = task in constants.MULTILABEL_SET
        self._has_multiple_output = task in constants.MULTIPLE_OUTPUTS_SET
        self.batch_size = batch_size
        # Instance segmentation is the only task that needs masks in predictions.
        self.masks_required = True if task == constants.TASK.IMAGE_INSTANCE_SEGMENTATION else False
        self.config = config

        # current_device keeps the requested/last-known device string or index;
        # device is normalized to -1 for CPU or a CUDA device index for GPU.
        self.current_device = device
        self.device = device
        if device == constants.DEVICE.CPU:
            self.device = -1
        elif device == constants.DEVICE.GPU:
            if torch.cuda.is_available():
                self.device = torch.cuda.current_device()
            else:
                logger.warning("No GPU found. Using CPU instead.")
                self.device = -1
        logger.info("Logging to check metrics config in evaluate_model: " + str(self.config))

    def _ensure_model_on_cpu(self):
        """Move the loaded model to CPU in place.

        NOTE(review): reads self.is_hf / self.is_torch and self.model, none of
        which are assigned anywhere in this class — calling this as-is would
        raise AttributeError. Its only call site (in handle_device_failure) is
        commented out, so it appears to be dead/legacy code; confirm before use.
        """
        if self.is_hf:
            if hasattr(self.model._model_impl, "hf_model"):
                self.model._model_impl.hf_model = self.model._model_impl.hf_model.cpu()
            else:
                logger.warning("hf_model not found in mlflow model")
        elif self.is_torch:
            import torch
            if isinstance(self.model, torch.nn.Module):
                self.model = self.model.cpu()
            elif hasattr(self.model, "_model_impl") and isinstance(self.model._model_impl, torch.nn.Module):
                self.model._model_impl = self.model._model_impl.cpu()
            else:
                logger.warning("Torch model is not of type nn.Module")

    def handle_device_failure(self):
        """Reconcile the tracked device with the actual CUDA device and (re)load the model.

        Called both at initial model load (_validate_schema) and after a
        RuntimeError during evaluation; always ends by reloading the model via
        aml_mlflow.aml.load_model with a device_map derived from self.device.
        """
        if self.device == constants.DEVICE.AUTO and torch.cuda.is_available():
            try:
                cuda_current_device = torch.cuda.current_device()
                logger.info("Loading model and prediction with cuda current device ")
                if self.current_device != cuda_current_device:
                    logger.info(
                        f"Current Device: {self.current_device} does not match expected device {cuda_current_device}")
                    # self.model = load_model(self.model_uri, cuda_current_device, self.task_type)
                    self.current_device = cuda_current_device
                    # self.device = self.current_device
            except Exception as e:
                logger.info("Failed on GPU with error: " + repr(e))
                if self.device != -1:
                    logger.warning("Predict failed on GPU. Falling back to CPU")
                    try:
                        logger.info("Loading model and prediction with cuda current device. Trying CPU ")
                        if self.current_device != -1:
                            self.current_device = -1
                            # self._ensure_model_on_cpu()
                            self.device = -1
                    except Exception as e:
                        logger.info("Failed on CPU with error: " + repr(e))
                        raise e
        # Read hf load kwargs from the model's flavors and force the device_map.
        curr_model = Model.load(self.model_uri).flavors
        aml_args = {
            "model_hf_load_kwargs": curr_model.get("model_hf_load_kwargs", {})
        }
        if self.device == constants.DEVICE.AUTO:
            aml_args["model_hf_load_kwargs"]["device_map"] = constants.DEVICE.AUTO
        else:
            # "eval_na" is a sentinel understood by aml_mlflow load — TODO confirm semantics.
            aml_args["model_hf_load_kwargs"]["device_map"] = "eval_na"

        self.model = aml_mlflow.aml.load_model(self.model_uri, constants.MLFLOW_MODEL_TYPE_MAP[self.task], **aml_args)

    def _validate_schema(self, X_test):
        """Validate that the model loads and can be wrapped by a task predictor.

        Loads the model (handle_device_failure) and instantiates the task's
        predictor as a smoke test; wraps any failure in ModelValidationException.
        NOTE(review): X_test is accepted but not used here — presumably reserved
        for schema checks; confirm.
        """
        with log_activity(logger, constants.TelemetryConstants.LOAD_MODEL,
                          custom_dimensions=custom_dims_dict):
            try:
                self.handle_device_failure()  # Handling device failure
                predictor_cls = get_predictor(self.task)
                predictor = predictor_cls(self.model_uri, self.task, self.device)
                logger.info(f"model loaded, Device: {getattr(predictor.model, 'device', 'not present')}")
            except Exception as e:
                exception = ModelValidationException._with_error(
                    AzureMLError.create(BadModel, error=repr(e)),
                    inner_exception=e
                )
                log_traceback(exception, logger)
                raise exception

    def load_data(self, test_data, label_column_name, input_column_names=None, is_mltable=True):
        """
        Load test data as an iterator of validated, prepared (X, y) frames.

        Args:
            test_data: Path/URI of the test data (jsonlines or MLTable).
            label_column_name: Name of the ground-truth column.
            input_column_names: Feature column names; for image tasks these are
                overridden with the fixed image dataframe columns.
            is_mltable: Whether test_data is an MLTable.

        Returns:
            Lazy iterator (map object) yielding (X_test, y_test) tuples.
        """
        if self.task in constants.IMAGE_TASKS:
            # Imported lazily so non-image runs don't need the vision stack.
            from image_dataset import get_image_dataset
            df = get_image_dataset(task_type=self.task, test_mltable=test_data)
            data = iter([df])
            input_column_names = [ImageDataFrameParams.IMAGE_COLUMN_NAME]
            label_column_name = ImageDataFrameParams.LABEL_COLUMN_NAME
            if self.task in [constants.TASK.IMAGE_OBJECT_DETECTION,
                             constants.TASK.IMAGE_INSTANCE_SEGMENTATION]:
                input_column_names.append(ImageDataFrameParams.IMAGE_META_INFO)
        else:
            data = read_data(test_data, is_mltable)
        data = map(_validate, data, repeat(input_column_names), repeat(label_column_name))
        data = map(prepare_data, data, repeat(self.task), repeat(label_column_name), repeat(self._has_multiple_output))
        return data  # X_test, y_test

    def score(self, data, label_column_name):
        """
        Evaluate the model on the loaded data via aml_mlflow.evaluate.

        Args:
            data: Iterator from load_data; only its first (X, y) pair is used.
            label_column_name: Ground-truth column name appended to the eval frame.

        Returns:
            The aml_mlflow evaluation result, or None if evaluation was skipped
            after a RuntimeError (see NOTE below).
        """
        # No batching support for evaluate model component. Length of data is always 1.
        X_test, y_test = list(data)[0]

        validate_Xy(X_test, y_test)
        self._validate_schema(X_test)

        with log_activity(logger, constants.TelemetryConstants.MLFLOW_NAME, custom_dimensions=custom_dims_dict):
            feature_names = X_test.columns

            # eval_data aliases X_test; adding the label column mutates X_test in place.
            eval_data = X_test
            eval_data[label_column_name] = y_test
            targets = label_column_name
            self.config.update(
                {
                    "log_activity": log_activity,
                    # "log_traceback": log_traceback,
                    "custom_dimensions": custom_dims_dict,
                    "output": self.output,
                    "device": self.device,
                    "multi_label": self.multilabel,
                    "batch_size": self.batch_size,
                    ImageSettingLiterals.MASKS_REQUIRED: self.masks_required,
                    # Image ML classification, identifies task as "multilabel" in azureml-evaluate-mlflow package
                    "multilabel": self.multilabel,
                }
            )
            result = None
            try:
                # print(self.config)
                try:
                    dataset_name = test_run.experiment.name
                except Exception:
                    # Fall back to a literal placeholder name if the run has no experiment.
                    dataset_name = "test_run.experiment.name"

                result = aml_mlflow.evaluate(
                    self.model,
                    eval_data,
                    targets=targets,
                    feature_names=list(feature_names),
                    model_type=constants.MLFLOW_MODEL_TYPE_MAP[self.task],
                    dataset_name=dataset_name,
                    evaluators=["azureml"],
                    evaluator_config={"azureml": self.config},
                )
            except RuntimeError:
                # NOTE(review): only reloads the model; evaluate() is NOT retried,
                # so this path returns result=None — confirm this is intended.
                self.handle_device_failure()

            except Exception as e:
                message = f"mlflow.evaluate failed with {repr(e)}"
                exception = ScoringException._with_error(
                    AzureMLError.create(ModelEvaluationInternalError, error=repr(e)),
                    inner_exception=e
                )
                log_traceback(exception, logger, message, True)
                raise exception
        return result

    def log_and_write_outputs(self, result):
        """Log scalar metrics and save the evaluation artifacts to the output path."""
        if result is not None:
            scalar_metrics = result.metrics
            logger.info("Computed metrics:")
            for metrics, value in scalar_metrics.items():
                formatted = f"{metrics}: {value}"
                logger.info(formatted)

            result.save(os.path.join(self.output, constants.EVALUATION_RESULTS_PATH))
|
271 |
+
|
272 |
+
|
273 |
+
@swallow_all_exceptions(logger)
def run():
    """Entry point of the Evaluate Model component.

    Parses arguments, validates them, builds the metrics config, loads the test
    data, runs evaluation, saves outputs, and stamps run properties. All
    exceptions are wrapped/re-raised by @swallow_all_exceptions.
    """
    parser = ArgumentParser()
    # Inputs
    parser.add_argument("--task", type=str, dest="task", required=True, choices=constants.ALL_TASKS)
    parser.add_argument("--data", type=str, dest="data", required=True, default=None)
    parser.add_argument("--config-file-name", type=str, dest="config_file_name", required=False, default=None)
    parser.add_argument("--label-column-name", type=str, dest="label_column_name", required=True)
    # Comma-separated list; blank entries are dropped.
    parser.add_argument("--input-column-names",
                        type=lambda x: [i.strip() for i in x.split(",") if i and not i.isspace()],
                        dest="input_column_names", required=False, default=None)
    parser.add_argument("--mlflow-model", type=str, dest="mlflow_model", required=True, default=None)
    parser.add_argument("--model-uri", type=str, dest="model_uri", required=False, default="")
    parser.add_argument("--device", type=str, dest="device", required=True, choices=constants.ALL_DEVICES,
                        default=constants.DEVICE.AUTO)
    parser.add_argument("--batch-size", type=int, dest="batch_size", required=False, default=1)
    parser.add_argument("--config_str", type=str, dest="config_str", required=False, default=None)

    # Outputs
    parser.add_argument("--output", type=str, dest="output")

    args, unknown_args_ = parser.parse_known_args()

    with log_activity(logger, constants.TelemetryConstants.VALIDATION_NAME,
                      custom_dimensions=custom_dims_dict):
        logger.info("Validating arguments: " + repr(args.__dict__))
        validate_args(args)

    # NOTE(review): args.config is presumably populated from --config_str /
    # --config-file-name by the project ArgumentParser or validate_args — confirm.
    config = args.config
    pipeline_params = filter_pipeline_params(config)
    config = fetch_compute_metrics_args(config, args.task)
    config.update(pipeline_params)

    # --mlflow-model takes precedence over --model-uri when both are supplied.
    model_uri = args.model_uri.strip()
    mlflow_model = args.mlflow_model
    if mlflow_model:
        model_uri = mlflow_model

    data = args.data
    is_mltable = check_and_return_if_mltable(data)

    runner = EvaluateModel(
        task=args.task,
        output=args.output,
        model_uri=model_uri,
        config=config,
        device=args.device,
        batch_size=args.batch_size
    )

    with log_activity(logger, activity_name=constants.TelemetryConstants.DATA_LOADING,
                      custom_dimensions=custom_dims_dict):
        try:
            data = runner.load_data(test_data=data, label_column_name=args.label_column_name,
                                    input_column_names=args.input_column_names, is_mltable=is_mltable)
        except Exception as e:
            message = "Load data failed."
            exception = DataLoaderException._with_error(
                AzureMLError.create(BadInputData, error=repr(e))
            )
            exception.inner_exception = e
            log_traceback(exception, logger, message, True)
            raise exception

    with log_activity(logger, constants.TelemetryConstants.EVALUATE_MODEL_NAME,
                      custom_dimensions=custom_dims_dict):
        logger.info("Model Evaluation.")
        result = runner.score(data=data, label_column_name=args.label_column_name)

    logger.info("Logging and Saving outputs.")
    runner.log_and_write_outputs(result)

    test_run.add_properties(properties=constants.RUN_PROPERTIES)
    try:
        root_run.add_properties(properties=constants.ROOT_RUN_PROPERTIES)
    except Exception:
        # Adding an existing property raises; safe to ignore.
        logger.info("PipelineType is already a property at Root Pipeline Run.")
    test_run.complete()
    return
|
353 |
+
|
354 |
+
|
355 |
+
# Script entry point. The original used `__name__ in "__main__"`, a substring
# containment test that only works for "__main__" by accident (it would also be
# true for any substring such as "main"); equality is the correct check.
if __name__ == "__main__":
    run()
|
exceptions.py
ADDED
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""File to create AzureML Based Exceptions for Model Evaluation."""
|
5 |
+
|
6 |
+
from azureml.exceptions import AzureMLException
|
7 |
+
from azureml._common._error_definition.azureml_error import AzureMLError # type: ignore
|
8 |
+
from error_definitions import ModelEvaluationInternalError
|
9 |
+
from constants import ExceptionLiterals
|
10 |
+
from functools import wraps
|
11 |
+
import time
|
12 |
+
import logging
|
13 |
+
|
14 |
+
|
15 |
+
def swallow_all_exceptions(logger: logging.Logger):
    """Decorator factory that converts any exception into an AzureMLException.

    1. Catch all the exceptions arising in the functions wherever used
    2. Raise the exception as an AzureML Exception so that it does not get scrubbed by PII scrubber

    Non-AzureML exceptions are wrapped as ModelEvaluationInternalError; the
    (wrapped) exception is logged, log handlers are flushed, and it is re-raised.

    :param logger: The logger to be used for logging the exception raised
    :type logger: Instance of logging.logger
    """
    def wrap(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # Preserve AzureML exceptions as-is; wrap anything else.
                if isinstance(e, AzureMLException):
                    azureml_exception = e
                else:
                    azureml_exception = AzureMLException._with_error(
                        AzureMLError.create(ModelEvaluationInternalError, error=e))

                logger.error("Exception {} when calling {}".format(azureml_exception, func.__name__))
                for handler in logger.handlers:
                    handler.flush()
                raise azureml_exception
            finally:
                # Runs on success AND failure: gives the async telemetry
                # logger time to flush before the process exits.
                time.sleep(60)  # Let telemetry logger flush its logs before terminating.

        return wrapper

    return wrap
|
46 |
+
|
47 |
+
|
48 |
+
class ModelEvaluationException(AzureMLException):
    """Base exception for all Model Evaluation component failures."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 target=None, details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize the exception, defaulting the target to MODEL_EVALUATION_TARGET.

        Args:
            exception_message: Human-readable error message.
            inner_exception: Original exception being wrapped. Defaults to None.
            target: Error target (component/area); falls back to
                ExceptionLiterals.MODEL_EVALUATION_TARGET when falsy.
            details: Additional error details. Defaults to None.
            message_format: Unformatted message template. Defaults to None.
            message_parameters: Values substituted into message_format. Defaults to None.
            reference_code: Reference code for error triage. Defaults to None.
        """
        if not target:
            target = ExceptionLiterals.MODEL_EVALUATION_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
80 |
+
|
81 |
+
|
82 |
+
class ArgumentValidationException(ModelEvaluationException):
    """Raised when user-supplied arguments fail validation (target: ARGS_TARGET)."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize with the ARGS_TARGET error target.

        Args:
            exception_message: Human-readable error message.
            inner_exception: Original exception being wrapped. Defaults to None.
            details: Additional error details. Defaults to None.
            message_format: Unformatted message template. Defaults to None.
            message_parameters: Values substituted into message_format. Defaults to None.
            reference_code: Reference code for error triage. Defaults to None.
        """
        target = ExceptionLiterals.ARGS_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
116 |
+
|
117 |
+
|
118 |
+
class DataValidationException(ModelEvaluationException):
    """Raised when input data fails validation (target: DATA_TARGET)."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize with the DATA_TARGET error target.

        Args:
            exception_message: Human-readable error message.
            inner_exception: Original exception being wrapped. Defaults to None.
            details: Additional error details. Defaults to None.
            message_format: Unformatted message template. Defaults to None.
            message_parameters: Values substituted into message_format. Defaults to None.
            reference_code: Reference code for error triage. Defaults to None.
        """
        target = ExceptionLiterals.DATA_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
152 |
+
|
153 |
+
|
154 |
+
class DataLoaderException(ModelEvaluationException):
    """Raised when test data cannot be loaded (target: DATA_LOADING_TARGET)."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize with the DATA_LOADING_TARGET error target.

        Args:
            exception_message: Human-readable error message.
            inner_exception: Original exception being wrapped. Defaults to None.
            details: Additional error details. Defaults to None.
            message_format: Unformatted message template. Defaults to None.
            message_parameters: Values substituted into message_format. Defaults to None.
            reference_code: Reference code for error triage. Defaults to None.
        """
        target = ExceptionLiterals.DATA_LOADING_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
188 |
+
|
189 |
+
|
190 |
+
class ModelValidationException(ModelEvaluationException):
    """Raised when the model cannot be loaded/validated (target: MODEL_LOADER_TARGET)."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize with the MODEL_LOADER_TARGET error target.

        Args:
            exception_message: Human-readable error message.
            inner_exception: Original exception being wrapped. Defaults to None.
            details: Additional error details. Defaults to None.
            message_format: Unformatted message template. Defaults to None.
            message_parameters: Values substituted into message_format. Defaults to None.
            reference_code: Reference code for error triage. Defaults to None.
        """
        target = ExceptionLiterals.MODEL_LOADER_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
224 |
+
|
225 |
+
|
226 |
+
class ScoringException(ModelEvaluationException):
    """Raised when scoring/evaluation fails (target: MODEL_EVALUATION_TARGET)."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize with the MODEL_EVALUATION_TARGET error target.

        Args:
            exception_message: Human-readable error message.
            inner_exception: Original exception being wrapped. Defaults to None.
            details: Additional error details. Defaults to None.
            message_format: Unformatted message template. Defaults to None.
            message_parameters: Values substituted into message_format. Defaults to None.
            reference_code: Reference code for error triage. Defaults to None.
        """
        target = ExceptionLiterals.MODEL_EVALUATION_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
260 |
+
|
261 |
+
|
262 |
+
class PredictException(ModelEvaluationException):
    """Exception raised when model prediction fails during evaluation."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize a PredictException targeted at the model-evaluation component.

        Args:
            exception_message: Human-readable description of the prediction failure.
            inner_exception (optional): Underlying exception that caused this one. Defaults to None.
            details (optional): Additional error details. Defaults to None.
            message_format (optional): Unformatted (parameter-free) version of the message. Defaults to None.
            message_parameters (optional): Values substituted into ``message_format``. Defaults to None.
            reference_code (optional): Caller-supplied code locating the error origin. Defaults to None.
        """
        # Prediction failures are attributed to the model-evaluation target.
        target = ExceptionLiterals.MODEL_EVALUATION_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
296 |
+
|
297 |
+
|
298 |
+
class ComputeMetricsException(ModelEvaluationException):
    """Exception raised when metric computation fails during evaluation."""

    def __init__(self,
                 exception_message,
                 inner_exception=None,
                 details=None,
                 message_format=None,
                 message_parameters=None,
                 reference_code=None,
                 **kwargs):
        """Initialize a ComputeMetricsException targeted at the model-evaluation component.

        Args:
            exception_message: Human-readable description of the metrics failure.
            inner_exception (optional): Underlying exception that caused this one. Defaults to None.
            details (optional): Additional error details. Defaults to None.
            message_format (optional): Unformatted (parameter-free) version of the message. Defaults to None.
            message_parameters (optional): Values substituted into ``message_format``. Defaults to None.
            reference_code (optional): Caller-supplied code locating the error origin. Defaults to None.
        """
        # Metric-computation failures are attributed to the model-evaluation target.
        target = ExceptionLiterals.MODEL_EVALUATION_TARGET
        super().__init__(exception_message,
                         inner_exception=inner_exception,
                         target=target,
                         details=details,
                         message_format=message_format,
                         message_parameters=message_parameters,
                         reference_code=reference_code,
                         **kwargs)
|
generation_config.json
CHANGED
@@ -1,6 +1,5 @@
|
|
1 |
{
|
2 |
"_from_model_config": true,
|
3 |
-
"
|
4 |
-
"
|
5 |
-
"use_cache": false
|
6 |
}
|
|
|
1 |
{
|
2 |
"_from_model_config": true,
|
3 |
+
"eos_token_id": 2,
|
4 |
+
"transformers_version": "4.30.2"
|
|
|
5 |
}
|
image_constants.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
"""Image constants for evaluation."""
|
4 |
+
|
5 |
+
|
6 |
+
class SettingLiterals:
    """Setting literals for classification dataset."""

    # Keys expected in the evaluation settings dictionary.
    LABEL_COLUMN_NAME = "label_column_name"
    # Whether segmentation masks are required (bool-valued setting).
    MASKS_REQUIRED = "masks_required"
    # Whether a background label should be used (bool-valued setting).
    USE_BG_LABEL = "use_bg_label"
    # Whether data errors should be ignored instead of raised (bool-valued setting).
    IGNORE_DATA_ERRORS = "ignore_data_errors"
    # Confidence threshold applied to predicted boxes.
    BOX_SCORE_THRESHOLD = "box_score_threshold"
    # IoU threshold used for box matching.
    IOU_THRESHOLD = "iou_threshold"
|
15 |
+
|
16 |
+
|
17 |
+
class ImageDataConstants:
    """Data constants."""

    # Default confidence threshold for keeping predicted boxes.
    DEFAULT_BOX_SCORE_THRESHOLD = 0.3
    # Default IoU threshold for box matching.
    DEFAULT_IOU_THRESHOLD = 0.5
|
22 |
+
|
23 |
+
|
24 |
+
class ImageDataFrameParams:
    """DataFrame parameters for image dataset."""

    # Column holding the base64-encoded image payload.
    IMAGE_COLUMN_NAME = "image"
    # Column holding ground-truth labels.
    LABEL_COLUMN_NAME = "label"
    # Column holding per-image metadata (e.g. size info).
    IMAGE_META_INFO = "image_meta_info"
    # Column holding model predictions.
    PREDICTIONS = "predictions"
|
31 |
+
|
32 |
+
|
33 |
+
class ODISLiterals:
    """Object detection and instance segmentation literals."""

    # Bounding-box keys (plural forms for batched payloads).
    BOXES = "boxes"
    BOX = "box"
    CLASSES = "classes"
    CLASS = "class"
    SCORES = "scores"
    SCORE = "score"
    LABELS = "labels"
    LABEL = "label"
    # Box-corner coordinate keys.
    TOP_X = "topX"
    TOP_Y = "topY"
    BOTTOM_X = "bottomX"
    BOTTOM_Y = "bottomY"
    NUM_CLASSES = "num_classes"
    # Instance-segmentation keys.
    MASKS = "masks"
    POLYGON = "polygon"
    # Image-dimension keys.
    HEIGHT = "height"
    WIDTH = "width"
    LABEL_INDEX = "label_index"
|
image_dataset.py
ADDED
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""AzureML ACFT Image evaluation component - image dataset."""
|
5 |
+
|
6 |
+
|
7 |
+
from __future__ import annotations
|
8 |
+
import base64
|
9 |
+
import json
|
10 |
+
import pandas as pd
|
11 |
+
import torch
|
12 |
+
import numpy as np
|
13 |
+
|
14 |
+
from PIL import Image
|
15 |
+
from torch import Tensor
|
16 |
+
from typing import cast, Dict, Tuple
|
17 |
+
|
18 |
+
import constants
|
19 |
+
|
20 |
+
from image_constants import SettingLiterals, ImageDataFrameParams, ODISLiterals
|
21 |
+
from logging_utilities import get_logger
|
22 |
+
|
23 |
+
from azureml.automl.core.shared.constants import MLTableLiterals, MLTableDataLabel
|
24 |
+
|
25 |
+
from azureml.acft.common_components.image.runtime_common.common import (
|
26 |
+
utils,
|
27 |
+
)
|
28 |
+
from azureml.acft.common_components.image.runtime_common.common.aml_dataset_base_wrapper import (
|
29 |
+
AmlDatasetBaseWrapper,
|
30 |
+
)
|
31 |
+
from azureml.acft.common_components.image.runtime_common.classification.io.read.dataset_wrappers import (
|
32 |
+
AmlDatasetWrapper,
|
33 |
+
)
|
34 |
+
from azureml.acft.common_components.image.runtime_common.object_detection.common import (
|
35 |
+
masktools
|
36 |
+
)
|
37 |
+
from azureml.acft.common_components.image.runtime_common.object_detection.data.dataset_wrappers import (
|
38 |
+
CommonObjectDetectionDatasetWrapper,
|
39 |
+
DatasetProcessingType,
|
40 |
+
)
|
41 |
+
from azureml.acft.common_components.image.runtime_common.object_detection.data import (
|
42 |
+
datasets,
|
43 |
+
)
|
44 |
+
from azureml.acft.common_components.image.runtime_common.object_detection.data.datasets import (
|
45 |
+
CommonObjectDetectionDataset,
|
46 |
+
)
|
47 |
+
from azureml.core import Workspace
|
48 |
+
from azureml.core.run import Run
|
49 |
+
|
50 |
+
logger = get_logger(name=__name__)
|
51 |
+
|
52 |
+
|
53 |
+
def get_workspace() -> Workspace:
    """Resolve the AzureML workspace.

    Prefers the workspace of the currently executing run; falls back to
    the local workspace configuration when no run context is available.

    :return: Workspace
    """
    try:
        return Run.get_context().experiment.workspace
    except Exception:
        return Workspace.from_config()
|
63 |
+
|
64 |
+
|
65 |
+
class RuntimeDetectionDatasetAdapter(CommonObjectDetectionDatasetWrapper):
    """Dataset adapter class that makes Runtime dataset classes suitable for finetune components."""

    def __init__(self, dataset: CommonObjectDetectionDataset) -> None:
        """Dataset adapter class that makes Runtime dataset classes suitable for finetune components. \
        It prepares the input parameters and directs the call to corresponding methods in inherited class. \
        It also modifies the output (before returning) to make it more generic and suitable for finetune components.

        :param dataset: Common object detection dataset
        :type dataset: CommonObjectDetectionDataset.
        """
        # Since, we don't want to apply any augmentation from runtime dataset, setting following values.
        # We will apply augmentation/ pre-processing from finetune components.
        # NOTE(review): this mutates the passed-in dataset in place — the caller's
        # object loses its transform; confirm no other consumer relies on it.
        dataset.apply_automl_train_augmentations = False
        dataset._transform = None

        super().__init__(dataset, DatasetProcessingType.IMAGES)

    def __getitem__(self, index: int) -> Tuple[Tensor, dict, dict]:
        """Convert output of dataset get item to make it generalized and usable in components.

        :param index: Index of object
        :type index: int
        :return: Image tensor in de-normalized form [0-255], training labels and image info
        :rtype: Tuple[Tensor, dict, dict]
        """
        image, training_labels, image_info = super().__getitem__(index)

        # Invalid/unreadable entries surface as a None image; propagate with empty metadata.
        if image is None:
            return None, {}, {}

        # CommonObjectDetectionDatasetWrapper returns the normalized image. This adapter returns
        # the image in generic de-normalized format to the frameworks (MMD need image in denormalized format).

        with torch.no_grad():
            image = torch.mul(image, 255)
            image = image.to(torch.uint8)

        # Replace numeric label indices with class-name strings, keeping the
        # original indices under the CLASSES key.
        training_labels[ODISLiterals.CLASSES] = training_labels[ODISLiterals.LABELS].numpy()
        training_labels[ODISLiterals.LABELS] = np.array([self._dataset.index_to_label(x)
                                                         for x in training_labels[ODISLiterals.LABELS]])

        training_labels[ODISLiterals.BOXES] = training_labels[ODISLiterals.BOXES].numpy()
        # rle masks need for computing metrics
        if ODISLiterals.MASKS in training_labels:
            training_labels[ODISLiterals.MASKS] = [masktools.encode_mask_as_rle(mask)
                                                   for mask in training_labels[ODISLiterals.MASKS]]
        return image, training_labels, image_info
|
113 |
+
|
114 |
+
|
115 |
+
def _combine_mltables(training_mltable: str, validation_mltable: str = None) -> str:
    """Serialize one or two mltable paths into the single JSON blob expected by get_tabular_dataset.

    :param training_mltable: The training mltable path
    :param validation_mltable: The validation mltable path
    :return: mltable in serialized json format
    """
    combined = {
        MLTableDataLabel.TrainData.value: {
            MLTableLiterals.MLTABLE_RESOLVEDURI: training_mltable
        }
    }
    if validation_mltable is not None:
        combined[MLTableDataLabel.ValidData.value] = {
            MLTableLiterals.MLTABLE_RESOLVEDURI: validation_mltable
        }
    return json.dumps(combined)
|
126 |
+
|
127 |
+
|
128 |
+
def is_valid_image(image_path):
    """Check whether ``image_path`` is a readable image with exactly 3 bands.

    :param image_path: The image path
    :return: True if the image opens successfully and has 3 bands (RGB), False otherwise
    """
    try:
        # Context manager ensures the underlying file handle is released;
        # the original code leaked the handle returned by Image.open.
        with Image.open(image_path) as img:
            return len(img.getbands()) == 3
    except Exception:
        # Unreadable or corrupt files are treated as invalid rather than fatal.
        return False
|
140 |
+
|
141 |
+
|
142 |
+
def read_image(image_path):
    """Load the raw bytes of an image file.

    :param image_path: The image path
    :return: File content as bytes
    """
    with open(image_path, "rb") as image_file:
        content = image_file.read()
    return content
|
149 |
+
|
150 |
+
|
151 |
+
def get_classification_dataset(
    testing_mltable: str,
    settings: Dict = None,
    multi_label: bool = False,
) -> AmlDatasetWrapper:
    """
    Return test dataset for classification task from mltable.

    :param testing_mltable: The testing mltable path
    :param settings: Settings dictionary (defaults to an empty dict)
    :param multi_label: True if multi label classification, False otherwise
    :return: Data Frame with test images (base64-encoded) and labels
    """
    # Avoid the shared mutable default argument `settings={}`.
    settings = {} if settings is None else settings

    mltable = _combine_mltables(testing_mltable)

    dataset_wrapper: AmlDatasetBaseWrapper = cast(AmlDatasetBaseWrapper, AmlDatasetWrapper)

    ws = get_workspace()

    test_tabular_ds, valid_tabular_ds = utils.get_tabular_dataset(settings=settings, mltable_json=mltable)

    utils.download_or_mount_image_files(
        settings=settings,
        train_ds=test_tabular_ds,
        validation_ds=valid_tabular_ds,
        dataset_class=dataset_wrapper,
        workspace=ws,
    )

    label_column_name = settings.get(SettingLiterals.LABEL_COLUMN_NAME, None)
    test_dataset_wrapper = AmlDatasetWrapper(
        test_tabular_ds,
        multilabel=multi_label,
        label_column_name=label_column_name,
    )

    logger.info(
        f"# test images: {len(test_dataset_wrapper)}, \
        # labels: {test_dataset_wrapper.num_classes}"
    )

    # Collect rows first and build the DataFrame once: DataFrame.append is
    # removed in pandas 2.0 and quadratic when called inside a loop.
    rows = []
    for index in range(len(test_dataset_wrapper)):
        image_path = test_dataset_wrapper.get_image_full_path(index)
        if is_valid_image(image_path):
            rows.append({
                ImageDataFrameParams.IMAGE_COLUMN_NAME: base64.encodebytes(read_image(image_path)).decode("utf-8"),
                ImageDataFrameParams.LABEL_COLUMN_NAME: test_dataset_wrapper.label_at_index(index)
            })

    return pd.DataFrame(rows, columns=[ImageDataFrameParams.IMAGE_COLUMN_NAME,
                                       ImageDataFrameParams.LABEL_COLUMN_NAME])
|
202 |
+
|
203 |
+
|
204 |
+
def get_object_detection_dataset(
    test_mltable: str,
    settings: Dict = None,
    masks_required: bool = False,
) -> pd.DataFrame:
    """Return test dataset for object detection and instance segmentation task from mltable.

    :param test_mltable: The test mltable path
    :type test_mltable: str
    :param settings: Settings dictionary (defaults to an empty dict)
    :type settings: Dict
    :param masks_required: mask required or not for segmentation. Optional, default False
    :type masks_required: bool
    :return: Data Frame with test images (base64-encoded), labels and image metadata
    :rtype: pd.DataFrame
    """
    # Avoid the shared mutable default argument `settings={}`.
    settings = {} if settings is None else settings

    mltable = _combine_mltables(test_mltable, test_mltable)

    dataset_wrapper: AmlDatasetBaseWrapper = cast(
        AmlDatasetBaseWrapper, datasets.AmlDatasetObjectDetection
    )
    test_tabular_ds, _ = utils.get_tabular_dataset(
        settings=settings, mltable_json=mltable
    )

    ws = get_workspace()

    utils.download_or_mount_image_files(
        settings=settings,
        train_ds=test_tabular_ds,
        validation_ds=None,
        dataset_class=dataset_wrapper,
        workspace=ws,
    )
    logger.info("# downloaded test images")

    use_bg_label = settings.get(SettingLiterals.USE_BG_LABEL, False)
    ignore_data_errors = settings.get(SettingLiterals.IGNORE_DATA_ERRORS, True)

    test_dataset = datasets.AmlDatasetObjectDetection(dataset=test_tabular_ds, is_train=False,
                                                      ignore_data_errors=ignore_data_errors,
                                                      settings=settings, use_bg_label=use_bg_label,
                                                      masks_required=masks_required,)
    logger.info(
        f"# test images: {len(test_dataset)}, # labels: {test_dataset.num_classes}"
    )
    test_dataset_wrapper = RuntimeDetectionDatasetAdapter(test_dataset)

    # Collect rows first and build the DataFrame once: DataFrame.append is
    # removed in pandas 2.0 and quadratic when called inside a loop.
    rows = []
    for index in range(len(test_dataset_wrapper)):
        _, label, image_meta_info = test_dataset_wrapper[index]
        image_path = test_dataset_wrapper._dataset._dataset_elements[index].image_url

        if is_valid_image(image_path):
            rows.append({
                ImageDataFrameParams.IMAGE_COLUMN_NAME: base64.encodebytes(read_image(image_path)).decode("utf-8"),
                ImageDataFrameParams.LABEL_COLUMN_NAME: label,
                ImageDataFrameParams.IMAGE_META_INFO: image_meta_info
            })

    logger.info(f"Total number of valid images: {len(rows)}")
    return pd.DataFrame(rows, columns=[ImageDataFrameParams.IMAGE_COLUMN_NAME,
                                       ImageDataFrameParams.LABEL_COLUMN_NAME,
                                       ImageDataFrameParams.IMAGE_META_INFO])
|
274 |
+
|
275 |
+
|
276 |
+
def get_image_dataset(task_type, test_mltable, settings=None):
    """
    Return test dataset for image tasks from mltable.

    :param task_type: Image task type (classification, multilabel classification,
        object detection or instance segmentation)
    :param test_mltable: The test mltable path
    :param settings: Settings dictionary (defaults to an empty dict)
    :return: Data Frame with test image paths and labels
    :raises ValueError: If ``task_type`` is not a supported image task.
    """
    # Avoid the shared mutable default argument `settings={}`.
    settings = {} if settings is None else settings

    if task_type in [constants.TASK.IMAGE_CLASSIFICATION, constants.TASK.IMAGE_CLASSIFICATION_MULTILABEL]:
        return get_classification_dataset(
            testing_mltable=test_mltable,
            settings=settings,
            multi_label=task_type == constants.TASK.IMAGE_CLASSIFICATION_MULTILABEL,
        )
    elif task_type in [constants.TASK.IMAGE_OBJECT_DETECTION, constants.TASK.IMAGE_INSTANCE_SEGMENTATION]:
        return get_object_detection_dataset(
            test_mltable=test_mltable,
            settings=settings,
            masks_required=task_type == constants.TASK.IMAGE_INSTANCE_SEGMENTATION,
        )
    else:
        raise ValueError(f"Task type {task_type} not supported")
|
index.md
ADDED
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
# Index
|
3 |
+
Azure Machine Learning is a cloud service that you use to train, deploy, automate,
|
4 |
+
and manage machine learning models. This index should assist in navigating the Azure
|
5 |
+
Machine Learning notebook samples and encourage efficient retrieval of topics and content.
|
6 |
+
![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/Index.png)
|
7 |
+
|
8 |
+
## Getting Started
|
9 |
+
|
10 |
+
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|
11 |
+
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
|
12 |
+
| [Using Azure ML environments](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/using-environments/using-environments.ipynb) | Creating and registering environments | None | Local | None | None | None |
|
13 |
+
|
14 |
+
## Tutorials
|
15 |
+
|
16 |
+
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|
17 |
+
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
|
18 |
+
| [Forecasting BikeShare Demand](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-bike-share/auto-ml-forecasting-bike-share.ipynb) | Forecasting | BikeShare | Remote | None | Azure ML AutoML | Forecasting |
|
19 |
+
| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-orange-juice-sales/auto-ml-forecasting-orange-juice-sales.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None |
|
20 |
+
| [Forecasting orange juice sales with deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-pipelines/auto-ml-forecasting-pipelines.ipynb) | Forecasting | Orange Juice Sales | Remote | Azure Container Instance | Azure ML AutoML | None |
|
21 |
+
| [Register a model and deploy locally](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local.ipynb) | Deployment | None | Local | Local | None | None |
|
22 |
+
| :star:[Data drift quickdemo](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datadrift-tutorial/datadrift-tutorial.ipynb) | Filtering | NOAA | Remote | None | Azure ML | Dataset, Timeseries, Drift |
|
23 |
+
| :star:[Datasets with ML Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/pipeline-with-datasets/pipeline-for-image-classification.ipynb) | Train | Fashion MNIST | Remote | None | Azure ML | Dataset, Pipeline, Estimator, ScriptRun |
|
24 |
+
| :star:[Filtering data using Tabular Timeseries Dataset related API](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/timeseries-datasets/tabular-timeseries-dataset-filtering.ipynb) | Filtering | NOAA | Local | None | Azure ML | Dataset, Tabular Timeseries |
|
25 |
+
| :star:[Train with Datasets (Tabular and File)](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/train-with-datasets/train-with-datasets.ipynb) | Train | Iris, Diabetes | Remote | None | Azure ML | Dataset, Estimator, ScriptRun |
|
26 |
+
| [Forecasting away from training data](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-forecast-function/auto-ml-forecasting-function.ipynb) | Forecasting | None | Remote | None | Azure ML AutoML | Forecasting, Confidence Intervals |
|
27 |
+
| [Automated ML run with basic edition features.](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-bank-marketing-all-features/auto-ml-classification-bank-marketing-all-features.ipynb) | Classification | Bankmarketing | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
|
28 |
+
| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.ipynb) | Classification | Creditcard | AML Compute | None | None | remote_run, AutomatedML |
|
29 |
+
| [Classification of credit card fraudulent transactions using Automated ML](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/classification-credit-card-fraud-local-managed/auto-ml-classification-credit-card-fraud-local-managed.ipynb) | Classification | Creditcard | AML Compute | None | None | AutomatedML |
|
30 |
+
| [Automated ML run with featurization and model explainability.](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/regression-explanation-featurization/auto-ml-regression-explanation-featurization.ipynb) | Regression | MachineData | AML | ACI | None | featurization, explainability, remote_run, AutomatedML |
|
31 |
+
| [auto-ml-forecasting-backtest-single-model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb) | | None | Remote | None | Azure ML AutoML | |
|
32 |
+
| :star:[Azure Machine Learning Pipeline with DataTranferStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.ipynb) | Demonstrates the use of DataTranferStep | Custom | ADF | None | Azure ML | None |
|
33 |
+
| [Getting Started with Azure Machine Learning Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) | Getting Started notebook for AML Pipelines | Custom | AML Compute | None | Azure ML | None |
|
34 |
+
| [Azure Machine Learning Pipeline with AzureBatchStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-azurebatch-to-run-a-windows-executable.ipynb) | Demonstrates the use of AzureBatchStep | Custom | Azure Batch | None | Azure ML | None |
|
35 |
+
| :star:[How to use ModuleStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-modulestep.ipynb) | Demonstrates the use of ModuleStep | Custom | AML Compute | None | Azure ML | None |
|
36 |
+
| :star:[How to use Pipeline Drafts to create a Published Pipeline](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-how-to-use-pipeline-drafts.ipynb) | Demonstrates the use of Pipeline Drafts | Custom | AML Compute | None | Azure ML | None |
|
37 |
+
| :star:[Azure Machine Learning Pipeline with HyperDriveStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-parameter-tuning-with-hyperdrive.ipynb) | Demonstrates the use of HyperDriveStep | Custom | AML Compute | None | Azure ML | None |
|
38 |
+
| :star:[How to Publish a Pipeline and Invoke the REST endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb) | Demonstrates the use of Published Pipelines | Custom | AML Compute | None | Azure ML | None |
|
39 |
+
| :star:[How to Setup a Schedule for a Published Pipeline or Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-schedule-for-a-published-pipeline.ipynb) | Demonstrates the use of Schedules for Published Pipelines and Pipeline endpoints | Custom | AML Compute | None | Azure ML | None |
|
40 |
+
| [How to setup a versioned Pipeline Endpoint](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-setup-versioned-pipeline-endpoints.ipynb) | Demonstrates the use of PipelineEndpoint to run a specific version of the Published Pipeline | Custom | AML Compute | None | Azure ML | None |
|
41 |
+
| :star:[How to use DataPath as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-datapath-and-pipelineparameter.ipynb) | Demonstrates the use of DataPath as a PipelineParameter | Custom | AML Compute | None | Azure ML | None |
|
42 |
+
| :star:[How to use Dataset as a PipelineParameter](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-showcasing-dataset-and-pipelineparameter.ipynb) | Demonstrates the use of Dataset as a PipelineParameter | Custom | AML Compute | None | Azure ML | None |
|
43 |
+
| [How to use AdlaStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-adla-as-compute-target.ipynb) | Demonstrates the use of AdlaStep | Custom | Azure Data Lake Analytics | None | Azure ML | None |
|
44 |
+
| :star:[How to use DatabricksStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-databricks-as-compute-target.ipynb) | Demonstrates the use of DatabricksStep | Custom | Azure Databricks | None | Azure ML, Azure Databricks | None |
|
45 |
+
| :star:[How to use KustoStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-use-kusto-as-compute-target.ipynb) | Demonstrates the use of KustoStep | Custom | Kusto | None | Azure ML, Kusto | None |
|
46 |
+
| :star:[How to use AutoMLStep with AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb) | Demonstrates the use of AutoMLStep | Custom | AML Compute | None | Automated Machine Learning | None |
|
47 |
+
| [Azure Machine Learning Pipeline with CommandStep for R](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep-r.ipynb) | Demonstrates the use of CommandStep for running R scripts | Custom | AML Compute | None | Azure ML | None |
|
48 |
+
| [Azure Machine Learning Pipeline with CommandStep](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-commandstep.ipynb) | Demonstrates the use of CommandStep | Custom | AML Compute | None | Azure ML | None |
|
49 |
+
| :star:[Azure Machine Learning Pipelines with Data Dependency](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-data-dependency-steps.ipynb) | Demonstrates how to construct a Pipeline with data dependency between steps | Custom | AML Compute | None | Azure ML | None |
|
50 |
+
| [How to use run a notebook as a step in AML Pipelines](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-notebook-runner-step.ipynb) | Demonstrates the use of NotebookRunnerStep | Custom | AML Compute | None | Azure ML | None |
|
51 |
+
| [Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-keras-auto-logging/train-and-deploy-keras-auto-logging.ipynb) | Use MLflow with Azure Machine Learning to Train and Deploy Keras Image Classifier, leveraging MLflow auto logging | MNIST | Local, AML Compute | Azure Container Instance | Keras | mlflow, keras |
|
52 |
+
| [Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image Classifier](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb) | Use MLflow with Azure Machine Learning to train and deploy PyTorch image classifier model | MNIST | Local, AML Compute | Azure Container Instance | PyTorch | mlflow, pytorch |
|
53 |
+
| [Use MLflow projects with Azure Machine Learning to train a model with local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-local/train-projects-local.ipynb) | Use MLflow projects with Azure Machine Learning to train a model using local compute | | Local | | ScikitLearn | mlflow, scikit |
|
54 |
+
| [Use MLflow projects with Azure Machine Learning to train a model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-projects-remote/train-projects-remote.ipynb) | Use MLflow projects with Azure Machine Learning to train a model using azureml compute | | AML Compute | | Scikit | mlflow, scikit |
|
55 |
+
| [How to use ScriptRun with data input and output](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/work-with-data/datasets-tutorial/scriptrun-with-data-input-output/how-to-use-scriptrun.ipynb) | Demonstrates the use of Scriptrun with datasets | Custom | AML Compute | None | Azure ML | Dataset, ScriptRun |
|
56 |
+
|
57 |
+
## Training
|
58 |
+
|
59 |
+
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|
60 |
+
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
|
61 |
+
| [Train a model with a custom Docker image](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/fastai/fastai-with-custom-docker/fastai-with-custom-docker.ipynb) | Train with custom Docker image | Oxford IIIT Pet | AML Compute | None | Pytorch | None |
|
62 |
+
| [Train a DNN using hyperparameter tuning and deploying with Keras](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/keras/train-hyperparameter-tune-deploy-with-keras/train-hyperparameter-tune-deploy-with-keras.ipynb) | Create a multi-class classifier | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
|
63 |
+
| [Distributed training with PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-distributeddataparallel/distributed-pytorch-with-distributeddataparallel.ipynb) | Train a model using distributed training via PyTorch DistributedDataParallel | CIFAR-10 | AML Compute | None | PyTorch | None |
|
64 |
+
| [Distributed PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/pytorch/distributed-pytorch-with-horovod/distributed-pytorch-with-horovod.ipynb) | Train a model using the distributed training via Horovod | MNIST | AML Compute | None | PyTorch | None |
|
65 |
+
| [Training with hyperparameter tuning using PyTorch](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/pytorch/train-hyperparameter-tune-deploy-with-pytorch/train-hyperparameter-tune-deploy-with-pytorch.ipynb) | Train an image classification model using transfer learning with the PyTorch estimator | ImageNet | AML Compute | Azure Container Instance | PyTorch | None |
|
66 |
+
| [Training and hyperparameter tuning with Scikit-learn](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train-hyperparameter-tune-deploy-with-sklearn.ipynb) | Train a support vector machine (SVM) to perform classification | Iris | AML Compute | None | Scikit-learn | None |
|
67 |
+
| [Distributed training using TensorFlow with Horovod](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/distributed-tensorflow-with-horovod/distributed-tensorflow-with-horovod.ipynb) | Use the TensorFlow estimator to train a word2vec model | None | AML Compute | None | TensorFlow | None |
|
68 |
+
| [Hyperparameter tuning and warm start using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow/hyperparameter-tune-and-warm-start-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
|
69 |
+
| [Training and hyperparameter tuning using the TensorFlow estimator](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/train-hyperparameter-tune-deploy-with-tensorflow/train-hyperparameter-tune-deploy-with-tensorflow.ipynb) | Train a deep neural network | MNIST | AML Compute | Azure Container Instance | TensorFlow | None |
|
70 |
+
| [Resuming a model](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/tensorflow/train-tensorflow-resume-training/train-tensorflow-resume-training.ipynb) | Resume a model in TensorFlow from a previously submitted run | MNIST | AML Compute | None | TensorFlow | None |
|
71 |
+
| [Using Tensorboard](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/tensorboard/export-run-history-to-tensorboard/export-run-history-to-tensorboard.ipynb) | Export the run history as Tensorboard logs | None | None | None | TensorFlow | None |
|
72 |
+
| [Training in Spark](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-in-spark/train-in-spark.ipynb) | Submitting a run on a spark cluster | None | HDI cluster | None | PySpark | None |
|
73 |
+
| [Train on Azure Machine Learning Compute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-amlcompute/train-on-amlcompute.ipynb) | Submit a run on Azure Machine Learning Compute. | Diabetes | AML Compute | None | None | None |
|
74 |
+
| [Train on local compute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-local/train-on-local.ipynb) | Train a model locally | Diabetes | Local | None | None | None |
|
75 |
+
| [Train in a remote Linux virtual machine](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/training/train-on-remote-vm/train-on-remote-vm.ipynb) | Configure and execute a run | Diabetes | Data Science Virtual Machine | None | None | None |
|
76 |
+
| [Managing your training runs](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/manage-runs/manage-runs.ipynb) | Monitor and complete runs | None | Local | None | None | None |
|
77 |
+
| [Tensorboard integration with run history](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/tensorboard/tensorboard/tensorboard.ipynb) | Run a TensorFlow job and view its Tensorboard output live | None | Local, DSVM, AML Compute | None | TensorFlow | None |
|
78 |
+
| [Use MLflow with AML for a local training run](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-local/train-local.ipynb) | Use MLflow tracking APIs together with Azure Machine Learning for storing your metrics and artifacts | Diabetes | Local | None | None | None |
|
79 |
+
| [Use MLflow with AML for a remote training run](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train-remote.ipynb) | Use MLflow tracking APIs together with AML for storing your metrics and artifacts | Diabetes | AML Compute | None | None | None |
|
80 |
+
|
81 |
+
|
82 |
+
## Deployment
|
83 |
+
|
84 |
+
|
85 |
+
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|
86 |
+
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
|
87 |
+
| [Deploy MNIST digit recognition with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-mnist-deploy.ipynb) | Image Classification | MNIST | Local | Azure Container Instance | ONNX | ONNX Model Zoo |
|
88 |
+
| [Deploy Facial Expression Recognition (FER+) with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-inference-facial-expression-recognition-deploy.ipynb) | Facial Expression Recognition | Emotion FER | Local | Azure Container Instance | ONNX | ONNX Model Zoo |
|
89 |
+
| :star:[Register model and deploy as webservice](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-cloud/model-register-and-deploy.ipynb) | Deploy a model with Azure Machine Learning | Diabetes | None | Azure Container Instance | Scikit-learn | None |
|
90 |
+
| [Train MNIST in PyTorch, convert, and deploy with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-train-pytorch-aml-deploy-mnist.ipynb) | Image Classification | MNIST | AML Compute | Azure Container Instance | ONNX | ONNX Converter |
|
91 |
+
| [Deploy ResNet50 with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-modelzoo-aml-deploy-resnet50.ipynb) | Image Classification | ImageNet | Local | Azure Container Instance | ONNX | ONNX Model Zoo |
|
92 |
+
| :star:[Convert and deploy TinyYolo with ONNX Runtime](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-convert-aml-deploy-tinyyolo.ipynb) | Object Detection | PASCAL VOC | local | Azure Container Instance | ONNX | ONNX Converter |
|
93 |
+
| [Register Spark model and deploy as webservice](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/spark/model-register-and-deploy-spark.ipynb) | | Iris | None | Azure Container Instance | PySpark | |
|
94 |
+
|
95 |
+
|
96 |
+
## Other Notebooks
|
97 |
+
|Title| Task | Dataset | Training Compute | Deployment Target | ML Framework | Tags |
|
98 |
+
|:----|:-----|:-------:|:----------------:|:-----------------:|:------------:|:------------:|
|
99 |
+
| [DNN Text Featurization](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/classification-text-dnn/auto-ml-classification-text-dnn.ipynb) | Text featurization using DNNs for classification | None | AML Compute | None | None | None |
|
100 |
+
| [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) | | | | | | |
|
101 |
+
| [fairlearn-azureml-mitigation](https://github.com/Azure/MachineLearningNotebooks/blob/master/contrib/fairness/fairlearn-azureml-mitigation.ipynb) | | | | | | |
|
102 |
+
| [upload-fairness-dashboard](https://github.com/Azure/MachineLearningNotebooks/blob/master/contrib/fairness/upload-fairness-dashboard.ipynb) | | | | | | |
|
103 |
+
| [azure-ml-with-nvidia-rapids](https://github.com/Azure/MachineLearningNotebooks/blob/master/contrib/RAPIDS/azure-ml-with-nvidia-rapids.ipynb) | | | | | | |
|
104 |
+
| [auto-ml-continuous-retraining](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb) | | | | | | |
|
105 |
+
| [codegen-for-autofeaturization](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-codegen/codegen-for-autofeaturization.ipynb) | | | | | | |
|
106 |
+
| [custom-model-training-from-autofeaturization-run](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/autofeaturization-custom-model-training/custom-model-training-from-autofeaturization-run.ipynb) | | | | | | |
|
107 |
+
| [auto-ml-regression-model-proxy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/experimental/regression-model-proxy/auto-ml-regression-model-proxy.ipynb) | | | | | | |
|
108 |
+
| [auto-ml-forecasting-backtest-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) | | | | | | |
|
109 |
+
| [auto-ml-forecasting-energy-demand](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb) | | | | | | |
|
110 |
+
| [auto-ml-forecasting-github-dau](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-github-dau/auto-ml-forecasting-github-dau.ipynb) | | | | | | |
|
111 |
+
| [auto-ml-forecasting-hierarchical-timeseries](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-hierarchical-timeseries/auto-ml-forecasting-hierarchical-timeseries.ipynb) | | | | | | |
|
112 |
+
| [auto-ml-forecasting-many-models](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-many-models/auto-ml-forecasting-many-models.ipynb) | | | | | | |
|
113 |
+
| [auto-ml-forecasting-univariate-recipe-experiment-settings](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-experiment-settings.ipynb) | | | | | | |
|
114 |
+
| [auto-ml-forecasting-univariate-recipe-run-experiment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/forecasting-recipes-univariate/auto-ml-forecasting-univariate-recipe-run-experiment.ipynb) | | | | | | |
|
115 |
+
| [auto-ml-regression](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/automated-machine-learning/regression/auto-ml-regression.ipynb) | | | | | | |
|
116 |
+
| [automl-databricks-local-01](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/automl-databricks-local-01.ipynb) | | | | | | |
|
117 |
+
| [automl-databricks-local-with-deployment](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-databricks/automl/automl-databricks-local-with-deployment.ipynb) | | | | | | |
|
118 |
+
| [spark_job_on_synapse_spark_pool](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/spark_job_on_synapse_spark_pool.ipynb) | | | | | | |
|
119 |
+
| [spark_session_on_synapse_spark_pool](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/spark_session_on_synapse_spark_pool.ipynb) | | | | | | |
|
120 |
+
| [Synapse_Job_Scala_Support](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/Synapse_Job_Scala_Support.ipynb) | | | | | | |
|
121 |
+
| [Synapse_Session_Scala_Support](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/azure-synapse/Synapse_Session_Scala_Support.ipynb) | | | | | | |
|
122 |
+
| [multi-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-multi-model/multi-model-register-and-deploy.ipynb) | | | | | | |
|
123 |
+
| [register-model-deploy-local-advanced](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/deploy-to-local/register-model-deploy-local-advanced.ipynb) | | | | | | |
|
124 |
+
| [enable-app-insights-in-production-service](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/enable-app-insights-in-production-service/enable-app-insights-in-production-service.ipynb) | | | | | | |
|
125 |
+
| [onnx-model-register-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/onnx/onnx-model-register-and-deploy.ipynb) | | | | | | |
|
126 |
+
| [production-deploy-to-aks-ssl](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks-ssl.ipynb) | | | | | | |
|
127 |
+
| [production-deploy-to-aks](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks/production-deploy-to-aks.ipynb) | | | | | | |
|
128 |
+
| [production-deploy-to-aks-gpu](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/deployment/production-deploy-to-aks-gpu/production-deploy-to-aks-gpu.ipynb) | | | | | | |
|
129 |
+
| [train-explain-model-gpu-tree-explainer](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/gpu-explanation/train-explain-model-gpu-tree-explainer.ipynb) | | | | | | |
|
130 |
+
| [explain-model-on-amlcompute](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/remote-explanation/explain-model-on-amlcompute.ipynb) | | | | | | |
|
131 |
+
| [save-retrieve-explanations-run-history](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/run-history/save-retrieve-explanations-run-history.ipynb) | | | | | | |
|
132 |
+
| [train-explain-model-locally-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-locally-and-deploy.ipynb) | | | | | | |
|
133 |
+
| [train-explain-model-on-amlcompute-and-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/explain-model/azure-integration/scoring-time/train-explain-model-on-amlcompute-and-deploy.ipynb) | | | | | | |
|
134 |
+
| [training_notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/notebook_runner/training_notebook.ipynb) | | | | | | |
|
135 |
+
| [nyc-taxi-data-regression-model-building](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) | | | | | | |
|
136 |
+
| [authentication-in-azureml](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azureml.ipynb) | | | | | | |
|
137 |
+
| [pong_rllib](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/reinforcement-learning/atari-on-distributed-compute/pong_rllib.ipynb) | | | | | | |
|
138 |
+
| [cartpole_ci](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/reinforcement-learning/cartpole-on-compute-instance/cartpole_ci.ipynb) | | | | | | |
|
139 |
+
| [cartpole_sc](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/reinforcement-learning/cartpole-on-single-compute/cartpole_sc.ipynb) | | | | | | |
|
140 |
+
| [rai-loan-decision](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.ipynb) | | | | | | |
|
141 |
+
| [Logging APIs](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/track-and-monitor-experiments/logging-api/logging-api.ipynb) | Logging APIs and analyzing results | None | None | None | None | None |
|
142 |
+
| [configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/setup-environment/configuration.ipynb) | | | | | | |
|
143 |
+
| [quickstart-azureml-automl](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/compute-instance-quickstarts/quickstart-azureml-automl/quickstart-azureml-automl.ipynb) | | | | | | |
|
144 |
+
| [quickstart-azureml-in-10mins](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/compute-instance-quickstarts/quickstart-azureml-in-10mins/quickstart-azureml-in-10mins.ipynb) | | | | | | |
|
145 |
+
| [quickstart-azureml-python-sdk](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/compute-instance-quickstarts/quickstart-azureml-python-sdk/quickstart-azureml-python-sdk.ipynb) | | | | | | |
|
146 |
+
| [tutorial-1st-experiment-sdk-train](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/create-first-ml-experiment/tutorial-1st-experiment-sdk-train.ipynb) | | | | | | |
|
147 |
+
| [img-classification-part1-training](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/image-classification-mnist-data/img-classification-part1-training.ipynb) | | | | | | |
|
148 |
+
| [img-classification-part2-deploy](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/image-classification-mnist-data/img-classification-part2-deploy.ipynb) | | | | | | |
|
149 |
+
| [img-classification-part3-deploy-encrypted](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/image-classification-mnist-data/img-classification-part3-deploy-encrypted.ipynb) | | | | | | |
|
150 |
+
| [tutorial-pipeline-batch-scoring-classification](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb) | | | | | | |
|
151 |
+
| [regression-automated-ml](https://github.com/Azure/MachineLearningNotebooks/blob/master/tutorials/regression-automl-nyc-taxi-data/regression-automated-ml.ipynb) | | | | | | |
|
license_header.txt
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright 2021-present, the Recognai S.L. team.
|
2 |
+
|
3 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
you may not use this file except in compliance with the License.
|
5 |
+
You may obtain a copy of the License at
|
6 |
+
|
7 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
|
9 |
+
Unless required by applicable law or agreed to in writing, software
|
10 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
See the License for the specific language governing permissions and
|
13 |
+
limitations under the License.
|
logging_utilities.py
ADDED
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""Logging utilities."""
|
5 |
+
import logging
|
6 |
+
import platform
|
7 |
+
import constants
|
8 |
+
import uuid
|
9 |
+
import json
|
10 |
+
import azureml.core
|
11 |
+
import traceback
|
12 |
+
|
13 |
+
from azureml.telemetry.logging_handler import get_appinsights_log_handler
|
14 |
+
from azureml.telemetry import INSTRUMENTATION_KEY
|
15 |
+
from azureml.exceptions import AzureMLException
|
16 |
+
from run_utils import TestRun
|
17 |
+
from typing import Tuple, Union
|
18 |
+
|
19 |
+
# Placeholder substituted for any log text that might contain PII before export.
NON_PII_MESSAGE = '[Hidden as it may contain PII]'
|
20 |
+
|
21 |
+
|
22 |
+
class AppInsightsPIIStrippingFormatter(logging.Formatter):
    """Formatter that scrubs potential PII before App Insights export.

    Records carrying an ``exception_tb_obj`` attribute have their message
    rebuilt from the sanitised fields stored in ``record.properties``;
    every other record is formatted unchanged.
    """

    def format(self, record: logging.LogRecord) -> str:
        """Format ``record``, rewriting the message for flagged exceptions.

        Args:
            record (logging.LogRecord): The record being emitted.

        Returns:
            str: The formatted log line.
        """
        if getattr(record, 'exception_tb_obj', None) is None:
            # Nothing to scrub -- plain records pass straight through.
            return super().format(record)

        unavailable = '[Not available]'
        props = getattr(record, 'properties', {})

        safe_message = props.get('exception_message', NON_PII_MESSAGE)
        safe_traceback = props.get('exception_traceback', unavailable)

        parts = [
            f"Type: {props.get('error_type', constants.ExceptionTypes.Unclassified)}",
            f"Class: {props.get('exception_class', unavailable)}",
            f"Message: {safe_message}",
            f"Traceback: {safe_traceback}",
            f"ExceptionTarget: {props.get('exception_target', unavailable)}",
        ]
        record.message = record.msg = '\n'.join(parts)

        # Keep the extra properties in sync with the scrubbed message.
        props['exception_message'] = safe_message

        return super().format(record)
|
61 |
+
|
62 |
+
|
63 |
+
class CustomDimensions:
    """Custom dimensions attached to every App Insights telemetry record."""

    # Optional run-info keys copied onto the instance only when present and
    # truthy, so the telemetry payload carries no empty fields.
    _OPTIONAL_RUN_INFO_KEYS = (
        "model_asset_id",
        "model_source",
        "model_registry_name",
        "model_name",
        "model_version",
        "pipeline_type",
        "source",
    )

    def __init__(self,
                 run_details,
                 app_name=constants.TelemetryConstants.COMPONENT_NAME,
                 model_evaluation_version="0.0.10",
                 os_info=platform.system(),
                 task_type="") -> None:
        """Capture run metadata used as telemetry custom dimensions.

        Args:
            run_details: Run wrapper (``TestRun``) describing the current run.
            app_name (str, optional): Component name reported to telemetry.
                Defaults to ``constants.TelemetryConstants.COMPONENT_NAME``.
            model_evaluation_version (str, optional): Fallback module version
                used when the run info has no ``moduleVersion``. Defaults to
                ``"0.0.10"``.
            os_info (str, optional): Operating system name. Defaults to
                ``platform.system()``.
            task_type (str, optional): Task type; if empty, recovered from the
                ``--task`` command-line argument when available.
        """
        self.app_name = app_name
        self.run_id = run_details.run.id
        self.common_core_version = azureml.core.__version__
        self.compute_target = run_details.compute
        self.experiment_id = run_details.experiment.id
        self.parent_run_id = run_details.parent_run.id
        self.root_run_id = run_details.root_run.id
        self.os_info = os_info
        self.region = run_details.region
        self.subscription_id = run_details.subscription
        self.task_type = task_type
        self.rootAttribution = run_details.root_attribute
        run_info = run_details.get_extra_run_info
        self.moduleVersion = run_info.get("moduleVersion", model_evaluation_version)
        self.location = run_info.get("location", "")

        # Single lookup per key (the original called run_info.get twice per key).
        for key in self._OPTIONAL_RUN_INFO_KEYS:
            value = run_info.get(key)
            if value:
                setattr(self, key, value)

        if self.task_type == "":
            import sys
            args = sys.argv
            if "--task" in args:
                ind = args.index("--task")
                # Guard against "--task" being the final argument, which would
                # otherwise raise IndexError.
                if ind + 1 < len(args):
                    self.task_type = args[ind + 1]
|
118 |
+
|
119 |
+
|
120 |
+
# Resolve the current run context once at import time; every logging handler
# created in this module shares these singleton telemetry dimensions.
current_run = TestRun()
custom_dimensions = CustomDimensions(current_run)
|
122 |
+
|
123 |
+
|
124 |
+
class ModelEvaluationHandler(logging.StreamHandler):
    """Stream handler used for both remote and local runs.

    Every record is stamped with a unique ``log_id`` plus the module-level
    custom dimensions before being written to the underlying stream.
    """

    def emit(self, record: logging.LogRecord) -> None:
        """Write ``record`` to the output stream with telemetry properties.

        Args:
            record (logging.LogRecord): The record to emit.
        """
        extra_props = getattr(record, "properties", {})
        extra_props.update({'log_id': str(uuid.uuid4())})
        merged = vars(custom_dimensions).copy()
        merged.update(extra_props)
        setattr(record, "properties", merged)
        msg = self.format(record)
        # NOTE(review): this flag is set *after* formatting, so it cannot
        # affect this handler's output; it appears intended to signal later
        # handlers (the App Insights formatter scrubs records that carry
        # ``exception_tb_obj``) -- confirm before changing the order.
        if record.levelname == 'ERROR' and 'AzureMLException' not in record.message:
            setattr(record, "exception_tb_obj", "non-azureml exception raised so scrubbing")
        stream = self.stream
        stream.write(msg)
|
148 |
+
|
149 |
+
|
150 |
+
def get_logger(logging_level: str = 'DEBUG',
               custom_dimensions: dict = vars(custom_dimensions),
               name: str = constants.TelemetryConstants.LOGGER_NAME):
    """Build (or fetch) the module logger with stream and App Insights handlers.

    Handlers are added at most once: the existing handler names are checked so
    repeated calls do not duplicate output.

    Args:
        logging_level (str, optional): Textual level name, e.g. ``'DEBUG'``.
        custom_dimensions (dict, optional): Telemetry dimensions. The default
            is evaluated once at definition time -- NOTE(review): presumably
            intentional, since it snapshots the module-level dimensions
            created at import.
        name (str, optional): Logger name. Defaults to
            ``constants.TelemetryConstants.LOGGER_NAME``.

    Raises:
        ValueError: If ``logging_level`` is not a valid level name.

    Returns:
        logging.Logger: The configured logger instance.
    """
    numeric_log_level = getattr(logging, logging_level.upper(), None)
    if not isinstance(numeric_log_level, int):
        raise ValueError('Invalid log level: %s' % logging_level)

    logger = logging.getLogger(name)
    logger.propagate = True
    logger.setLevel(numeric_log_level)
    handler_names = [handler.get_name() for handler in logger.handlers]

    # Robustness: tolerate caller-supplied dimension dicts without a run_id.
    run_id = custom_dimensions.get("run_id", "")
    app_name = constants.TelemetryConstants.COMPONENT_NAME

    # Single shared format string. The original duplicated this literal and
    # used a backslash continuation *inside* the string, which embedded the
    # source indentation into every emitted log line.
    log_format = (
        '%(asctime)s [{}] [{}] [%(module)s] %(funcName)s +%(lineno)s: '
        '%(levelname)-8s [%(process)d] %(message)s \n'.format(app_name, run_id)
    )

    if constants.TelemetryConstants.MODEL_EVALUATION_HANDLER_NAME not in handler_names:
        stream_handler = ModelEvaluationHandler()
        stream_handler.setFormatter(logging.Formatter(log_format))
        stream_handler.setLevel(numeric_log_level)
        stream_handler.set_name(constants.TelemetryConstants.MODEL_EVALUATION_HANDLER_NAME)
        logger.addHandler(stream_handler)

    if constants.TelemetryConstants.APP_INSIGHT_HANDLER_NAME not in handler_names:
        # Silence azureml.telemetry's own child logger so only our records
        # reach App Insights.
        child_namespace = __name__
        current_logger = logging.getLogger("azureml.telemetry").getChild(child_namespace)
        current_logger.propagate = False
        current_logger.setLevel(logging.CRITICAL)
        appinsights_handler = get_appinsights_log_handler(
            instrumentation_key=INSTRUMENTATION_KEY,
            logger=current_logger, properties=custom_dimensions
        )
        appinsights_handler.setFormatter(AppInsightsPIIStrippingFormatter(fmt=log_format))
        appinsights_handler.setLevel(numeric_log_level)
        appinsights_handler.set_name(constants.TelemetryConstants.APP_INSIGHT_HANDLER_NAME)
        logger.addHandler(appinsights_handler)

    return logger
|
208 |
+
|
209 |
+
|
210 |
+
def _get_error_details(
        exception: BaseException, logger: Union[logging.Logger, logging.LoggerAdapter]
) -> Tuple[str, str, str]:
    """
    Extract the error details from the base exception.

    For exceptions outside AzureML (e.g. Python errors), all properties are set as 'Unclassified'

    :param exception: The exception from which to extract the error details
    :param logger: The logger object to log to
    :return: An error code, error type (i.e. UserError or SystemError) and exception's target
    """
    default_target = "Unspecified"
    unclassified = constants.ExceptionTypes.Unclassified
    error_code, error_type, exception_target = unclassified, unclassified, default_target

    # Non-AzureML exceptions carry no structured error payload.
    if not isinstance(exception, AzureMLException):
        return error_code, error_type, exception_target

    try:
        fallback = {"code": unclassified, "inner_error": {}, "target": default_target}
        error = json.loads(exception._serialize_json()).get("error", fallback)
        # The complete hierarchy of the error.
        error_code = str(error.get("inner_error", unclassified))
        # One of 'UserError' or 'SystemError'.
        error_type = error.get("code")
        exception_target = error.get("target")
    except Exception:
        logger.warning(
            "Failed to parse error details while logging traceback from exception of type {}".format(exception)
        )

    return error_code, error_type, exception_target
+
def log_traceback(exception: AzureMLException, logger, message=None, is_critical=False):
    """Log exceptions without PII in APP Insights and full tracebacks in logger.

    Args:
        exception (AzureMLException): The exception to log.
        logger: Logger (or LoggerAdapter) that receives the formatted record.
        message (str, optional): Override for the logged message. Defaults to
            ``exception.message``.
        is_critical (bool, optional): Accepted for backward compatibility;
            not used by this implementation. Defaults to False.
    """
    if message is None:
        message = exception.message
    exception_class_name = exception.__class__.__name__

    error_code, error_type, exception_target = _get_error_details(exception, logger)
    traceback_obj = exception.__traceback__
    traceback_message = message
    if traceback_obj is None:
        # BUGFIX: the original called getattr on traceback_obj, which is
        # known to be None here, so the inner-exception fallback could never
        # trigger. The inner exception lives on the exception object itself.
        inner_exception = getattr(exception, "inner_exception", None)
        if inner_exception is not None:
            traceback_obj = inner_exception.__traceback__
    if traceback_obj is not None:
        traceback_message = "\n".join(traceback.format_tb(traceback_obj))
    logger_message = "\n".join([
        "Type: {}".format(error_type),
        "Code: {}".format(error_code),
        "Class: {}".format(exception_class_name),
        "Message: {}".format(message),
        "Traceback: {}".format(traceback_message),
        "ExceptionTarget: {}".format(exception_target)
    ])

    # 'properties' is picked up by the App Insights handler as custom
    # dimensions; 'exception_tb_obj' carries the raw traceback for local logs.
    extra = {
        "properties": {
            "error_code": error_code,
            "error_type": error_type,
            "exception_class": exception_class_name,
            "exception_message": message,
            "exception_traceback": traceback_message,
            "exception_target": exception_target,
        },
        "exception_tb_obj": traceback_obj,
    }

    logger.error(logger_message, extra=extra)
model_prediction.py
ADDED
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT License.
|
3 |
+
|
4 |
+
"""Entry script for Model Evaluation Component."""
|
5 |
+
|
6 |
+
import azureml.evaluate.mlflow as aml_mlflow
|
7 |
+
import pandas as pd
|
8 |
+
import constants
|
9 |
+
import torch
|
10 |
+
import ast
|
11 |
+
from itertools import repeat
|
12 |
+
|
13 |
+
from exceptions import (
|
14 |
+
ModelValidationException,
|
15 |
+
DataLoaderException,
|
16 |
+
PredictException,
|
17 |
+
ModelEvaluationException,
|
18 |
+
swallow_all_exceptions
|
19 |
+
)
|
20 |
+
from error_definitions import ModelPredictionInternalError, BadModel, BadInputData
|
21 |
+
from logging_utilities import custom_dimensions, current_run, get_logger, log_traceback
|
22 |
+
from azureml.telemetry.activity import log_activity
|
23 |
+
from image_constants import ImageDataFrameParams
|
24 |
+
from utils import (
|
25 |
+
ArgumentParser,
|
26 |
+
check_and_return_if_mltable,
|
27 |
+
get_predictor,
|
28 |
+
read_data,
|
29 |
+
prepare_data,
|
30 |
+
filter_pipeline_params
|
31 |
+
)
|
32 |
+
from validation import _validate, validate_args
|
33 |
+
from azureml._common._error_definition.azureml_error import AzureMLError
|
34 |
+
|
35 |
+
logger = get_logger(name=__name__)
|
36 |
+
custom_dimensions.app_name = constants.TelemetryConstants.MODEL_PREDICTION_NAME
|
37 |
+
# current_run = TestRun()
|
38 |
+
test_run = current_run.run
|
39 |
+
root_run = current_run.root_run
|
40 |
+
ws = current_run.workspace
|
41 |
+
aml_mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
|
42 |
+
custom_dims_dict = vars(custom_dimensions)
|
43 |
+
|
44 |
+
|
45 |
+
class ModelPredictionRunner:
    """Main Class for Inferencing all tasks modes."""

    def __init__(self, model_uri, task, device,
                 batch_size, config):
        """__init__.

        Args:
            model_uri: URI of the model to load via the task predictor.
            task: Task name; one of the values in constants.ALL_TASKS.
            device: Requested device string (constants.ALL_DEVICES).
            batch_size (int): Optional batch size forwarded to the predictor.
            config (dict): Evaluation configuration values.

        Raises:
            ModelValidationException: If the predictor fails to load the model.
        """
        self.model_uri = model_uri
        self.task = task
        self.config = config
        self.multilabel = task in constants.MULTILABEL_SET

        # Map the symbolic device to the integer convention used downstream:
        # -1 means CPU, otherwise a CUDA device index.
        self.device = device
        if device == constants.DEVICE.CPU:
            self.device = -1
        elif device == constants.DEVICE.GPU:
            if torch.cuda.is_available():
                self.device = torch.cuda.current_device()
            else:
                logger.warning("No GPU found. Using CPU instead.")
                self.device = -1
        self.batch_size = batch_size
        # Instance segmentation is the only task that needs predicted masks.
        self.masks_required = True if task == constants.TASK.IMAGE_INSTANCE_SEGMENTATION else False

        with log_activity(logger, constants.TelemetryConstants.LOAD_MODEL,
                          custom_dimensions=custom_dims_dict):
            logger.info("Loading model.")
            try:
                predictor_cls = get_predictor(self.task)
                self.predictor = predictor_cls(self.model_uri, self.task, self.device)
                logger.info(
                    f"Model loaded, Device: {getattr(self.predictor.model, 'device', 'not present')}")
            except Exception as e:
                exception = ModelValidationException._with_error(
                    AzureMLError.create(BadModel, error=repr(e)),
                    inner_exception=e
                )
                log_traceback(exception, logger)
                raise exception

    def load_data(self, test_data, label_column_name=None, input_column_names=None, is_mltable=True):
        """Load Data.

        Args:
            test_data: Path (or MLTable reference) to the test dataset.
            label_column_name (str, optional): Ground-truth column name. Defaults to None.
            input_column_names (list): Name of input column names
            is_mltable (bool, optional): Whether test_data is an MLTable. Defaults to True.

        Raises:
            DataLoaderException: If the data cannot be read or validated.

        Returns:
            Iterator yielding (X_test, y_test) chunks.
        """
        if self.task in constants.IMAGE_TASKS:
            from image_dataset import get_image_dataset
            df = get_image_dataset(task_type=self.task, test_mltable=test_data)
            # Image data arrives as one DataFrame; wrap it in an iterator so
            # all branches below produce the same chunked shape.
            data = iter([df])
            input_column_names = [ImageDataFrameParams.IMAGE_COLUMN_NAME]
            label_column_name = ImageDataFrameParams.LABEL_COLUMN_NAME
            if self.task in [constants.TASK.IMAGE_OBJECT_DETECTION,
                             constants.TASK.IMAGE_INSTANCE_SEGMENTATION]:
                input_column_names.append(ImageDataFrameParams.IMAGE_META_INFO)
        else:
            data = read_data(test_data, is_mltable)
        # Lazily validate and reshape every chunk as it is consumed.
        data = map(_validate, data, repeat(input_column_names), repeat(label_column_name))
        data = map(prepare_data, data, repeat(self.task), repeat(label_column_name))
        return data  # X_test, y_test

    def predict(self, data, label_column_name):
        """Predict.

        Args:
            data: Iterator of (X_test, y_test) chunks as produced by load_data.
            label_column_name (str): Name of the ground-truth column.

        Returns:
            Tuple of (predictions, prediction probabilities, ground truth) DataFrames.
        """
        predictions, pred_probas, y_test = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()

        for idx, (X_test, y_test_chunk) in enumerate(data):
            logger.info("batch: " + str(idx))
            y_transformer = None

            pred_probas_chunk = None
            pipeline_params = filter_pipeline_params(self.config)
            torch_error_message = "Model prediction Failed.\nPossible Reason:\n" \
                                  "1. Your input text exceeds max length of model.\n" \
                                  "\t\tYou can either keep truncation=True in tokenizer while logging model.\n" \
                                  "\t\tOr you can pass tokenizer_config in evaluation_config.\n" \
                                  "2. Your tokenizer's vocab size doesn't match with model's vocab size.\n" \
                                  "\t\tTo fix this check your model/tokenizer config.\n" \
                                  "3. If it is Cuda Assertion Error, check your test data." \
                                  "Whether that input can be passed directly to model or not."
            try:
                if self.task == constants.TASK.TRANSLATION:
                    source_lang = self.config.get("source_lang", None)
                    target_lang = self.config.get("target_lang", None)
                    predictions_chunk = self.predictor.predict(X_test, y_transformer=y_transformer,
                                                               multilabel=self.multilabel,
                                                               source_lang=source_lang, target_lang=target_lang)
                else:
                    # batching is handled in mlflow predict for image tasks.
                    if self.task in constants.IMAGE_TASKS:
                        pipeline_params.update(self.config)
                        if self.batch_size:
                            pipeline_params.update({"batch_size": self.batch_size})
                    predictions_chunk = self.predictor.predict(X_test, y_transformer=y_transformer,
                                                               multilabel=self.multilabel,
                                                               **pipeline_params)
                    if self.task in constants.CLASSIFICATION_SET:
                        pred_probas_chunk = self.predictor.predict_proba(X_test, y_transformer=y_transformer,
                                                                         multilabel=self.multilabel,
                                                                         **pipeline_params)

                # Normalize raw predictor output into DataFrames.
                if not isinstance(predictions_chunk, pd.DataFrame):
                    predictions_df = pd.DataFrame()
                    predictions_df["predictions"] = predictions_chunk
                    predictions_chunk = predictions_df
                if pred_probas_chunk is not None and not isinstance(pred_probas_chunk, pd.DataFrame):
                    pred_probas_chunk = pd.DataFrame(pred_probas_chunk)
                if y_test_chunk is not None:
                    y_test_chunk = pd.DataFrame(y_test_chunk, index=X_test.index, columns=[label_column_name])
                    # Multi-output labels may arrive as stringified lists; parse them back.
                    if isinstance(y_test_chunk[label_column_name].iloc[0], str) \
                            and self.task in constants.MULTIPLE_OUTPUTS_SET:
                        y_test_chunk[label_column_name] = y_test_chunk[label_column_name].apply(
                            lambda x: ast.literal_eval(x)
                        )
                else:
                    y_test_chunk = pd.DataFrame({})
                predictions_chunk.index = X_test.index

                if pred_probas_chunk is not None:
                    pred_probas_chunk.index = X_test.index
                else:
                    pred_probas_chunk = pd.DataFrame({})
                predictions = pd.concat([predictions, predictions_chunk], axis=0)
                pred_probas = pd.concat([pred_probas, pred_probas_chunk], axis=0)
                y_test = pd.concat([y_test, y_test_chunk], axis=0)
                if self.task in [constants.TASK.IMAGE_OBJECT_DETECTION, constants.TASK.IMAGE_INSTANCE_SEGMENTATION]:
                    # NOTE(review): this copies only the current chunk's meta info
                    # onto the accumulated frame — presumably image tasks always
                    # arrive as a single chunk (see load_data); confirm.
                    y_test["image_meta_info"] = X_test["image_meta_info"]

            except Exception as e:
                if isinstance(e, ModelEvaluationException):
                    exception = e
                else:
                    exception = PredictException._with_error(
                        AzureMLError.create(ModelPredictionInternalError, error=repr(e)),
                        inner_exception=e
                    )
                # IndexError/RuntimeError typically indicate tokenizer/model
                # mismatch or CUDA asserts — attach the troubleshooting hints.
                if type(e) in [IndexError, RuntimeError]:
                    log_traceback(exception, logger, torch_error_message)
                else:
                    log_traceback(exception, logger)
                raise exception

        return predictions, pred_probas, y_test
212 |
+
|
213 |
+
@swallow_all_exceptions(logger)
def run():
    """Entry function of model_test script."""
    parser = ArgumentParser()
    # Inputs
    parser.add_argument("--task", type=str, dest="task", required=True, choices=constants.ALL_TASKS)
    parser.add_argument("--data", type=str, dest="data", required=True)
    parser.add_argument("--mlflow-model", type=str, dest="mlflow_model", required=True)
    parser.add_argument("--label-column-name", type=str, dest="label_column_name", required=False, default=None)
    # Comma-separated list; empty and whitespace-only entries are dropped.
    parser.add_argument("--input-column-names",
                        type=lambda x: [i.strip() for i in x.split(",") if i and not i.isspace()],
                        dest="input_column_names", required=False, default=None)
    parser.add_argument("--config-file-name", type=str, dest="config_file_name", required=False, default=None)
    parser.add_argument("--config_str", type=str, dest="config_str", required=False, default=None)

    parser.add_argument("--model-uri", type=str, dest="model_uri", required=False, default="")
    parser.add_argument("--device", type=str, dest="device", required=True, choices=constants.ALL_DEVICES,
                        default=constants.DEVICE.AUTO)
    parser.add_argument("--batch-size", type=int, dest="batch_size", required=False, default=None)

    # Outputs
    parser.add_argument("--predictions", type=str, dest="predictions")
    parser.add_argument("--prediction-probabilities", type=str, dest="prediction_probabilities",
                        required=False, default=None)
    parser.add_argument("--ground-truth", type=str, dest="ground_truth", required=False, default=None)

    args, unknown_args_ = parser.parse_known_args()

    with log_activity(logger, constants.TelemetryConstants.VALIDATION_NAME,
                      custom_dimensions=custom_dims_dict):
        logger.info("Validating arguments: " + repr(args.__dict__))
        validate_args(args)

    # An explicit MLflow model path takes precedence over --model-uri.
    model_uri = args.model_uri.strip()
    mlflow_model = args.mlflow_model
    if mlflow_model:
        model_uri = mlflow_model

    # NOTE(review): no --config argument is declared above; args.config is
    # presumably populated by the project ArgumentParser/validate_args from
    # --config-file-name / --config_str — confirm.
    runner = ModelPredictionRunner(
        task=args.task,
        model_uri=model_uri,
        device=args.device,
        batch_size=args.batch_size,
        config=args.config
    )

    with log_activity(logger, constants.TelemetryConstants.DATA_LOADING,
                      custom_dimensions=custom_dims_dict):
        logger.info("Loading Data.")
        try:
            is_mltable = check_and_return_if_mltable(args.data)
            data = runner.load_data(args.data, args.label_column_name, args.input_column_names, is_mltable)
        except Exception as e:
            exception = DataLoaderException._with_error(
                AzureMLError.create(BadInputData, error=repr(e)),
                inner_exception=e
            )
            log_traceback(exception, logger)
            raise exception

    with log_activity(logger, constants.TelemetryConstants.PREDICT_NAME,
                      custom_dimensions=custom_dims_dict):
        logger.info("Model Prediction.")
        preds, pred_probas, ground_truth = runner.predict(data, args.label_column_name)

    # Persist outputs as JSON-lines files at the component's output paths.
    logger.info("Saving outputs.")
    preds.to_json(args.predictions, orient="records", lines=True)
    if pred_probas is not None:
        pred_probas.to_json(args.prediction_probabilities, orient="records", lines=True)
    if ground_truth is not None:
        ground_truth.to_json(args.ground_truth, orient="records", lines=True)
    try:
        root_run.add_properties(properties=constants.ROOT_RUN_PROPERTIES)
    except Exception:
        # The property may already be set on the root pipeline run; benign.
        logger.info("PipelineType is already a property at Root Pipeline Run.")
    test_run.complete()
    return
291 |
+
|
292 |
+
if __name__ == "__main__":
    # BUGFIX: the original used `__name__ in "__main__"`, which is substring
    # containment and would also match e.g. __name__ == "main" or "" — use
    # equality to detect direct script execution.
    run()
|
modeling_chatglm.py
ADDED
@@ -0,0 +1,1197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" PyTorch ChatGLM model. """
|
2 |
+
|
3 |
+
import math
|
4 |
+
import copy
|
5 |
+
import warnings
|
6 |
+
import sys
|
7 |
+
import torch
|
8 |
+
import torch.utils.checkpoint
|
9 |
+
import torch.nn.functional as F
|
10 |
+
from torch import nn
|
11 |
+
from torch.nn import CrossEntropyLoss, LayerNorm
|
12 |
+
from torch.nn.utils import skip_init
|
13 |
+
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
|
14 |
+
|
15 |
+
from transformers.modeling_outputs import (
|
16 |
+
BaseModelOutputWithPast,
|
17 |
+
CausalLMOutputWithPast,
|
18 |
+
)
|
19 |
+
|
20 |
+
from transformers.modeling_utils import PreTrainedModel
|
21 |
+
from transformers.utils import logging
|
22 |
+
from transformers.generation.logits_process import LogitsProcessor
|
23 |
+
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
|
24 |
+
|
25 |
+
from .configuration_chatglm import ChatGLMConfig
|
26 |
+
|
27 |
+
# flags required to enable jit fusion kernels
# (TorchScript's profiling executor disables fusion; these private torch._C
# switches are skipped on macOS, where they are not applicable)
if sys.platform != 'darwin':
    torch._C._jit_set_profiling_mode(False)
    torch._C._jit_set_profiling_executor(False)
    torch._C._jit_override_can_fuse_on_cpu(True)
    torch._C._jit_override_can_fuse_on_gpu(True)

logger = logging.get_logger(__name__)

# Names referenced by the transformers documentation decorators.
_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM2-6B"
_CONFIG_FOR_DOC = "ChatGLM6BConfig"

CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "THUDM/chatglm2-6b",
    # See all ChatGLM models at https://huggingface.co/models?filter=chatglm
]
45 |
+
|
46 |
+
def default_init(cls, *args, **kwargs):
    """Instantiate ``cls`` directly with the supplied arguments."""
    instance = cls(*args, **kwargs)
    return instance
+
|
50 |
+
class InvalidScoreLogitsProcessor(LogitsProcessor):
    """Sanitize degenerate logits: if any score is NaN/Inf, replace the whole
    distribution with a spike on token id 5."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        has_invalid = torch.isnan(scores).any() or torch.isinf(scores).any()
        if has_invalid:
            # In-place: zero everything, then put all mass on token id 5.
            scores.zero_()
            scores[..., 5] = 5e4
        return scores
|
57 |
+
|
58 |
+
class PrefixEncoder(torch.nn.Module):
    """
    The torch.nn model to encode the prefix
    Input shape: (batch-size, prefix-length)
    Output shape: (batch-size, prefix-length, 2*layers*hidden)
    """

    def __init__(self, config: ChatGLMConfig):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2
        self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size)
        if self.prefix_projection:
            # Use a two-layer MLP on top of the embedding to encode the prefix.
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(kv_size, config.hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.hidden_size, kv_size)
            )

    def forward(self, prefix: torch.Tensor):
        embedded = self.embedding(prefix)
        if self.prefix_projection:
            return self.trans(embedded)
        return embedded
89 |
+
|
90 |
+
def split_tensor_along_last_dim(
        tensor: torch.Tensor,
        num_partitions: int,
        contiguous_split_chunks: bool = False,
) -> List[torch.Tensor]:
    """Split a tensor along its last dimension.

    Arguments:
        tensor: input tensor.
        num_partitions: number of partitions to split the tensor
        contiguous_split_chunks: If True, make each chunk contiguous
            in memory.

    Returns:
        A sequence of tensors (views of the input unless made contiguous).
    """
    last_dim = tensor.dim() - 1
    chunk_size = tensor.size()[last_dim] // num_partitions
    chunks = torch.split(tensor, chunk_size, dim=last_dim)
    # torch.split yields non-contiguous views; copy them when requested.
    if contiguous_split_chunks:
        return tuple(part.contiguous() for part in chunks)
    return chunks
117 |
+
|
118 |
+
class RotaryEmbedding(nn.Module):
    """Builds the rotary position embedding cache: a (seq_len, dim/2, 2)
    table of (cos, sin) values per position."""

    def __init__(self, dim, original_impl=False, device=None, dtype=None):
        super().__init__()
        freqs = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))
        self.register_buffer("inv_freq", freqs)
        self.dim = dim
        self.original_impl = original_impl

    def forward_impl(
            self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
    ):
        """Enhanced Transformer with Rotary Position Embedding.

        Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
        transformers/rope/__init__.py. MIT License:
        https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
        """
        # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
        angular_freq = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))

        # Position indexes `[0, 1, ..., seq_len - 1]`.
        positions = torch.arange(seq_len, dtype=dtype, device=device)

        # One angle per (position, frequency) pair.
        angles = torch.outer(positions, angular_freq).float()

        cache = torch.stack([torch.cos(angles), torch.sin(angles)], dim=-1)

        # Mimic the behaviour of complex32, else we will get different results.
        if dtype in (torch.float16, torch.bfloat16, torch.int8):
            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
        return cache

    def forward(self, max_seq_len, offset=0):
        # `offset` is accepted for interface compatibility but not used here.
        return self.forward_impl(
            max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device
        )
156 |
+
|
157 |
+
@torch.jit.script
def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
    # x: [sq, b, np, hn]
    sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)
    # Only the first rot_dim features are rotated; the rest pass through.
    rot_dim = rope_cache.shape[-2] * 2
    x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
    # truncate to support variable sizes
    rope_cache = rope_cache[:sq]
    # View the rotated half as (real, imag) pairs for complex-style rotation.
    xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)
    rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)
    # Complex multiplication: (a + ib) * (cos + i sin).
    x_out2 = torch.stack(
        [
            xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
            xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
        ],
        -1,
    )
    x_out2 = x_out2.flatten(3)
    return torch.cat((x_out2, x_pass), dim=-1)
177 |
+
|
178 |
+
class RMSNorm(torch.nn.Module):
    """Root-mean-square layer norm: scale by rsqrt(mean(x^2) + eps) and a
    learned per-feature weight; no mean subtraction or bias."""

    def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
        self.eps = eps

    def forward(self, hidden_states: torch.Tensor):
        if hidden_states.dtype == torch.bfloat16:
            # bfloat16 path: normalize directly in bfloat16.
            mean_square = torch.mean(hidden_states * hidden_states, dim=-1, keepdim=True)
            normalized = hidden_states * torch.rsqrt(mean_square + self.eps)
            return self.weight * normalized
        original_dtype = hidden_states.dtype
        # Accumulate the mean-square term in float32 for numerical stability.
        mean_square = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        scaled = hidden_states * torch.rsqrt(mean_square + self.eps)
        return (self.weight * scaled).to(original_dtype)
196 |
+
|
197 |
+
class CoreAttention(torch.nn.Module):
    """Scaled dot-product attention core (projections live in SelfAttention).

    Consumes query/key/value laid out [sq, b, np, hn] and returns the attention
    context as [sq, b, hp]. On PyTorch >= 2 it dispatches to the fused
    F.scaled_dot_product_attention kernel; otherwise it falls back to an
    explicit baddbmm / softmax / bmm implementation.
    """

    def __init__(self, config: ChatGLMConfig, layer_number):
        super(CoreAttention, self).__init__()

        self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
        # Query/key layer scaling requires the softmax to run in fp32.
        if self.apply_query_key_layer_scaling:
            self.attention_softmax_in_fp32 = True
        self.layer_number = max(1, layer_number)

        projection_size = config.kv_channels * config.num_attention_heads

        # Per attention head and per partition values.
        self.hidden_size_per_partition = projection_size
        self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
        self.num_attention_heads_per_partition = config.num_attention_heads

        # With layer scaling, scores are divided by an extra per-layer factor
        # here (folded into norm_factor) and multiplied back via self.coeff
        # just before the softmax.
        coeff = None
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
        if self.apply_query_key_layer_scaling:
            coeff = self.layer_number
            self.norm_factor *= coeff
        self.coeff = coeff

        self.attention_dropout = torch.nn.Dropout(config.attention_dropout)

    def forward(self, query_layer, key_layer, value_layer, attention_mask):
        # attention_mask convention here: True marks positions that must NOT
        # be attended to (see ChatGLMPreTrainedModel.get_masks).
        pytorch_major_version = int(torch.__version__.split('.')[0])
        if pytorch_major_version >= 2:
            # [sq, b, np, hn] -> [b, np, sq, hn], the layout SDPA expects.
            query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]
            if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
                # No explicit mask and square attention: let the kernel build
                # the causal mask itself.
                context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
                                                                                 is_causal=True)
            else:
                if attention_mask is not None:
                    # SDPA's boolean attn_mask uses True = "may attend", the
                    # inverse of this model's convention, so flip it.
                    attention_mask = ~attention_mask
                context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
                                                                                 attention_mask)
            # [b, np, sq, hn] -> [sq, b, np, hn] -> [sq, b, hp]
            context_layer = context_layer.permute(2, 0, 1, 3)
            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
            context_layer = context_layer.reshape(*new_context_layer_shape)
        else:
            # Raw attention scores

            # [b, np, sq, sk]
            output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))

            # [sq, b, np, hn] -> [sq, b * np, hn]
            query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
            # [sk, b, np, hn] -> [sk, b * np, hn]
            key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

            # preallocting input tensor: [b * np, sq, sk]
            matmul_input_buffer = torch.empty(
                output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
                device=query_layer.device
            )

            # Raw attention scores. [b * np, sq, sk]
            # beta=0.0 means the uninitialized buffer only provides shape/dtype.
            matmul_result = torch.baddbmm(
                matmul_input_buffer,
                query_layer.transpose(0, 1),  # [b * np, sq, hn]
                key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
                beta=0.0,
                alpha=(1.0 / self.norm_factor),
            )

            # change view to [b, np, sq, sk]
            attention_scores = matmul_result.view(*output_size)

            # ===========================
            # Attention probs and dropout
            # ===========================

            # attention scores and attention mask [b, np, sq, sk]
            if self.attention_softmax_in_fp32:
                attention_scores = attention_scores.float()
            if self.coeff is not None:
                # Undo the extra per-layer division applied in norm_factor.
                attention_scores = attention_scores * self.coeff
            if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
                # Square attention with no mask: build a causal mask
                # (True = masked, matching the model's convention).
                attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
                                            device=attention_scores.device, dtype=torch.bool)
                attention_mask.tril_()
                attention_mask = ~attention_mask
            if attention_mask is not None:
                attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
            attention_probs = F.softmax(attention_scores, dim=-1)
            attention_probs = attention_probs.type_as(value_layer)

            # This is actually dropping out entire tokens to attend to, which might
            # seem a bit unusual, but is taken from the original Transformer paper.
            attention_probs = self.attention_dropout(attention_probs)
            # =========================
            # Context layer. [sq, b, hp]
            # =========================

            # value_layer -> context layer.
            # [sk, b, np, hn] --> [b, np, sq, hn]

            # context layer shape: [b, np, sq, hn]
            output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))
            # change view [sk, b * np, hn]
            value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)
            # change view [b * np, sq, sk]
            attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
            # matmul: [b * np, sq, hn]
            context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
            # change view [b, np, sq, hn]
            context_layer = context_layer.view(*output_size)
            # [b, np, sq, hn] --> [sq, b, np, hn]
            context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
            # [sq, b, np, hn] --> [sq, b, hp]
            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
            context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer
|
313 |
+
|
314 |
+
|
315 |
+
class SelfAttention(torch.nn.Module):
    """Parallel self-attention layer abstract class.

    Self-attention layer takes input with size [s, b, h]
    and returns output of the same size.

    Handles the fused QKV projection (with optional multi-query attention,
    where K/V use fewer head groups than Q), rotary position embedding,
    KV-cache concatenation, and the output projection.
    """

    def __init__(self, config: ChatGLMConfig, layer_number, device=None):
        super(SelfAttention, self).__init__()
        self.layer_number = max(1, layer_number)

        self.projection_size = config.kv_channels * config.num_attention_heads

        # Per attention head and per partition values.
        self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
        self.num_attention_heads_per_partition = config.num_attention_heads

        # Multi-query attention shrinks the K/V projection (and the KV cache)
        # to `multi_query_group_num` head groups instead of the full head count.
        self.multi_query_attention = config.multi_query_attention
        self.qkv_hidden_size = 3 * self.projection_size
        if self.multi_query_attention:
            self.num_multi_query_groups_per_partition = config.multi_query_group_num
            self.qkv_hidden_size = (
                self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
            )
        self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
                                         bias=config.add_bias_linear or config.add_qkv_bias,
                                         device=device, **_config_to_kwargs(config)
                                         )

        self.core_attention = CoreAttention(config, self.layer_number)

        # Output.
        self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,
                               device=device, **_config_to_kwargs(config)
                               )

    def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
        # Pre-allocate an uninitialized KV-cache buffer of shape
        # [max_seq, batch, heads, head_dim]; MQA uses the smaller group count.
        if self.multi_query_attention:
            num_attention_heads = self.num_multi_query_groups_per_partition
        else:
            num_attention_heads = self.num_attention_heads_per_partition
        return torch.empty(
            inference_max_sequence_len,
            batch_size,
            num_attention_heads,
            self.hidden_size_per_attention_head,
            dtype=dtype,
            device=device,
        )

    def forward(
            self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
    ):
        # hidden_states: [sq, b, h]
        # Returns (output [sq, b, h], kv_cache tuple or None).

        # =================================================
        # Pre-allocate memory for key-values for inference.
        # =================================================
        # =====================
        # Query, Key, and Value
        # =====================

        # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
        mixed_x_layer = self.query_key_value(hidden_states)

        if self.multi_query_attention:
            # Q gets the full head count; K and V only get the group count.
            (query_layer, key_layer, value_layer) = mixed_x_layer.split(
                [
                    self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
                ],
                dim=-1,
            )
            query_layer = query_layer.view(
                query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
            )
            key_layer = key_layer.view(
                key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
            )
            value_layer = value_layer.view(
                value_layer.size()[:-1]
                + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
            )
        else:
            new_tensor_shape = mixed_x_layer.size()[:-1] + \
                               (self.num_attention_heads_per_partition,
                                3 * self.hidden_size_per_attention_head)
            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
            (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)

        # apply relative positional encoding (rotary embedding)
        if rotary_pos_emb is not None:
            query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
            key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)

        # adjust key and value for inference: prepend the cached K/V along the
        # sequence dimension (dim 0).
        if kv_cache is not None:
            cache_k, cache_v = kv_cache
            key_layer = torch.cat((cache_k, key_layer), dim=0)
            value_layer = torch.cat((cache_v, value_layer), dim=0)
        if use_cache:
            kv_cache = (key_layer, value_layer)
        else:
            kv_cache = None

        if self.multi_query_attention:
            # Broadcast each K/V group across its query heads so CoreAttention
            # sees full-width [sq, b, np, hn] keys/values.
            key_layer = key_layer.unsqueeze(-2)
            key_layer = key_layer.expand(
                -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1
            )
            key_layer = key_layer.contiguous().view(
                key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
            )
            value_layer = value_layer.unsqueeze(-2)
            value_layer = value_layer.expand(
                -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1
            )
            value_layer = value_layer.contiguous().view(
                value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
            )

        # ==================================
        # core attention computation
        # ==================================

        context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)

        # =================
        # Output. [sq, b, h]
        # =================

        output = self.dense(context_layer)

        return output, kv_cache
|
452 |
+
|
453 |
+
|
454 |
+
def _config_to_kwargs(args):
|
455 |
+
common_kwargs = {
|
456 |
+
"dtype": args.torch_dtype,
|
457 |
+
}
|
458 |
+
return common_kwargs
|
459 |
+
|
460 |
+
|
461 |
+
class MLP(torch.nn.Module):
    """Feed-forward block with a SwiGLU activation.

    Projects the hidden state up to twice the FFN width (the doubled width
    feeds the two halves of SwiGLU), applies the gated activation, and
    projects back down to the model's hidden size.
    See https://arxiv.org/pdf/2002.05202.pdf for SwiGLU.
    """

    def __init__(self, config: ChatGLMConfig, device=None):
        super(MLP, self).__init__()

        self.add_bias = config.add_bias_linear

        # Up-projection; output width doubled because swiglu splits it in two.
        self.dense_h_to_4h = nn.Linear(
            config.hidden_size,
            config.ffn_hidden_size * 2,
            bias=self.add_bias,
            device=device,
            **_config_to_kwargs(config)
        )

        def swiglu(x):
            # Gate half the channels with SiLU and multiply into the other half.
            gate, value = torch.chunk(x, 2, dim=-1)
            return F.silu(gate) * value

        self.activation_func = swiglu

        # Down-projection back to the model hidden size.
        self.dense_4h_to_h = nn.Linear(
            config.ffn_hidden_size,
            config.hidden_size,
            bias=self.add_bias,
            device=device,
            **_config_to_kwargs(config)
        )

    def forward(self, hidden_states):
        """[s, b, h] -> [s, b, h] feed-forward transform."""
        projected = self.dense_h_to_4h(hidden_states)   # [s, b, 2 * ffn]
        activated = self.activation_func(projected)     # [s, b, ffn]
        return self.dense_4h_to_h(activated)            # [s, b, h]
|
505 |
+
|
506 |
+
|
507 |
+
class GLMBlock(torch.nn.Module):
    """A single transformer layer.

    Transformer layer takes input with size [s, b, h] and returns an
    output of the same size.

    Structure: pre-norm -> self-attention -> dropout + residual ->
    pre-norm -> MLP -> dropout + residual. The residual source is switchable
    between the layernorm output and the raw input
    (apply_residual_connection_post_layernorm).
    """

    def __init__(self, config: ChatGLMConfig, layer_number, device=None):
        super(GLMBlock, self).__init__()
        self.layer_number = layer_number

        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm

        self.fp32_residual_connection = config.fp32_residual_connection

        # Choose the normalization implementation once for both norms.
        LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
        # Layernorm on the input data.
        self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
                                             dtype=config.torch_dtype)

        # Self attention.
        self.self_attention = SelfAttention(config, layer_number, device=device)
        self.hidden_dropout = config.hidden_dropout

        # Layernorm on the attention output
        self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
                                                      dtype=config.torch_dtype)

        # MLP
        self.mlp = MLP(config, device=device)

    def forward(
            self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
    ):
        # hidden_states: [s, b, h]
        # Returns (hidden_states [s, b, h], kv_cache from the attention layer).

        # Layer norm at the beginning of the transformer layer.
        layernorm_output = self.input_layernorm(hidden_states)
        # Self attention.
        attention_output, kv_cache = self.self_attention(
            layernorm_output,
            attention_mask,
            rotary_pos_emb,
            kv_cache=kv_cache,
            use_cache=use_cache
        )

        # Residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = hidden_states

        layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
        layernorm_input = residual + layernorm_input

        # Layer norm post the self attention.
        layernorm_output = self.post_attention_layernorm(layernorm_input)

        # MLP.
        mlp_output = self.mlp(layernorm_output)

        # Second residual connection.
        if self.apply_residual_connection_post_layernorm:
            residual = layernorm_output
        else:
            residual = layernorm_input

        output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
        output = residual + output

        return output, kv_cache
|
579 |
+
|
580 |
+
|
581 |
+
class GLMTransformer(torch.nn.Module):
    """Transformer class: the stack of GLMBlock layers plus the final norm."""

    def __init__(self, config: ChatGLMConfig, device=None):
        super(GLMTransformer, self).__init__()

        self.fp32_residual_connection = config.fp32_residual_connection
        self.post_layer_norm = config.post_layer_norm

        # Number of layers.
        self.num_layers = config.num_layers

        # Transformer layers.
        def build_layer(layer_number):
            return GLMBlock(config, layer_number, device=device)

        # Layer numbers are 1-based (used by query/key layer scaling).
        self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])

        if self.post_layer_norm:
            LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
            # Final layer norm before output.
            self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
                                                 dtype=config.torch_dtype)

        self.gradient_checkpointing = False

    def _get_layer(self, layer_number):
        # NOTE: here layer_number is a 0-based index into self.layers.
        return self.layers[layer_number]

    def forward(
            self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
            use_cache: Optional[bool] = True,
            output_hidden_states: Optional[bool] = False,
    ):
        """Run all layers.

        Returns (hidden_states, presents, all_hidden_states, all_self_attentions);
        all_self_attentions is always None (per-layer attentions are not collected).
        """
        if not kv_caches:
            kv_caches = [None for _ in range(self.num_layers)]
        presents = () if use_cache else None
        if self.gradient_checkpointing and self.training:
            if use_cache:
                # Caching is pointless (and incompatible) under checkpointing.
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_self_attentions = None
        all_hidden_states = () if output_hidden_states else None
        for index in range(self.num_layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer = self._get_layer(index)
            if self.gradient_checkpointing and self.training:
                # Recompute the layer during backward to save activation memory.
                layer_ret = torch.utils.checkpoint.checkpoint(
                    layer,
                    hidden_states,
                    attention_mask,
                    rotary_pos_emb,
                    kv_caches[index],
                    use_cache
                )
            else:
                layer_ret = layer(
                    hidden_states,
                    attention_mask,
                    rotary_pos_emb,
                    kv_cache=kv_caches[index],
                    use_cache=use_cache
                )
            hidden_states, kv_cache = layer_ret
            if use_cache:
                presents = presents + (kv_cache,)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        # Final layer norm.
        if self.post_layer_norm:
            hidden_states = self.final_layernorm(hidden_states)

        return hidden_states, presents, all_hidden_states, all_self_attentions
|
661 |
+
|
662 |
+
|
663 |
+
class ChatGLMPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    is_parallelizable = False
    supports_gradient_checkpointing = True
    config_class = ChatGLMConfig
    base_model_prefix = "transformer"
    _no_split_modules = ["GLMBlock"]

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        # Intentionally a no-op: weights are expected to come from a checkpoint.
        return

    def get_masks(self, input_ids, past_key_values, padding_mask=None):
        """Build the full attention mask, shape [b, 1, sq, sk], where True
        marks positions that must NOT be attended to.

        Combines a causal (lower-triangular) mask for the current tokens,
        an all-ones block for cached past tokens, and the optional padding
        mask (padding_mask uses 1 = real token, 0 = padding).
        """
        batch_size, seq_length = input_ids.shape
        full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
        full_attention_mask.tril_()
        past_length = 0
        if past_key_values:
            # KV caches are sequence-first, so dim 0 is the cached length.
            past_length = past_key_values[0][0].shape[0]
        if past_length:
            # Past tokens are always visible: prepend an all-ones block.
            full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
                                                        device=input_ids.device), full_attention_mask), dim=-1)
        if padding_mask is not None:
            # Zero out columns that correspond to padded key positions.
            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
        if not past_length and padding_mask is not None:
            # For padded query rows, re-enable attention so softmax has
            # something to normalize over (adds 1 where padding_mask == 0).
            full_attention_mask -= padding_mask.unsqueeze(-1) - 1
        # Invert: < 0.5 (i.e. disallowed) becomes True = masked.
        full_attention_mask = (full_attention_mask < 0.5).bool()
        full_attention_mask.unsqueeze_(1)
        return full_attention_mask

    def get_position_ids(self, input_ids, device):
        """Plain 0..seq_length-1 position ids, repeated per batch row."""
        batch_size, seq_length = input_ids.shape
        position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
        return position_ids

    def _set_gradient_checkpointing(self, module, value=False):
        # Hook used by PreTrainedModel.gradient_checkpointing_enable/disable.
        if isinstance(module, GLMTransformer):
            module.gradient_checkpointing = value
|
705 |
+
|
706 |
+
|
707 |
+
class Embedding(torch.nn.Module):
    """Language model embeddings: token lookup plus layout conversion."""

    def __init__(self, config: ChatGLMConfig, device=None):
        super(Embedding, self).__init__()

        self.hidden_size = config.hidden_size
        # Word embeddings (parallel).
        self.word_embeddings = nn.Embedding(
            config.padded_vocab_size,
            self.hidden_size,
            dtype=config.torch_dtype,
            device=device
        )
        self.fp32_residual_connection = config.fp32_residual_connection

    def forward(self, input_ids):
        """Look up token embeddings and return them in [s, b, h] layout."""
        embeddings = self.word_embeddings(input_ids)
        # [b, s, h] -> [s, b, h]: avoids explicit transposes later in the stack.
        embeddings = embeddings.transpose(0, 1).contiguous()
        # Optionally promote to fp32 so residual additions accumulate in full precision.
        if self.fp32_residual_connection:
            embeddings = embeddings.float()
        return embeddings
|
733 |
+
|
734 |
+
|
735 |
+
class ChatGLMModel(ChatGLMPreTrainedModel):
    """Backbone model: embeddings + rotary embeddings + GLMTransformer encoder.

    Also owns the (untied) output projection and the optional P-tuning v2
    prefix encoder (active when config.pre_seq_len is set).
    """

    def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
        super().__init__(config)
        # empty_init skips weight materialization (weights come from a checkpoint).
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        init_kwargs = {}
        if device is not None:
            init_kwargs["device"] = device
        self.embedding = init_method(Embedding, config, **init_kwargs)
        self.num_layers = config.num_layers
        self.multi_query_group_num = config.multi_query_group_num
        self.kv_channels = config.kv_channels

        # Rotary positional embeddings
        self.seq_length = config.seq_length
        rotary_dim = (
            config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
        )

        # Only half of each head dimension is rotated (hence rotary_dim // 2).
        self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device,
                                              dtype=config.torch_dtype)
        self.encoder = init_method(GLMTransformer, config, **init_kwargs)
        self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
                                        dtype=config.torch_dtype, **init_kwargs)
        self.pre_seq_len = config.pre_seq_len
        self.prefix_projection = config.prefix_projection
        if self.pre_seq_len is not None:
            # P-tuning v2: freeze the backbone; only the prefix encoder trains.
            for param in self.parameters():
                param.requires_grad = False
            self.prefix_tokens = torch.arange(self.pre_seq_len).long()
            self.prefix_encoder = PrefixEncoder(config)
            self.dropout = torch.nn.Dropout(0.1)

    def get_input_embeddings(self):
        return self.embedding.word_embeddings

    def get_prompt(self, batch_size, device, dtype=torch.half):
        """Build per-layer prefix key/value tensors from the prefix encoder.

        Returns a tuple of num_layers elements; each has shape
        [2, pre_seq_len, batch, multi_query_group_num, kv_channels] so it can
        be unpacked as a (key, value) cache by each layer.
        """
        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
        past_key_values = past_key_values.view(
            batch_size,
            self.pre_seq_len,
            self.num_layers * 2,
            self.multi_query_group_num,
            self.kv_channels
        )
        # seq_len, b, nh, hidden_size
        past_key_values = self.dropout(past_key_values)
        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
        return past_key_values

    def forward(
            self,
            input_ids,
            position_ids: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.BoolTensor] = None,
            full_attention_mask: Optional[torch.BoolTensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ):
        """Run embeddings, masks, rotary embeddings, and the encoder stack.

        attention_mask is a padding mask (1 = real token); full_attention_mask,
        if given, is the precomputed [b, 1, sq, sk] boolean mask (True = masked).
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_length = input_ids.shape

        if inputs_embeds is None:
            inputs_embeds = self.embedding(input_ids)

        if self.pre_seq_len is not None:
            # Inject the learned prefix as the initial KV cache and widen the
            # padding mask to cover the prefix positions.
            if past_key_values is None:
                past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device,
                                                  dtype=inputs_embeds.dtype)
            if attention_mask is not None:
                attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)),
                                            attention_mask], dim=-1)

        if full_attention_mask is None:
            # Only build an explicit mask when needed (padding present, or a
            # multi-token forward with an existing cache); otherwise the
            # attention core falls back to its implicit causal mask.
            if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
                full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)

        # Rotary positional embeddings
        rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
        if position_ids is not None:
            rotary_pos_emb = rotary_pos_emb[position_ids]
        else:
            rotary_pos_emb = rotary_pos_emb[None, :seq_length]
        # [b, sq, ...] -> [sq, b, ...] to match the sequence-first layout.
        rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()

        # Run encoder.
        hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
            inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
            kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
        )

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )

    def quantize(self, weight_bit_width: int):
        """Quantize the encoder weights in place; returns self for chaining."""
        from .quantization import quantize
        quantize(self.encoder, weight_bit_width)
        return self
|
851 |
+
|
852 |
+
|
853 |
+
class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
|
854 |
+
def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
    """Wrap ChatGLMModel with the generation head and optional quantization."""
    super().__init__(config)

    self.max_sequence_length = config.max_length
    self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
    self.config = config
    self.quantized = False

    # Quantize immediately if the checkpoint's config requests it.
    if self.config.quantization_bit:
        self.quantize(self.config.quantization_bit, empty_init=True)
|
864 |
+
|
865 |
+
def _update_model_kwargs_for_generation(
        self,
        outputs: ModelOutput,
        model_kwargs: Dict[str, Any],
        is_encoder_decoder: bool = False,
        standardize_cache_format: bool = False,
) -> Dict[str, Any]:
    """Advance generation state by one step: carry the KV cache forward and
    extend attention_mask / position_ids for the newly generated token."""
    # update past_key_values
    model_kwargs["past_key_values"] = self._extract_past_from_model_output(
        outputs, standardize_cache_format=standardize_cache_format
    )

    # update attention mask: the new token is always a real (non-padding) one.
    if "attention_mask" in model_kwargs:
        attention_mask = model_kwargs["attention_mask"]
        model_kwargs["attention_mask"] = torch.cat(
            [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
        )

    # update position ids: append last position + 1.
    if "position_ids" in model_kwargs:
        position_ids = model_kwargs["position_ids"]
        new_position_id = position_ids[..., -1:].clone()
        new_position_id += 1
        model_kwargs["position_ids"] = torch.cat(
            [position_ids, new_position_id], dim=-1
        )

    # Subsequent steps feed only the last token (see prepare_inputs_for_generation).
    model_kwargs["is_first_forward"] = False
    return model_kwargs
|
895 |
+
|
896 |
+
def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        is_first_forward: bool = True,
        **kwargs
) -> dict:
    """Assemble the kwargs for one decoding step.

    On the first forward the full prompt is passed; afterwards only the last
    token (and its position id), since earlier tokens live in the KV cache.
    """
    # only last token for input_ids if past is not None
    if position_ids is None:
        position_ids = self.get_position_ids(input_ids, device=input_ids.device)
    if not is_first_forward:
        position_ids = position_ids[..., -1:]
        input_ids = input_ids[:, -1:]
    return {
        "input_ids": input_ids,
        "past_key_values": past_key_values,
        "position_ids": position_ids,
        "attention_mask": attention_mask,
        "return_last_logit": True
    }
|
918 |
+
|
919 |
+
def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        return_last_logit: Optional[bool] = False,
):
    """Language-modeling forward pass.

    Runs the backbone, projects hidden states to vocabulary logits, and — if
    `labels` is given — computes the shifted next-token cross-entropy loss
    (positions labeled -100 are ignored). `return_last_logit=True` keeps only
    the final sequence position, the decoding fast path.
    """
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    transformer_outputs = self.transformer(
        input_ids=input_ids,
        position_ids=position_ids,
        attention_mask=attention_mask,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    # hidden_states is sequence-first: [sq, b, h].
    hidden_states = transformer_outputs[0]
    if return_last_logit:
        hidden_states = hidden_states[-1:]
    lm_logits = self.transformer.output_layer(hidden_states)
    # [sq, b, vocab] -> [b, sq, vocab] for the standard HF logits layout.
    lm_logits = lm_logits.transpose(0, 1).contiguous()

    loss = None
    if labels is not None:
        # Compute the loss in fp32 regardless of the model dtype.
        lm_logits = lm_logits.to(torch.float32)

        # Shift so that tokens < n predict n
        shift_logits = lm_logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        loss_fct = CrossEntropyLoss(ignore_index=-100)
        loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        lm_logits = lm_logits.to(hidden_states.dtype)
        loss = loss.to(hidden_states.dtype)

    if not return_dict:
        output = (lm_logits,) + transformer_outputs[1:]
        return ((loss,) + output) if loss is not None else output

    return CausalLMOutputWithPast(
        loss=loss,
        logits=lm_logits,
        past_key_values=transformer_outputs.past_key_values,
        hidden_states=transformer_outputs.hidden_states,
        attentions=transformer_outputs.attentions,
    )
|
978 |
+
|
979 |
+
@staticmethod
def _reorder_cache(
        past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
    """
    Re-order the `past_key_values` cache to match `beam_idx` at each
    generation step of [`~PreTrainedModel.beam_search`] /
    [`~PreTrainedModel.beam_sample`].

    The returned tuples share memory storage with `past`
    (`index_select` along the batch dimension, which is dim 1 here).
    """
    reordered = []
    for key_states, value_states in past:
        # Move the beam indices to each tensor's device before selecting.
        picked_keys = key_states.index_select(1, beam_idx.to(key_states.device))
        picked_values = value_states.index_select(1, beam_idx.to(value_states.device))
        reordered.append((picked_keys, picked_values))
    return tuple(reordered)
|
997 |
+
|
998 |
+
def process_response(self, response):
    """Post-process a decoded model reply.

    Strips surrounding whitespace and substitutes the training-time
    placeholder token with the literal year string.
    """
    cleaned = response.strip()
    # Replace the "[[训练时间]]" (training-time) placeholder the model may emit.
    return cleaned.replace("[[训练时间]]", "2023年")
|
1002 |
+
|
1003 |
+
def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):
    """Encode a full conversation prompt for a fresh (non-incremental) turn.

    Builds the prompt via ``tokenizer.build_prompt`` from the query and
    optional (query, response) history, tokenizes it as a single-element
    batch of PyTorch tensors, and moves it onto the model's device.
    """
    prompt = tokenizer.build_prompt(query, history=history)
    encoded = tokenizer([prompt], return_tensors="pt")
    return encoded.to(self.device)
|
1008 |
+
|
1009 |
+
def build_stream_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):
    """Encode only the newest round of a conversation, for incremental decoding.

    When a history exists, only the new "[Round N]" segment is tokenized
    (without special tokens, and with the leading token dropped) so the
    cached past_key_values can be reused; otherwise the first round is
    encoded normally. The result is moved to the model's device.

    Fix: the original crashed with ``TypeError: object of type 'NoneType'
    has no len()`` when called with the default ``history=None``, because
    the else-branch evaluated ``len(history) + 1``.
    """
    if history is None:
        history = []
    if history:
        prompt = "\n\n[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
        input_ids = tokenizer.encode(prompt, add_special_tokens=False)
        # Drop the first token of the continuation segment.
        input_ids = input_ids[1:]
        inputs = tokenizer.batch_encode_plus([(input_ids, None)], return_tensors="pt", add_special_tokens=False)
    else:
        prompt = "[Round {}]\n\n问:{}\n\n答:".format(len(history) + 1, query)
        inputs = tokenizer([prompt], return_tensors="pt")
    inputs = inputs.to(self.device)
    return inputs
|
1020 |
+
|
1021 |
+
@torch.inference_mode()
def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 8192, num_beams=1,
         do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs):
    """Run one blocking chat turn.

    Returns ``(response, updated_history)`` where ``updated_history`` is
    the input history with ``(query, response)`` appended.
    """
    if history is None:
        history = []
    if logits_processor is None:
        logits_processor = LogitsProcessorList()
    # Guards against NaN/inf scores coming out of the model.
    logits_processor.append(InvalidScoreLogitsProcessor())
    generate_args = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
                     "temperature": temperature, "logits_processor": logits_processor, **kwargs}
    model_inputs = self.build_inputs(tokenizer, query, history=history)
    generated = self.generate(**model_inputs, **generate_args)
    # Keep only the newly generated continuation, dropping the prompt tokens.
    prompt_length = len(model_inputs["input_ids"][0])
    new_token_ids = generated.tolist()[0][prompt_length:]
    response = self.process_response(tokenizer.decode(new_token_ids))
    return response, history + [(query, response)]
|
1038 |
+
|
1039 |
+
@torch.inference_mode()
def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,
                max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
                return_past_key_values=False, **kwargs):
    """Stream a chat turn, yielding the partial response as it grows.

    Yields ``(response, new_history)`` tuples, or
    ``(response, new_history, past_key_values)`` when
    ``return_past_key_values`` is True. Yields that would end on an
    incomplete character are suppressed (see the "�" check below).
    """
    if history is None:
        history = []
    if logits_processor is None:
        logits_processor = LogitsProcessorList()
    # Guards against invalid (NaN/inf) scores from the model.
    logits_processor.append(InvalidScoreLogitsProcessor())
    gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
                  "temperature": temperature, "logits_processor": logits_processor, **kwargs}
    if past_key_values is None and not return_past_key_values:
        # Fresh turn: encode the whole prompt (query plus history).
        inputs = self.build_inputs(tokenizer, query, history=history)
    else:
        # Incremental decoding: encode only the newest round; the rest is
        # represented by the cached past_key_values.
        inputs = self.build_stream_inputs(tokenizer, query, history=history)
    if past_key_values is not None:
        # shape[0] is used as the cached sequence length here (the cache
        # appears to be sequence-first) — position ids and the attention
        # mask must be shifted/extended to account for it.
        past_length = past_key_values[0][0].shape[0]
        if self.transformer.pre_seq_len is not None:
            # Prefix-tuning virtual tokens are part of the cache but not of
            # the visible sequence, so exclude them from the offset.
            past_length -= self.transformer.pre_seq_len
        inputs.position_ids += past_length
        attention_mask = inputs.attention_mask
        # Prepend all-ones mask entries covering the cached tokens.
        attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)
        inputs['attention_mask'] = attention_mask
    for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,
                                        return_past_key_values=return_past_key_values, **gen_kwargs):
        if return_past_key_values:
            outputs, past_key_values = outputs
        # Strip the prompt; decode only newly generated token ids.
        outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
        response = tokenizer.decode(outputs)
        # "�" (U+FFFD) means the last token ends mid-character in UTF-8;
        # skip this yield and wait for the next token to complete it.
        if response and response[-1] != "�":
            response = self.process_response(response)
            new_history = history + [(query, response)]
            if return_past_key_values:
                yield response, new_history, past_key_values
            else:
                yield response, new_history
|
1075 |
+
|
1076 |
+
@torch.inference_mode()
def stream_generate(
        self,
        input_ids,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
        return_past_key_values=False,
        **kwargs,
):
    """Token-by-token generation loop.

    Yields the full ``input_ids`` tensor (prompt + everything generated so
    far) after each step — or ``(input_ids, past_key_values)`` when
    ``return_past_key_values`` is True — until every sequence hits an EOS
    token or a stopping criterion fires.
    """
    batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]

    if generation_config is None:
        generation_config = self.generation_config
    # Deep-copy so per-call kwargs never mutate the model's shared config.
    generation_config = copy.deepcopy(generation_config)
    model_kwargs = generation_config.update(**kwargs)
    bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id

    if isinstance(eos_token_id, int):
        eos_token_id = [eos_token_id]

    has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
    if has_default_max_length and generation_config.max_new_tokens is None:
        warnings.warn(
            f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
            "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
            " recommend using `max_new_tokens` to control the maximum length of the generation.",
            UserWarning,
        )
    elif generation_config.max_new_tokens is not None:
        # `max_new_tokens` wins over `max_length` when both are set.
        generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
        if not has_default_max_length:
            # Fix: `logger.warn` is a deprecated alias and the logging API
            # takes no warning-category argument -- the stray `UserWarning`
            # was being swallowed as a format arg. Use `logger.warning`.
            logger.warning(
                f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                "Please refer to the documentation for more information. "
                "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
            )

    if input_ids_seq_length >= generation_config.max_length:
        input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
        logger.warning(
            f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
            f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
            " increasing `max_new_tokens`."
        )

    # 2. Set generation parameters if not already defined
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

    logits_processor = self._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_seq_length,
        encoder_input_ids=input_ids,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        logits_processor=logits_processor,
    )

    stopping_criteria = self._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )
    logits_warper = self._get_logits_warper(generation_config)

    # 1 = still generating, 0 = finished (hit an EOS token).
    unfinished_sequences = torch.ones(input_ids.shape[0], device=input_ids.device, dtype=input_ids.dtype)
    scores = None
    while True:
        model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # forward pass to get next token
        outputs = self(
            **model_inputs,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
        )

        next_token_logits = outputs.logits[:, -1, :]

        # pre-process distribution
        next_token_scores = logits_processor(input_ids, next_token_logits)
        next_token_scores = logits_warper(input_ids, next_token_scores)

        # sample (or greedy argmax when do_sample is False)
        probs = nn.functional.softmax(next_token_scores, dim=-1)
        if generation_config.do_sample:
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        else:
            next_tokens = torch.argmax(probs, dim=-1)

        # update generated ids, model inputs, and length for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        model_kwargs = self._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
        )
        # A sequence stays unfinished only while its new token differs from
        # every EOS id (the sum is 0 exactly when one of them matched).
        unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())
        if return_past_key_values:
            yield input_ids, outputs.past_key_values
        else:
            yield input_ids
        # stop when each sentence is finished, or if we exceed the maximum length
        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
            break
|
1180 |
+
|
1181 |
+
def quantize(self, bits: int, empty_init=False, device=None, **kwargs):
    """Quantize the transformer encoder in place to ``bits`` bits.

    ``bits == 0`` is a no-op (returns None); a second call on an
    already-quantized model is ignored and returns ``self``.
    """
    if bits == 0:
        return

    # Imported lazily so unquantized use never requires the module.
    from .quantization import quantize as _quantize_encoder

    if self.quantized:
        logger.info("Already quantized.")
        return self

    self.quantized = True
    # Persist the chosen bit width on the config.
    self.config.quantization_bit = bits
    self.transformer.encoder = _quantize_encoder(
        self.transformer.encoder, bits, empty_init=empty_init, device=device, **kwargs
    )
    return self
|
mypy.ini
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[mypy]
|
2 |
+
python_version=3.9
|
3 |
+
|
4 |
+
mypy_path=$MYPY_CONFIG_FILE_DIR/typings
|
5 |
+
|
6 |
+
; Not all dependencies have type annotations; ignore this.
|
7 |
+
ignore_missing_imports=True
|
8 |
+
namespace_packages=True
|
9 |
+
explicit_package_bases = True
|
10 |
+
|
11 |
+
; Be strict about certain rules.
|
12 |
+
strict_equality=True
|
13 |
+
warn_unused_configs=True
|
14 |
+
no_implicit_optional=True
|
15 |
+
strict_optional=True
|
16 |
+
warn_redundant_casts=True
|
17 |
+
warn_unused_ignores=True
|
18 |
+
check_untyped_defs=True
|
19 |
+
|
20 |
+
; By default, code is not checked for type errors.
|
21 |
+
ignore_errors=True
|
22 |
+
disallow_untyped_defs=False
|
23 |
+
|
24 |
+
; However, some directories that are fully type-annotated and don't have type errors have opted in
|
25 |
+
; to type checking.
|
26 |
+
|
27 |
+
[mypy-evals.registry]
|
28 |
+
ignore_errors=False
|
29 |
+
disallow_untyped_defs=True
|
30 |
+
|
31 |
+
[mypy-evals.cli.oaievalset]
|
32 |
+
ignore_errors=False
|
33 |
+
disallow_untyped_defs=True
|
34 |
+
|
35 |
+
[mypy-evals.cli.oaieval]
|
36 |
+
ignore_errors=False
|
37 |
+
disallow_untyped_defs=True
|
38 |
+
|
39 |
+
[mypy-scripts.*]
|
40 |
+
ignore_errors=False
|
41 |
+
disallow_untyped_defs=True
|
42 |
+
|
43 |
+
[mypy-openai.*]
|
44 |
+
ignore_errors=False
|
45 |
+
disallow_untyped_defs=True
|
46 |
+
|
47 |
+
; TODO: Add the other modules here
|
pyproject.toml
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
|
2 |
+
name = "evals"
|
3 |
+
version = "1.0.3.post1"
|
4 |
+
requires-python = ">=3.9"
|
5 |
+
dependencies = [
|
6 |
+
"mypy",
|
7 |
+
"openai >= 0.27.2",
|
8 |
+
"tiktoken",
|
9 |
+
"blobfile",
|
10 |
+
"backoff",
|
11 |
+
"numpy",
|
12 |
+
"snowflake-connector-python[pandas]",
|
13 |
+
"pandas",
|
14 |
+
"datasets",
|
15 |
+
"fire",
|
16 |
+
"pydantic",
|
17 |
+
"tqdm",
|
18 |
+
"nltk",
|
19 |
+
"filelock",
|
20 |
+
"mock",
|
21 |
+
"langdetect",
|
22 |
+
'termcolor',
|
23 |
+
"lz4",
|
24 |
+
"pyzstd",
|
25 |
+
"pyyaml",
|
26 |
+
"sacrebleu",
|
27 |
+
"matplotlib",
|
28 |
+
"pytest",
|
29 |
+
"setuptools_scm",
|
30 |
+
"langchain",
|
31 |
+
"types-PyYAML",
|
32 |
+
]
|
33 |
+
|
34 |
+
[project.optional-dependencies]
|
35 |
+
formatters = [
|
36 |
+
"black",
|
37 |
+
"isort",
|
38 |
+
"autoflake"
|
39 |
+
]
|
40 |
+
|
41 |
+
[project.scripts]
|
42 |
+
oaieval = "evals.cli.oaieval:main"
|
43 |
+
oaievalset = "evals.cli.oaievalset:main"
|
44 |
+
|
45 |
+
[tool.setuptools]
|
46 |
+
packages = ["evals"]
|
pytorch_model.bin.index.json
CHANGED
@@ -1,201 +1,207 @@
|
|
1 |
{
|
2 |
"metadata": {
|
3 |
-
"total_size":
|
4 |
},
|
5 |
"weight_map": {
|
6 |
-
"transformer.
|
7 |
-
"transformer.
|
8 |
-
"transformer.
|
9 |
-
"transformer.
|
10 |
-
"transformer.
|
11 |
-
"transformer.
|
12 |
-
"transformer.
|
13 |
-
"transformer.
|
14 |
-
"transformer.
|
15 |
-
"transformer.
|
16 |
-
"transformer.
|
17 |
-
"transformer.
|
18 |
-
"transformer.
|
19 |
-
"transformer.
|
20 |
-
"transformer.
|
21 |
-
"transformer.
|
22 |
-
"transformer.
|
23 |
-
"transformer.
|
24 |
-
"transformer.
|
25 |
-
"transformer.
|
26 |
-
"transformer.
|
27 |
-
"transformer.
|
28 |
-
"transformer.
|
29 |
-
"transformer.
|
30 |
-
"transformer.
|
31 |
-
"transformer.
|
32 |
-
"transformer.
|
33 |
-
"transformer.
|
34 |
-
"transformer.
|
35 |
-
"transformer.
|
36 |
-
"transformer.
|
37 |
-
"transformer.
|
38 |
-
"transformer.
|
39 |
-
"transformer.
|
40 |
-
"transformer.
|
41 |
-
"transformer.
|
42 |
-
"transformer.
|
43 |
-
"transformer.
|
44 |
-
"transformer.
|
45 |
-
"transformer.
|
46 |
-
"transformer.
|
47 |
-
"transformer.
|
48 |
-
"transformer.
|
49 |
-
"transformer.
|
50 |
-
"transformer.
|
51 |
-
"transformer.
|
52 |
-
"transformer.
|
53 |
-
"transformer.
|
54 |
-
"transformer.
|
55 |
-
"transformer.
|
56 |
-
"transformer.
|
57 |
-
"transformer.
|
58 |
-
"transformer.
|
59 |
-
"transformer.
|
60 |
-
"transformer.
|
61 |
-
"transformer.
|
62 |
-
"transformer.
|
63 |
-
"transformer.
|
64 |
-
"transformer.
|
65 |
-
"transformer.
|
66 |
-
"transformer.
|
67 |
-
"transformer.
|
68 |
-
"transformer.
|
69 |
-
"transformer.
|
70 |
-
"transformer.
|
71 |
-
"transformer.
|
72 |
-
"transformer.
|
73 |
-
"transformer.
|
74 |
-
"transformer.
|
75 |
-
"transformer.
|
76 |
-
"transformer.
|
77 |
-
"transformer.
|
78 |
-
"transformer.
|
79 |
-
"transformer.
|
80 |
-
"transformer.
|
81 |
-
"transformer.
|
82 |
-
"transformer.
|
83 |
-
"transformer.
|
84 |
-
"transformer.
|
85 |
-
"transformer.
|
86 |
-
"transformer.
|
87 |
-
"transformer.
|
88 |
-
"transformer.
|
89 |
-
"transformer.
|
90 |
-
"transformer.
|
91 |
-
"transformer.
|
92 |
-
"transformer.
|
93 |
-
"transformer.
|
94 |
-
"transformer.
|
95 |
-
"transformer.
|
96 |
-
"transformer.
|
97 |
-
"transformer.
|
98 |
-
"transformer.
|
99 |
-
"transformer.
|
100 |
-
"transformer.
|
101 |
-
"transformer.
|
102 |
-
"transformer.
|
103 |
-
"transformer.
|
104 |
-
"transformer.
|
105 |
-
"transformer.
|
106 |
-
"transformer.
|
107 |
-
"transformer.
|
108 |
-
"transformer.
|
109 |
-
"transformer.
|
110 |
-
"transformer.
|
111 |
-
"transformer.
|
112 |
-
"transformer.
|
113 |
-
"transformer.
|
114 |
-
"transformer.
|
115 |
-
"transformer.
|
116 |
-
"transformer.
|
117 |
-
"transformer.
|
118 |
-
"transformer.
|
119 |
-
"transformer.
|
120 |
-
"transformer.
|
121 |
-
"transformer.
|
122 |
-
"transformer.
|
123 |
-
"transformer.
|
124 |
-
"transformer.
|
125 |
-
"transformer.
|
126 |
-
"transformer.
|
127 |
-
"transformer.
|
128 |
-
"transformer.
|
129 |
-
"transformer.
|
130 |
-
"transformer.
|
131 |
-
"transformer.
|
132 |
-
"transformer.
|
133 |
-
"transformer.
|
134 |
-
"transformer.
|
135 |
-
"transformer.
|
136 |
-
"transformer.
|
137 |
-
"transformer.
|
138 |
-
"transformer.
|
139 |
-
"transformer.
|
140 |
-
"transformer.
|
141 |
-
"transformer.
|
142 |
-
"transformer.
|
143 |
-
"transformer.
|
144 |
-
"transformer.
|
145 |
-
"transformer.
|
146 |
-
"transformer.
|
147 |
-
"transformer.
|
148 |
-
"transformer.
|
149 |
-
"transformer.
|
150 |
-
"transformer.
|
151 |
-
"transformer.
|
152 |
-
"transformer.
|
153 |
-
"transformer.
|
154 |
-
"transformer.
|
155 |
-
"transformer.
|
156 |
-
"transformer.
|
157 |
-
"transformer.
|
158 |
-
"transformer.
|
159 |
-
"transformer.
|
160 |
-
"transformer.
|
161 |
-
"transformer.
|
162 |
-
"transformer.
|
163 |
-
"transformer.
|
164 |
-
"transformer.
|
165 |
-
"transformer.
|
166 |
-
"transformer.
|
167 |
-
"transformer.
|
168 |
-
"transformer.
|
169 |
-
"transformer.
|
170 |
-
"transformer.
|
171 |
-
"transformer.
|
172 |
-
"transformer.
|
173 |
-
"transformer.
|
174 |
-
"transformer.
|
175 |
-
"transformer.
|
176 |
-
"transformer.
|
177 |
-
"transformer.
|
178 |
-
"transformer.
|
179 |
-
"transformer.
|
180 |
-
"transformer.
|
181 |
-
"transformer.
|
182 |
-
"transformer.
|
183 |
-
"transformer.
|
184 |
-
"transformer.
|
185 |
-
"transformer.
|
186 |
-
"transformer.
|
187 |
-
"transformer.
|
188 |
-
"transformer.
|
189 |
-
"transformer.
|
190 |
-
"transformer.
|
191 |
-
"transformer.
|
192 |
-
"transformer.
|
193 |
-
"transformer.
|
194 |
-
"transformer.
|
195 |
-
"transformer.
|
196 |
-
"transformer.
|
197 |
-
"transformer.
|
198 |
-
"transformer.
|
199 |
-
"transformer.
|
|
|
|
|
|
|
|
|
|
|
|
|
200 |
}
|
201 |
}
|
|
|
1 |
{
|
2 |
"metadata": {
|
3 |
+
"total_size": 12487168064
|
4 |
},
|
5 |
"weight_map": {
|
6 |
+
"transformer.embedding.word_embeddings.weight": "pytorch_model-00001-of-00007.bin",
|
7 |
+
"transformer.encoder.final_layernorm.weight": "pytorch_model-00007-of-00007.bin",
|
8 |
+
"transformer.encoder.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
9 |
+
"transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
|
10 |
+
"transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
|
11 |
+
"transformer.encoder.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
12 |
+
"transformer.encoder.layers.0.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
|
13 |
+
"transformer.encoder.layers.0.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
|
14 |
+
"transformer.encoder.layers.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
|
15 |
+
"transformer.encoder.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
16 |
+
"transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
|
17 |
+
"transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
|
18 |
+
"transformer.encoder.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
19 |
+
"transformer.encoder.layers.1.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
|
20 |
+
"transformer.encoder.layers.1.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
|
21 |
+
"transformer.encoder.layers.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
|
22 |
+
"transformer.encoder.layers.10.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
23 |
+
"transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
|
24 |
+
"transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
|
25 |
+
"transformer.encoder.layers.10.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
26 |
+
"transformer.encoder.layers.10.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
|
27 |
+
"transformer.encoder.layers.10.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
|
28 |
+
"transformer.encoder.layers.10.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
|
29 |
+
"transformer.encoder.layers.11.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
30 |
+
"transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
|
31 |
+
"transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
|
32 |
+
"transformer.encoder.layers.11.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
33 |
+
"transformer.encoder.layers.11.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
|
34 |
+
"transformer.encoder.layers.11.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
|
35 |
+
"transformer.encoder.layers.11.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
|
36 |
+
"transformer.encoder.layers.12.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
37 |
+
"transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
|
38 |
+
"transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
|
39 |
+
"transformer.encoder.layers.12.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
40 |
+
"transformer.encoder.layers.12.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
|
41 |
+
"transformer.encoder.layers.12.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
|
42 |
+
"transformer.encoder.layers.12.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
|
43 |
+
"transformer.encoder.layers.13.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
44 |
+
"transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
|
45 |
+
"transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
|
46 |
+
"transformer.encoder.layers.13.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
47 |
+
"transformer.encoder.layers.13.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
|
48 |
+
"transformer.encoder.layers.13.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
|
49 |
+
"transformer.encoder.layers.13.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
|
50 |
+
"transformer.encoder.layers.14.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
51 |
+
"transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
|
52 |
+
"transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
|
53 |
+
"transformer.encoder.layers.14.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
54 |
+
"transformer.encoder.layers.14.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
|
55 |
+
"transformer.encoder.layers.14.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
|
56 |
+
"transformer.encoder.layers.14.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
|
57 |
+
"transformer.encoder.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
58 |
+
"transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
|
59 |
+
"transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
|
60 |
+
"transformer.encoder.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
61 |
+
"transformer.encoder.layers.15.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
|
62 |
+
"transformer.encoder.layers.15.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
|
63 |
+
"transformer.encoder.layers.15.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
|
64 |
+
"transformer.encoder.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
65 |
+
"transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00007.bin",
|
66 |
+
"transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00007.bin",
|
67 |
+
"transformer.encoder.layers.16.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
68 |
+
"transformer.encoder.layers.16.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
|
69 |
+
"transformer.encoder.layers.16.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
|
70 |
+
"transformer.encoder.layers.16.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
|
71 |
+
"transformer.encoder.layers.17.input_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
72 |
+
"transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
|
73 |
+
"transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
|
74 |
+
"transformer.encoder.layers.17.post_attention_layernorm.weight": "pytorch_model-00004-of-00007.bin",
|
75 |
+
"transformer.encoder.layers.17.self_attention.dense.weight": "pytorch_model-00004-of-00007.bin",
|
76 |
+
"transformer.encoder.layers.17.self_attention.query_key_value.bias": "pytorch_model-00004-of-00007.bin",
|
77 |
+
"transformer.encoder.layers.17.self_attention.query_key_value.weight": "pytorch_model-00004-of-00007.bin",
|
78 |
+
"transformer.encoder.layers.18.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
79 |
+
"transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
|
80 |
+
"transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
|
81 |
+
"transformer.encoder.layers.18.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
82 |
+
"transformer.encoder.layers.18.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
|
83 |
+
"transformer.encoder.layers.18.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
|
84 |
+
"transformer.encoder.layers.18.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
|
85 |
+
"transformer.encoder.layers.19.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
86 |
+
"transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
|
87 |
+
"transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
|
88 |
+
"transformer.encoder.layers.19.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
89 |
+
"transformer.encoder.layers.19.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
|
90 |
+
"transformer.encoder.layers.19.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
|
91 |
+
"transformer.encoder.layers.19.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
|
92 |
+
"transformer.encoder.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
93 |
+
"transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00007.bin",
|
94 |
+
"transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00007.bin",
|
95 |
+
"transformer.encoder.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
96 |
+
"transformer.encoder.layers.2.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
|
97 |
+
"transformer.encoder.layers.2.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
|
98 |
+
"transformer.encoder.layers.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
|
99 |
+
"transformer.encoder.layers.20.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
100 |
+
"transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
|
101 |
+
"transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
|
102 |
+
"transformer.encoder.layers.20.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
103 |
+
"transformer.encoder.layers.20.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
|
104 |
+
"transformer.encoder.layers.20.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
|
105 |
+
"transformer.encoder.layers.20.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
|
106 |
+
"transformer.encoder.layers.21.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
107 |
+
"transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00007.bin",
|
108 |
+
"transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00007.bin",
|
109 |
+
"transformer.encoder.layers.21.post_attention_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
110 |
+
"transformer.encoder.layers.21.self_attention.dense.weight": "pytorch_model-00005-of-00007.bin",
|
111 |
+
"transformer.encoder.layers.21.self_attention.query_key_value.bias": "pytorch_model-00005-of-00007.bin",
|
112 |
+
"transformer.encoder.layers.21.self_attention.query_key_value.weight": "pytorch_model-00005-of-00007.bin",
|
113 |
+
"transformer.encoder.layers.22.input_layernorm.weight": "pytorch_model-00005-of-00007.bin",
|
114 |
+
"transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
|
115 |
+
"transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
|
116 |
+
"transformer.encoder.layers.22.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
117 |
+
"transformer.encoder.layers.22.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
|
118 |
+
"transformer.encoder.layers.22.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
|
119 |
+
"transformer.encoder.layers.22.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
|
120 |
+
"transformer.encoder.layers.23.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
121 |
+
"transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
|
122 |
+
"transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
|
123 |
+
"transformer.encoder.layers.23.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
124 |
+
"transformer.encoder.layers.23.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
|
125 |
+
"transformer.encoder.layers.23.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
|
126 |
+
"transformer.encoder.layers.23.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
|
127 |
+
"transformer.encoder.layers.24.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
128 |
+
"transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
|
129 |
+
"transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
|
130 |
+
"transformer.encoder.layers.24.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
131 |
+
"transformer.encoder.layers.24.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
|
132 |
+
"transformer.encoder.layers.24.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
|
133 |
+
"transformer.encoder.layers.24.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
|
134 |
+
"transformer.encoder.layers.25.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
135 |
+
"transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00007.bin",
|
136 |
+
"transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
|
137 |
+
"transformer.encoder.layers.25.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
138 |
+
"transformer.encoder.layers.25.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
|
139 |
+
"transformer.encoder.layers.25.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
|
140 |
+
"transformer.encoder.layers.25.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
|
141 |
+
"transformer.encoder.layers.26.input_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
142 |
+
"transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00007.bin",
|
143 |
+
"transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00007.bin",
|
144 |
+
"transformer.encoder.layers.26.post_attention_layernorm.weight": "pytorch_model-00006-of-00007.bin",
|
145 |
+
"transformer.encoder.layers.26.self_attention.dense.weight": "pytorch_model-00006-of-00007.bin",
|
146 |
+
"transformer.encoder.layers.26.self_attention.query_key_value.bias": "pytorch_model-00006-of-00007.bin",
|
147 |
+
"transformer.encoder.layers.26.self_attention.query_key_value.weight": "pytorch_model-00006-of-00007.bin",
|
148 |
+
"transformer.encoder.layers.27.input_layernorm.weight": "pytorch_model-00007-of-00007.bin",
|
149 |
+
"transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00007.bin",
|
150 |
+
"transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00007.bin",
|
151 |
+
"transformer.encoder.layers.27.post_attention_layernorm.weight": "pytorch_model-00007-of-00007.bin",
|
152 |
+
"transformer.encoder.layers.27.self_attention.dense.weight": "pytorch_model-00007-of-00007.bin",
|
153 |
+
"transformer.encoder.layers.27.self_attention.query_key_value.bias": "pytorch_model-00007-of-00007.bin",
|
154 |
+
"transformer.encoder.layers.27.self_attention.query_key_value.weight": "pytorch_model-00007-of-00007.bin",
|
155 |
+
"transformer.encoder.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
156 |
+
"transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
|
157 |
+
"transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
|
158 |
+
"transformer.encoder.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00007.bin",
|
159 |
+
"transformer.encoder.layers.3.self_attention.dense.weight": "pytorch_model-00001-of-00007.bin",
|
160 |
+
"transformer.encoder.layers.3.self_attention.query_key_value.bias": "pytorch_model-00001-of-00007.bin",
|
161 |
+
"transformer.encoder.layers.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00007.bin",
|
162 |
+
"transformer.encoder.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
163 |
+
"transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
|
164 |
+
"transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
|
165 |
+
"transformer.encoder.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
166 |
+
"transformer.encoder.layers.4.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
|
167 |
+
"transformer.encoder.layers.4.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
|
168 |
+
"transformer.encoder.layers.4.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
|
169 |
+
"transformer.encoder.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
170 |
+
"transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
|
171 |
+
"transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
|
172 |
+
"transformer.encoder.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
173 |
+
"transformer.encoder.layers.5.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
|
174 |
+
"transformer.encoder.layers.5.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
|
175 |
+
"transformer.encoder.layers.5.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
|
176 |
+
"transformer.encoder.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
177 |
+
"transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
|
178 |
+
"transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
|
179 |
+
"transformer.encoder.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
180 |
+
"transformer.encoder.layers.6.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
|
181 |
+
"transformer.encoder.layers.6.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
|
182 |
+
"transformer.encoder.layers.6.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
|
183 |
+
"transformer.encoder.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
184 |
+
"transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00007.bin",
|
185 |
+
"transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00007.bin",
|
186 |
+
"transformer.encoder.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
187 |
+
"transformer.encoder.layers.7.self_attention.dense.weight": "pytorch_model-00002-of-00007.bin",
|
188 |
+
"transformer.encoder.layers.7.self_attention.query_key_value.bias": "pytorch_model-00002-of-00007.bin",
|
189 |
+
"transformer.encoder.layers.7.self_attention.query_key_value.weight": "pytorch_model-00002-of-00007.bin",
|
190 |
+
"transformer.encoder.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00007.bin",
|
191 |
+
"transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
|
192 |
+
"transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
|
193 |
+
"transformer.encoder.layers.8.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
194 |
+
"transformer.encoder.layers.8.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
|
195 |
+
"transformer.encoder.layers.8.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
|
196 |
+
"transformer.encoder.layers.8.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
|
197 |
+
"transformer.encoder.layers.9.input_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
198 |
+
"transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00007.bin",
|
199 |
+
"transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00007.bin",
|
200 |
+
"transformer.encoder.layers.9.post_attention_layernorm.weight": "pytorch_model-00003-of-00007.bin",
|
201 |
+
"transformer.encoder.layers.9.self_attention.dense.weight": "pytorch_model-00003-of-00007.bin",
|
202 |
+
"transformer.encoder.layers.9.self_attention.query_key_value.bias": "pytorch_model-00003-of-00007.bin",
|
203 |
+
"transformer.encoder.layers.9.self_attention.query_key_value.weight": "pytorch_model-00003-of-00007.bin",
|
204 |
+
"transformer.output_layer.weight": "pytorch_model-00007-of-00007.bin",
|
205 |
+
"transformer.rotary_pos_emb.inv_freq": "pytorch_model-00001-of-00007.bin"
|
206 |
}
|
207 |
}
|
quantization.py
ADDED
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from torch.nn import Linear
|
2 |
+
from torch.nn.parameter import Parameter
|
3 |
+
|
4 |
+
import bz2
|
5 |
+
import torch
|
6 |
+
import base64
|
7 |
+
import ctypes
|
8 |
+
from transformers.utils import logging
|
9 |
+
|
10 |
+
from typing import List
|
11 |
+
from functools import partial
|
12 |
+
|
13 |
+
logger = logging.get_logger(__name__)
|
14 |
+
|
15 |
+
try:
|
16 |
+
from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
|
17 |
+
|
18 |
+
class Kernel:
|
19 |
+
def __init__(self, code: bytes, function_names: List[str]):
|
20 |
+
self.code = code
|
21 |
+
self._function_names = function_names
|
22 |
+
self._cmodule = LazyKernelCModule(self.code)
|
23 |
+
|
24 |
+
for name in self._function_names:
|
25 |
+
setattr(self, name, KernelFunction(self._cmodule, name))
|
26 |
+
|
27 |
+
quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrd
NtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Ru
dm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYm
RhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
|
28 |
+
|
29 |
+
kernels = Kernel(
|
30 |
+
bz2.decompress(base64.b64decode(quantization_code)),
|
31 |
+
[
|
32 |
+
"int4WeightCompression",
|
33 |
+
"int4WeightExtractionFloat",
|
34 |
+
"int4WeightExtractionHalf",
|
35 |
+
"int8WeightExtractionFloat",
|
36 |
+
"int8WeightExtractionHalf",
|
37 |
+
],
|
38 |
+
)
|
39 |
+
except Exception as exception:
|
40 |
+
kernels = None
|
41 |
+
logger.warning("Failed to load cpm_kernels:" + str(exception))
|
42 |
+
|
43 |
+
|
44 |
+
class W8A16Linear(torch.autograd.Function):
    """Matmul with a quantized weight: half-precision activations, int weight.

    The weight arrives quantized (int8, or int4 packed into int8) together
    with a per-output-row scale; it is dequantized to half precision on the
    fly in both forward and backward.
    """

    @staticmethod
    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
        # Remember the original activation shape so backward can restore it.
        ctx.inp_shape = inp.size()
        ctx.weight_bit_width = weight_bit_width
        out_features = quant_w.size(0)
        # Flatten all leading dims into one batch dim for the mm() call.
        inp = inp.contiguous().view(-1, inp.size(-1))
        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
        ctx.weight_shape = weight.size()
        output = inp.mm(weight.t())
        # Save the quantized weight (not the dequantized copy) to keep the
        # autograd graph's memory footprint small; backward re-extracts it.
        ctx.save_for_backward(inp, quant_w, scale_w)
        return output.view(*(ctx.inp_shape[:-1] + (out_features,)))

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        inp, quant_w, scale_w = ctx.saved_tensors
        # Dequantize again rather than storing the fp16 weight across the pass.
        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
        grad_output = grad_output.contiguous().view(-1, weight.size(0))
        grad_input = grad_output.mm(weight)
        grad_weight = grad_output.t().mm(inp)
        # No gradients for scale_w / weight_bit_width (positions 3 and 4).
        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
|
65 |
+
|
66 |
+
|
67 |
+
def compress_int4_weight(weight: torch.Tensor): # (n, m)
    """Pack int4 values (stored one-per-int8) into half as many bytes.

    The CUDA kernel packs each pair of adjacent values along dim 1 into a
    single int8 byte, so the result has shape (n, m // 2).  Requires the
    cpm_kernels module to have loaded (``kernels`` is not None) and a CUDA
    device; the output is allocated on the current CUDA device.
    """
    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)
        # Two int4 values per output byte, so the column count must be even.
        assert m % 2 == 0
        m = m // 2
        out = torch.empty(n, m, dtype=torch.int8, device="cuda")
        stream = torch.cuda.current_stream()

        # One CUDA block per weight row; up to 1024 threads per block,
        # rounded up to a multiple of the 32-thread warp size.
        gridDim = (n, 1, 1)
        blockDim = (min(round_up(m, 32), 1024), 1, 1)

        kernels.int4WeightCompression(
            gridDim,
            blockDim,
            0,
            stream,
            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
        )
        return out
|
86 |
+
|
87 |
+
|
88 |
+
def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
    """Dequantize an int8/int4 weight to half (or bfloat16) precision.

    For 8-bit weights this is a pure PyTorch op: each row is multiplied by
    its per-row scale.  For 4-bit weights a CUDA kernel unpacks two int4
    values from each int8 byte, producing shape (n, m * 2); this path
    requires cpm_kernels and a CUDA device.
    """
    assert scale_list.dtype in [torch.half, torch.bfloat16]
    assert weight.dtype in [torch.int8]
    if source_bit_width == 8:
        # Pure-PyTorch row-wise rescale; works on any device.
        return weight.to(scale_list.dtype) * scale_list[:, None]
    elif source_bit_width == 4:
        # NOTE(review): the Kernel registration list above only loads
        # int4WeightExtractionHalf/Float -- ``int4WeightExtractionBFloat16``
        # is not among them, so the bfloat16 branch would presumably fail
        # with AttributeError; confirm against the cpm_kernels payload.
        func = (
            kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16
        )
    else:
        assert False, "Unsupported bit-width"

    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)
        # Each int8 byte expands to 8 // source_bit_width values (2 for int4).
        out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device="cuda")
        stream = torch.cuda.current_stream()

        # One CUDA block per row; threads rounded up to warp size, capped at 1024.
        gridDim = (n, 1, 1)
        blockDim = (min(round_up(m, 32), 1024), 1, 1)

        func(
            gridDim,
            blockDim,
            0,
            stream,
            [
                ctypes.c_void_p(weight.data_ptr()),
                ctypes.c_void_p(scale_list.data_ptr()),
                ctypes.c_void_p(out.data_ptr()),
                ctypes.c_int32(n),
                ctypes.c_int32(m),
            ],
        )
        return out
|
122 |
+
|
123 |
+
|
124 |
+
class QuantizedLinear(torch.nn.Module):
|
125 |
+
def __init__(self, weight_bit_width: int, weight, bias=None, device="cpu", dtype=None, empty_init=False, *args,
|
126 |
+
**kwargs):
|
127 |
+
super().__init__()
|
128 |
+
self.weight_bit_width = weight_bit_width
|
129 |
+
|
130 |
+
shape = weight.shape
|
131 |
+
|
132 |
+
if weight is None or empty_init:
|
133 |
+
self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device)
|
134 |
+
self.weight_scale = torch.empty(shape[0], dtype=dtype, device=device)
|
135 |
+
else:
|
136 |
+
self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)
|
137 |
+
self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8)
|
138 |
+
if weight_bit_width == 4:
|
139 |
+
self.weight = compress_int4_weight(self.weight)
|
140 |
+
|
141 |
+
self.weight = Parameter(self.weight.to(device), requires_grad=False)
|
142 |
+
self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False)
|
143 |
+
self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None
|
144 |
+
|
145 |
+
def forward(self, input):
|
146 |
+
output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
|
147 |
+
if self.bias is not None:
|
148 |
+
output = output + self.bias
|
149 |
+
return output
|
150 |
+
|
151 |
+
|
152 |
+
def _quantize_linear(linear, weight_bit_width, empty_init, device):
    """Build a QuantizedLinear mirroring an existing fp16 ``linear`` module."""
    return QuantizedLinear(
        weight_bit_width=weight_bit_width,
        weight=linear.weight.to(torch.cuda.current_device()),
        bias=linear.bias,
        dtype=linear.weight.dtype,
        # Keep the layer on its original device unless one is forced.
        device=linear.weight.device if device is None else device,
        empty_init=empty_init,
    )


def quantize(model, weight_bit_width, empty_init=False, device=None):
    """Replace fp16 linear with quantized linear.

    Swaps the four projection layers of every transformer block
    (QKV, attention output, and both MLP projections) for
    :class:`QuantizedLinear` instances, in place.  The original code
    repeated the same construction four times verbatim; it is factored
    into ``_quantize_linear`` with identical arguments.

    Returns the (mutated) model for call-chaining.
    """
    for layer in model.layers:
        attn, mlp = layer.self_attention, layer.mlp
        attn.query_key_value = _quantize_linear(attn.query_key_value, weight_bit_width, empty_init, device)
        attn.dense = _quantize_linear(attn.dense, weight_bit_width, empty_init, device)
        mlp.dense_h_to_4h = _quantize_linear(mlp.dense_h_to_4h, weight_bit_width, empty_init, device)
        mlp.dense_4h_to_h = _quantize_linear(mlp.dense_4h_to_h, weight_bit_width, empty_init, device)

    return model
|