yuchen005 committed on
Commit
fcdac52
1 Parent(s): eed57d6

Upload 32 files

my_jiwer/.github/workflows/pythonpackage.yml ADDED
@@ -0,0 +1,63 @@
+ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+ # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+ name: Python package
+
+ on:
+   push:
+     branches: [ master ]
+   pull_request:
+     branches: [ master ]
+
+ jobs:
+   style:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: [3.7]
+
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install Poetry
+         uses: snok/install-poetry@v1
+       - name: Setup dependencies
+         run: |
+           poetry update
+           poetry install
+       - name: Lint with flake8
+         run: |
+           # stop the build if there are Python syntax errors or undefined names
+           poetry run flake8 jiwer --count --select=E9,F63,F7,F82 --show-source --statistics
+           # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+           poetry run flake8 jiwer --count --exit-zero --max-complexity=10 --max-line-length=88 --statistics
+       - name: Check formatting with black
+         run: |
+           poetry run black . --check
+
+   build:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install Poetry
+         uses: snok/install-poetry@v1
+       - name: Install dependencies
+         run: |
+           poetry run python -m pip install --upgrade pip
+           poetry update
+           poetry install
+       - name: Test with pytest
+         run: |
+           poetry run python --version
+           poetry run pytest
my_jiwer/.github/workflows/pythonpublish.yml ADDED
@@ -0,0 +1,33 @@
+ # This workflow will build and upload a Python Package using Poetry when a release is created
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+ name: Upload Python Package
+
+ on:
+   release:
+     types: [created]
+
+ jobs:
+   deploy:
+
+     runs-on: ubuntu-latest
+
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: '3.x'
+       - name: Install Poetry
+         uses: snok/install-poetry@v1
+       - name: Install dependencies
+         run: |
+           poetry run python -m pip install --upgrade pip
+           poetry update
+           poetry install
+       - name: Build and publish
+         env:
+           POETRY_HTTP_BASIC_PYPI_USERNAME: ${{ secrets.PYPI_USERNAME }}
+           POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
+         run: |
+           poetry publish --build
my_jiwer/.gitignore ADDED
@@ -0,0 +1,28 @@
+ # Compiled python modules.
+ *.pyc
+
+ # Setuptools distribution folder.
+ /dist/
+
+ # Python egg metadata, regenerated from source files by setuptools.
+ /*.egg-info
+
+ # playground directory for running local debug code
+ playground
+
+ # poetry .lock file
+ poetry.lock
+
+ # idea specific folders
+ .idea
+ .vscode
+
+ # virtual environments
+ venv
+ .venv
+
+ # cache folders
+ .pytest_cache
+ .benchmarks
+ /docs/site/
+ /site/
my_jiwer/.mailmap ADDED
@@ -0,0 +1,6 @@
+ # mail map file
+ # for cleaner output of e.g. `git shortlog -nes`
+
+ Nik Vaessen <nikvaes@gmail.com>
+ Nik Vaessen <nikvaes@gmail.com> <nikvaessen@users.noreply.github.com>
+ Bart van Andel <bavanandel@gmail.com>
my_jiwer/LICENSE ADDED
@@ -0,0 +1,201 @@
+                                  Apache License
+                            Version 2.0, January 2004
+                         http://www.apache.org/licenses/
+
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+    1. Definitions.
+
+       "License" shall mean the terms and conditions for use, reproduction,
+       and distribution as defined by Sections 1 through 9 of this document.
+
+       "Licensor" shall mean the copyright owner or entity authorized by
+       the copyright owner that is granting the License.
+
+       "Legal Entity" shall mean the union of the acting entity and all
+       other entities that control, are controlled by, or are under common
+       control with that entity. For the purposes of this definition,
+       "control" means (i) the power, direct or indirect, to cause the
+       direction or management of such entity, whether by contract or
+       otherwise, or (ii) ownership of fifty percent (50%) or more of the
+       outstanding shares, or (iii) beneficial ownership of such entity.
+
+       "You" (or "Your") shall mean an individual or Legal Entity
+       exercising permissions granted by this License.
+
+       "Source" form shall mean the preferred form for making modifications,
+       including but not limited to software source code, documentation
+       source, and configuration files.
+
+       "Object" form shall mean any form resulting from mechanical
+       transformation or translation of a Source form, including but
+       not limited to compiled object code, generated documentation,
+       and conversions to other media types.
+
+       "Work" shall mean the work of authorship, whether in Source or
+       Object form, made available under the License, as indicated by a
+       copyright notice that is included in or attached to the work
+       (an example is provided in the Appendix below).
+
+       "Derivative Works" shall mean any work, whether in Source or Object
+       form, that is based on (or derived from) the Work and for which the
+       editorial revisions, annotations, elaborations, or other modifications
+       represent, as a whole, an original work of authorship. For the purposes
+       of this License, Derivative Works shall not include works that remain
+       separable from, or merely link (or bind by name) to the interfaces of,
+       the Work and Derivative Works thereof.
+
+       "Contribution" shall mean any work of authorship, including
+       the original version of the Work and any modifications or additions
+       to that Work or Derivative Works thereof, that is intentionally
+       submitted to Licensor for inclusion in the Work by the copyright owner
+       or by an individual or Legal Entity authorized to submit on behalf of
+       the copyright owner. For the purposes of this definition, "submitted"
+       means any form of electronic, verbal, or written communication sent
+       to the Licensor or its representatives, including but not limited to
+       communication on electronic mailing lists, source code control systems,
+       and issue tracking systems that are managed by, or on behalf of, the
+       Licensor for the purpose of discussing and improving the Work, but
+       excluding communication that is conspicuously marked or otherwise
+       designated in writing by the copyright owner as "Not a Contribution."
+
+       "Contributor" shall mean Licensor and any individual or Legal Entity
+       on behalf of whom a Contribution has been received by Licensor and
+       subsequently incorporated within the Work.
+
+    2. Grant of Copyright License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       copyright license to reproduce, prepare Derivative Works of,
+       publicly display, publicly perform, sublicense, and distribute the
+       Work and such Derivative Works in Source or Object form.
+
+    3. Grant of Patent License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       (except as stated in this section) patent license to make, have made,
+       use, offer to sell, sell, import, and otherwise transfer the Work,
+       where such license applies only to those patent claims licensable
+       by such Contributor that are necessarily infringed by their
+       Contribution(s) alone or by combination of their Contribution(s)
+       with the Work to which such Contribution(s) was submitted. If You
+       institute patent litigation against any entity (including a
+       cross-claim or counterclaim in a lawsuit) alleging that the Work
+       or a Contribution incorporated within the Work constitutes direct
+       or contributory patent infringement, then any patent licenses
+       granted to You under this License for that Work shall terminate
+       as of the date such litigation is filed.
+
+    4. Redistribution. You may reproduce and distribute copies of the
+       Work or Derivative Works thereof in any medium, with or without
+       modifications, and in Source or Object form, provided that You
+       meet the following conditions:
+
+       (a) You must give any other recipients of the Work or
+           Derivative Works a copy of this License; and
+
+       (b) You must cause any modified files to carry prominent notices
+           stating that You changed the files; and
+
+       (c) You must retain, in the Source form of any Derivative Works
+           that You distribute, all copyright, patent, trademark, and
+           attribution notices from the Source form of the Work,
+           excluding those notices that do not pertain to any part of
+           the Derivative Works; and
+
+       (d) If the Work includes a "NOTICE" text file as part of its
+           distribution, then any Derivative Works that You distribute must
+           include a readable copy of the attribution notices contained
+           within such NOTICE file, excluding those notices that do not
+           pertain to any part of the Derivative Works, in at least one
+           of the following places: within a NOTICE text file distributed
+           as part of the Derivative Works; within the Source form or
+           documentation, if provided along with the Derivative Works; or,
+           within a display generated by the Derivative Works, if and
+           wherever such third-party notices normally appear. The contents
+           of the NOTICE file are for informational purposes only and
+           do not modify the License. You may add Your own attribution
+           notices within Derivative Works that You distribute, alongside
+           or as an addendum to the NOTICE text from the Work, provided
+           that such additional attribution notices cannot be construed
+           as modifying the License.
+
+       You may add Your own copyright statement to Your modifications and
+       may provide additional or different license terms and conditions
+       for use, reproduction, or distribution of Your modifications, or
+       for any such Derivative Works as a whole, provided Your use,
+       reproduction, and distribution of the Work otherwise complies with
+       the conditions stated in this License.
+
+    5. Submission of Contributions. Unless You explicitly state otherwise,
+       any Contribution intentionally submitted for inclusion in the Work
+       by You to the Licensor shall be under the terms and conditions of
+       this License, without any additional terms or conditions.
+       Notwithstanding the above, nothing herein shall supersede or modify
+       the terms of any separate license agreement you may have executed
+       with Licensor regarding such Contributions.
+
+    6. Trademarks. This License does not grant permission to use the trade
+       names, trademarks, service marks, or product names of the Licensor,
+       except as required for reasonable and customary use in describing the
+       origin of the Work and reproducing the content of the NOTICE file.
+
+    7. Disclaimer of Warranty. Unless required by applicable law or
+       agreed to in writing, Licensor provides the Work (and each
+       Contributor provides its Contributions) on an "AS IS" BASIS,
+       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+       implied, including, without limitation, any warranties or conditions
+       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+       PARTICULAR PURPOSE. You are solely responsible for determining the
+       appropriateness of using or redistributing the Work and assume any
+       risks associated with Your exercise of permissions under this License.
+
+    8. Limitation of Liability. In no event and under no legal theory,
+       whether in tort (including negligence), contract, or otherwise,
+       unless required by applicable law (such as deliberate and grossly
+       negligent acts) or agreed to in writing, shall any Contributor be
+       liable to You for damages, including any direct, indirect, special,
+       incidental, or consequential damages of any character arising as a
+       result of this License or out of the use or inability to use the
+       Work (including but not limited to damages for loss of goodwill,
+       work stoppage, computer failure or malfunction, or any and all
+       other commercial damages or losses), even if such Contributor
+       has been advised of the possibility of such damages.
+
+    9. Accepting Warranty or Additional Liability. While redistributing
+       the Work or Derivative Works thereof, You may choose to offer,
+       and charge a fee for, acceptance of support, warranty, indemnity,
+       or other liability obligations and/or rights consistent with this
+       License. However, in accepting such obligations, You may act only
+       on Your own behalf and on Your sole responsibility, not on behalf
+       of any other Contributor, and only if You agree to indemnify,
+       defend, and hold each Contributor harmless for any liability
+       incurred by, or claims asserted against, such Contributor by reason
+       of your accepting any such warranty or additional liability.
+
+    END OF TERMS AND CONDITIONS
+
+    APPENDIX: How to apply the Apache License to your work.
+
+       To apply the Apache License to your work, attach the following
+       boilerplate notice, with the fields enclosed by brackets "[]"
+       replaced with your own identifying information. (Don't include
+       the brackets!) The text should be enclosed in the appropriate
+       comment syntax for the file format. We also recommend that a
+       file or class name and description of purpose be included on the
+       same "printed page" as the copyright notice for easier
+       identification within third-party archives.
+
+    Copyright 2018 8x8, Inc.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
my_jiwer/README.md ADDED
@@ -0,0 +1,67 @@
+ # JiWER
+
+ JiWER is a simple and fast Python package to evaluate an automatic speech recognition system.
+ It supports the following measures:
+
+ 1. word error rate (WER)
+ 2. match error rate (MER)
+ 3. word information lost (WIL)
+ 4. word information preserved (WIP)
+ 5. character error rate (CER)
+
+ These measures are computed using the minimum edit distance between one or more reference and hypothesis sentences.
+ The minimum edit distance is calculated using [RapidFuzz](https://github.com/maxbachmann/RapidFuzz), which uses C++ under the hood and is therefore faster than a pure Python implementation.
+
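+ As a rough illustrative sketch (not JiWER's actual implementation, which lives in `jiwer.process_words`), the WER follows from the counts of substitutions (S), deletions (D), insertions (I) and hits (H) found by that alignment:
+
+ ```python
+ # illustrative only: how WER is derived from alignment counts
+ def wer_from_counts(s: int, d: int, i: int, h: int) -> float:
+     # (S + D + I) errors over the number of reference words (S + D + H)
+     return (s + d + i) / (s + d + h)
+
+ # "hello world" vs "hello duck": one substitution, one hit
+ assert wer_from_counts(s=1, d=0, i=0, h=1) == 0.5
+ ```
+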
+ ## Documentation
+
+ For further info, see the documentation at [jitsi.github.io/jiwer](https://jitsi.github.io/jiwer).
+
+ ## Installation
+
+ You should be able to install this package using [poetry](https://python-poetry.org/docs/):
+
+ ```bash
+ $ poetry add jiwer
+ ```
+
+ Or, if you prefer old-fashioned pip and you're using Python >= `3.7`:
+
+ ```bash
+ $ pip install jiwer
+ ```
+
+ ## Usage
+
+ The simplest use case is computing the word error rate between two strings:
+
+ ```python
+ from jiwer import wer
+
+ reference = "hello world"
+ hypothesis = "hello duck"
+
+ error = wer(reference, hypothesis)
+ ```
+
+ ## Licence
+
+ The jiwer package is released under the `Apache License, Version 2.0` licence by [8x8](https://www.8x8.com/).
+
+ For further information, see [`LICENCE`](./LICENSE).
+
+ ## Reference
+
+ _For a comparison between WER, MER and WIL, see: \
+ Morris, Andrew & Maier, Viktoria & Green, Phil. (2004). [From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.](https://www.researchgate.net/publication/221478089_From_WER_and_RIL_to_MER_and_WIL_improved_evaluation_measures_for_connected_speech_recognition)_
my_jiwer/docs/cli.md ADDED
@@ -0,0 +1,36 @@
+
+ JiWER provides a simple CLI, which should be available after installation.
+
+ For details, see `jiwer --help`.
+
+ ```text
+ $ jiwer --help
+ Usage: jiwer [OPTIONS]
+
+   JiWER is a python tool for computing the word-error-rate of ASR systems. To
+   use this CLI, store the reference and hypothesis sentences in a text file,
+   where each sentence is delimited by a new-line character. The text files are
+   expected to have an equal number of lines, unless the `-g` flag is used, in
+   which case the WER is computed over a global minimal alignment of all sentences.
+
+ Options:
+   -r, --reference PATH   Path to new-line delimited text file of reference
+                          sentences.  [required]
+   -h, --hypothesis PATH  Path to new-line delimited text file of hypothesis
+                          sentences.  [required]
+   -c, --cer              Compute CER instead of WER.
+   -a, --align            Print alignment of each sentence.
+   -g, --global           Apply a global minimal alignment between reference
+                          and hypothesis sentences before computing the WER.
+   --help                 Show this message and exit.
+ ```
+
+ Note that the CLI does not support custom pre-processing (as described in the usage documentation).
+ Any pre-processing should be applied to the text files manually before calling JiWER.
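+
+ For example, given hypothetical files `ref.txt` and `hyp.txt` with one sentence per line, the CLI prints the WER as a plain float (the value shown below is illustrative):
+
+ ```text
+ $ jiwer -r ref.txt -h hyp.txt
+ 0.25
+ ```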
my_jiwer/docs/gen_ref_pages.py ADDED
@@ -0,0 +1,28 @@
+ """Generate the code reference pages and navigation."""
+
+ from pathlib import Path
+ import mkdocs_gen_files
+
+ nav = mkdocs_gen_files.Nav()
+
+ for path in sorted(Path("jiwer").rglob("*.py")):
+     doc_path = path.relative_to("jiwer").with_suffix(".md")
+     full_doc_path = Path("reference", doc_path)
+
+     module_path = path.relative_to("jiwer").with_suffix("")
+     parts = list(module_path.parts)
+
+     if parts[-1] == "__init__" or parts[-1] == "cli":
+         continue
+
+     nav[parts] = doc_path.as_posix()
+
+     with mkdocs_gen_files.open(full_doc_path, "w") as fd:
+         identifier = ".".join(parts)
+         print("::: " + identifier, file=fd)
+
+     mkdocs_gen_files.set_edit_path(full_doc_path, path)
+
+
+ with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
+     nav_file.writelines(nav.build_literate_nav())
my_jiwer/docs/index.md ADDED
@@ -0,0 +1,31 @@
+ # JiWER
+
+ JiWER is a simple and fast Python package to evaluate an automatic speech recognition system.
+ It supports the following measures:
+
+ 1. word error rate (WER)
+ 2. match error rate (MER)
+ 3. word information lost (WIL)
+ 4. word information preserved (WIP)
+ 5. character error rate (CER)
+
+ These measures are computed using the minimum edit distance between one or more reference and hypothesis sentences.
+ The minimum edit distance is calculated using [RapidFuzz](https://github.com/maxbachmann/RapidFuzz), which uses C++ under the hood and is therefore faster than a pure Python implementation.
+
+ # Installation
+
+ You should be able to install this package using [poetry](https://python-poetry.org/docs/):
+
+ ```bash
+ $ poetry add jiwer
+ ```
+
+ Or, if you prefer old-fashioned pip and you're using Python >= `3.7`:
+
+ ```bash
+ $ pip install jiwer
+ ```
my_jiwer/docs/requirements.txt ADDED
@@ -0,0 +1,5 @@
+ mkdocs==1.4.2
+ mkdocstrings[python]==0.20.0
+ mkdocs-gen-files==0.4.0
+ mkdocs-literate-nav==0.6.0
+ mkdocs-material==9.1.3
my_jiwer/docs/usage.md ADDED
@@ -0,0 +1,135 @@
+ # Usage
+
+ The simplest use case is computing the word error rate between two strings:
+
+ ```python
+ from jiwer import wer
+
+ reference = "hello world"
+ hypothesis = "hello duck"
+
+ error = wer(reference, hypothesis)
+ ```
+
+ Similarly, to get other measures:
+
+ ```python
+ import jiwer
+
+ reference = "hello world"
+ hypothesis = "hello duck"
+
+ wer = jiwer.wer(reference, hypothesis)
+ mer = jiwer.mer(reference, hypothesis)
+ wil = jiwer.wil(reference, hypothesis)
+
+ # faster, because `process_words` only needs to perform the heavy lifting once:
+ output = jiwer.process_words(reference, hypothesis)
+ wer = output.wer
+ mer = output.mer
+ wil = output.wil
+ ```
+
+ You can also compute the WER over multiple sentences:
+
+ ```python
+ from jiwer import wer
+
+ reference = ["hello world", "i like monthy python"]
+ hypothesis = ["hello duck", "i like python"]
+
+ error = wer(reference, hypothesis)
+ ```
+
+ We also provide the character error rate:
+
+ ```python
+ import jiwer
+
+ reference = ["i can spell", "i hope"]
+ hypothesis = ["i kan cpell", "i hop"]
+
+ error = jiwer.cer(reference, hypothesis)
+
+ # if you also want the alignment
+ output = jiwer.process_characters(reference, hypothesis)
+ error = output.cer
+ ```
+
+ # Alignment
+
+ With `jiwer.process_words`, you also get the alignment between the reference and hypothesis.
+
+ We provide the alignment as a list of `(op, ref_start_idx, ref_end_idx, hyp_start_idx, hyp_end_idx)`, where `op` is one of
+ `equal`, `substitute`, `delete`, or `insert`.
+
+ This looks like the following:
+
+ ```python3
+ import jiwer
+
+ out = jiwer.process_words("short one here", "shoe order one")
+ print(out.alignments)
+ # [[AlignmentChunk(type='insert', ref_start_idx=0, ref_end_idx=0, hyp_start_idx=0, hyp_end_idx=1), ...]]
+ ```
+
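+ Since the alignment is a plain list of dataclasses per sentence pair, it is easy to post-process. As a small illustrative sketch (not part of the API), this tallies how often each operation occurs in the first sentence pair:
+
+ ```python3
+ import jiwer
+
+ out = jiwer.process_words("short one here", "shoe order one")
+
+ # count the alignment operations of the first (and only) sentence pair
+ counts = {}
+ for chunk in out.alignments[0]:
+     counts[chunk.type] = counts.get(chunk.type, 0) + 1
+
+ print(counts)  # e.g. {'insert': 1, 'substitute': 1, 'equal': 1, 'delete': 1}
+ ```
+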
+ To visualize the alignment, you can use `jiwer.visualize_alignment()`.
+
+ For example:
+
+ ```python3
+ import jiwer
+
+ out = jiwer.process_words(
+     ["short one here", "quite a bit of longer sentence"],
+     ["shoe order one", "quite bit of an even longest sentence here"],
+ )
+
+ print(jiwer.visualize_alignment(out))
+ ```
+ This gives the following output:
+ ```text
+ sentence 1
+ REF: **** short one here
+ HYP: shoe order one ****
+         I     S        D
+
+ sentence 2
+ REF: quite a bit of ** ****  longer sentence ****
+ HYP: quite * bit of an even longest sentence here
+            D         I    I       S             I
+
+ number of sentences: 2
+ substitutions=2 deletions=2 insertions=4 hits=5
+
+ mer=61.54%
+ wil=74.75%
+ wip=25.25%
+ wer=88.89%
+ ```
+
+ Note that it is also possible to visualize the character-level alignment; simply use the output of `jiwer.process_characters()` instead.
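+
+ For example, a minimal character-level sketch:
+
+ ```python3
+ import jiwer
+
+ out = jiwer.process_characters(["i can spell"], ["i kan cpell"])
+ print(jiwer.visualize_alignment(out))
+ ```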
my_jiwer/mkdocs.yml ADDED
@@ -0,0 +1,47 @@
+ site_name: "jiwer"
+ site_description: "Documentation for jiwer."
+ site_url: "https://jitsi.github.io/jiwer/"
+ repo_url: "https://github.com/jitsi/jiwer"
+ edit_uri: "blob/master/docs/"
+ repo_name: "jitsi/jiwer"
+ site_dir: "site"
+
+ theme:
+   name: "material"
+   features:
+     - content.code.annotate
+     - navigation.tabs
+     - navigation.tabs.sticky
+     - navigation.top
+
+ plugins:
+   - mkdocstrings:
+       handlers:
+         python:
+           paths: [jiwer]
+           options:
+             separate_signature: true
+             show_root_heading: true
+             show_root_members_full_path: false
+   - search
+   - gen-files:
+       scripts:
+         - docs/gen_ref_pages.py
+   - literate-nav:
+       nav_file: SUMMARY.md
+   - autorefs
+
+ nav:
+   - jiwer: index.md
+   - Usage: usage.md
+   - Command-line interface: cli.md
+   - API reference: reference/
+
+ markdown_extensions:
+   - toc:
+       permalink: "#"
+   - pymdownx.superfences
+
+ watch:
+   - docs
+   - jiwer
my_jiwer/my_jiwer/__init__.py ADDED
@@ -0,0 +1,7 @@
+ from .measures import *
+ from .transforms import *
+ from .transformations import *
+ from .alignment import *
+ from .process import *
+
+ name = "jiwer"
my_jiwer/my_jiwer/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (287 Bytes). View file
 
my_jiwer/my_jiwer/__pycache__/alignment.cpython-38.pyc ADDED
Binary file (4.97 kB). View file
 
my_jiwer/my_jiwer/__pycache__/measures.cpython-38.pyc ADDED
Binary file (11.2 kB). View file
 
my_jiwer/my_jiwer/__pycache__/process.cpython-38.pyc ADDED
Binary file (12.2 kB). View file
 
my_jiwer/my_jiwer/__pycache__/transformations.cpython-38.pyc ADDED
Binary file (1.19 kB). View file
 
my_jiwer/my_jiwer/__pycache__/transforms.cpython-38.pyc ADDED
Binary file (21.8 kB). View file
 
my_jiwer/my_jiwer/alignment.py ADDED
@@ -0,0 +1,185 @@
+ #
+ # JiWER - Jitsi Word Error Rate
+ #
+ # Copyright @ 2018 - present 8x8, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ """
+ Utility method to visualize the alignment between one or more reference and hypothesis
+ pairs.
+ """
+
+ from typing import List, Union
+
+
+ from .process import CharacterOutput, WordOutput, AlignmentChunk
+
+ __all__ = ["visualize_alignment"]
+
+
+ def visualize_alignment(
+     output: Union[WordOutput, CharacterOutput],
+     show_measures: bool = True,
+     skip_correct: bool = True,
+ ) -> str:
+     """
+     Visualize the output of [jiwer.process_words][process.process_words] and
+     [jiwer.process_characters][process.process_characters]. The visualization
+     shows the alignment between each processed reference and hypothesis pair.
+     If `show_measures=True`, the output string will also contain all measures in the
+     output.
+
+     Args:
+         output: The processed output of reference and hypothesis pair(s).
+         show_measures: If enabled, the visualization will include measures like the WER
+             or CER
+         skip_correct: If enabled, the visualization will exclude correct reference and hypothesis pairs
+
+     Returns:
+         (str): The visualization as a string
+
+     Example:
+         This code snippet
+         ```python
+         import jiwer
+
+         out = jiwer.process_words(
+             ["short one here", "quite a bit of longer sentence"],
+             ["shoe order one", "quite bit of an even longest sentence here"],
+         )
+
+         print(jiwer.visualize_alignment(out))
+         ```
+         will produce this visualization:
+         ```txt
+         sentence 1
+         REF: **** short one here
+         HYP: shoe order one ****
+                 I     S        D
+
+         sentence 2
+         REF: quite a bit of ** ****  longer sentence ****
+         HYP: quite * bit of an even longest sentence here
+                    D         I    I       S             I
+
+         number of sentences: 2
+         substitutions=2 deletions=2 insertions=4 hits=5
+
+         mer=61.54%
+         wil=74.75%
+         wip=25.25%
+         wer=88.89%
+         ```
+
+         When `show_measures=False`, only the alignment will be printed:
+
+         ```txt
+         sentence 1
+         REF: **** short one here
+         HYP: shoe order one ****
+                 I     S        D
+
+         sentence 2
+         REF: quite a bit of ** ****  longer sentence ****
+         HYP: quite * bit of an even longest sentence here
+                    D         I    I       S             I
+         ```
+     """
+     references = output.references
+     hypothesis = output.hypotheses
+     alignment = output.alignments
+     is_cer = isinstance(output, CharacterOutput)
+
+     final_str = ""
+     for idx, (gt, hp, chunks) in enumerate(zip(references, hypothesis, alignment)):
+         if skip_correct and len(chunks) == 1 and chunks[0].type == "equal":
+             continue
+
+         final_str += f"sentence {idx+1}\n"
+         final_str += _construct_comparison_string(
+             gt, hp, chunks, include_space_seperator=not is_cer
+         )
+         final_str += "\n"
+
+     if show_measures:
+         final_str += f"number of sentences: {len(alignment)}\n"
+         final_str += f"substitutions={output.substitutions} "
+         final_str += f"deletions={output.deletions} "
+         final_str += f"insertions={output.insertions} "
+         final_str += f"hits={output.hits}\n"
+
+         if is_cer:
+             final_str += f"\ncer={output.cer*100:.2f}%\n"
+         else:
+             final_str += f"\nmer={output.mer*100:.2f}%"
+             final_str += f"\nwil={output.wil*100:.2f}%"
+             final_str += f"\nwip={output.wip*100:.2f}%"
+             final_str += f"\nwer={output.wer*100:.2f}%\n"
+     else:
+         # remove last newline
+         final_str = final_str[:-1]
+
+     return final_str
+
+
+ def _construct_comparison_string(
+     reference: List[str],
+     hypothesis: List[str],
+     ops: List[AlignmentChunk],
+     include_space_seperator: bool = False,
+ ) -> str:
+     ref_str = "REF: "
+     hyp_str = "HYP: "
+     op_str = "     "
+
+     for op in ops:
+         if op.type == "equal" or op.type == "substitute":
+             ref = reference[op.ref_start_idx : op.ref_end_idx]
+             hyp = hypothesis[op.hyp_start_idx : op.hyp_end_idx]
+             op_char = " " if op.type == "equal" else "s"
+         elif op.type == "delete":
+             ref = reference[op.ref_start_idx : op.ref_end_idx]
+             hyp = ["*" for _ in range(len(ref))]
+             op_char = "d"
+         elif op.type == "insert":
+             hyp = hypothesis[op.hyp_start_idx : op.hyp_end_idx]
+             ref = ["*" for _ in range(len(hyp))]
+             op_char = "i"
+         else:
+             raise ValueError(f"unparseable op name={op.type}")
+
+         op_chars = [op_char for _ in range(len(ref))]
+         for rf, hp, c in zip(ref, hyp, op_chars):
+             str_len = max(len(rf), len(hp), len(c))
+
+             if rf == "*":
+                 rf = "".join(["*"] * str_len)
+             elif hp == "*":
+                 hp = "".join(["*"] * str_len)
+
+             ref_str += f"{rf:>{str_len}}"
+             hyp_str += f"{hp:>{str_len}}"
+             op_str += f"{c.upper():>{str_len}}"
+
+             if include_space_seperator:
+                 ref_str += " "
+                 hyp_str += " "
+                 op_str += " "
+
+     if include_space_seperator:
+         # remove last space
+         return f"{ref_str[:-1]}\n{hyp_str[:-1]}\n{op_str[:-1]}\n"
+     else:
+         return f"{ref_str}\n{hyp_str}\n{op_str}\n"
my_jiwer/my_jiwer/cli.py ADDED
@@ -0,0 +1,133 @@
+ #
+ # JiWER - Jitsi Word Error Rate
+ #
+ # Copyright @ 2018 - present 8x8, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ """
+ Provide a simple CLI wrapper for JiWER. The CLI does not support custom transforms.
+ """
+
+ import click
+ import pathlib
+
+ import my_jiwer as jiwer  # import the local (renamed) package, not the upstream jiwer
+
+
+ @click.command()
+ @click.option(
+     "-r",
+     "--reference",
+     "reference_file",
+     type=pathlib.Path,
+     required=True,
+     help="Path to new-line delimited text file of reference sentences.",
+ )
+ @click.option(
+     "-h",
+     "--hypothesis",
+     "hypothesis_file",
+     type=pathlib.Path,
+     required=True,
+     help="Path to new-line delimited text file of hypothesis sentences.",
+ )
+ @click.option(
+     "--cer",
+     "-c",
+     "compute_cer",
+     is_flag=True,
+     default=False,
+     help="Compute CER instead of WER.",
+ )
+ @click.option(
+     "--align",
+     "-a",
+     "show_alignment",
+     is_flag=True,
+     default=False,
+     help="Print alignment of each sentence.",
+ )
+ @click.option(
+     "--global",
+     "-g",
+     "global_alignment",
+     is_flag=True,
+     default=False,
+     help="Apply a global minimal alignment between reference and hypothesis sentences "
+     "before computing the WER.",
+ )
+ def cli(
+     reference_file: pathlib.Path,
+     hypothesis_file: pathlib.Path,
+     compute_cer: bool,
+     show_alignment: bool,
+     global_alignment: bool,
+ ):
+     """
+     JiWER is a python tool for computing the word-error-rate of ASR systems. To
+     use this CLI, store the reference and hypothesis sentences in a text file,
+     where each sentence is delimited by a new-line character. The text files are
+     expected to have an equal number of lines, unless the `-g` flag is used, in
+     which case the WER is computed over a global minimal alignment of all
+     sentences.
+
+     """
+     with reference_file.open("r") as f:
+         reference_sentences = [
+             ln.strip() for ln in f.readlines() if len(ln.strip()) > 1
+         ]
+
+     with hypothesis_file.open("r") as f:
+         hypothesis_sentences = [
+             ln.strip() for ln in f.readlines() if len(ln.strip()) > 1
+         ]
+
+     if not global_alignment and len(reference_sentences) != len(hypothesis_sentences):
+         raise ValueError(
+             f"Number of sentences does not match. "
+             f"{reference_file} contains {len(reference_sentences)} lines. "
+             f"{hypothesis_file} contains {len(hypothesis_sentences)} lines."
+         )
+
+     if global_alignment and compute_cer:
+         raise ValueError("--global and --cer are mutually exclusive.")
+
+     if compute_cer:
+         out = jiwer.process_characters(
+             reference_sentences,
+             hypothesis_sentences,
+         )
+     else:
+         if global_alignment:
+             out = jiwer.process_words(
+                 reference_sentences,
+                 hypothesis_sentences,
+                 reference_transform=jiwer.wer_contiguous,
+                 hypothesis_transform=jiwer.wer_contiguous,
+             )
+         else:
+             out = jiwer.process_words(reference_sentences, hypothesis_sentences)
+
+     if show_alignment:
+         print(jiwer.visualize_alignment(out, show_measures=True))
+     else:
+         if compute_cer:
+             print(out.cer)
+         else:
+             print(out.wer)
+
+
+ if __name__ == "__main__":
+     cli()
my_jiwer/my_jiwer/measures.py ADDED
@@ -0,0 +1,488 @@
+ #
+ # JiWER - Jitsi Word Error Rate
+ #
+ # Copyright @ 2018 - present 8x8, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ """
+ Convenience methods for calculating a number of similarity error
+ measures between a reference and hypothesis sentence.
+ These measures are commonly
+ used to measure the performance of an automatic speech recognition
+ (ASR) system.
+
+ The following measures are implemented:
+
+ - Word Error Rate (WER), which is where this library got its name from. This
+   has long been (and arguably still is) the de facto standard for computing
+   ASR performance.
+ - Match Error Rate (MER)
+ - Word Information Lost (WIL)
+ - Word Information Preserved (WIP)
+ - Character Error Rate (CER)
+
+ Note that these functions merely call
+ [jiwer.process_words][process.process_words] and
+ [jiwer.process_characters][process.process_characters].
+ It is more efficient to call `process_words` or `process_characters` and access the
+ results from the
+ [jiwer.WordOutput][process.WordOutput] and
+ [jiwer.CharacterOutput][process.CharacterOutput]
+ classes.
+ """
+ import warnings
+
+ from typing import List, Union, Dict, Any
+
+ from . import transforms as tr
+ from .transformations import wer_default, cer_default
+ from .process import process_words, process_words_embdiff, process_characters
+
+ __all__ = [
+     "wer",
+     "wer_embdiff",
+     "mer",
+     "wil",
+     "wip",
+     "cer",
+     "compute_measures",
+ ]
+
+ ########################################################################################
+ # Implementation of the WER method and co, exposed publicly
+
+
+ def wer(
+     reference: Union[str, List[str]] = None,
+     hypothesis: Union[str, List[str]] = None,
+     reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     truth: Union[str, List[str]] = None,
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
+ ) -> float:
+     """
+     Calculate the word error rate (WER) between one or more reference and
+     hypothesis sentences.
+
+     Args:
+         reference: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         reference_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+         truth: Deprecated, renamed to `reference`
+         truth_transform: Deprecated, renamed to `reference_transform`
+
+     Deprecated:
+         Arguments `truth` and `truth_transform` have been renamed to
+         `reference` and `reference_transform`, respectively. Therefore, the
+         keyword arguments `truth` and `truth_transform` will be removed in the
+         next release. At the same time, `reference` and `reference_transform`
+         will lose their default value.
+
+     Returns:
+         (float): The word error rate of the given reference and
+         hypothesis sentence(s).
+     """
+     (
+         reference,
+         hypothesis,
+         reference_transform,
+         hypothesis_transform,
+     ) = _deprecate_truth(
+         reference=reference,
+         hypothesis=hypothesis,
+         truth=truth,
+         reference_transform=reference_transform,
+         truth_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     output = process_words(
+         reference, hypothesis, reference_transform, hypothesis_transform
+     )
+     return output.wer
+
+ def wer_embdiff(
+     reference: Union[str, List[str]] = None,
+     hypothesis: Union[str, List[str]] = None,
+     reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     truth: Union[str, List[str]] = None,
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
+ ):
+     """
+     Calculate the word error rate (WER) and the edit operations between one or
+     more reference and hypothesis sentences.
+
+     Args:
+         reference: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         reference_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+         truth: Deprecated, renamed to `reference`
+         truth_transform: Deprecated, renamed to `reference_transform`
+
+     Deprecated:
+         Arguments `truth` and `truth_transform` have been renamed to
+         `reference` and `reference_transform`, respectively. Therefore, the
+         keyword arguments `truth` and `truth_transform` will be removed in the
+         next release. At the same time, `reference` and `reference_transform`
+         will lose their default value.
+
+     Returns:
+         (tuple): The processed word output and the edit operations of the
+         given reference and hypothesis sentence(s).
+     """
+     (
+         reference,
+         hypothesis,
+         reference_transform,
+         hypothesis_transform,
+     ) = _deprecate_truth(
+         reference=reference,
+         hypothesis=hypothesis,
+         truth=truth,
+         reference_transform=reference_transform,
+         truth_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     output, edit_ops = process_words_embdiff(
+         reference, hypothesis, reference_transform, hypothesis_transform
+     )
+     return output, edit_ops
+
+
+ def mer(
+     reference: Union[str, List[str]] = None,
+     hypothesis: Union[str, List[str]] = None,
+     reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     truth: Union[str, List[str]] = None,
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
+ ) -> float:
+     """
+     Calculate the match error rate (MER) between one or more reference and
+     hypothesis sentences.
+
+     Args:
+         reference: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         reference_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+         truth: Deprecated, renamed to `reference`
+         truth_transform: Deprecated, renamed to `reference_transform`
+
+     Deprecated:
+         Arguments `truth` and `truth_transform` have been renamed to
+         `reference` and `reference_transform`, respectively. Therefore, the
+         keyword arguments `truth` and `truth_transform` will be removed in the
+         next release. At the same time, `reference` and `reference_transform`
+         will lose their default value.
+
+     Returns:
+         (float): The match error rate of the given reference and
+         hypothesis sentence(s).
+     """
+     (
+         reference,
+         hypothesis,
+         reference_transform,
+         hypothesis_transform,
+     ) = _deprecate_truth(
+         reference=reference,
+         hypothesis=hypothesis,
+         truth=truth,
+         reference_transform=reference_transform,
+         truth_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     output = process_words(
+         reference, hypothesis, reference_transform, hypothesis_transform
+     )
+
+     return output.mer
+
+
+ def wip(
+     reference: Union[str, List[str]] = None,
+     hypothesis: Union[str, List[str]] = None,
+     reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     truth: Union[str, List[str]] = None,
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
+ ) -> float:
+     """
+     Calculate the word information preserved (WIP) between one or more reference and
+     hypothesis sentences.
+
+     Args:
+         reference: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         reference_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+         truth: Deprecated, renamed to `reference`
+         truth_transform: Deprecated, renamed to `reference_transform`
+
+     Deprecated:
+         Arguments `truth` and `truth_transform` have been renamed to
+         `reference` and `reference_transform`, respectively. Therefore, the
+         keyword arguments `truth` and `truth_transform` will be removed in the
+         next release. At the same time, `reference` and `reference_transform`
+         will lose their default value.
+
+     Returns:
+         (float): The word information preserved of the given reference and
+         hypothesis sentence(s).
+     """
+     (
+         reference,
+         hypothesis,
+         reference_transform,
+         hypothesis_transform,
+     ) = _deprecate_truth(
+         reference=reference,
+         hypothesis=hypothesis,
+         truth=truth,
+         reference_transform=reference_transform,
+         truth_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     output = process_words(
+         reference, hypothesis, reference_transform, hypothesis_transform
+     )
+
+     return output.wip
+
+
+ def wil(
+     reference: Union[str, List[str]] = None,
+     hypothesis: Union[str, List[str]] = None,
+     reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     truth: Union[str, List[str]] = None,
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
+ ) -> float:
+     """
+     Calculate the word information lost (WIL) between one or more reference and
+     hypothesis sentences.
+
+     Args:
+         reference: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         reference_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+         truth: Deprecated, renamed to `reference`
+         truth_transform: Deprecated, renamed to `reference_transform`
+
+     Deprecated:
+         Arguments `truth` and `truth_transform` have been renamed to
+         `reference` and `reference_transform`, respectively. Therefore, the
+         keyword arguments `truth` and `truth_transform` will be removed in the
+         next release. At the same time, `reference` and `reference_transform`
+         will lose their default value.
+
+     Returns:
+         (float): The word information lost of the given reference and
+         hypothesis sentence(s).
+     """
+     (
+         reference,
+         hypothesis,
+         reference_transform,
+         hypothesis_transform,
+     ) = _deprecate_truth(
+         reference=reference,
+         hypothesis=hypothesis,
+         truth=truth,
+         reference_transform=reference_transform,
+         truth_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     output = process_words(
+         reference, hypothesis, reference_transform, hypothesis_transform
+     )
+
+     return output.wil
+
+
+ ########################################################################################
+ # deprecated method 'compute_measures'
+
+
+ def compute_measures(
+     truth: Union[str, List[str]],
+     hypothesis: Union[str, List[str]],
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
+ ) -> Dict[str, Any]:
+     """
+     Efficiently computes all measures using only one function call.
+
+     Deprecated:
+         Deprecated method. Superseded by [jiwer.process_words][process.process_words].
+         This method will be removed in the next release.
+
+     Args:
+         truth: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         truth_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+
+     Returns:
+         (dict): A dictionary containing key-value pairs for all measures.
+
+     """
+     warnings.warn(
+         DeprecationWarning(
+             "jiwer.compute_measures() is deprecated. Please use jiwer.process_words()."
+         )
+     )
+
+     output = process_words(
+         reference=truth,
+         hypothesis=hypothesis,
+         reference_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     return {
+         "wer": output.wer,
+         "mer": output.mer,
+         "wil": output.wil,
+         "wip": output.wip,
+         "hits": output.hits,
+         "substitutions": output.substitutions,
+         "deletions": output.deletions,
+         "insertions": output.insertions,
+         "ops": output.alignments,
+         "truth": output.references,
+         "hypothesis": output.hypotheses,
+     }
+
+
+ ########################################################################################
+ # Implementation of character-error-rate, exposed publicly
+
+
+ def cer(
+     reference: Union[str, List[str]] = None,
+     hypothesis: Union[str, List[str]] = None,
+     reference_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
+     return_dict: bool = False,
+     truth: Union[str, List[str]] = None,
+     truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
+ ) -> Union[float, Dict[str, Any]]:
+     """
+     Calculate the character error rate (CER) between one or more reference and
+     hypothesis sentences.
+
+     Args:
+         reference: The reference sentence(s)
+         hypothesis: The hypothesis sentence(s)
+         reference_transform: The transformation(s) to apply to the reference string(s)
+         hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
+         return_dict: Deprecated option to return more results in a dict instead of
+             returning only the CER as a single float value
+         truth: Deprecated, renamed to `reference`
+         truth_transform: Deprecated, renamed to `reference_transform`
+
+     Deprecated:
+         Argument `return_dict` is deprecated. Please use
+         [jiwer.process_characters][process.process_characters] instead.
+
+         Arguments `truth` and `truth_transform` have been renamed to
+         `reference` and `reference_transform`, respectively. Therefore, the
+         keyword arguments `truth` and `truth_transform` will be removed in the
+         next release. At the same time, `reference` and `reference_transform`
+         will lose their default value.
+
+     Returns:
+         (float): The character error rate of the given reference and hypothesis
+         sentence(s).
+     """
+     (
+         reference,
+         hypothesis,
+         reference_transform,
+         hypothesis_transform,
+     ) = _deprecate_truth(
+         reference=reference,
+         hypothesis=hypothesis,
+         truth=truth,
+         reference_transform=reference_transform,
+         truth_transform=truth_transform,
+         hypothesis_transform=hypothesis_transform,
+     )
+
+     output = process_characters(
+         reference, hypothesis, reference_transform, hypothesis_transform
+     )
+
+     if return_dict:
+         warnings.warn(
+             DeprecationWarning(
+                 "`return_dict` is deprecated, "
+                 "please use jiwer.process_characters() instead."
+             )
+         )
+         return {
+             "cer": output.cer,
+             "hits": output.hits,
+             "substitutions": output.substitutions,
+             "deletions": output.deletions,
+             "insertions": output.insertions,
+         }
+     else:
+         return output.cer
+
+
+ def _deprecate_truth(
+     reference: Union[str, List[str]],
+     hypothesis: Union[str, List[str]],
+     truth: Union[str, List[str]],
+     reference_transform: Union[tr.Compose, tr.AbstractTransform],
+     hypothesis_transform: Union[tr.Compose, tr.AbstractTransform],
+     truth_transform: Union[tr.Compose, tr.AbstractTransform],
+ ):
+     if truth is not None:
+         warnings.warn(
+             DeprecationWarning(
+                 "keyword argument `truth` is deprecated, please use `reference`."
+             )
+         )
+         if reference is not None:
+             raise ValueError("cannot give `reference` and `truth`")
+         reference = truth
+     if truth_transform is not None:
+         warnings.warn(
+             DeprecationWarning(
+                 "keyword argument `truth_transform` is deprecated, "
+                 "please use `reference_transform`."
+             )
+         )
+         reference_transform = truth_transform
+
+     if reference is None or hypothesis is None:
+         raise ValueError(
+             "detected default values for reference or hypothesis arguments, "
+             "please provide actual string or list of strings"
+         )
+
+     return reference, hypothesis, reference_transform, hypothesis_transform
my_jiwer/my_jiwer/process.py ADDED
@@ -0,0 +1,525 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ The core algorithm(s) for processing one or more reference and hypothesis sentences
21
+ so that measures can be computed and an alignment can be visualized.
22
+ """
23
+
24
+ from dataclasses import dataclass
25
+
26
+ from typing import Any, List, Tuple, Union
27
+ from itertools import chain
28
+
29
+ import rapidfuzz
30
+
31
+ from rapidfuzz.distance import Editops, Opcodes
32
+
33
+ from jiwer import transforms as tr
34
+ from jiwer.transformations import wer_default, cer_default
35
+
36
+
37
+ __all__ = [
38
+ "AlignmentChunk",
39
+ "WordOutput",
40
+ "CharacterOutput",
41
+ "process_words",
42
+ "process_words_embdiff",
43
+ "process_characters",
44
+ ]
45
+
46
+
47
+ @dataclass
48
+ class AlignmentChunk:
49
+ """
50
+ Define an alignment between subsequences of the reference and hypothesis.
51
+
52
+ Attributes:
53
+ type: one of `equal`, `substitute`, `insert`, or `delete`
54
+ ref_start_idx: the start index of the reference subsequence
55
+ ref_end_idx: the end index of the reference subsequence
56
+ hyp_start_idx: the start index of the hypothesis subsequence
57
+ hyp_end_idx: the end index of the hypothesis subsequence
58
+ """
59
+
60
+ type: str
61
+
62
+ ref_start_idx: int
63
+ ref_end_idx: int
64
+
65
+ hyp_start_idx: int
66
+ hyp_end_idx: int
67
+
68
+ def __post_init__(self):
69
+ if self.type not in ["replace", "insert", "delete", "equal", "substitute"]:
70
+ raise ValueError(f"unknown alignment chunk type '{self.type}'")
71
+
72
+ # rapidfuzz uses replace instead of substitute... For consistency, we change it
73
+ if self.type == "replace":
74
+ self.type = "substitute"
75
+
76
+ if self.ref_start_idx > self.ref_end_idx:
77
+ raise ValueError(
78
+ f"ref_start_idx={self.ref_start_idx} "
79
+ f"is larger "
80
+ f"than ref_end_idx={self.ref_end_idx}"
81
+ )
82
+ if self.hyp_start_idx > self.hyp_end_idx:
83
+ raise ValueError(
84
+ f"hyp_start_idx={self.hyp_start_idx} "
85
+ f"is larger "
86
+ f"than hyp_end_idx={self.hyp_end_idx}"
87
+ )
88
+
89
+
90
+ @dataclass
91
+ class WordOutput:
92
+ """
93
+ The output of calculating the word-level levenshtein distance between one or more
94
+ reference and hypothesis sentence(s).
95
+
96
+ Attributes:
97
+ references: The reference sentences
98
+ hypotheses: The hypothesis sentences
99
+ alignments: The alignment between reference and hypothesis sentences
100
+ wer: The word error rate
101
+ mer: The match error rate
102
+ wil: The word information lost measure
103
+ wip: The word information preserved measure
104
+ hits: The number of correct words between reference and hypothesis sentences
105
+ substitutions: The number of substitutions required to transform hypothesis
106
+ sentences to reference sentences
107
+ insertions: The number of insertions required to transform hypothesis
108
+ sentences to reference sentences
109
+ deletions: The number of deletions required to transform hypothesis
110
+ sentences to reference sentences
111
+
112
+ """
113
+
114
+ # processed input data
115
+ references: List[List[str]]
116
+ hypotheses: List[List[str]]
117
+
118
+ # alignment
119
+ alignments: List[List[AlignmentChunk]]
120
+
121
+ # measures
122
+ wer: float
123
+ mer: float
124
+ wil: float
125
+ wip: float
126
+
127
+ # stats
128
+ hits: int
129
+ substitutions: int
130
+ insertions: int
131
+ deletions: int
132
+
133
+
134
+ def process_words(
135
+ reference: Union[str, List[str]],
136
+ hypothesis: Union[str, List[str]],
137
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
138
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
139
+ ) -> WordOutput:
140
+ """
141
+ Compute the word-level levenshtein distance and alignment between one or more
142
+ reference and hypothesis sentences. Based on the result, multiple measures
143
+ can be computed, such as the word error rate.
144
+
145
+ Args:
146
+ reference: The reference sentence(s)
147
+ hypothesis: The hypothesis sentence(s)
148
+ reference_transform: The transformation(s) to apply to the reference string(s)
149
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
150
+
151
+ Returns:
152
+ (WordOutput): The processed reference and hypothesis sentences
153
+ """
154
+ # validate input type
155
+ if isinstance(reference, str):
156
+ reference = [reference]
157
+ if isinstance(hypothesis, str):
158
+ hypothesis = [hypothesis]
159
+ if any(len(t) == 0 for t in reference):
160
+ raise ValueError("one or more references are empty strings")
161
+
162
+ # pre-process reference and hypothesis by applying transforms
163
+ ref_transformed = _apply_transform(
164
+ reference, reference_transform, is_reference=True
165
+ )
166
+ hyp_transformed = _apply_transform(
167
+ hypothesis, hypothesis_transform, is_reference=False
168
+ )
169
+
170
+ if len(ref_transformed) != len(hyp_transformed):
171
+ raise ValueError(
172
+ "After applying the transforms on the reference and hypothesis sentences, "
173
+ f"their lengths must match. "
174
+ f"Instead got {len(ref_transformed)} reference and "
175
+ f"{len(hyp_transformed)} hypothesis sentences."
176
+ )
177
+
178
+ # Change each word into a unique character in order to compute
179
+ # word-level levenshtein distance
180
+ ref_as_chars, hyp_as_chars = _word2char(ref_transformed, hyp_transformed)
181
+
182
+ # keep track of total hits, substitutions, deletions and insertions
183
+ # across all input sentences
184
+ num_hits, num_substitutions, num_deletions, num_insertions = 0, 0, 0, 0
185
+
186
+ # also keep track of the total number of words in the reference and hypothesis
187
+ num_rf_words, num_hp_words = 0, 0
188
+
189
+ # and finally, keep track of the alignment between each reference and hypothesis
190
+ alignments = []
191
+
192
+ for reference_sentence, hypothesis_sentence in zip(ref_as_chars, hyp_as_chars):
193
+ # Get the required edit operations to transform reference into hypothesis
194
+ edit_ops = rapidfuzz.distance.Levenshtein.editops(
195
+ reference_sentence, hypothesis_sentence
196
+ )
197
+
198
+ # count the number of edits of each type
199
+ substitutions = sum(1 if op.tag == "replace" else 0 for op in edit_ops)
200
+ deletions = sum(1 if op.tag == "delete" else 0 for op in edit_ops)
201
+ insertions = sum(1 if op.tag == "insert" else 0 for op in edit_ops)
202
+ hits = len(reference_sentence) - (substitutions + deletions)
203
+
204
+ # update state
205
+ num_hits += hits
206
+ num_substitutions += substitutions
207
+ num_deletions += deletions
208
+ num_insertions += insertions
209
+ num_rf_words += len(reference_sentence)
210
+ num_hp_words += len(hypothesis_sentence)
211
+ alignments.append(
212
+ [
213
+ AlignmentChunk(
214
+ type=op.tag,
215
+ ref_start_idx=op.src_start,
216
+ ref_end_idx=op.src_end,
217
+ hyp_start_idx=op.dest_start,
218
+ hyp_end_idx=op.dest_end,
219
+ )
220
+ for op in Opcodes.from_editops(edit_ops)
221
+ ]
222
+ )
223
+
224
+ # Compute all measures
225
+ S, D, I, H = num_substitutions, num_deletions, num_insertions, num_hits
226
+
227
+ wer = float(S + D + I) / float(H + S + D)
228
+ mer = float(S + D + I) / float(H + S + D + I)
229
+ wip = (
230
+ (float(H) / num_rf_words) * (float(H) / num_hp_words)
231
+ if num_hp_words >= 1
232
+ else 0
233
+ )
234
+ wil = 1 - wip
235
+
236
+ # return all output
237
+ return WordOutput(
238
+ references=ref_transformed,
239
+ hypotheses=hyp_transformed,
240
+ alignments=alignments,
241
+ wer=wer,
242
+ mer=mer,
243
+ wil=wil,
244
+ wip=wip,
245
+ hits=num_hits,
246
+ substitutions=num_substitutions,
247
+ insertions=num_insertions,
248
+ deletions=num_deletions,
249
+ )
250
+
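A small worked example of the bookkeeping above (a sketch; it assumes `process_words` is re-exported as `jiwer.process_words`, which the test suite below also relies on):

```python
import jiwer

out = jiwer.process_words("X Y X", "X Z")

# one hit (X), one substitution (Y -> Z), one deletion (trailing X), no insertions
print(out.hits, out.substitutions, out.deletions, out.insertions)  # 1 1 1 0

# wer = (S + D + I) / (H + S + D) = 2 / 3
# wip = (H / #ref words) * (H / #hyp words) = (1 / 3) * (1 / 2), so wil = 5 / 6
print(round(out.wer, 3), round(out.wil, 3))  # 0.667 0.833
```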
251
+ def process_words_embdiff(
252
+ reference: Union[str, List[str]],
253
+ hypothesis: Union[str, List[str]],
254
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
255
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
256
+ ) -> Tuple[WordOutput, Editops]:
257
+ """
258
+ Compute the word-level levenshtein distance and alignment between one or more
259
+ reference and hypothesis sentences, exactly as `process_words` does, but additionally
260
+ return the raw edit operations computed along the way.
261
+
262
+ Args:
263
+ reference: The reference sentence(s)
264
+ hypothesis: The hypothesis sentence(s)
265
+ reference_transform: The transformation(s) to apply to the reference string(s)
266
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
267
+
268
+ Returns:
269
+ (Tuple[WordOutput, Editops]): The processed reference and hypothesis sentences, plus the raw edit operations of the last processed sentence pair.
270
+ """
271
+ # validate input type
272
+ if isinstance(reference, str):
273
+ reference = [reference]
274
+ if isinstance(hypothesis, str):
275
+ hypothesis = [hypothesis]
276
+ if any(len(t) == 0 for t in reference):
277
+ raise ValueError("one or more references are empty strings")
278
+
279
+ # pre-process reference and hypothesis by applying transforms
280
+ ref_transformed = _apply_transform(
281
+ reference, reference_transform, is_reference=True
282
+ )
283
+ hyp_transformed = _apply_transform(
284
+ hypothesis, hypothesis_transform, is_reference=False
285
+ )
286
+
287
+ if len(ref_transformed) != len(hyp_transformed):
288
+ raise ValueError(
289
+ "After applying the transforms on the reference and hypothesis sentences, "
290
+ f"their lengths must match. "
291
+ f"Instead got {len(ref_transformed)} reference and "
292
+ f"{len(hyp_transformed)} hypothesis sentences."
293
+ )
294
+
295
+ # Change each word into a unique character in order to compute
296
+ # word-level levenshtein distance
297
+ ref_as_chars, hyp_as_chars = _word2char(ref_transformed, hyp_transformed)
298
+
299
+ # keep track of total hits, substitutions, deletions and insertions
300
+ # across all input sentences
301
+ num_hits, num_substitutions, num_deletions, num_insertions = 0, 0, 0, 0
302
+
303
+ # also keep track of the total number of words in the reference and hypothesis
304
+ num_rf_words, num_hp_words = 0, 0
305
+
306
+ # and finally, keep track of the alignment between each reference and hypothesis
307
+ alignments = []
308
+
309
+ for reference_sentence, hypothesis_sentence in zip(ref_as_chars, hyp_as_chars):
310
+ # Get the required edit operations to transform reference into hypothesis
311
+ edit_ops = rapidfuzz.distance.Levenshtein.editops(
312
+ reference_sentence, hypothesis_sentence
313
+ )
314
+
315
+ # count the number of edits of each type
316
+ substitutions = sum(1 if op.tag == "replace" else 0 for op in edit_ops)
317
+ deletions = sum(1 if op.tag == "delete" else 0 for op in edit_ops)
318
+ insertions = sum(1 if op.tag == "insert" else 0 for op in edit_ops)
319
+ hits = len(reference_sentence) - (substitutions + deletions)
320
+
321
+ # update state
322
+ num_hits += hits
323
+ num_substitutions += substitutions
324
+ num_deletions += deletions
325
+ num_insertions += insertions
326
+ num_rf_words += len(reference_sentence)
327
+ num_hp_words += len(hypothesis_sentence)
328
+ alignments.append(
329
+ [
330
+ AlignmentChunk(
331
+ type=op.tag,
332
+ ref_start_idx=op.src_start,
333
+ ref_end_idx=op.src_end,
334
+ hyp_start_idx=op.dest_start,
335
+ hyp_end_idx=op.dest_end,
336
+ )
337
+ for op in Opcodes.from_editops(edit_ops)
338
+ ]
339
+ )
340
+
341
+ # Compute all measures
342
+ S, D, I, H = num_substitutions, num_deletions, num_insertions, num_hits
343
+
344
+ wer = float(S + D + I) / float(H + S + D)
345
+ mer = float(S + D + I) / float(H + S + D + I)
346
+ wip = (
347
+ (float(H) / num_rf_words) * (float(H) / num_hp_words)
348
+ if num_hp_words >= 1
349
+ else 0
350
+ )
351
+ wil = 1 - wip
352
+
353
+ # return all output
354
+ return WordOutput(
355
+ references=ref_transformed,
356
+ hypotheses=hyp_transformed,
357
+ alignments=alignments,
358
+ wer=wer,
359
+ mer=mer,
360
+ wil=wil,
361
+ wip=wip,
362
+ hits=num_hits,
363
+ substitutions=num_substitutions,
364
+ insertions=num_insertions,
365
+ deletions=num_deletions,
366
+ ), edit_ops
367
+
368
+
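Since `process_words_embdiff` returns a tuple, callers unpack it. Note that `edit_ops` is the loop variable above, so for multi-sentence input it holds only the edit operations of the final sentence pair. A sketch, assuming the function is re-exported at package level like its sibling:

```python
import jiwer

output, edit_ops = jiwer.process_words_embdiff("X Y X", "X Z")

print(output.wer)  # 0.666...
for op in edit_ops:
    # rapidfuzz edit operations over *word* positions, because every word
    # was mapped to a single unicode character by _word2char
    print(op.tag, op.src_pos, op.dest_pos)
# replace 1 1  (Y -> Z)
# delete 2 2   (trailing X)
```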
369
+ ########################################################################################
370
+ # Implementation of character error rate
371
+
372
+
373
+ @dataclass
374
+ class CharacterOutput:
375
+ """
376
+ The output of calculating the character-level levenshtein distance between one or
377
+ more reference and hypothesis sentence(s).
378
+
379
+ Attributes:
380
+ references: The reference sentences
381
+ hypotheses: The hypothesis sentences
382
+ alignments: The alignment between reference and hypothesis sentences
383
+ cer: The character error rate
384
+ hits: The number of correct characters between reference and hypothesis
385
+ sentences
386
+ substitutions: The number of substitutions required to transform hypothesis
387
+ sentences to reference sentences
388
+ insertions: The number of insertions required to transform hypothesis
389
+ sentences to reference sentences
390
+ deletions: The number of deletions required to transform hypothesis
391
+ sentences to reference sentences
392
+ """
393
+
394
+ # processed input data
395
+ references: List[List[str]]
396
+ hypotheses: List[List[str]]
397
+
398
+ # alignment
399
+ alignments: List[List[AlignmentChunk]]
400
+
401
+ # measures
402
+ cer: float
403
+
404
+ # stats
405
+ hits: int
406
+ substitutions: int
407
+ insertions: int
408
+ deletions: int
409
+
410
+
411
+ def process_characters(
412
+ reference: Union[str, List[str]],
413
+ hypothesis: Union[str, List[str]],
414
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
415
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
416
+ ) -> CharacterOutput:
417
+ """
418
+ Compute the character-level levenshtein distance and alignment between one or more
419
+ reference and hypothesis sentences. Based on the result, the character error rate
420
+ can be computed.
421
+
422
+ Note that, by default, this method includes space (` `) as a
423
+ character over which the error rate is computed. If this is not desired, the
424
+ reference and hypothesis transform need to be modified.
425
+
426
+ Args:
427
+ reference: The reference sentence(s)
428
+ hypothesis: The hypothesis sentence(s)
429
+ reference_transform: The transformation(s) to apply to the reference string(s)
430
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
431
+
432
+ Returns:
433
+ (CharacterOutput): The processed reference and hypothesis sentences.
434
+
435
+ """
436
+ # make sure the transforms end with tr.ReduceToListOfListOfChars();
437
+
438
+ # computing CER is then the same as word processing, with every "word" being a single character
439
+ result = process_words(
440
+ reference, hypothesis, reference_transform, hypothesis_transform
441
+ )
442
+
443
+ return CharacterOutput(
444
+ references=result.references,
445
+ hypotheses=result.hypotheses,
446
+ alignments=result.alignments,
447
+ cer=result.wer,
448
+ hits=result.hits,
449
+ substitutions=result.substitutions,
450
+ insertions=result.insertions,
451
+ deletions=result.deletions,
452
+ )
453
+
454
+
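A quick illustration of the docstring's note that spaces count as characters (a sketch, again assuming the package-level re-export):

```python
import jiwer

print(jiwer.process_characters("kitten", "sitten").cer)  # 1 sub / 6 chars ~ 0.167

# the space is a character under the default transform:
print(jiwer.process_characters("a b", "ab").cer)  # 1 deletion / 3 chars ~ 0.333
```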
455
+ ################################################################################
456
+ # Implementation of helper methods
457
+
458
+
459
+ def _apply_transform(
460
+ sentence: Union[str, List[str]],
461
+ transform: Union[tr.Compose, tr.AbstractTransform],
462
+ is_reference: bool,
463
+ ):
464
+ # Apply transforms. The transforms should collapse input to a
465
+ # list with lists of words
466
+ transformed_sentence = transform(sentence)
467
+
468
+ # Validate the output is a list containing lists of strings
469
+ if is_reference:
470
+ if not _is_list_of_list_of_strings(
471
+ transformed_sentence, require_non_empty_lists=True
472
+ ):
473
+ raise ValueError(
474
+ "After applying the transformation, each reference should be a "
475
+ "non-empty list of strings, with each string being a single word."
476
+ )
477
+ else:
478
+ if not _is_list_of_list_of_strings(
479
+ transformed_sentence, require_non_empty_lists=False
480
+ ):
481
+ raise ValueError(
482
+ "After applying the transformation, each hypothesis should be a "
483
+ "list of strings, with each string being a single word."
484
+ )
485
+
486
+ return transformed_sentence
487
+
488
+
489
+ def _is_list_of_list_of_strings(x: Any, require_non_empty_lists: bool):
490
+ if not isinstance(x, list):
491
+ return False
492
+
493
+ for e in x:
494
+ if not isinstance(e, list):
495
+ return False
496
+
497
+ if require_non_empty_lists and len(e) == 0:
498
+ return False
499
+
500
+ if not all([isinstance(s, str) for s in e]):
501
+ return False
502
+
503
+ return True
504
+
505
+
506
+ def _word2char(reference: List[List[str]], hypothesis: List[List[str]]):
507
+ # tokenize each word into an integer
508
+ vocabulary = set(chain(*reference, *hypothesis))
509
+
510
+ if "" in vocabulary:
511
+ raise ValueError(
512
+ "Empty strings cannot be a word. "
513
+ "Please ensure that the given transform removes empty strings."
514
+ )
515
+
516
+ word2char = dict(zip(vocabulary, range(len(vocabulary))))
517
+
518
+ reference_chars = [
519
+ "".join([chr(word2char[w]) for w in sentence]) for sentence in reference
520
+ ]
521
+ hypothesis_chars = [
522
+ "".join([chr(word2char[w]) for w in sentence]) for sentence in hypothesis
523
+ ]
524
+
525
+ return reference_chars, hypothesis_chars
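The `_word2char` encoding is easiest to see on a tiny input. This sketch calls the private helper directly, so it is only runnable from within this module:

```python
# each distinct word is assigned one unicode codepoint, so the word-level
# alignment reduces to a plain character-level levenshtein problem
ref_chars, hyp_chars = _word2char([["the", "cat", "sat"]], [["the", "cat"]])

print(len(ref_chars[0]), len(hyp_chars[0]))  # 3 2 -- one codepoint per word
print(ref_chars[0][0] == hyp_chars[0][0])    # True: "the" maps to the same codepoint
```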
my_jiwer/my_jiwer/transformations.py ADDED
@@ -0,0 +1,128 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ """
19
+ This file is intended to provide the default transformations which need
20
+ to be applied to input text in order to compute the WER (or similar measures).
21
+
22
+ It also implements some alternative transformations which might be
23
+ useful in specific use cases.
24
+ """
25
+
26
+ import jiwer.transforms as tr
27
+
28
+ __all__ = [
29
+ "wer_default",
30
+ "wer_contiguous",
31
+ "wer_standardize",
32
+ "wer_standardize_contiguous",
33
+ "cer_default",
34
+ "cer_contiguous",
35
+ ]
36
+
37
+ ########################################################################################
38
+ # implement transformations for WER (and accompanying measures)
39
+
40
+ wer_default = tr.Compose(
41
+ [
42
+ tr.RemoveMultipleSpaces(),
43
+ tr.Strip(),
44
+ tr.ReduceToListOfListOfWords(),
45
+ ]
46
+ )
47
+ """
48
+ This is the default transformation when using `process_words`. Each input string will
49
+ have its leading and trailing white space removed.
50
+ Thereafter multiple spaces between words are also removed.
51
+ Then each string is transformed into a list with lists of strings, where each string
52
+ is a single word.
53
+ """
54
+
55
+ wer_contiguous = tr.Compose(
56
+ [
57
+ tr.RemoveMultipleSpaces(),
58
+ tr.Strip(),
59
+ tr.ReduceToSingleSentence(),
60
+ tr.ReduceToListOfListOfWords(),
61
+ ]
62
+ )
63
+ """
64
+ This can be used instead of `wer_default` when the number of reference and hypothesis
65
+ sentences differ.
66
+ """
67
+
68
+ wer_standardize = tr.Compose(
69
+ [
70
+ tr.ToLowerCase(),
71
+ tr.ExpandCommonEnglishContractions(),
72
+ tr.RemoveKaldiNonWords(),
73
+ tr.RemoveWhiteSpace(replace_by_space=True),
74
+ tr.RemoveMultipleSpaces(),
75
+ tr.Strip(),
76
+ tr.ReduceToListOfListOfWords(),
77
+ ]
78
+ )
79
+ """
80
+ This transform attempts to standardize the strings by setting all characters to lower
81
+ case, expanding common contractions, and removing non-words. Then the default operations
82
+ are applied.
83
+ """
84
+
85
+ wer_standardize_contiguous = tr.Compose(
86
+ [
87
+ tr.ToLowerCase(),
88
+ tr.ExpandCommonEnglishContractions(),
89
+ tr.RemoveKaldiNonWords(),
90
+ tr.RemoveWhiteSpace(replace_by_space=True),
91
+ tr.RemoveMultipleSpaces(),
92
+ tr.Strip(),
93
+ tr.ReduceToSingleSentence(),
94
+ tr.ReduceToListOfListOfWords(),
95
+ ]
96
+ )
97
+ """
98
+ This is the same as `wer_standardize`, but this version can be used when the number of
99
+ reference and hypothesis sentences differ.
100
+ """
101
+
102
+ ########################################################################################
103
+ # implement transformations for CER
104
+
105
+
106
+ cer_default = tr.Compose(
107
+ [
108
+ tr.Strip(),
109
+ tr.ReduceToListOfListOfChars(),
110
+ ]
111
+ )
112
+ """
113
+ This is the default transformation when using `process_characters`. Each input string
114
+ will have its leading and trailing white space removed. Then each string is
115
+ transformed into a list with lists of strings, where each string is a single character.
116
+ """
117
+
118
+ cer_contiguous = tr.Compose(
119
+ [
120
+ tr.Strip(),
121
+ tr.ReduceToSingleSentence(),
122
+ tr.ReduceToListOfListOfChars(),
123
+ ]
124
+ )
125
+ """
126
+ This can be used instead of `cer_default` when the number of reference and hypothesis
127
+ sentences differ.
128
+ """
my_jiwer/my_jiwer/transforms.py ADDED
@@ -0,0 +1,620 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ This file implements the building blocks for transforming a collection
21
+ of input strings to the desired format in order to calculate the WER or CER.
22
+
23
+ In principle, for word error rate calculations, every string of a sentence needs to be
24
+ collapsed into a list of strings, where each string is a *single* word.
25
+ This is done with [transforms.ReduceToListOfListOfWords][].
26
+ A composition of multiple transformations must therefore *always* end with
27
+ [transforms.ReduceToListOfListOfWords][].
28
+
29
+ For the character error rate, every string of a sentence also needs to be collapsed into
30
+ a list of strings, but here each string is a *single* character.
31
+ This is done with [transforms.ReduceToListOfListOfChars][]. Similarly, a
32
+ composition of multiple transformations must therefore also always end with
33
+ [transforms.ReduceToListOfListOfChars][].
34
+ """
35
+
36
+ import sys
37
+ import functools
38
+ import re
39
+ import string
40
+ import unicodedata
41
+
42
+ from typing import Union, List, Mapping
43
+
44
+
45
+ __all__ = [
46
+ "AbstractTransform",
47
+ "Compose",
48
+ "ExpandCommonEnglishContractions",
49
+ "RemoveEmptyStrings",
50
+ "ReduceToListOfListOfWords",
51
+ "ReduceToListOfListOfChars",
52
+ "ReduceToSingleSentence",
53
+ "RemoveKaldiNonWords",
54
+ "RemoveMultipleSpaces",
55
+ "RemovePunctuation",
56
+ "RemoveSpecificWords",
57
+ "RemoveWhiteSpace",
58
+ "Strip",
59
+ "SubstituteRegexes",
60
+ "SubstituteWords",
61
+ "ToLowerCase",
62
+ "ToUpperCase",
63
+ ]
64
+
65
+
66
+ class AbstractTransform(object):
67
+ """
68
+ The base class of a Transform.
69
+ """
70
+
71
+ def __call__(self, sentences: Union[str, List[str]]):
72
+ """
73
+ Transforms one or more strings.
74
+
75
+ Args:
76
+ sentences: The strings to transform.
77
+
78
+ Returns:
79
+ (Union[str, List[str]]): The transformed strings.
80
+
81
+ """
82
+ if isinstance(sentences, str):
83
+ return self.process_string(sentences)
84
+ elif isinstance(sentences, list):
85
+ return self.process_list(sentences)
86
+ else:
87
+ raise ValueError(
88
+ "input {} was expected to be a string or list of strings".format(
89
+ sentences
90
+ )
91
+ )
92
+
93
+ def process_string(self, s: str):
94
+ raise NotImplementedError()
95
+
96
+ def process_list(self, inp: List[str]):
97
+ return [self.process_string(s) for s in inp]
98
+
99
+
100
+ class Compose(object):
101
+ """
102
+ Chain multiple transformations back-to-back to create a pipeline combining multiple
103
+ transformations.
104
+
105
+ Note that each transformation needs to end with either `ReduceToListOfListOfWords`
106
+ or `ReduceToListOfListOfChars`, depending on whether word error rate,
107
+ or character error rate is desired.
108
+
109
+ Example:
110
+ ```python3
111
+ import jiwer
112
+
113
+ jiwer.Compose([
114
+ jiwer.RemoveMultipleSpaces(),
115
+ jiwer.ReduceToListOfListOfWords()
116
+ ])
117
+ ```
118
+ """
119
+
120
+ def __init__(self, transforms: List[AbstractTransform]):
121
+ """
122
+
123
+ Args:
124
+ transforms: The list of transformations to chain.
125
+ """
126
+ self.transforms = transforms
127
+
128
+ def __call__(self, text):
129
+ for tr in self.transforms:
130
+ text = tr(text)
131
+
132
+ return text
133
+
134
+
135
+ class BaseRemoveTransform(AbstractTransform):
136
+ def __init__(self, tokens_to_remove: List[str], replace_token=""):
137
+ self.tokens_to_remove = tokens_to_remove
138
+ self.replace_token = replace_token
139
+
140
+ def process_string(self, s: str):
141
+ for w in self.tokens_to_remove:
142
+ s = s.replace(w, self.replace_token)
143
+
144
+ return s
145
+
146
+ def process_list(self, inp: List[str]):
147
+ return [self.process_string(s) for s in inp]
148
+
149
+
150
+ class ReduceToListOfListOfWords(AbstractTransform):
151
+ """
152
+ Transforms a single input sentence, or a list of input sentences, into
153
+ a list with lists of words, which is the expected format for calculating the
154
+ edit operations between two input sentences on a word-level.
155
+
156
+ A sentence is assumed to be a string, where words are delimited by a token
157
+ (such as ` `, space). Each string is expected to contain only a single sentence.
158
+ Empty strings (no output) are removed from the list.
159
+
160
+ Example:
161
+ ```python
162
+ import jiwer
163
+
164
+ sentences = ["hi", "this is an example"]
165
+
166
+ print(jiwer.ReduceToListOfListOfWords()(sentences))
167
+ # prints: [['hi'], ['this', 'is', 'an', 'example']]
168
+ ```
169
+ """
170
+
171
+ def __init__(self, word_delimiter: str = " "):
172
+ """
173
+ Args:
174
+ word_delimiter: the character which delimits words. Default is ` ` (space).
175
+ """
176
+ self.word_delimiter = word_delimiter
177
+
178
+ def process_string(self, s: str):
179
+ return [[w for w in s.split(self.word_delimiter) if len(w) >= 1]]
180
+
181
+ def process_list(self, inp: List[str]):
182
+ sentence_collection = []
183
+
184
+ for sentence in inp:
185
+ list_of_words = self.process_string(sentence)[0]
186
+
187
+ sentence_collection.append(list_of_words)
188
+
189
+ if len(sentence_collection) == 0:
190
+ return [[]]
191
+
192
+ return sentence_collection
193
+
194
+
195
+ class ReduceToListOfListOfChars(AbstractTransform):
196
+ """
197
+ Transforms a single input sentence, or a list of input sentences, into
198
+ a list with lists of characters, which is the expected format for calculating the
199
+ edit operations between two input sentences on a character-level.
200
+
201
+ A sentence is assumed to be a string. Each string is expected to contain only a
202
+ single sentence.
203
+
204
+ Example:
205
+ ```python
206
+ import jiwer
207
+
208
+ sentences = ["hi", "this is an example"]
209
+
210
+ print(jiwer.ReduceToListOfListOfChars()(sentences))
211
+ # prints: [['h', 'i'], ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', 'n', ' ', 'e', 'x', 'a', 'm', 'p', 'l', 'e']]
212
+ ```
213
+ """
214
+
215
+ def process_string(self, s: str):
216
+ return [[w for w in s]]
217
+
218
+ def process_list(self, inp: List[str]):
219
+ sentence_collection = []
220
+
221
+ for sentence in inp:
222
+ list_of_words = self.process_string(sentence)[0]
223
+
224
+ sentence_collection.append(list_of_words)
225
+
226
+ if len(sentence_collection) == 0:
227
+ return [[]]
228
+
229
+ return sentence_collection
230
+
231
+
232
+ class ReduceToSingleSentence(AbstractTransform):
233
+ """
234
+ Transforms multiple sentences into a single sentence.
235
+ This operation can be useful when the number of reference and hypothesis sentences
236
+ differ, and you want to do a minimal alignment over these lists.
237
+ Note that this makes the result sensitive to sentence order: `wer([a, b], [c, d])`
238
+ might not be equal to `wer([b, a], [d, c])`.
239
+
240
+ Example:
241
+ ```python3
242
+ import jiwer
243
+
244
+ sentences = ["hi", "this is an example"]
245
+
246
+ print(jiwer.ReduceToSingleSentence()(sentences))
247
+ # prints: ['hi this is an example']
248
+ ```
249
+ """
250
+
251
+ def __init__(self, word_delimiter: str = " "):
252
+ """
253
+ Args:
+ word_delimiter: the character which delimits words. Default is ` ` (space).
254
+ """
255
+ self.word_delimiter = word_delimiter
256
+
257
+ def process_string(self, s: str):
258
+ return s
259
+
260
+ def process_list(self, inp: List[str]):
261
+ filtered_inp = [i for i in inp if len(i) >= 1]
262
+
263
+ if len(filtered_inp) == 0:
264
+ return []
265
+ else:
266
+ return ["{}".format(self.word_delimiter).join(filtered_inp)]
267
+
268
+
269
+ class SubstituteRegexes(AbstractTransform):
270
+ """
271
+ Transform strings by substituting substrings matching regex expressions into
272
+ another substring.
273
+
274
+ Example:
275
+ ```python
276
+ import jiwer
277
+
278
+ sentences = ["is the world doomed or loved?", "edibles are allegedly cultivated"]
279
+
280
+ # note: the regex string "\b(\w+)ed\b" matches every word ending in 'ed',
281
+ # and "\1" stands for the first group (\w+). It therefore removes 'ed' in every match.
282
+ print(jiwer.SubstituteRegexes({r"doom": r"sacr", r"\b(\w+)ed\b": r"\1"})(sentences))
283
+
284
+ # prints: ["is the world sacr or lov?", "edibles are allegedly cultivat"]
285
+ ```
286
+ """
287
+
288
+ def __init__(self, substitutions: Mapping[str, str]):
289
+ """
290
+
291
+ Args:
292
+ substitutions: a mapping of regex expressions to replacement strings.
293
+ """
294
+ self.substitutions = substitutions
295
+
296
+ def process_string(self, s: str):
297
+ for key, value in self.substitutions.items():
298
+ s = re.sub(key, value, s)
299
+
300
+ return s
301
+
302
+
303
+ class SubstituteWords(AbstractTransform):
304
+ """
305
+ This transform can be used to replace a word into another word.
306
+ Note that the whole word is matched. If the word you're attempting to substitute
307
+ is a substring of another word it will not be affected.
308
+ For example, if you're substituting `foo` into `bar`, the word `foobar` will NOT
309
+ be substituted into `barbar`.
310
+
311
+ Example:
312
+ ```python
313
+ import jiwer
314
+
315
+ sentences = ["you're pretty", "your book", "foobar"]
316
+
317
+ print(jiwer.SubstituteWords({"pretty": "awesome", "you": "i", "'re": " am", 'foo': 'bar'})(sentences))
318
+
319
+ # prints: ["i am awesome", "your book", "foobar"]
320
+ ```
321
+
322
+ """
323
+
324
+ def __init__(self, substitutions: Mapping[str, str]):
325
+ """
326
+ Args:
327
+ substitutions: A mapping of words to replacement words.
328
+ """
329
+ self.substitutions = substitutions
330
+
331
+ def process_string(self, s: str):
332
+ for key, value in self.substitutions.items():
333
+ s = re.sub(r"\b{}\b".format(re.escape(key)), value, s)
334
+
335
+ return s
336
+
337
+
338
+ class RemoveSpecificWords(SubstituteWords):
339
+ """
340
+ Can be used to filter out certain words.
341
+ As words are replaced with a ` ` character, make sure that
342
+ `RemoveMultipleSpaces`, `Strip()` and `RemoveEmptyStrings` are present
343
+ in the composition _after_ `RemoveSpecificWords`.
344
+
345
+ Example:
346
+ ```python
347
+ import jiwer
348
+
349
+ sentences = ["yhe awesome", "the apple is not a pear", "yhe"]
350
+
351
+ print(jiwer.RemoveSpecificWords(["yhe", "the", "a"])(sentences))
352
+ # prints: [' awesome', ' apple is not pear', ' ']
353
+ # note the extra spaces
354
+ ```
355
+ """
356
+
357
+ def __init__(self, words_to_remove: List[str]):
358
+ """
359
+ Args:
360
+ words_to_remove: List of words to remove.
361
+ """
362
+ mapping = {word: " " for word in words_to_remove}
363
+
364
+ super().__init__(mapping)
365
+
366
+
367
+ class RemoveWhiteSpace(BaseRemoveTransform):
368
+ """
369
+ This transform filters out white space characters.
370
+ Note that by default space (` `) is also removed, which will make it impossible to
371
+ split a sentence into a list of words by using `ReduceToListOfListOfWords` or
372
+ `ReduceToSingleSentence`.
373
+ This can be prevented by replacing all whitespace with the space character.
374
+ If so, make sure that `jiwer.RemoveMultipleSpaces`,
375
+ `Strip()` and `RemoveEmptyStrings` are present in the composition _after_
376
+ `RemoveWhiteSpace`.
377
+
378
+ Example:
379
+ ```python
380
+ import jiwer
381
+
382
+ sentences = ["this is an example", "hello world\t"]
383
+
384
+ print(jiwer.RemoveWhiteSpace()(sentences))
385
+ # prints: ["thisisanexample", "helloworld"]
386
+
387
+ print(jiwer.RemoveWhiteSpace(replace_by_space=True)(sentences))
388
+ # prints: ["this is an example", "hello world "]
389
+ # note the trailing spaces
390
+ ```
391
+ """
392
+
393
+ def __init__(self, replace_by_space: bool = False):
394
+ """
395
+
396
+ Args:
397
+ replace_by_space: every white space character is replaced with a space (` `)
398
+ """
399
+ characters = [c for c in string.whitespace]
400
+
401
+ if replace_by_space:
402
+ replace_token = " "
403
+ else:
404
+ replace_token = ""
405
+
406
+ super().__init__(characters, replace_token=replace_token)
407
+
408
+
409
+ @functools.lru_cache(1)
410
+ def _get_punctuation_characters():
411
+ """Compute the punctuation characters only once and memoize."""
412
+ codepoints = range(sys.maxunicode + 1)
413
+ punctuation = set(
414
+ chr(i) for i in codepoints if unicodedata.category(chr(i)).startswith("P")
415
+ )
416
+ return punctuation
417
+
418
+
419
+ class RemovePunctuation(BaseRemoveTransform):
420
+ """
421
+ This transform filters out punctuation. The punctuation characters are defined as
422
+ all unicode characters whose category name starts with `P`.
423
+ See [here](https://www.unicode.org/reports/tr44/#General_Category_Values) for more
424
+ information.
425
+ Example:
426
+ ```python
427
+ import jiwer
428
+
429
+ sentences = ["this is an example!", "hello. goodbye"]
430
+
431
+ print(jiwer.RemovePunctuation()(sentences))
432
+ # prints: ['this is an example', "hello goodbye"]
433
+ ```
434
+ """
435
+
436
+ def __init__(self):
437
+ punctuation_characters = _get_punctuation_characters()
438
+ super().__init__(punctuation_characters)
439
+
440
+
441
+ class RemoveMultipleSpaces(AbstractTransform):
442
+ """
443
+ Filter out multiple spaces between words.
444
+
445
+ Example:
446
+ ```python
447
+ import jiwer
448
+
449
+ sentences = ["this is an example ", " hello goodbye ", " "]
450
+
451
+ print(jiwer.RemoveMultipleSpaces()(sentences))
452
+ # prints: ['this is an example ', " hello goodbye ", " "]
453
+ # note that there are still trailing spaces
454
+ ```
455
+
456
+ """
457
+
458
+ def process_string(self, s: str):
459
+ return re.sub(r"\s\s+", " ", s)
460
+
461
+ def process_list(self, inp: List[str]):
462
+ return [self.process_string(s) for s in inp]
463
+
464
+
465
+ class Strip(AbstractTransform):
466
+ """
467
+ Removes all leading and trailing whitespace.
468
+
469
+ Example:
470
+ ```python
471
+ import jiwer
472
+
473
+ sentences = [" this is an example ", " hello goodbye ", " "]
474
+
475
+ print(jiwer.Strip()(sentences))
476
+ # prints: ['this is an example', "hello goodbye", ""]
477
+ # note that there is an empty string left behind which might need to be cleaned up
478
+ ```
479
+ """
480
+
481
+ def process_string(self, s: str):
482
+ return s.strip()
483
+
484
+
485
+ class RemoveEmptyStrings(AbstractTransform):
486
+ """
487
+ Remove empty strings from a list of strings.
488
+
489
+ Example:
490
+ ```python
491
+ import jiwer
492
+
493
+ sentences = ["", "this is an example", " ", " "]
494
+
495
+ print(jiwer.RemoveEmptyStrings()(sentences))
496
+ # prints: ['this is an example']
497
+ ```
498
+ """
499
+
500
+ def process_string(self, s: str):
501
+ return s.strip()
502
+
503
+ def process_list(self, inp: List[str]):
504
+ return [s for s in inp if self.process_string(s) != ""]
505
+
506
+
507
+ class ExpandCommonEnglishContractions(AbstractTransform):
508
+ """
509
+ Replace common contractions such as `let's` to `let us`.
510
+
511
+ Currently, this method will perform the following replacements. Note that `␣` is
512
+ used to indicate a space (` `) to get around markdown rendering constraints.
513
+
514
+ | Contraction | transformed into |
515
+ | ------------- |:----------------:|
516
+ | `won't` | `will not` |
517
+ | `can't` | `can not` |
518
+ | `let's` | `let us` |
519
+ | `n't` | `␣not` |
520
+ | `'re` | `␣are` |
521
+ | `'s` | `␣is` |
522
+ | `'d` | `␣would` |
523
+ | `'ll` | `␣will` |
524
+ | `'t` | `␣not` |
525
+ | `'ve` | `␣have` |
526
+ | `'m` | `␣am` |
527
+
528
+ Example:
529
+ ```python
530
+ import jiwer
531
+
532
+ sentences = ["she'll make sure you can't make it", "let's party!"]
533
+
534
+ print(jiwer.ExpandCommonEnglishContractions()(sentences))
535
+ # prints: ["she will make sure you can not make it", "let us party!"]
536
+ ```
537
+
538
+ """
539
+
540
+ def process_string(self, s: str):
541
+ # definitely a non-exhaustive list
542
+
543
+ # specific words
544
+ s = re.sub(r"won't", "will not", s)
545
+ s = re.sub(r"can\'t", "can not", s)
546
+ s = re.sub(r"let\'s", "let us", s)
547
+
548
+ # general attachments
549
+ s = re.sub(r"n\'t", " not", s)
550
+ s = re.sub(r"\'re", " are", s)
551
+ s = re.sub(r"\'s", " is", s)
552
+ s = re.sub(r"\'d", " would", s)
553
+ s = re.sub(r"\'ll", " will", s)
554
+ s = re.sub(r"\'t", " not", s)
555
+ s = re.sub(r"\'ve", " have", s)
556
+ s = re.sub(r"\'m", " am", s)
557
+
558
+ return s
559
+
560
+
561
+ class ToLowerCase(AbstractTransform):
562
+ """
563
+ Convert every character into lowercase.
564
+ Example:
565
+ ```python
566
+ import jiwer
567
+
568
+ sentences = ["You're PRETTY"]
569
+
570
+ print(jiwer.ToLowerCase()(sentences))
571
+
572
+ # prints: ["you're pretty"]
573
+ ```
574
+ """
575
+
576
+ def process_string(self, s: str):
577
+ return s.lower()
578
+
579
+
580
+ class ToUpperCase(AbstractTransform):
581
+ """
582
+ Convert every character to uppercase.
583
+
584
+ Example:
585
+ ```python
586
+ import jiwer
587
+
588
+ sentences = ["You're amazing"]
589
+
590
+ print(jiwer.ToUpperCase()(sentences))
591
+
592
+ # prints: ["YOU'RE AMAZING"]
593
+ ```
594
+ """
595
+
596
+ def process_string(self, s: str):
597
+ return s.upper()
598
+
599
+
600
+ class RemoveKaldiNonWords(AbstractTransform):
601
+ """
602
+ Remove any word between `[]` and `<>`. This can be useful when working
603
+ with hypotheses from the Kaldi project, which can output non-words such as
604
+ `[laugh]` and `<unk>`.
605
+
606
+ Example:
607
+ ```python
608
+ import jiwer
609
+
610
+ sentences = ["you <unk> like [laugh]"]
611
+
612
+ print(jiwer.RemoveKaldiNonWords()(sentences))
613
+
614
+ # prints: ["you like "]
615
+ # note the extra spaces
616
+ ```
617
+ """
618
+
619
+ def process_string(self, s: str):
620
+ return re.sub(r"[<\[][^>\]]*[>\]]", "", s)
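Putting the ordering advice from the docstrings together, a custom pipeline might look as follows (a sketch; it assumes these classes are re-exported at package level, as the docstring examples do):

```python
import jiwer

transform = jiwer.Compose(
    [
        jiwer.ToLowerCase(),
        jiwer.RemovePunctuation(),
        jiwer.RemoveSpecificWords(["uh", "um"]),
        # cleanup steps go *after* the word-removing steps...
        jiwer.RemoveMultipleSpaces(),
        jiwer.Strip(),
        # ...and the composition ends with ReduceToListOfListOfWords
        jiwer.ReduceToListOfListOfWords(),
    ]
)

print(transform(["Um, Hello World!"]))  # [['hello', 'world']]
```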
my_jiwer/pyproject.toml ADDED
@@ -0,0 +1,27 @@
1
+ [tool.poetry]
2
+ name = "jiwer"
3
+ version = "3.0.2"
4
+ description = "Evaluate your speech-to-text system with similarity measures such as word error rate (WER)"
5
+ authors = ["Nik Vaessen <nikvaes@gmail.com>"]
6
+ readme = "README.md"
7
+ license = "Apache-2.0"
8
+ repository = "https://github.com/jitsi/jiwer"
9
+ include = ["LICENCE"]
10
+
11
+ [tool.poetry.dependencies]
12
+ python = "^3.7"
13
+ rapidfuzz = "2.13.7"
14
+ click = "^8.1.3"
15
+
16
+ [tool.poetry.group.dev.dependencies]
17
+ black = "^22.8.0"
18
+ pytest = "7.1.3"
19
+ pytest-benchmark = "^3.4.1"
20
+ flake8 = "5.0.4"
21
+
22
+ [tool.poetry.scripts]
23
+ jiwer = "jiwer.cli:cli"
24
+
25
+ [build-system]
26
+ requires = ["poetry-core>=1.0.0"]
27
+ build-backend = "poetry.core.masonry.api"
my_jiwer/tests/__init__.py ADDED
File without changes
my_jiwer/tests/test_alignment.py ADDED
@@ -0,0 +1,225 @@
1
+ import unittest
2
+ import jiwer
3
+
4
+
5
+ class TestAlignmentVisualizationWords(unittest.TestCase):
6
+ def test_insertion(self):
7
+ correct_alignment = (
8
+ "sentence 1\n"
9
+ "REF: this is a ****\n"
10
+ "HYP: this is a test\n"
11
+ " I\n"
12
+ )
13
+ alignment = jiwer.visualize_alignment(
14
+ jiwer.process_words("this is a", "this is a test"), show_measures=False
15
+ )
16
+ self.assertEqual(alignment, correct_alignment)
17
+
18
+ pass
19
+
20
+ def test_deletion(self):
21
+ correct_alignment = (
22
+ "sentence 1\n"
23
+ "REF: this is a test\n"
24
+ "HYP: this is a ****\n"
25
+ " D\n"
26
+ )
27
+ alignment = jiwer.visualize_alignment(
28
+ jiwer.process_words("this is a test", "this is a"), show_measures=False
29
+ )
30
+ self.assertEqual(alignment, correct_alignment)
31
+
32
+ def test_substitution(self):
33
+ correct_alignment = (
34
+ "sentence 1\n"
35
+ "REF: this is a test\n"
36
+ "HYP: this was a test\n"
37
+ " S \n"
38
+ )
39
+ alignment = jiwer.visualize_alignment(
40
+ jiwer.process_words("this is a test", "this was a test"),
41
+ show_measures=False,
42
+ )
43
+ self.assertEqual(alignment, correct_alignment)
44
+
45
+ def test_all_three(self):
46
+ correct_alignment = (
47
+ "sentence 1\n"
48
+ "REF: this is a ***** test of skill\n"
49
+ "HYP: this was a messy test ** *****\n"
50
+ " S I D D\n"
51
+ )
52
+ alignment = jiwer.visualize_alignment(
53
+ jiwer.process_words("this is a test of skill", "this was a messy test"),
54
+ show_measures=False,
55
+ )
56
+ self.assertEqual(alignment, correct_alignment)
57
+
58
+ def test_show_measures(self):
59
+ correct_alignment = (
60
+ "sentence 1\n"
61
+ "REF: this test will have a high word error rate\n"
62
+ "HYP: no it will not * **** **** ***** ****\n"
63
+ " S S S D D D D D\n"
64
+ "\n"
65
+ "number of sentences: 1\n"
66
+ "substitutions=3 deletions=5 insertions=0 hits=1\n"
67
+ "\n"
68
+ "mer=88.89%\n"
69
+ "wil=97.22%\n"
70
+ "wip=2.78%\n"
71
+ "wer=88.89%\n"
72
+ )
73
+ alignment = jiwer.visualize_alignment(
74
+ jiwer.process_words(
75
+ "this test will have a high word error rate", "no it will not"
76
+ ),
77
+ show_measures=True,
78
+ )
79
+ self.assertEqual(alignment, correct_alignment)
80
+
81
+ def test_empty_hypothesis(self):
82
+ correct_alignment = "sentence 1\n" "REF: empty\n" "HYP: *****\n" " D\n"
83
+ alignment = jiwer.visualize_alignment(
84
+ jiwer.process_words("empty", ""), show_measures=False
85
+ )
86
+ self.assertEqual(alignment, correct_alignment)
87
+
88
+ def test_multiple_sentences(self):
89
+ correct_alignment = (
90
+ "sentence 1\n"
91
+ "REF: one\n"
92
+ "HYP: 1\n"
93
+ " S\n"
94
+ "\n"
95
+ "sentence 2\n"
96
+ "REF: two\n"
97
+ "HYP: 2\n"
98
+ " S\n"
99
+ )
100
+ alignment = jiwer.visualize_alignment(
101
+ jiwer.process_words(["one", "two"], ["1", "2"]),
102
+ show_measures=False,
103
+ )
104
+ self.assertEqual(alignment, correct_alignment)
105
+
106
+ def test_skip_correct(self):
107
+ correct_alignment = (
108
+ "sentence 2\n"
109
+ "REF: one\n"
110
+ "HYP: 1\n"
111
+ " S\n"
112
+ "\n"
113
+ "sentence 3\n"
114
+ "REF: two\n"
115
+ "HYP: 2\n"
116
+ " S\n"
117
+ )
118
+ alignment = jiwer.visualize_alignment(
119
+ jiwer.process_words(
120
+ ["perfect", "one", "two", "three"], ["perfect", "1", "2", "three"]
121
+ ),
122
+ show_measures=False,
123
+ )
124
+ self.assertEqual(alignment, correct_alignment)
125
+
126
+
127
+ class TestAlignmentVisualizationCharacters(unittest.TestCase):
128
+ def test_insertion(self):
129
+ correct_alignment = (
130
+ "sentence 1\n"
131
+ "REF: this is a*****\n"
132
+ "HYP: this is a test\n"
133
+ " IIIII\n"
134
+ )
135
+ alignment = jiwer.visualize_alignment(
136
+ jiwer.process_characters("this is a", "this is a test"), show_measures=False
137
+ )
138
+ self.assertEqual(alignment, correct_alignment)
139
+
140
+ pass
141
+
142
+ def test_deletion(self):
143
+ correct_alignment = (
144
+ "sentence 1\n"
145
+ "REF: this is a test\n"
146
+ "HYP: this is a*****\n"
147
+ " DDDDD\n"
148
+ )
149
+ alignment = jiwer.visualize_alignment(
150
+ jiwer.process_characters("this is a test", "this is a"), show_measures=False
151
+ )
152
+ self.assertEqual(alignment, correct_alignment)
153
+
154
+ def test_substitution(self):
155
+ correct_alignment = (
156
+ "sentence 1\n"
157
+ "REF: this is a test\n"
158
+ "HYP: this iz a test\n"
159
+ " S \n"
160
+ )
161
+ alignment = jiwer.visualize_alignment(
162
+ jiwer.process_characters("this is a test", "this iz a test"),
163
+ show_measures=False,
164
+ )
165
+ self.assertEqual(alignment, correct_alignment)
166
+
167
+ def test_all_three(self):
168
+ correct_alignment = (
169
+ "sentence 1\n"
170
+ "REF: this *is a tes*t of skill\n"
171
+ "HYP: this was a messy te*st***\n"
172
+ " IS S IS SSD SDDD\n"
173
+ )
174
+ alignment = jiwer.visualize_alignment(
175
+ jiwer.process_characters(
176
+ "this is a test of skill", "this was a messy test"
177
+ ),
178
+ show_measures=False,
179
+ )
180
+ self.assertEqual(alignment, correct_alignment)
181
+
182
+ def test_show_measures(self):
183
+ correct_alignment = (
184
+ "sentence 1\n"
185
+ "REF: this test will have a high word error rate\n"
186
+ "HYP: no** i**t will n*************o***********t*\n"
187
+ " SSDD SDD SDDDDDDDDDDDDD DDDDDDDDDDD D\n"
188
+ "\n"
189
+ "number of sentences: 1\n"
190
+ "substitutions=4 deletions=29 insertions=0 hits=10\n"
191
+ "\n"
192
+ "cer=76.74%\n"
193
+ )
194
+ alignment = jiwer.visualize_alignment(
195
+ jiwer.process_characters(
196
+ "this test will have a high word error rate", "no it will not"
197
+ ),
198
+ show_measures=True,
199
+ )
200
+ self.assertEqual(alignment, correct_alignment)
201
+
202
+ def test_empty_hypothesis(self):
203
+ correct_alignment = "sentence 1\n" "REF: empty\n" "HYP: *****\n" " DDDDD\n"
204
+ alignment = jiwer.visualize_alignment(
205
+ jiwer.process_characters("empty", ""), show_measures=False
206
+ )
207
+ self.assertEqual(alignment, correct_alignment)
208
+
209
+ def test_multiple_sentences(self):
210
+ correct_alignment = (
211
+ "sentence 1\n"
212
+ "REF: one\n"
213
+ "HYP: 1**\n"
214
+ " SDD\n"
215
+ "\n"
216
+ "sentence 2\n"
217
+ "REF: two\n"
218
+ "HYP: 2**\n"
219
+ " SDD\n"
220
+ )
221
+ alignment = jiwer.visualize_alignment(
222
+ jiwer.process_characters(["one", "two"], ["1", "2"]),
223
+ show_measures=False,
224
+ )
225
+ self.assertEqual(alignment, correct_alignment)
my_jiwer/tests/test_cer.py ADDED
@@ -0,0 +1,135 @@
1
+ import unittest
2
+ import pytest
3
+
4
+ import jiwer
5
+
6
+ from .test_measures import assert_dict_almost_equal
7
+
8
+
9
+ class TestCERInputMethods(unittest.TestCase):
10
+ def test_input_ref_string_hyp_string(self):
11
+ cases = [
12
+ ("This is a test", "This is a test", 0 / 14),
13
+ ("This is a test", "", 14 / 14),
14
+ ("This is a test", "This test", 5 / 14),
15
+ ]
16
+
17
+ self._apply_test_on(cases)
18
+
19
+ def test_input_ref_string_hyp_list(self):
20
+ cases = [
21
+ ("This is a test", ["This is a test"], 0 / 14),
22
+ ("This is a test", [""], 14 / 14),
23
+ ("This is a test", ["This test"], 5 / 14),
24
+ ]
25
+
26
+ self._apply_test_on(cases)
27
+
28
+ def test_input_ref_list_hyp_string(self):
29
+ cases = [
30
+ (["This is a test"], "This is a test", 0 / 14),
31
+ (["This is a test"], "", 14 / 14),
32
+ (["This is a test"], "This test", 5 / 14),
33
+ ]
34
+
35
+ self._apply_test_on(cases)
36
+
37
+ def test_input_ref_list_hyp_list(self):
38
+ cases = [
39
+ (["This is a test"], ["This is a test"], 0 / 14),
40
+ (["This is a test"], [""], 14 / 14),
41
+ (["This is a test"], ["This test"], 5 / 14),
42
+ ]
43
+
44
+ self._apply_test_on(cases)
45
+
46
+ def test_fail_on_different_sentence_length(self):
47
+ def callback():
48
+ jiwer.cer(["hello", "this", "sentence", "is fractured"], ["this sentence"])
49
+
50
+ self.assertRaises(ValueError, callback)
51
+
52
+ def test_fail_on_empty_reference(self):
53
+ def callback():
54
+ jiwer.cer("", "test")
55
+
56
+ self.assertRaises(ValueError, callback)
57
+
58
+ def test_known_values(self):
59
+ # Taken from the "From WER and RIL to MER and WIL" paper, for link see README.md
60
+ cases = [
61
+ (
62
+ "X",
63
+ "X",
64
+ 0,
65
+ ),
66
+ (
67
+ "X",
68
+ "X X Y Y",
69
+ 6,
70
+ ),
71
+ (
72
+ "X Y X",
73
+ "X Z",
74
+ 3 / 5,
75
+ ),
76
+ (
77
+ "X",
78
+ "Y",
79
+ 1,
80
+ ),
81
+ (
82
+ "X",
83
+ "Y Z",
84
+ 3,
85
+ ),
86
+ ]
87
+
88
+ self._apply_test_on(cases)
89
+
90
+ def test_permutations_invariance(self):
91
+ cases = [
92
+ (
93
+ ["i", "am i good"],
94
+ ["i am", "i good"],
95
+ 0.6,
96
+ ),
97
+ (
98
+ ["am i good", "i"],
99
+ [
100
+ "i good",
101
+ "i am",
102
+ ],
103
+ 0.6,
104
+ ),
105
+ ]
106
+
107
+ self._apply_test_on(cases)
108
+
109
+ def test_return_dict(self):
110
+ # TODO: remove unit test once deprecated
111
+ with pytest.deprecated_call():
112
+ return_dict = jiwer.cer(
113
+ ["i", "am i good"], ["i am", "y good"], return_dict=True
114
+ )
115
+
116
+ assert_dict_almost_equal(
117
+ self,
118
+ return_dict,
119
+ {
120
+ "cer": 0.7,
121
+ "hits": 6,
122
+ "substitutions": 1,
123
+ "deletions": 3,
124
+ "insertions": 3,
125
+ },
126
+ delta=1e-16,
127
+ )
128
+
129
+ def _apply_test_on(self, cases):
130
+ for ref, hyp, correct_cer in cases:
131
+ cer = jiwer.cer(reference=ref, hypothesis=hyp)
132
+
133
+ self.assertTrue(isinstance(cer, float))
134
+ if isinstance(cer, float):
135
+ self.assertAlmostEqual(cer, correct_cer, delta=1e-16)
my_jiwer/tests/test_measures.py ADDED
@@ -0,0 +1,410 @@
1
+ import unittest
2
+
3
+ import pytest
4
+
5
+ import jiwer
6
+
7
+
8
+ def all_m(wer, mer, wil):
9
+ return {
10
+ "wer": wer,
11
+ "mer": mer,
12
+ "wip": 1 - wil,
13
+ "wil": wil,
14
+ }
15
+
16
+
17
+ def to_measure_dict(x: jiwer.WordOutput):
18
+ return {"wer": x.wer, "mer": x.mer, "wip": x.wip, "wil": x.wil}
19
+
20
+
21
+ def assert_dict_almost_equal(
22
+ test_case: unittest.TestCase, a, b, places=None, msg=None, delta=None
23
+ ):
24
+ test_case.assertIsInstance(a, dict)
25
+ test_case.assertIsInstance(b, dict)
26
+ test_case.assertEqual(set(a.keys()), set(b.keys()))
27
+
28
+ for k in a.keys():
29
+ test_case.assertAlmostEqual(a[k], b[k], places=places, msg=msg, delta=delta)
30
+
31
+
32
+ class TestMeasuresContiguousSentencesTransform(unittest.TestCase):
33
+ def test_input_ref_string_hyp_string(self):
34
+ cases = [
35
+ ("This is a test", "This is a test", all_m(0, 0, 0)),
36
+ ("This is a test", "", all_m(1, 1, 1)),
37
+ ("This is a test", "This test", all_m(0.5, 0.5, 0.5)),
38
+ ]
39
+
40
+ self._apply_test_on(cases)
41
+
42
+ def test_input_ref_string_hyp_list(self):
43
+ cases = [
44
+ ("This is a test", ["This is a test"], all_m(0, 0, 0)),
45
+ ("This is a test", [""], all_m(1, 1, 1)),
46
+ ("This is a test", ["This test"], all_m(0.5, 0.5, 0.5)),
47
+ ]
48
+
49
+ self._apply_test_on(cases)
50
+
51
+ def test_input_ref_list_hyp_string(self):
52
+ cases = [
53
+ (["This is a test"], "This is a test", all_m(0, 0, 0)),
54
+ (["This is a test"], "", all_m(1, 1, 1)),
55
+ (["This is a test"], "This test", all_m(0.5, 0.5, 0.5)),
56
+ ]
57
+
58
+ self._apply_test_on(cases)
59
+
60
+ def test_input_ref_list_hyp_list(self):
61
+ cases = [
62
+ (["This is a test"], ["This is a test"], all_m(0, 0, 0)),
63
+ (["This is a test"], [""], all_m(1, 1, 1)),
64
+ (["This is a test"], ["This test"], all_m(0.5, 0.5, 0.5)),
65
+ ]
66
+
67
+ self._apply_test_on(cases)
68
+
69
+ def test_different_sentence_length_equal_type(self):
70
+ cases = [
71
+ (
72
+ ["hello", "this", "sentence", "is fractured"],
73
+ ["this sentence"],
74
+ all_m(0.6, 0.6, 0.6),
75
+ ),
76
+ (
77
+ "i am a short ground truth",
78
+ "i am a considerably longer and very much incorrect hypothesis",
79
+ all_m(7 / 6, 0.7, 0.85),
80
+ ),
81
+ ]
82
+
83
+ self._apply_test_on(cases)
84
+
85
+ def test_different_sentence_length_unequal_type(self):
86
+ reference = [
87
+ "i like monthy python",
88
+ "what do you mean african or european swallow",
89
+ ]
90
+ hypothesis = ["i like", "python", "what you mean", "or swallow"]
91
+ x = jiwer.process_words(
92
+ reference,
93
+ hypothesis,
94
+ reference_transform=jiwer.transformations.wer_contiguous,
95
+ hypothesis_transform=jiwer.transformations.wer_contiguous,
96
+ )
97
+ x_dict = to_measure_dict(x)
98
+
99
+ # is equivalent to
100
+
101
+ reference = "i like monthy python what do you mean african or european swallow"
102
+ hypothesis = "i like python what you mean or swallow"
103
+ y = jiwer.process_words(
104
+ reference,
105
+ hypothesis,
106
+ reference_transform=jiwer.transformations.wer_contiguous,
107
+ hypothesis_transform=jiwer.transformations.wer_contiguous,
108
+ )
109
+ y_dict = to_measure_dict(y)
110
+
111
+ assert_dict_almost_equal(self, x_dict, y_dict, delta=1e-9)
112
+
113
+ def test_fail_on_empty_reference(self):
114
+ for method in [
115
+ jiwer.wer,
116
+ jiwer.wil,
117
+ jiwer.wip,
118
+ jiwer.mer,
119
+ jiwer.compute_measures,
120
+ ]:
121
+
122
+ def callback():
123
+ method("", "test")
124
+
125
+ self.assertRaises(ValueError, callback)
126
+
127
+ def test_known_values(self):
128
+ # Taken from the "From WER and RIL to MER and WIL" paper, for link see README.md
129
+ cases = [
130
+ (
131
+ "X",
132
+ "X",
133
+ all_m(0, 0, 0),
134
+ ),
135
+ (
136
+ "X",
137
+ "X X Y Y",
138
+ all_m(3, 0.75, 0.75),
139
+ ),
140
+ (
141
+ "X Y X",
142
+ "X Z",
143
+ all_m(2 / 3, 2 / 3, 5 / 6),
144
+ ),
145
+ (
146
+ "X",
147
+ "Y",
148
+ all_m(1, 1, 1),
149
+ ),
150
+ (
151
+ "X",
152
+ "Y Z",
153
+ all_m(2, 1, 1),
154
+ ),
155
+ ]
156
+
157
+ self._apply_test_on(cases)
158
+
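+ # with the contiguous transform the measures are *not* invariant under
+ # reordering of the sentences (contrast with test_permutations_invariance
+ # in TestMeasuresDefaultTransform below)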
159
+ def test_permutations_variance(self):
160
+ cases = [
161
+ (
162
+ ["i", "am i good"],
163
+ ["i am", "i good"],
164
+ all_m(0.0, 0.0, 0),
165
+ ),
166
+ (
167
+ ["am i good", "i"],
168
+ [
169
+ "i good",
170
+ "i am",
171
+ ],
172
+ all_m(0.5, 0.4, 7 / 16),
173
+ ),
174
+ ]
175
+
176
+ self._apply_test_on(cases)
177
+
178
+ def _apply_test_on(self, cases):
179
+ for ref, hyp, correct_measures in cases:
180
+ output = jiwer.process_words(
181
+ reference=ref,
182
+ hypothesis=hyp,
183
+ reference_transform=jiwer.transformations.wer_contiguous,
184
+ hypothesis_transform=jiwer.transformations.wer_contiguous,
185
+ )
186
+ output_dict = to_measure_dict(output)
187
+
188
+ assert_dict_almost_equal(self, output_dict, correct_measures, delta=1e-16)
189
+
190
+
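+ # the same checks with jiwer's default transform, which aligns the sentences
+ # pairwise and therefore requires equally long reference and hypothesis lists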
191
+ class TestMeasuresDefaultTransform(unittest.TestCase):
192
+ def test_input_ref_string_hyp_string(self):
193
+ cases = [
194
+ ("This is a test", "This is a test", all_m(0, 0, 0)),
195
+ ("This is a test", "", all_m(1, 1, 1)),
196
+ ("This is a test", "This test", all_m(0.5, 0.5, 0.5)),
197
+ ]
198
+
199
+ self._apply_test_on(cases)
200
+
201
+ def test_input_ref_string_hyp_list(self):
202
+ cases = [
203
+ ("This is a test", ["This is a test"], all_m(0, 0, 0)),
204
+ ("This is a test", [""], all_m(1, 1, 1)),
205
+ ("This is a test", ["This test"], all_m(0.5, 0.5, 0.5)),
206
+ ]
207
+
208
+ self._apply_test_on(cases)
209
+
210
+ def test_input_ref_list_hyp_string(self):
211
+ cases = [
212
+ (["This is a test"], "This is a test", all_m(0, 0, 0)),
213
+ (["This is a test"], "", all_m(1, 1, 1)),
214
+ (["This is a test"], "This test", all_m(0.5, 0.5, 0.5)),
215
+ ]
216
+
217
+ self._apply_test_on(cases)
218
+
219
+ def test_input_ref_list_hyp_list(self):
220
+ cases = [
221
+ (["This is a test"], ["This is a test"], all_m(0, 0, 0)),
222
+ (["This is a test"], [""], all_m(1, 1, 1)),
223
+ (["This is a test"], ["This test"], all_m(0.5, 0.5, 0.5)),
224
+ ]
225
+
226
+ self._apply_test_on(cases)
227
+
228
+ def test_fail_on_different_sentence_length(self):
229
+ for method in [
230
+ jiwer.process_words,
231
+ jiwer.wer,
232
+ jiwer.wil,
233
+ jiwer.wip,
234
+ jiwer.mer,
235
+ jiwer.compute_measures,
236
+ ]:
237
+
238
+ def callback():
239
+ method(["hello", "this", "sentence", "is fractured"], ["this sentence"])
240
+
241
+ self.assertRaises(ValueError, callback)
242
+
243
+ def test_fail_on_empty_reference(self):
244
+ for method in [
245
+ jiwer.process_words,
246
+ jiwer.wer,
247
+ jiwer.wil,
248
+ jiwer.wip,
249
+ jiwer.mer,
250
+ jiwer.compute_measures,
251
+ ]:
252
+
253
+ def callback():
254
+ method("", "test")
255
+
256
+ self.assertRaises(ValueError, callback)
257
+
258
+ def test_known_values(self):
259
+ # Taken from the "From WER and RIL to MER and WIL" paper, for link see README.md
260
+ cases = [
261
+ (
262
+ "X",
263
+ "X",
264
+ all_m(0, 0, 0),
265
+ ),
266
+ (
267
+ "X",
268
+ "X X Y Y",
269
+ all_m(3, 0.75, 0.75),
270
+ ),
271
+ (
272
+ "X Y X",
273
+ "X Z",
274
+ all_m(2 / 3, 2 / 3, 5 / 6),
275
+ ),
276
+ (
277
+ "X",
278
+ "Y",
279
+ all_m(1, 1, 1),
280
+ ),
281
+ (
282
+ "X",
283
+ "Y Z",
284
+ all_m(2, 1, 1),
285
+ ),
286
+ ]
287
+
288
+ self._apply_test_on(cases)
289
+
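+ # with the default transform, swapping the order of the (reference,
+ # hypothesis) pairs leaves all measures unchanged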
290
+ def test_permutations_invariance(self):
291
+ cases = [
292
+ (
293
+ ["i", "am i good"],
294
+ ["i am", "i good"],
295
+ all_m(0.5, 0.4, 7 / 16),
296
+ ),
297
+ (
298
+ ["am i good", "i"],
299
+ [
300
+ "i good",
301
+ "i am",
302
+ ],
303
+ all_m(0.5, 0.4, 7 / 16),
304
+ ),
305
+ ]
306
+
307
+ self._apply_test_on(cases)
308
+
309
+ def _apply_test_on(self, cases):
310
+ for ref, hyp, correct_measures in cases:
311
+ output = jiwer.process_words(reference=ref, hypothesis=hyp)
312
+ output_dict = to_measure_dict(output)
313
+
314
+ assert_dict_almost_equal(self, output_dict, correct_measures, delta=1e-16)
315
+
316
+ with pytest.deprecated_call():
317
+ output = jiwer.compute_measures(truth=ref, hypothesis=hyp)
318
+ output_dict = {
319
+ "wer": output["wer"],
320
+ "mer": output["mer"],
321
+ "wil": output["wil"],
322
+ "wip": output["wip"],
323
+ }
324
+ assert_dict_almost_equal(
325
+ self, output_dict, correct_measures, delta=1e-16
326
+ )
327
+
328
+ self._apply_test_deprecated_truth(cases)
329
+
330
+ def _apply_test_deprecated_truth(self, cases):
331
+ with pytest.deprecated_call():
332
+ for ref, hyp, correct_measures in cases:
333
+ output_dict = {}
334
+ for key, method in [
335
+ ("wer", jiwer.wer),
336
+ ("mer", jiwer.mer),
337
+ ("wil", jiwer.wil),
338
+ ("wip", jiwer.wip),
339
+ ]:
340
+ output = method(truth=ref, hypothesis=hyp)
341
+ output_dict[key] = output
342
+
343
+ assert_dict_almost_equal(
344
+ self, output_dict, correct_measures, delta=1e-16
345
+ )
346
+
347
+ def test_deprecated_truth_and_ref(self):
348
+ for key, method in [
349
+ ("wer", jiwer.wer),
350
+ ("mer", jiwer.mer),
351
+ ("wil", jiwer.wil),
352
+ ("wip", jiwer.wip),
353
+ ("cer", jiwer.cer),
354
+ ]:
355
+ # check each invalid call separately: a single pytest.raises block would
+ # pass as soon as the first call raises and skip the remaining ones
356
+ self.assertRaises(ValueError, method, truth="ref", reference="truth", hypothesis="huh")
357
+ self.assertRaises(ValueError, method)
358
+ self.assertRaises(ValueError, method, truth="only truth")
359
+ self.assertRaises(ValueError, method, reference="only ref")
360
+ self.assertRaises(ValueError, method, hypothesis="only hypothesis")
361
+
362
+ def test_deprecated_truth_and_ref_with_transform(self):
363
+ wer_transform = jiwer.Compose(
364
+ [
365
+ jiwer.ToLowerCase(),
366
+ jiwer.RemoveMultipleSpaces(),
367
+ jiwer.Strip(),
368
+ jiwer.ReduceToListOfListOfWords(),
369
+ ]
370
+ )
371
+ cer_transform = jiwer.Compose(
372
+ [
373
+ jiwer.ToLowerCase(),
374
+ jiwer.RemoveMultipleSpaces(),
375
+ jiwer.Strip(),
376
+ jiwer.ReduceToListOfListOfChars(),
377
+ ]
378
+ )
379
+
380
+ for key, method in [
381
+ ("wer", jiwer.wer),
382
+ ("mer", jiwer.mer),
383
+ ("wil", jiwer.wil),
384
+ ("wip", jiwer.wip),
385
+ ("cer", jiwer.cer),
386
+ ]:
387
+ if key == "cer":
388
+ tr = cer_transform
389
+ else:
390
+ tr = wer_transform
391
+
392
+ result = method(
393
+ truth="This is a short Sentence with a few Words with upper and Lower cases",
394
+ hypothesis="His is a short Sentence with a few Words with upper and Lower cases",
395
+ truth_transform=tr,
396
+ hypothesis_transform=tr,
397
+ )
398
+ result_same = method(
399
+ reference="This is a short Sentence with a few Words with upper and Lower cases",
400
+ hypothesis="His is a short Sentence with a few Words with upper and Lower cases",
401
+ reference_transform=tr,
402
+ hypothesis_transform=tr,
403
+ )
404
+ self.assertAlmostEqual(result, result_same)
405
+
406
+
407
+ def test_deprecate_compute_measures():
408
+ # TODO: remove once deprecated
409
+ with pytest.deprecated_call():
410
+ jiwer.compute_measures("no more", "compute_measures")
my_jiwer/tests/test_speed.py ADDED
@@ -0,0 +1,24 @@
1
+ from jiwer import wer
2
+
3
+
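+ # the `benchmark` fixture used below is provided by the pytest-benchmark
+ # plugin; it runs the given callable repeatedly and records timing statistics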
4
+ def perform_computation(num_sentences):
5
+ truth = ["this is a speed test" for _ in range(0, num_sentences)]
6
+ hypo = ["this is not a speed test" for _ in range(0, num_sentences)]
7
+
8
+ wer(truth, hypo)
9
+
10
+
11
+ def test_speed_n1(benchmark):
12
+ benchmark(perform_computation, 1)
13
+
14
+
15
+ def test_speed_n10(benchmark):
16
+ benchmark(perform_computation, 10)
17
+
18
+
19
+ def test_speed_n100(benchmark):
20
+ benchmark(perform_computation, 100)
21
+
22
+
23
+ def test_speed_n1000(benchmark):
24
+ benchmark(perform_computation, 1000)
my_jiwer/tests/test_transforms.py ADDED
@@ -0,0 +1,337 @@
1
+ import unittest
2
+
3
+ from jiwer.transforms import *
4
+ from jiwer.transforms import ReduceToListOfListOfChars
5
+
6
+
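+ # shared helper: applies transform `tr` to every input and asserts that the
+ # result equals the expected output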
7
+ def _apply_test_on(self: unittest.TestCase, tr, cases):
8
+ for inp, outp in cases:
9
+ self.assertEqual(outp, tr(inp))
10
+
11
+
12
+ class TestReduceToSingleSentence(unittest.TestCase):
13
+ def test_normal(self):
14
+ cases = [
15
+ ("this is a test", "this is a test"),
16
+ ("", ""),
17
+ (["this is one", "is two"], ["this is one is two"]),
18
+ (["one", "two", "three", "", "five six"], ["one two three five six"]),
19
+ ([""], []),
20
+ ]
21
+
22
+ _apply_test_on(self, ReduceToSingleSentence(), cases)
23
+
24
+ def test_delimiter(self):
25
+ cases = [
26
+ ("this_is_a_test", "this_is_a_test"),
27
+ ("", ""),
28
+ (["this_is_one", "is_two"], ["this_is_one_is_two"]),
29
+ (["one", "two", "three", "", "five_six"], ["one_two_three_five_six"]),
30
+ ([""], []),
31
+ ]
32
+
33
+ _apply_test_on(self, ReduceToSingleSentence("_"), cases)
34
+
35
+
36
+ class TestReduceToListOfListOfWords(unittest.TestCase):
37
+ def test_normal(self):
38
+ cases = [
39
+ ("this is a test", [["this", "is", "a", "test"]]),
40
+ ("", [[]]),
41
+ (["this is one", "is two"], [["this", "is", "one"], ["is", "two"]]),
42
+ (
43
+ ["one", "two", "three", "", "five six"],
44
+ [["one"], ["two"], ["three"], [], ["five", "six"]],
45
+ ),
46
+ ([], [[]]),
47
+ ([""], [[]]),
48
+ (["", "", ""], [[], [], []]),
49
+ ]
50
+
51
+ _apply_test_on(self, ReduceToListOfListOfWords(), cases)
52
+
53
+ def test_delimiter(self):
54
+ cases = [
55
+ ("this_is_a_test", [["this", "is", "a", "test"]]),
56
+ ("", [[]]),
57
+ (["this_is_one", "is_two"], [["this", "is", "one"], ["is", "two"]]),
58
+ (
59
+ ["one", "two", "three", "", "five_six"],
60
+ [["one"], ["two"], ["three"], [], ["five", "six"]],
61
+ ),
62
+ ([], [[]]),
63
+ ([""], [[]]),
64
+ (["", "", ""], [[], [], []]),
65
+ ]
66
+
67
+ _apply_test_on(self, ReduceToListOfListOfWords("_"), cases)
68
+
69
+
70
+ class TestReduceToListOfListOfChars(unittest.TestCase):
71
+ def test_normal(self):
72
+ cases = [
73
+ (
74
+ "this is a test",
75
+ [
76
+ [
77
+ "t",
78
+ "h",
79
+ "i",
80
+ "s",
81
+ " ",
82
+ "i",
83
+ "s",
84
+ " ",
85
+ "a",
86
+ " ",
87
+ "t",
88
+ "e",
89
+ "s",
90
+ "t",
91
+ ]
92
+ ],
93
+ ),
94
+ ("", [[]]),
95
+ (
96
+ ["this is one", "is two"],
97
+ [
98
+ ["t", "h", "i", "s", " ", "i", "s", " ", "o", "n", "e"],
99
+ ["i", "s", " ", "t", "w", "o"],
100
+ ],
101
+ ),
102
+ (
103
+ ["one", "two", "three", "", "five six"],
104
+ [
105
+ ["o", "n", "e"],
106
+ ["t", "w", "o"],
107
+ ["t", "h", "r", "e", "e"],
108
+ [],
109
+ ["f", "i", "v", "e", " ", "s", "i", "x"],
110
+ ],
111
+ ),
112
+ ([], [[]]),
113
+ ([""], [[]]),
114
+ (["", "", ""], [[], [], []]),
115
+ ]
116
+
117
+ _apply_test_on(self, ReduceToListOfListOfChars(), cases)
118
+
119
+ def test_delimiter(self):
120
+ cases = [
121
+ (
122
+ "this_is_a_test",
123
+ [
124
+ [
125
+ "t",
126
+ "h",
127
+ "i",
128
+ "s",
129
+ "_",
130
+ "i",
131
+ "s",
132
+ "_",
133
+ "a",
134
+ "_",
135
+ "t",
136
+ "e",
137
+ "s",
138
+ "t",
139
+ ]
140
+ ],
141
+ ),
142
+ ("", [[]]),
143
+ (
144
+ ["this_is_one", "is_two"],
145
+ [
146
+ ["t", "h", "i", "s", "_", "i", "s", "_", "o", "n", "e"],
147
+ ["i", "s", "_", "t", "w", "o"],
148
+ ],
149
+ ),
150
+ (
151
+ ["one", "two", "three", "", "five_six"],
152
+ [
153
+ ["o", "n", "e"],
154
+ ["t", "w", "o"],
155
+ ["t", "h", "r", "e", "e"],
156
+ [],
157
+ ["f", "i", "v", "e", "_", "s", "i", "x"],
158
+ ],
159
+ ),
160
+ ([], [[]]),
161
+ ([""], [[]]),
162
+ (["", "", ""], [[], [], []]),
163
+ ]
164
+
165
+ _apply_test_on(self, ReduceToListOfListOfChars(), cases)
166
+
167
+
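+ # RemoveSpecificWords substitutes every removed word with a single space, so
+ # the expected outputs keep the surrounding whitespace (note the double
+ # spaces below)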
168
+ class TestRemoveSpecificWords(unittest.TestCase):
169
+ def test_normal(self):
170
+ cases = [
171
+ (["yhe about that bug"], [" about that bug"]),
172
+ (["yeah about that bug"], [" about that bug"]),
173
+ (["one bug"], ["one bug"]),
174
+ (["yhe", "about", "bug"], [" ", "about", "bug"]),
175
+ (["yeah", "about", "bug"], [" ", "about", "bug"]),
176
+ (["one", "bug"], ["one", "bug"]),
177
+ (["yhe about bug"], [" about bug"]),
178
+ (["yeah about bug"], [" about bug"]),
179
+ (["about bug yhe"], ["about bug "]),
180
+ (["one bug"], ["one bug"]),
181
+ (["he asked a helpful question"], [" asked helpful question"]),
182
+ (["normal sentence"], ["normal sentence"]),
183
+ (["yhe awesome", " awesome"]),
184
+ (["the apple is not a pear", " apple is not pear"]),
185
+ (["yhe", " "]),
186
+ ]
187
+
188
+ _apply_test_on(
189
+ self, RemoveSpecificWords(["yhe", "yeah", "a", "he", "the"]), cases
190
+ )
191
+
192
+
193
+ class TestRemoveWhiteSpace(unittest.TestCase):
194
+ def test_normal(self):
195
+ cases = [
196
+ (["this is an example", "thisisanexample"]),
197
+ (["hello\tworld\n\r", "helloworld"]),
198
+ ]
199
+
200
+ _apply_test_on(self, RemoveWhiteSpace(), cases)
201
+
202
+ def test_replace_by_space(self):
203
+ cases = [
204
+ (["this is an example", "this is an example"]),
205
+ (["hello\tworld\n\r", "hello world "]),
206
+ ]
207
+
208
+ _apply_test_on(self, RemoveWhiteSpace(replace_by_space=True), cases)
209
+
210
+
211
+ class TestRemovePunctuation(unittest.TestCase):
212
+ def test_normal(self):
213
+ cases = [
214
+ (["this is an example!", "this is an example"]),
215
+ (["hello. goodbye", "hello goodbye"]),
216
+ (["this sentence has no punctuation", "this sentence has no punctuation"]),
217
+ ]
218
+
219
+ _apply_test_on(self, RemovePunctuation(), cases)
220
+
221
+ def test_non_ascii_punctuation(self):
222
+ cases = [
223
+ (["word༆’'", "word"]),
224
+ (["‘no’", "no"]),
225
+ (["“yes”", "yes"]),
226
+ ]
227
+
228
+ _apply_test_on(self, RemovePunctuation(), cases)
229
+
230
+
231
+ class TestRemoveMultipleSpaces(unittest.TestCase):
232
+ def test_normal(self):
233
+ cases = [
234
+ (["this is an example "], ["this is an example "]),
235
+ ([" hello goodbye "], [" hello goodbye "]),
236
+ ([" "], [" "]),
237
+ ]
238
+
239
+ _apply_test_on(self, RemoveMultipleSpaces(), cases)
240
+
242
+
243
+
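+ # substitutions only match whole words: "you" inside "your" and "foo" inside
+ # "foobar" are left untouched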
244
+ class TestSubstituteWords(unittest.TestCase):
245
+ def test_normal(self):
246
+ cases = [
247
+ (["you're pretty"], ["i am awesome"]),
248
+ (["your book"], ["your book"]),
249
+ (["foobar"], ["foobar"]),
250
+ ]
251
+
252
+ _apply_test_on(
253
+ self,
254
+ SubstituteWords(
255
+ {"pretty": "awesome", "you": "i", "'re": " am", "foo": "bar"}
256
+ ),
257
+ cases,
258
+ )
259
+
260
+
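+ # the second pattern strips a trailing "ed" from whole words; "doomed"
+ # becomes "sacr" whichever of the two substitutions is applied first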
261
+ class TestSubstituteRegexes(unittest.TestCase):
262
+ def test_normal(self):
263
+ cases = [
264
+ (["is the world doomed or loved?"], ["is the world sacr or lov?"]),
265
+ (["the sun is loved"], ["the sun is lov"]),
266
+ (["edibles are allegedly cultivated"], ["edibles are allegedly cultivat"]),
267
+ ]
268
+
269
+ _apply_test_on(
270
+ self,
271
+ SubstituteRegexes({r"doom": r"sacr", r"\b(\w+)ed\b": r"\1"}),
272
+ cases,
273
+ )
274
+
275
+
276
+ class TestStrip(unittest.TestCase):
277
+ def test_normal(self):
278
+ cases = [
279
+ ([" this is an example "], ["this is an example"]),
280
+ ([" hello goodbye "], ["hello goodbye"]),
281
+ ([" "], [""]),
282
+ ([" "], [""]),
283
+ ]
284
+
285
+ _apply_test_on(self, Strip(), cases)
286
+
287
+
288
+ class TestRemoveEmptyStrings(unittest.TestCase):
289
+ def test_normal(self):
290
+ cases = [
291
+ ([""], []),
292
+ (["this is an example"], ["this is an example"]),
293
+ ([" "], []),
294
+ ([" "], []),
295
+ ]
296
+
297
+ _apply_test_on(self, RemoveEmptyStrings(), cases)
298
+
299
+
300
+ class TestExpandCommonEnglishContractions(unittest.TestCase):
301
+ def test_normal(self):
302
+ cases = [
303
+ (
304
+ ["she'll make sure you can't make it"],
305
+ ["she will make sure you can not make it"],
306
+ ),
307
+ (["let's party!"], ["let us party!"]),
308
+ ]
309
+
310
+ _apply_test_on(self, ExpandCommonEnglishContractions(), cases)
311
+
312
+
313
+ class TestToLowerCase(unittest.TestCase):
314
+ def test_normal(self):
315
+ cases = [
316
+ (["You're PRETTY"], ["you're pretty"]),
317
+ ]
318
+
319
+ _apply_test_on(self, ToLowerCase(), cases)
320
+
321
+
322
+ class TestToUpperCase(unittest.TestCase):
323
+ def test_normal(self):
324
+ cases = [
325
+ (["You're amazing"], ["YOU'RE AMAZING"]),
326
+ ]
327
+
328
+ _apply_test_on(self, ToUpperCase(), cases)
329
+
330
+
331
+ class TestRemoveKaldiNonWords(unittest.TestCase):
332
+ def test_normal(self):
333
+ cases = [
334
+ (["you <unk> like [laugh]"], ["you like "]),
335
+ ]
336
+
337
+ _apply_test_on(self, RemoveKaldiNonWords(), cases)