Add application file

- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/build_config.yaml +14 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/create_wheel_file.py +44 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.5-py3-none-any.whl +0 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.5.tar.gz +3 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.6-py3-none-any.whl +0 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.6.tar.gz +3 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.1.0-py3-none-any.whl +0 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.1.0.tar.gz +3 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/PKG-INFO +12 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/SOURCES.txt +64 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/dependency_links.txt +1 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/top_level.txt +1 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/__init__.py +52 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/analysis_explanation.py +64 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/analyzer_engine.py +372 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/analyzer_request.py +36 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/app_tracer.py +27 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/batch_analyzer_engine.py +145 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/dict_analyzer_result.py +29 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/entity_recognizer.py +199 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/local_recognizer.py +7 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/__init__.py +19 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/client_nlp_engine.py +108 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/nlp_artifacts.py +74 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/nlp_engine.py +42 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/spacy_nlp_engine.py +96 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/stanza_nlp_engine.py +39 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/pattern.py +45 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/pattern_recognizer.py +253 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/recognizer_registry/__init__.py +4 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/recognizer_result.py +189 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/remote_recognizer.py +57 -0
- presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/setup.py +1 -0
- presidio_analyzer/presidio_analyzer/Package_to_wheel.txt +5 -0
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/build_config.yaml
ADDED
@@ -0,0 +1,14 @@
+-
+  name: presidio_analyzer
+  version: 4.1.0
+  build: 0.0.1
+  author: Amit Hegde
+  author_email: amitumamaheshwar.h@infosys.com
+  description: Infosys Intelligent Assistant
+  long_description: Infosys Intelligent Assistant
+  classifiers: ["Programming Language :: Python :: 3",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",]
+  package_dir: {"": "presidio_analyzer"}
+  packages: presidio_analyzer
+  python_requires: ['>=3.6']
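Because the top-level YAML node is a list (the leading "-"), yaml.safe_load returns a list of mappings; a minimal sketch of how create_wheel_file.py (next file) consumes it:

import yaml

# parse the config shown above; safe_load returns a list with one dict
with open("build_config.yaml") as f:
    configs = yaml.safe_load(f)

for cfg in configs:
    print(cfg["name"], cfg["version"])  # presidio_analyzer 4.1.0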
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/create_wheel_file.py
ADDED
@@ -0,0 +1,44 @@
+__copyright__ = """ 2020 - 2021 Infosys Limited, Bangalore, India. All Rights Reserved.
+Version: 2.5.0.0
+Except for any free or open source software components embedded in this Infosys proprietary software program (“Program”), this Program is protected by copyright laws, international treaties and other pending or existing intellectual property rights in India, the United States and other countries.
+Except as expressly permitted, any unauthorized reproduction, storage, transmission in any form or by any means (including without limitation electronic, mechanical, printing, photocopying, recording or otherwise), or any distribution of this Program, or any portion of it, may result in severe civil and criminal penalties, and will be prosecuted to the maximum extent possible under the law.
+"""
+import yaml
+import subprocess
+import os
+with open(r'.\build_config.yaml') as build_file:
+    build_config_list = yaml.safe_load(build_file)
+
+
+for build_config in build_config_list:
+
+    try:
+        print(build_config)
+
+        if os.path.exists(f"./{build_config['packages']}"):
+
+            setup_str = f"import setuptools\r" \
+                        f"setuptools.setup(\r \
+                        name='{build_config['name']}',\r \
+                        version='{build_config['version']}',\r \
+                        author='{build_config['author']}',\r \
+                        author_email='{build_config['author_email']}',\r \
+                        description='{build_config['description']}',\r \
+                        long_description='{build_config['long_description']}',\r \
+                        classifiers={build_config['classifiers']},\r \
+                        package_dir={build_config['package_dir']},\r \
+                        packages=setuptools.find_packages(where='{build_config['packages']}'),\r \
+                        python_requires='{build_config['python_requires'][0]}',\r \
+                        )"
+
+            with open('setup.py','w') as file:
+                file.write(setup_str)
+
+            subprocess.run(["python", "-m","build"])
+            wheel_file = f"{build_config['name']}-{build_config['version']}_build_{build_config['build']}-py3-none-any.whl"
+            print(f"wheel_file: {wheel_file}")
+            subprocess.run(["python", "-m", "pyc_wheel", f"dist\{wheel_file}"])
+        else:
+            print(f"Path does not exist ./{build_config['packages']}")
+    except Exception as e:
+        print(e)
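For context, a minimal sketch of how this script would be invoked, assuming the third-party build and pyc_wheel packages it shells out to are installed (both are real PyPI projects; the Windows-style path in the script suggests it is run from the package root on Windows):

import subprocess

# one-time setup, then run the build script shown above
subprocess.run(["python", "-m", "pip", "install", "build", "pyc_wheel"], check=True)
subprocess.run(["python", "create_wheel_file.py"], check=True)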
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.5-py3-none-any.whl
ADDED
Binary file (78.9 kB).
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.5.tar.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:effdee5c88badc2a4605dcabc7fe1ff43df586df0a7c2be3f4dbc4d440c7e4d6
+size 44375
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.6-py3-none-any.whl
ADDED
Binary file (79.1 kB).
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.0.6.tar.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c49ca4ee3acda590bb69b68697e02cbfc81b89bd8dcfcaf9ff90b07fec062515
+size 44656
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.1.0-py3-none-any.whl
ADDED
Binary file (79.1 kB).
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/dist/presidio_analyzer-4.1.0.tar.gz
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:595ba3a58a473cc94a2a5c421eea075c5db52cb0181335a92f3a222f5cc76736
+size 44675
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,12 @@
+Metadata-Version: 2.1
+Name: presidio_analyzer
+Version: 4.1.0
+Summary: Infosys Intelligent Assistant
+Author: Amit Hegde
+Author-email: amitumamaheshwar.h@infosys.com
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+
+Infosys Intelligent Assistant
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/SOURCES.txt
ADDED
@@ -0,0 +1,64 @@
+setup.py
+presidio_analyzer/presidio_analyzer/__init__.py
+presidio_analyzer/presidio_analyzer/analysis_explanation.py
+presidio_analyzer/presidio_analyzer/analyzer_engine.py
+presidio_analyzer/presidio_analyzer/analyzer_request.py
+presidio_analyzer/presidio_analyzer/app_tracer.py
+presidio_analyzer/presidio_analyzer/batch_analyzer_engine.py
+presidio_analyzer/presidio_analyzer/dict_analyzer_result.py
+presidio_analyzer/presidio_analyzer/entity_recognizer.py
+presidio_analyzer/presidio_analyzer/local_recognizer.py
+presidio_analyzer/presidio_analyzer/pattern.py
+presidio_analyzer/presidio_analyzer/pattern_recognizer.py
+presidio_analyzer/presidio_analyzer/recognizer_result.py
+presidio_analyzer/presidio_analyzer/remote_recognizer.py
+presidio_analyzer/presidio_analyzer.egg-info/PKG-INFO
+presidio_analyzer/presidio_analyzer.egg-info/SOURCES.txt
+presidio_analyzer/presidio_analyzer.egg-info/dependency_links.txt
+presidio_analyzer/presidio_analyzer.egg-info/top_level.txt
+presidio_analyzer/presidio_analyzer/context_aware_enhancers/__init__.py
+presidio_analyzer/presidio_analyzer/context_aware_enhancers/context_aware_enhancer.py
+presidio_analyzer/presidio_analyzer/context_aware_enhancers/lemma_context_aware_enhancer.py
+presidio_analyzer/presidio_analyzer/nlp_engine/__init__.py
+presidio_analyzer/presidio_analyzer/nlp_engine/client_nlp_engine.py
+presidio_analyzer/presidio_analyzer/nlp_engine/nlp_artifacts.py
+presidio_analyzer/presidio_analyzer/nlp_engine/nlp_engine.py
+presidio_analyzer/presidio_analyzer/nlp_engine/nlp_engine_provider.py
+presidio_analyzer/presidio_analyzer/nlp_engine/spacy_nlp_engine.py
+presidio_analyzer/presidio_analyzer/nlp_engine/stanza_nlp_engine.py
+presidio_analyzer/presidio_analyzer/nlp_engine/transformers_nlp_engine.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/Aadhaar_Number.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/PAN_Number.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/__init__.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/au_abn_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/au_acn_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/au_medicare_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/au_tfn_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/credit_card_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/crypto_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/data_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/date_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/email_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/es_nif_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/iban_patterns.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/iban_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/ip_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/it_driver_license_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/it_fiscal_code_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/it_identity_card_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/it_passport_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/it_vat_code.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/medical_license_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/phone_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/sg_fin_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/spacy_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/stanza_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/transformers_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/uk_nhs_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/url_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/us_driver_license_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/us_itin_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/us_passport_recognizer.py
+presidio_analyzer/presidio_analyzer/predefined_recognizers/us_ssn_recognizer.py
+presidio_analyzer/presidio_analyzer/recognizer_registry/__init__.py
+presidio_analyzer/presidio_analyzer/recognizer_registry/recognizer_registry.py
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@
+
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer.egg-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+presidio_analyzer
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/__init__.py
ADDED
@@ -0,0 +1,52 @@
+"""Presidio analyzer package."""
+
+import logging
+
+from presidio_analyzer.pattern import Pattern
+from presidio_analyzer.analysis_explanation import AnalysisExplanation
+from presidio_analyzer.recognizer_result import RecognizerResult
+from presidio_analyzer.dict_analyzer_result import DictAnalyzerResult
+from presidio_analyzer.entity_recognizer import EntityRecognizer
+from presidio_analyzer.local_recognizer import LocalRecognizer
+from presidio_analyzer.pattern_recognizer import PatternRecognizer
+from presidio_analyzer.remote_recognizer import RemoteRecognizer
+from presidio_analyzer.recognizer_registry import RecognizerRegistry
+from presidio_analyzer.analyzer_engine import AnalyzerEngine
+from presidio_analyzer.batch_analyzer_engine import BatchAnalyzerEngine
+from presidio_analyzer.analyzer_request import AnalyzerRequest
+from presidio_analyzer.context_aware_enhancers import ContextAwareEnhancer
+from presidio_analyzer.context_aware_enhancers import LemmaContextAwareEnhancer
+
+
+# Define default loggers behavior
+
+# 1. presidio_analyzer logger
+
+logging.getLogger("presidio_analyzer").addHandler(logging.NullHandler())
+
+# 2. decision_process logger.
+# Setting the decision process trace here as we would want it
+# to be activated using a parameter to AnalyzeEngine and not by default.
+
+decision_process_logger = logging.getLogger("decision_process")
+ch = logging.StreamHandler()
+formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s]%(message)s")
+ch.setFormatter(formatter)
+decision_process_logger.addHandler(ch)
+decision_process_logger.setLevel("INFO")
+__all__ = [
+    "Pattern",
+    "AnalysisExplanation",
+    "RecognizerResult",
+    "DictAnalyzerResult",
+    "EntityRecognizer",
+    "LocalRecognizer",
+    "PatternRecognizer",
+    "RemoteRecognizer",
+    "RecognizerRegistry",
+    "AnalyzerEngine",
+    "AnalyzerRequest",
+    "ContextAwareEnhancer",
+    "LemmaContextAwareEnhancer",
+    "BatchAnalyzerEngine",
+]
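Because the package logger above only gets a NullHandler, library logging is silent by default; a minimal sketch of opting in from application code, using only the standard logging API:

import logging

# attach a handler to see presidio_analyzer's own log output
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s]%(message)s"))
logging.getLogger("presidio_analyzer").addHandler(handler)
logging.getLogger("presidio_analyzer").setLevel(logging.INFO)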
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/analysis_explanation.py
ADDED
@@ -0,0 +1,64 @@
+from typing import Dict
+
+
+class AnalysisExplanation:
+    """
+    Hold tracing information to explain why PII entities were identified as such.
+
+    :param recognizer: name of recognizer that made the decision
+    :param original_score: recognizer's confidence in result
+    :param pattern_name: name of pattern
+        (if decision was made by a PatternRecognizer)
+    :param pattern: regex pattern that was applied (if PatternRecognizer)
+    :param validation_result: result of a validation (e.g. checksum)
+    :param textual_explanation: Free text for describing
+        a decision of a logic or model
+    """
+
+    def __init__(
+        self,
+        recognizer: str,
+        original_score: float,
+        pattern_name: str = None,
+        pattern: str = None,
+        validation_result: float = None,
+        textual_explanation: str = None,
+    ):
+
+        self.recognizer = recognizer
+        self.pattern_name = pattern_name
+        self.pattern = pattern
+        self.original_score = original_score
+        self.score = original_score
+        self.textual_explanation = textual_explanation
+        self.score_context_improvement = 0
+        self.supportive_context_word = ""
+        self.validation_result = validation_result
+
+    def __repr__(self):
+        """Create string representation of the object."""
+        return str(self.__dict__)
+
+    def set_improved_score(self, score: float) -> None:
+        """Update the score and calculate the difference from the original score."""
+        self.score = score
+        self.score_context_improvement = self.score - self.original_score
+
+    def set_supportive_context_word(self, word: str) -> None:
+        """Set the context word which helped increase the score."""
+        self.supportive_context_word = word
+
+    def append_textual_explanation_line(self, text: str) -> None:
+        """Append a new line to textual_explanation field."""
+        if self.textual_explanation is None:
+            self.textual_explanation = text
+        else:
+            self.textual_explanation = "{}\n{}".format(self.textual_explanation, text)
+
+    def to_dict(self) -> Dict:
+        """
+        Serialize self to dictionary.
+
+        :return: a dictionary
+        """
+        return self.__dict__
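A short sketch of the class in use (all values are illustrative only):

from presidio_analyzer import AnalysisExplanation

expl = AnalysisExplanation(recognizer="CreditCardRecognizer", original_score=0.6)
expl.set_improved_score(0.85)  # score_context_improvement becomes 0.25
expl.set_supportive_context_word("card")
expl.append_textual_explanation_line("score boosted by nearby context word")
print(expl.to_dict())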
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/analyzer_engine.py
ADDED
@@ -0,0 +1,372 @@
+import json
+import logging
+from typing import List, Optional
+
+from presidio_analyzer import (
+    RecognizerRegistry,
+    RecognizerResult,
+    EntityRecognizer,
+)
+from presidio_analyzer.app_tracer import AppTracer
+from presidio_analyzer.context_aware_enhancers import (
+    ContextAwareEnhancer,
+    LemmaContextAwareEnhancer,
+)
+from presidio_analyzer.nlp_engine import NlpEngine, NlpEngineProvider, NlpArtifacts
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class AnalyzerEngine:
+    """
+    Entry point for Presidio Analyzer.
+
+    Orchestrating the detection of PII entities and all related logic.
+
+    :param registry: instance of type RecognizerRegistry
+    :param nlp_engine: instance of type NlpEngine
+        (for example SpacyNlpEngine)
+    :param app_tracer: instance of type AppTracer, used to trace the logic
+        used during each request for interpretability reasons.
+    :param log_decision_process: bool,
+        defines whether the decision process within the analyzer should be logged or not.
+    :param default_score_threshold: Minimum confidence value
+        for detected entities to be returned
+    :param supported_languages: List of possible languages this engine could be run on.
+        Used for loading the right NLP models and recognizers for these languages.
+    :param context_aware_enhancer: instance of type ContextAwareEnhancer for enhancing
+        confidence score based on context words (LemmaContextAwareEnhancer will be created
+        by default if None passed)
+    """
+
+    def __init__(
+        self,
+        registry: RecognizerRegistry = None,
+        nlp_engine: NlpEngine = None,
+        app_tracer: AppTracer = None,
+        log_decision_process: bool = False,
+        default_score_threshold: float = 0,
+        supported_languages: List[str] = None,
+        context_aware_enhancer: Optional[ContextAwareEnhancer] = None,
+    ):
+        if not supported_languages:
+            supported_languages = ["en"]
+
+        if not nlp_engine:
+            logger.info("nlp_engine not provided, creating default.")
+            provider = NlpEngineProvider()
+            nlp_engine = provider.create_engine()
+
+        if not registry:
+            logger.info("registry not provided, creating default.")
+            registry = RecognizerRegistry()
+        if not app_tracer:
+            app_tracer = AppTracer()
+        self.app_tracer = app_tracer
+
+        self.supported_languages = supported_languages
+
+        self.nlp_engine = nlp_engine
+        self.registry = registry
+
+        # load all recognizers
+        if not registry.recognizers:
+            registry.load_predefined_recognizers(
+                nlp_engine=self.nlp_engine, languages=self.supported_languages
+            )
+
+        self.log_decision_process = log_decision_process
+        self.default_score_threshold = default_score_threshold
+
+        if not context_aware_enhancer:
+            logger.debug(
+                "context aware enhancer not provided, creating default"
+                + " lemma based enhancer."
+            )
+            context_aware_enhancer = LemmaContextAwareEnhancer()
+
+        self.context_aware_enhancer = context_aware_enhancer
+
+    def get_recognizers(self, language: Optional[str] = None) -> List[EntityRecognizer]:
+        """
+        Return a list of PII recognizers currently loaded.
+
+        :param language: Return the recognizers supporting a given language.
+        :return: List of [Recognizer] as a RecognizersAllResponse
+        """
+        if not language:
+            languages = self.supported_languages
+        else:
+            languages = [language]
+
+        recognizers = []
+        for language in languages:
+            logger.info(f"Fetching all recognizers for language {language}")
+            recognizers.extend(
+                self.registry.get_recognizers(language=language, all_fields=True)
+            )
+
+        return list(set(recognizers))
+
+    def get_supported_entities(self, language: Optional[str] = None) -> List[str]:
+        """
+        Return a list of the entities that can be detected.
+
+        :param language: Return only entities supported in a specific language.
+        :return: List of entity names
+        """
+        recognizers = self.get_recognizers(language=language)
+        supported_entities = []
+        for recognizer in recognizers:
+            supported_entities.extend(recognizer.get_supported_entities())
+
+        return list(set(supported_entities))
+
+    def analyze(
+        self,
+        text: str,
+        language: str,
+        entities: Optional[List[str]] = None,
+        correlation_id: Optional[str] = None,
+        score_threshold: Optional[float] = None,
+        return_decision_process: Optional[bool] = False,
+        ad_hoc_recognizers: Optional[List[EntityRecognizer]] = None,
+        context: Optional[List[str]] = None,
+        allow_list: Optional[List[str]] = None,
+        nlp_artifacts: Optional[NlpArtifacts] = None,
+    ) -> List[RecognizerResult]:
+        """
+        Find PII entities in text using different PII recognizers for a given language.
+
+        :param text: the text to analyze
+        :param language: the language of the text
+        :param entities: List of PII entities that should be looked for in the text.
+            If entities=None then all entities are looked for.
+        :param correlation_id: cross call ID for this request
+        :param score_threshold: A minimum value for which
+            to return an identified entity
+        :param return_decision_process: Whether the analysis decision process steps
+            should be returned in the response.
+        :param ad_hoc_recognizers: List of recognizers which will be used only
+            for this specific request.
+        :param context: List of context words to enhance confidence score if matched
+            with the recognized entity's recognizer context
+        :param allow_list: List of words that the user defines as being allowed to keep
+            in the text
+        :param nlp_artifacts: precomputed NlpArtifacts
+        :return: an array of the found entities in the text
+
+        :example:
+
+        >>> from presidio_analyzer import AnalyzerEngine
+
+        >>> # Set up the engine, loads the NLP module (spaCy model by default)
+        >>> # and other PII recognizers
+        >>> analyzer = AnalyzerEngine()
+
+        >>> # Call analyzer to get results
+        >>> results = analyzer.analyze(text='My phone number is 212-555-5555', entities=['PHONE_NUMBER'], language='en') # noqa D501
+        >>> print(results)
+        [type: PHONE_NUMBER, start: 19, end: 31, score: 0.85]
+        """
+        all_fields = not entities
+
+        recognizers = self.registry.get_recognizers(
+            language=language,
+            entities=entities,
+            all_fields=all_fields,
+            ad_hoc_recognizers=ad_hoc_recognizers,
+        )
+
+        if all_fields:
+            # Since all_fields=True, list all entities by iterating
+            # over all recognizers
+            entities = self.get_supported_entities(language=language)
+
+        # run the nlp pipeline over the given text, store the results in
+        # a NlpArtifacts instance
+        if not nlp_artifacts:
+            nlp_artifacts = self.nlp_engine.process_text(text, language)
+
+        if self.log_decision_process:
+            self.app_tracer.trace(
+                correlation_id, "nlp artifacts:" + nlp_artifacts.to_json()
+            )
+
+        results = []
+        for recognizer in recognizers:
+            # Lazy loading of the relevant recognizers
+            if not recognizer.is_loaded:
+                recognizer.load()
+                recognizer.is_loaded = True
+
+            # analyze using the current recognizer and append the results
+            current_results = recognizer.analyze(
+                text=text, entities=entities, nlp_artifacts=nlp_artifacts
+            )
+
+            if current_results:
+                # add recognizer name to recognition metadata inside results
+                # if not exists
+                self.__add_recognizer_id_if_not_exists(current_results, recognizer)
+                results.extend(current_results)
+
+        results = self._enhance_using_context(
+            text, results, nlp_artifacts, recognizers, context
+        )
+
+        if self.log_decision_process:
+            self.app_tracer.trace(
+                correlation_id,
+                json.dumps([str(result.to_dict()) for result in results]),
+            )
+
+        # Remove duplicates or low score results
+        results = EntityRecognizer.remove_duplicates(results)
+        results = self.__remove_low_scores(results, score_threshold)
+
+        if allow_list:
+            results = self._remove_allow_list(results, allow_list, text)
+
+        if not return_decision_process:
+            results = self.__remove_decision_process(results)
+
+        return results
+
+    def _enhance_using_context(
+        self,
+        text: str,
+        raw_results: List[RecognizerResult],
+        nlp_artifacts: NlpArtifacts,
+        recognizers: List[EntityRecognizer],
+        context: Optional[List[str]] = None,
+    ) -> List[RecognizerResult]:
+        """
+        Enhance confidence score using context words.
+
+        :param text: The actual text that was analyzed
+        :param raw_results: Recognizer results which didn't take
+            context into consideration
+        :param nlp_artifacts: The nlp artifacts contains elements
+            such as lemmatized tokens for better
+            accuracy of the context enhancement process
+        :param recognizers: the list of recognizers
+        :param context: list of context words
+        """
+        results = []
+
+        for recognizer in recognizers:
+            recognizer_results = [
+                r
+                for r in raw_results
+                if r.recognition_metadata[RecognizerResult.RECOGNIZER_IDENTIFIER_KEY]
+                == recognizer.id
+            ]
+            other_recognizer_results = [
+                r
+                for r in raw_results
+                if r.recognition_metadata[RecognizerResult.RECOGNIZER_IDENTIFIER_KEY]
+                != recognizer.id
+            ]
+
+            # enhance score using context in recognizer level if implemented
+            recognizer_results = recognizer.enhance_using_context(
+                text=text,
+                # each recognizer will get access to all recognizer results
+                # to allow related entities context enhancement
+                raw_recognizer_results=recognizer_results,
+                other_raw_recognizer_results=other_recognizer_results,
+                nlp_artifacts=nlp_artifacts,
+                context=context,
+            )
+
+            results.extend(recognizer_results)
+
+        # Update results in case surrounding words or external context are relevant to
+        # the context words.
+        results = self.context_aware_enhancer.enhance_using_context(
+            text=text,
+            raw_results=results,
+            nlp_artifacts=nlp_artifacts,
+            recognizers=recognizers,
+            context=context,
+        )
+
+        return results
+
+    def __remove_low_scores(
+        self, results: List[RecognizerResult], score_threshold: float = None
+    ) -> List[RecognizerResult]:
+        """
+        Remove results for which the confidence is lower than the threshold.
+
+        :param results: List of RecognizerResult
+        :param score_threshold: float value for minimum possible confidence
+        :return: List[RecognizerResult]
+        """
+        if score_threshold is None:
+            score_threshold = self.default_score_threshold
+
+        new_results = [result for result in results if result.score >= score_threshold]
+        return new_results
+
+    @staticmethod
+    def _remove_allow_list(
+        results: List[RecognizerResult], allow_list: List[str], text: str
+    ) -> List[RecognizerResult]:
+        """
+        Remove results which are part of the allow list.
+
+        :param results: List of RecognizerResult
+        :param allow_list: list of allowed terms
+        :param text: the text to analyze
+        :return: List[RecognizerResult]
+        """
+        new_results = []
+        for result in results:
+            word = text[result.start : result.end]
+            # if the word is not specified to be allowed, keep in the PII entities
+            if word not in allow_list:
+                new_results.append(result)
+
+        return new_results
+
+    @staticmethod
+    def __add_recognizer_id_if_not_exists(
+        results: List[RecognizerResult], recognizer: EntityRecognizer
+    ):
+        """Ensure recognition metadata with recognizer id existence.
+
+        Ensure recognizer result list contains recognizer id inside recognition
+        metadata dictionary, and if not create it. recognizer_id is needed
+        for context aware enhancement.
+
+        :param results: List of RecognizerResult
+        :param recognizer: Entity recognizer
+        """
+        for result in results:
+            if not result.recognition_metadata:
+                result.recognition_metadata = dict()
+            if (
+                RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
+                not in result.recognition_metadata
+            ):
+                result.recognition_metadata[
+                    RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
+                ] = recognizer.id
+            if RecognizerResult.RECOGNIZER_NAME_KEY not in result.recognition_metadata:
+                result.recognition_metadata[
+                    RecognizerResult.RECOGNIZER_NAME_KEY
+                ] = recognizer.name
+
+    @staticmethod
+    def __remove_decision_process(
+        results: List[RecognizerResult],
+    ) -> List[RecognizerResult]:
+        """Remove decision process / analysis explanation from response."""
+
+        for result in results:
+            result.analysis_explanation = None
+
+        return results
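Beyond the docstring example, a hedged sketch of the allow_list and return_decision_process options defined above (assumes the default spaCy model is installed; the input text is illustrative):

from presidio_analyzer import AnalyzerEngine

analyzer = AnalyzerEngine(default_score_threshold=0.4)
results = analyzer.analyze(
    text="Reach John at 212-555-5555",
    language="en",
    allow_list=["John"],           # matches on "John" are dropped
    return_decision_process=True,  # keep analysis_explanation on each result
)
for result in results:
    print(result, result.analysis_explanation)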
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/analyzer_request.py
ADDED
@@ -0,0 +1,36 @@
+from typing import Dict
+
+from presidio_analyzer import PatternRecognizer
+
+
+class AnalyzerRequest:
+    """
+    Analyzer request data.
+
+    :param req_data: A request dictionary with the following fields:
+        text: the text to analyze
+        language: the language of the text
+        entities: List of PII entities that should be looked for in the text.
+        If entities=None then all entities are looked for.
+        correlation_id: cross call ID for this request
+        score_threshold: A minimum value for which to return an identified entity
+        log_decision_process: Should the decision points within the analysis
+        be logged
+        return_decision_process: Should the decision points within the analysis
+        be returned as part of the response
+    """
+
+    def __init__(self, req_data: Dict):
+        self.text = req_data.get("text")
+        self.language = req_data.get("language")
+        self.entities = req_data.get("entities")
+        self.correlation_id = req_data.get("correlation_id")
+        self.score_threshold = req_data.get("score_threshold")
+        self.return_decision_process = req_data.get("return_decision_process")
+        ad_hoc_recognizers = req_data.get("ad_hoc_recognizers")
+        self.ad_hoc_recognizers = []
+        if ad_hoc_recognizers:
+            self.ad_hoc_recognizers = [
+                PatternRecognizer.from_dict(rec) for rec in ad_hoc_recognizers
+            ]
+        self.context = req_data.get("context")
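A minimal sketch of the payload shape this class expects (field values are illustrative):

from presidio_analyzer import AnalyzerRequest

req = AnalyzerRequest(
    {
        "text": "My phone number is 212-555-5555",
        "language": "en",
        "entities": ["PHONE_NUMBER"],
        "correlation_id": "req-123",
        "score_threshold": 0.5,
    }
)
print(req.text, req.entities, req.score_threshold)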
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/app_tracer.py
ADDED
@@ -0,0 +1,27 @@
+import logging
+
+
+class AppTracer:
+    """
+    Allow logging/tracing the system's decisions.
+
+    Relevant in cases where we want to know which modules were used for detection,
+    which logic was utilized, what results were given and potentially why.
+    This can be useful for analyzing the detection accuracy of the system.
+    :param enabled: Whether tracing should be activated.
+    """
+
+    def __init__(self, enabled: bool = True):
+        self.logger = logging.getLogger("decision_process")
+        self.enabled = enabled
+
+    def trace(self, request_id: str, trace_data: str) -> None:
+        """
+        Write a value associated with a decision for a specific request into the trace.
+
+        Tracing for further inspection if needed.
+        :param request_id: A unique ID, to correlate across calls.
+        :param trace_data: A string to write to the log.
+        """
+        if self.enabled:
+            self.logger.info("[%s][%s]", request_id, trace_data)
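A short sketch of correlating two trace entries under one request id (the id and payloads are illustrative):

from presidio_analyzer.app_tracer import AppTracer

tracer = AppTracer(enabled=True)
tracer.trace("req-123", "nlp artifacts: ...")
tracer.trace("req-123", "results: [...]")
# both entries go to the "decision_process" logger configured in __init__.py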
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/batch_analyzer_engine.py
ADDED
@@ -0,0 +1,145 @@
+import logging
+from typing import List, Iterable, Dict, Union, Any, Optional, Iterator, Tuple
+
+from presidio_analyzer import DictAnalyzerResult, RecognizerResult, AnalyzerEngine
+from presidio_analyzer.nlp_engine import NlpArtifacts
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class BatchAnalyzerEngine:
+    """
+    Batch analysis of documents (tables, lists, dicts).
+
+    Wrapper class to run Presidio Analyzer Engine on multiple values,
+    either lists/iterators of strings, or dictionaries.
+
+    :param: analyzer_engine: AnalyzerEngine instance to use
+        for handling the values in those collections.
+    """
+
+    def __init__(self, analyzer_engine: Optional[AnalyzerEngine] = None):
+
+        self.analyzer_engine = analyzer_engine
+        if not analyzer_engine:
+            self.analyzer_engine = AnalyzerEngine()
+
+    def analyze_iterator(
+        self,
+        texts: Iterable[Union[str, bool, float, int]],
+        language: str,
+        **kwargs,
+    ) -> List[List[RecognizerResult]]:
+        """
+        Analyze an iterable of strings.
+
+        :param texts: A list containing strings to be analyzed.
+        :param language: Input language
+        :param kwargs: Additional parameters for the `AnalyzerEngine.analyze` method.
+        """
+
+        # validate types
+        texts = self._validate_types(texts)
+
+        # Process the texts as batch for improved performance
+        nlp_artifacts_batch: Iterator[
+            Tuple[str, NlpArtifacts]
+        ] = self.analyzer_engine.nlp_engine.process_batch(
+            texts=texts, language=language
+        )
+
+        list_results = []
+        for text, nlp_artifacts in nlp_artifacts_batch:
+            results = self.analyzer_engine.analyze(
+                text=str(text), nlp_artifacts=nlp_artifacts, language=language, **kwargs
+            )
+
+            list_results.append(results)
+
+        return list_results
+
+    def analyze_dict(
+        self,
+        input_dict: Dict[str, Union[Any, Iterable[Any]]],
+        language: str,
+        keys_to_skip: Optional[List[str]] = None,
+        **kwargs,
+    ) -> Iterator[DictAnalyzerResult]:
+        """
+        Analyze a dictionary of keys (strings) and values/iterable of values.
+
+        Non-string values are returned as is.
+
+        :param input_dict: The input dictionary for analysis
+        :param language: Input language
+        :param keys_to_skip: Keys to ignore during analysis
+        :param kwargs: Additional keyword arguments
+            for the `AnalyzerEngine.analyze` method.
+            Use this to pass arguments to the analyze method,
+            such as `ad_hoc_recognizers`, `context`, `return_decision_process`.
+            See `AnalyzerEngine.analyze` for the full list.
+        """
+
+        context = []
+        if "context" in kwargs:
+            context = kwargs["context"]
+            del kwargs["context"]
+
+        if not keys_to_skip:
+            keys_to_skip = []
+
+        for key, value in input_dict.items():
+            if not value or key in keys_to_skip:
+                yield DictAnalyzerResult(key=key, value=value, recognizer_results=[])
+                continue  # skip this key as requested
+
+            # Add the key as an additional context
+            specific_context = context[:]
+            specific_context.append(key)
+
+            if type(value) in (str, int, bool, float):
+                results: List[RecognizerResult] = self.analyzer_engine.analyze(
+                    text=str(value), language=language, context=[key], **kwargs
+                )
+            elif isinstance(value, dict):
+                # Recursively iterate nested dicts
+                new_keys_to_skip = self._get_nested_keys_to_skip(key, keys_to_skip)
+                results = self.analyze_dict(
+                    input_dict=value,
+                    language=language,
+                    context=specific_context,
+                    keys_to_skip=new_keys_to_skip,
+                    **kwargs,
+                )
+            elif isinstance(value, Iterable):
+                results: List[List[RecognizerResult]] = self.analyze_iterator(
+                    texts=value,
+                    language=language,
+                    context=specific_context,
+                    **kwargs,
+                )
+            else:
+                raise ValueError(f"type {type(value)} is unsupported.")
+
+            yield DictAnalyzerResult(key=key, value=value, recognizer_results=results)
+
+    @staticmethod
+    def _validate_types(value_iterator: Iterable[Any]) -> Iterator[Any]:
+        for val in value_iterator:
+            if val and not type(val) in (int, float, bool, str):
+                err_msg = (
+                    "Analyzer.analyze_iterator only works "
+                    "on primitive types (int, float, bool, str). "
+                    "Lists of objects are not yet supported."
+                )
+                logger.error(err_msg)
+                raise ValueError(err_msg)
+            yield val
+
+    @staticmethod
+    def _get_nested_keys_to_skip(key, keys_to_skip):
+        new_keys_to_skip = [
+            k.replace(f"{key}.", "") for k in keys_to_skip if k.startswith(key)
+        ]
+        return new_keys_to_skip
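A hedged sketch of analyze_dict on a nested payload (assumes the default engine can load its spaCy model; the payload is illustrative):

from presidio_analyzer import BatchAnalyzerEngine

batch = BatchAnalyzerEngine()
payload = {"name": "John Smith", "notes": ["call 212-555-5555"], "record_id": 7}
for item in batch.analyze_dict(payload, language="en", keys_to_skip=["record_id"]):
    print(item.key, item.recognizer_results)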
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/dict_analyzer_result.py
ADDED
@@ -0,0 +1,29 @@
+from dataclasses import dataclass
+from typing import List, Union, Iterator
+
+from presidio_analyzer import RecognizerResult
+
+
+@dataclass
+class DictAnalyzerResult:
+    """
+    Data class for holding the output of the Presidio Analyzer on dictionaries.
+
+    :param key: key in dictionary
+    :param value: value to run analysis on (either string or list of strings)
+    :param recognizer_results: Analyzer output for one value.
+        Could be either:
+        - A list of recognizer results if the input is one string
+        - A list of lists of recognizer results, if the input is a list of strings.
+        - An iterator of a DictAnalyzerResult, if the input is a dictionary.
+        In this case the recognizer_results would be the iterator
+        of the DictAnalyzerResults next level in the dictionary.
+    """
+
+    key: str
+    value: Union[str, List[str], dict]
+    recognizer_results: Union[
+        List[RecognizerResult],
+        List[List[RecognizerResult]],
+        Iterator["DictAnalyzerResult"],
+    ]
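Being a plain dataclass, it can also be constructed directly; a trivial sketch:

from presidio_analyzer import DictAnalyzerResult

result = DictAnalyzerResult(key="name", value="John Smith", recognizer_results=[])
print(result.key, result.value, result.recognizer_results)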
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/entity_recognizer.py
ADDED
@@ -0,0 +1,199 @@
+import logging
+from abc import abstractmethod
+from typing import List, Dict, Optional
+
+from presidio_analyzer import RecognizerResult
+from presidio_analyzer.nlp_engine import NlpArtifacts
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class EntityRecognizer:
+    """
+    A class representing an abstract PII entity recognizer.
+
+    EntityRecognizer is an abstract class to be inherited by
+    Recognizers which hold the logic for recognizing specific PII entities.
+
+    EntityRecognizer exposes a method called enhance_using_context which
+    can be overridden in case a custom context aware enhancement is needed
+    in a derived class of a recognizer.
+
+    :param supported_entities: the entities supported by this recognizer
+        (for example, phone number, address, etc.)
+    :param supported_language: the language supported by this recognizer.
+        The supported language code is iso6391Name
+    :param name: the name of this recognizer (optional)
+    :param version: the recognizer current version
+    :param context: a list of words which can help boost confidence score
+        when they appear in context of the matched entity
+    """
+
+    MIN_SCORE = 0
+    MAX_SCORE = 1.0
+
+    def __init__(
+        self,
+        supported_entities: List[str],
+        name: str = None,
+        supported_language: str = "en",
+        version: str = "0.0.1",
+        context: Optional[List[str]] = None,
+    ):
+
+        self.supported_entities = supported_entities
+
+        if name is None:
+            self.name = self.__class__.__name__  # assign class name as name
+        else:
+            self.name = name
+
+        self._id = f"{self.name}_{id(self)}"
+
+        self.supported_language = supported_language
+        self.version = version
+        self.is_loaded = False
+        self.context = context if context else []
+
+        self.load()
+        logger.info("Loaded recognizer: %s", self.name)
+        self.is_loaded = True
+
+    @property
+    def id(self):
+        """Return a unique identifier of this recognizer."""
+
+        return self._id
+
+    @abstractmethod
+    def load(self) -> None:
+        """
+        Initialize the recognizer assets if needed.
+
+        (e.g. machine learning models)
+        """
+
+    @abstractmethod
+    def analyze(
+        self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
+    ) -> List[RecognizerResult]:
+        """
+        Analyze text to identify entities.
+
+        :param text: The text to be analyzed
+        :param entities: The list of entities this recognizer is able to detect
+        :param nlp_artifacts: A group of attributes which are the result of
+            an NLP process over the input text.
+        :return: List of results detected by this recognizer.
+        """
+        return None
+
+    def enhance_using_context(
+        self,
+        text: str,
+        raw_recognizer_results: List[RecognizerResult],
+        other_raw_recognizer_results: List[RecognizerResult],
+        nlp_artifacts: NlpArtifacts,
+        context: Optional[List[str]] = None,
+    ) -> List[RecognizerResult]:
+        """Enhance confidence score using context of the entity.
+
+        Override this method in a derived class in case custom logic
+        is needed; otherwise the return value will be equal to
+        raw_results.
+
+        In case a result score is boosted, the derived class needs to update
+        result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]
+
+        :param text: The actual text that was analyzed
+        :param raw_recognizer_results: This recognizer's results, to be updated
+            based on recognizer specific context.
+        :param other_raw_recognizer_results: Other recognizer results matched in
+            the given text to allow related entity context enhancement
+        :param nlp_artifacts: The nlp artifacts contains elements
+            such as lemmatized tokens for better
+            accuracy of the context enhancement process
+        :param context: list of context words
+        """
+        return raw_recognizer_results
+
+    def get_supported_entities(self) -> List[str]:
+        """
+        Return the list of entities this recognizer can identify.
+
+        :return: A list of the supported entities by this recognizer
+        """
+        return self.supported_entities
+
+    def get_supported_language(self) -> str:
+        """
+        Return the language this recognizer can support.
+
+        :return: A list of the supported language by this recognizer
+        """
+        return self.supported_language
+
+    def get_version(self) -> str:
+        """
+        Return the version of this recognizer.
+
+        :return: The current version of this recognizer
+        """
+        return self.version
+
+    def to_dict(self) -> Dict:
+        """
+        Serialize self to dictionary.
+
+        :return: a dictionary
+        """
+        return_dict = {
+            "supported_entities": self.supported_entities,
+            "supported_language": self.supported_language,
+            "name": self.name,
+            "version": self.version,
+        }
+        return return_dict
+
+    @classmethod
+    def from_dict(cls, entity_recognizer_dict: Dict) -> "EntityRecognizer":
+        """
+        Create EntityRecognizer from a dict input.
+
+        :param entity_recognizer_dict: Dict containing keys and values for instantiation
+        """
+        return cls(**entity_recognizer_dict)
+
+    @staticmethod
+    def remove_duplicates(results: List[RecognizerResult]) -> List[RecognizerResult]:
+        """
+        Remove duplicate results.
+
+        Remove duplicates in case two results
+        have identical starts, ends and types.
+        :param results: List[RecognizerResult]
+        :return: List[RecognizerResult]
+        """
+        results = list(set(results))
+        results = sorted(results, key=lambda x: (-x.score, x.start, -(x.end - x.start)))
+        filtered_results = []
+
+        for result in results:
+            if result.score == 0:
+                continue
+
+            to_keep = result not in filtered_results  # equals based comparison
+            if to_keep:
+                for filtered in filtered_results:
+                    # If result is contained in one of the other results
+                    if (
+                        result.contained_in(filtered)
+                        and result.entity_type == filtered.entity_type
+                    ):
+                        to_keep = False
+                        break
+
+            if to_keep:
+                filtered_results.append(result)
+
+        return filtered_results
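A hedged sketch of subclassing the abstract class above (TitleRecognizer and its matching logic are hypothetical; assumes RecognizerResult's standard (entity_type, start, end, score) constructor from the presidio codebase):

from typing import List
from presidio_analyzer import EntityRecognizer, RecognizerResult
from presidio_analyzer.nlp_engine import NlpArtifacts

class TitleRecognizer(EntityRecognizer):
    def load(self) -> None:
        pass  # nothing to initialize for this toy recognizer

    def analyze(
        self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
    ) -> List[RecognizerResult]:
        # naive substring search, purely for illustration
        results = []
        for title in ("Mr.", "Mrs.", "Dr."):
            idx = text.find(title)
            if idx >= 0:
                results.append(RecognizerResult("TITLE", idx, idx + len(title), 0.5))
        return results

recognizer = TitleRecognizer(supported_entities=["TITLE"])
print(recognizer.analyze("Dr. Smith called", ["TITLE"], nlp_artifacts=None))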
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/local_recognizer.py
ADDED
@@ -0,0 +1,7 @@
+from abc import ABC
+
+from presidio_analyzer import EntityRecognizer
+
+
+class LocalRecognizer(ABC, EntityRecognizer):
+    """PII entity recognizer which runs on the same process as the AnalyzerEngine."""
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/__init__.py
ADDED
@@ -0,0 +1,19 @@
+"""NLP engine package. Performs text pre-processing."""
+
+from .nlp_artifacts import NlpArtifacts
+from .nlp_engine import NlpEngine
+from .spacy_nlp_engine import SpacyNlpEngine
+from .client_nlp_engine import ClientNlpEngine
+from .stanza_nlp_engine import StanzaNlpEngine
+from .transformers_nlp_engine import TransformersNlpEngine
+from .nlp_engine_provider import NlpEngineProvider
+
+__all__ = [
+    "NlpArtifacts",
+    "NlpEngine",
+    "SpacyNlpEngine",
+    "ClientNlpEngine",
+    "StanzaNlpEngine",
+    "NlpEngineProvider",
+    "TransformersNlpEngine",
+]
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/client_nlp_engine.py
ADDED
@@ -0,0 +1,108 @@
+import logging
+
+try:
+    import client
+    import spacy_client
+except ImportError:
+    client = None
+
+from typing import Optional, Dict, Iterator, Tuple, Union, List
+
+import spacy
+from spacy.language import Language
+from spacy.tokens import Doc
+
+from presidio_analyzer.nlp_engine import NlpArtifacts, NlpEngine
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class ClientNlpEngine(NlpEngine):
+    """
+    ClientNlpEngine is an abstraction layer over the nlp module.
+
+    It provides processing functionality as well as other queries
+    on tokens.
+    The ClientNlpEngine uses spaCy as its NLP module
+    """
+
+    engine_name = "spacy"
+    is_available = bool(client)  # False when the optional client package is missing
+
+    def __init__(self, models: Optional[Dict[str, str]] = None):
+        """
+        Initialize a wrapper on spaCy functionality.
+
+        :param models: Dictionary with the name of the spaCy model per language.
+        For example: models = {"en": "en_core_web_lg"}
+        """
+        if not models:
+            models = {"en": "en_core_web_lg"}
+        logger.debug(f"Loading SpaCy models: {models.values()}")
+
+        self.nlp = {
+            lang_code: spacy.load(model_name, disable=["parser"])
+            for lang_code, model_name in models.items()
+        }
+
+    def process_text(self, text: str, language: str) -> NlpArtifacts:
+        """Execute the SpaCy NLP pipeline on the given text and language."""
+        doc = self.nlp[language](text)
+        return self._doc_to_nlp_artifact(doc, language)
+
+    def process_batch(
+        self,
+        texts: Union[List[str], List[Tuple[str, object]]],
+        language: str,
+        as_tuples: bool = False,
+    ) -> Iterator[Tuple[str, NlpArtifacts]]:
+        """Execute the NLP pipeline on a batch of texts using spacy pipe."""
+        texts = (str(text) for text in texts)
+        docs = self.nlp[language].pipe(texts, as_tuples=as_tuples)
+        for doc in docs:
+            yield doc.text, self._doc_to_nlp_artifact(doc, language)
+
+    def is_stopword(self, word: str, language: str) -> bool:
+        """
+        Return true if the given word is a stop word.
+
+        (within the given language)
+        """
+        return self.nlp[language].vocab[word].is_stop
+
+    def is_punct(self, word: str, language: str) -> bool:
+        """
+        Return true if the given word is a punctuation word.
+
+        (within the given language).
+        """
+        return self.nlp[language].vocab[word].is_punct
+
+    def get_nlp(self, language: str) -> Language:
+        """
+        Return the language model loaded for a language.
+
+        :param language: Name of language
+        :return: Language model from spaCy
+        """
+        return self.nlp[language]
+
+    def _doc_to_nlp_artifact(self, doc: Doc, language: str) -> NlpArtifacts:
+        lemmas = [token.lemma_ for token in doc]
+        tokens_indices = [token.idx for token in doc]
+        entities = doc.ents
+        return NlpArtifacts(
+            entities=entities,
+            tokens=doc,
+            tokens_indices=tokens_indices,
+            lemmas=lemmas,
+            nlp_engine=self,
+            language=language,
+        )
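
A quick usage sketch for the engine above (the spaCy model name is an assumption about what is installed locally):

from presidio_analyzer.nlp_engine import ClientNlpEngine

engine = ClientNlpEngine(models={"en": "en_core_web_lg"})
artifacts = engine.process_text("John Smith lives in Seattle", language="en")

print([ent.text for ent in artifacts.entities])  # e.g. ['John Smith', 'Seattle']
print(engine.is_stopword("in", "en"))            # True
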
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/nlp_artifacts.py
ADDED
@@ -0,0 +1,74 @@
+import json
+from typing import List
+
+from spacy.tokens import Doc, Span
+
+
+class NlpArtifacts:
+    """
+    NlpArtifacts is an abstraction layer over the results of an NLP pipeline.
+
+    After processing a given text, it holds attributes such as entities,
+    tokens and lemmas which can be used by any recognizer
+    """
+
+    def __init__(
+        self,
+        entities: List[Span],
+        tokens: Doc,
+        tokens_indices: List[int],
+        lemmas: List[str],
+        nlp_engine,  # noqa ANN001
+        language: str,
+    ):
+        self.entities = entities
+        self.tokens = tokens
+        self.lemmas = lemmas
+        self.tokens_indices = tokens_indices
+        self.keywords = self.set_keywords(nlp_engine, lemmas, language)
+        self.nlp_engine = nlp_engine
+
+    @staticmethod
+    def set_keywords(
+        nlp_engine, lemmas: List[str], language: str  # noqa ANN001
+    ) -> List[str]:
+        """
+        Return keywords for text.
+
+        Extracts lemmas with certain conditions as keywords.
+        """
+        if not nlp_engine:
+            return []
+        keywords = [
+            k.lower()
+            for k in lemmas
+            if not nlp_engine.is_stopword(k, language)
+            and not nlp_engine.is_punct(k, language)
+            and k != "-PRON-"
+            and k != "be"
+        ]
+
+        # Best effort: try even further to break tokens into sub-tokens;
+        # this can reduce false negatives
+        keywords = [i.split(":") for i in keywords]
+
+        # Splitting produces a list of lists, so we flatten it
+        keywords = [item for sublist in keywords for item in sublist]
+        return keywords
+
+    def to_json(self) -> str:
+        """Convert nlp artifacts to json."""
+        return_dict = self.__dict__.copy()
+
+        # Ignore NLP engine as it's not serializable currently
+        del return_dict["nlp_engine"]
+
+        # Convert spaCy tokens and spans to strings as they are not serializable
+        if "tokens" in return_dict:
+            return_dict["tokens"] = [token.text for token in self.tokens]
+        if "entities" in return_dict:
+            return_dict["entities"] = [entity.text for entity in self.entities]
+
+        return json.dumps(return_dict)
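
A sketch of how these artifacts are typically consumed (assuming en_core_web_lg is installed; the sentence is illustrative):

from presidio_analyzer.nlp_engine import SpacyNlpEngine

engine = SpacyNlpEngine()  # defaults to en_core_web_lg
artifacts = engine.process_text("The quick brown fox", language="en")

# Keywords: lemmas minus stop words and punctuation, lower-cased.
print(artifacts.keywords)   # e.g. ['quick', 'brown', 'fox']
print(artifacts.to_json())  # tokens and entities serialized as plain strings
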
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/nlp_engine.py
ADDED
@@ -0,0 +1,42 @@
+from abc import ABC, abstractmethod
+from typing import Iterable, Iterator, Tuple
+
+from presidio_analyzer.nlp_engine import NlpArtifacts
+
+
+class NlpEngine(ABC):
+    """
+    NlpEngine is an abstraction layer over the nlp module.
+
+    It provides NLP preprocessing functionality as well as other queries
+    on tokens.
+    """
+
+    @abstractmethod
+    def process_text(self, text: str, language: str) -> NlpArtifacts:
+        """Execute the NLP pipeline on the given text and language."""
+
+    @abstractmethod
+    def process_batch(
+        self, texts: Iterable[str], language: str, **kwargs
+    ) -> Iterator[Tuple[str, NlpArtifacts]]:
+        """Execute the NLP pipeline on a batch of texts.
+
+        Returns a tuple of (text, NlpArtifacts)
+        """
+
+    @abstractmethod
+    def is_stopword(self, word: str, language: str) -> bool:
+        """
+        Return true if the given word is a stop word.
+
+        (within the given language)
+        """
+
+    @abstractmethod
+    def is_punct(self, word: str, language: str) -> bool:
+        """
+        Return true if the given word is a punctuation word.
+
+        (within the given language)
+        """
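
A concrete engine only needs these four methods. The toy subclass below is a hypothetical illustration (a whitespace tokenizer with no real NLP, not part of this package); note it passes plain lists where the real engines pass spaCy Doc/Span objects:

from presidio_analyzer.nlp_engine import NlpArtifacts, NlpEngine


class WhitespaceNlpEngine(NlpEngine):
    """Toy engine: whitespace tokens, no entities (illustrative only)."""

    def process_text(self, text: str, language: str) -> NlpArtifacts:
        tokens = text.split()
        indices, pos = [], 0
        for tok in tokens:  # recover each token's character offset
            pos = text.index(tok, pos)
            indices.append(pos)
            pos += len(tok)
        return NlpArtifacts(
            entities=[],
            tokens=tokens,
            tokens_indices=indices,
            lemmas=tokens,
            nlp_engine=None,  # no engine, so keywords stay empty
            language=language,
        )

    def process_batch(self, texts, language, **kwargs):
        for text in texts:
            yield text, self.process_text(text, language)

    def is_stopword(self, word: str, language: str) -> bool:
        return False

    def is_punct(self, word: str, language: str) -> bool:
        return False
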
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/spacy_nlp_engine.py
ADDED
@@ -0,0 +1,96 @@
+import logging
+from typing import Optional, Dict, Iterator, Tuple, Union, List
+
+import spacy
+from spacy.language import Language
+from spacy.tokens import Doc
+
+from presidio_analyzer.nlp_engine import NlpArtifacts, NlpEngine
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class SpacyNlpEngine(NlpEngine):
+    """
+    SpacyNlpEngine is an abstraction layer over the nlp module.
+
+    It provides processing functionality as well as other queries
+    on tokens.
+    The SpacyNlpEngine uses SpaCy as its NLP module
+    """
+
+    engine_name = "spacy"
+    is_available = bool(spacy)
+
+    def __init__(self, models: Optional[Dict[str, str]] = None):
+        """
+        Initialize a wrapper on spaCy functionality.
+
+        :param models: Dictionary with the name of the spaCy model per language.
+        For example: models = {"en": "en_core_web_lg"}
+        """
+        if not models:
+            models = {"en": "en_core_web_lg"}
+        logger.debug(f"Loading SpaCy models: {models.values()}")
+
+        self.nlp = {
+            lang_code: spacy.load(model_name, disable=["parser"])
+            for lang_code, model_name in models.items()
+        }
+
+    def process_text(self, text: str, language: str) -> NlpArtifacts:
+        """Execute the SpaCy NLP pipeline on the given text and language."""
+        doc = self.nlp[language](text)
+        return self._doc_to_nlp_artifact(doc, language)
+
+    def process_batch(
+        self,
+        texts: Union[List[str], List[Tuple[str, object]]],
+        language: str,
+        as_tuples: bool = False,
+    ) -> Iterator[Tuple[str, NlpArtifacts]]:
+        """Execute the NLP pipeline on a batch of texts using spacy pipe."""
+        texts = (str(text) for text in texts)
+        docs = self.nlp[language].pipe(texts, as_tuples=as_tuples)
+        for doc in docs:
+            yield doc.text, self._doc_to_nlp_artifact(doc, language)
+
+    def is_stopword(self, word: str, language: str) -> bool:
+        """
+        Return true if the given word is a stop word.
+
+        (within the given language)
+        """
+        return self.nlp[language].vocab[word].is_stop
+
+    def is_punct(self, word: str, language: str) -> bool:
+        """
+        Return true if the given word is a punctuation word.
+
+        (within the given language).
+        """
+        return self.nlp[language].vocab[word].is_punct
+
+    def get_nlp(self, language: str) -> Language:
+        """
+        Return the language model loaded for a language.
+
+        :param language: Name of language
+        :return: Language model from spaCy
+        """
+        return self.nlp[language]
+
+    def _doc_to_nlp_artifact(self, doc: Doc, language: str) -> NlpArtifacts:
+        lemmas = [token.lemma_ for token in doc]
+        tokens_indices = [token.idx for token in doc]
+        entities = doc.ents
+        return NlpArtifacts(
+            entities=entities,
+            tokens=doc,
+            tokens_indices=tokens_indices,
+            lemmas=lemmas,
+            nlp_engine=self,
+            language=language,
+        )
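
The batch path streams texts through spaCy's pipe(); a short sketch (model name assumed installed, texts illustrative):

from presidio_analyzer.nlp_engine import SpacyNlpEngine

engine = SpacyNlpEngine(models={"en": "en_core_web_lg"})
texts = ["Alice moved to Paris", "Call me at 555 0100"]

# Each yielded item is (text, NlpArtifacts)
for text, artifacts in engine.process_batch(texts, language="en"):
    print(text, "->", [ent.text for ent in artifacts.entities])
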
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/nlp_engine/stanza_nlp_engine.py
ADDED
@@ -0,0 +1,39 @@
+import logging
+
+try:
+    import stanza
+    import spacy_stanza
+except ImportError:
+    stanza = None
+
+from presidio_analyzer.nlp_engine import SpacyNlpEngine
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class StanzaNlpEngine(SpacyNlpEngine):
+    """
+    StanzaNlpEngine is an abstraction layer over the nlp module.
+
+    It provides processing functionality as well as other queries
+    on tokens.
+    The StanzaNlpEngine uses spacy-stanza and stanza as its NLP module
+
+    :param models: Dictionary with the name of the stanza model per language.
+                   For example: models = {"en": "en"}
+    """
+
+    engine_name = "stanza"
+    is_available = bool(stanza)
+
+    def __init__(self, models=None):  # noqa ANN201
+        if not models:
+            models = {"en": "en"}
+        logger.debug(f"Loading Stanza models: {models.values()}")
+
+        self.nlp = {
+            lang_code: spacy_stanza.load_pipeline(
+                model_name,
+                processors="tokenize,pos,lemma,ner",
+            )
+            for lang_code, model_name in models.items()
+        }
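
Usage mirrors the spaCy engine; a sketch assuming stanza and spacy_stanza are installed and the English model has been downloaded once:

import stanza

from presidio_analyzer.nlp_engine import StanzaNlpEngine

stanza.download("en")  # one-time model download
engine = StanzaNlpEngine(models={"en": "en"})
artifacts = engine.process_text("Angela Merkel visited Madrid", language="en")
print([ent.text for ent in artifacts.entities])
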
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/pattern.py
ADDED
@@ -0,0 +1,45 @@
+import json
+from typing import Dict
+
+
+class Pattern:
+    """
+    A class that represents a regex pattern.
+
+    :param name: the name of the pattern
+    :param regex: the regex pattern to detect
+    :param score: the pattern's strength (value varies between 0 and 1)
+    """
+
+    def __init__(self, name: str, regex: str, score: float):
+        self.name = name
+        self.regex = regex
+        self.score = score
+
+    def to_dict(self) -> Dict:
+        """
+        Turn this instance into a dictionary.
+
+        :return: a dictionary
+        """
+        return_dict = {"name": self.name, "score": self.score, "regex": self.regex}
+        return return_dict
+
+    @classmethod
+    def from_dict(cls, pattern_dict: Dict) -> "Pattern":
+        """
+        Load an instance from a dictionary.
+
+        :param pattern_dict: a dictionary holding the pattern's parameters
+        :return: a Pattern instance
+        """
+        return cls(**pattern_dict)
+
+    def __repr__(self):
+        """Return string representation of instance."""
+        return json.dumps(self.to_dict())
+
+    def __str__(self):
+        """Return string representation of instance."""
+        return json.dumps(self.to_dict())
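
A round-trip sketch, since to_dict()/from_dict() are what recognizer serialization relies on (the ZIP-code regex is illustrative):

from presidio_analyzer import Pattern

zip_pattern = Pattern(name="zip_code", regex=r"\b\d{5}(?:-\d{4})?\b", score=0.5)

# Serialize and restore; __repr__ renders the same dictionary as JSON.
restored = Pattern.from_dict(zip_pattern.to_dict())
print(restored)  # {"name": "zip_code", "score": 0.5, "regex": "..."}
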
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/pattern_recognizer.py
ADDED
@@ -0,0 +1,253 @@
+import datetime
+import logging
+from typing import List, Optional, Dict
+
+import regex as re
+
+from presidio_analyzer import (
+    LocalRecognizer,
+    Pattern,
+    RecognizerResult,
+    EntityRecognizer,
+    AnalysisExplanation,
+)
+from presidio_analyzer.nlp_engine import NlpArtifacts
+
+logger = logging.getLogger("presidio_analyzer")
+
+
+class PatternRecognizer(LocalRecognizer):
+    """
+    PII entity recognizer using regular expressions or deny-lists.
+
+    :param patterns: A list of patterns to detect
+    :param deny_list: A list of words to detect,
+    in case our recognizer uses a predefined list of words (deny list)
+    :param context: list of context words
+    :param deny_list_score: confidence score for a term
+    identified using a deny-list
+    """
+
+    def __init__(
+        self,
+        supported_entity: str,
+        name: str = None,
+        supported_language: str = "en",
+        patterns: List[Pattern] = None,
+        deny_list: List[str] = None,
+        context: List[str] = None,
+        deny_list_score: float = 1.0,
+        version: str = "0.0.1",
+    ):
+        if not supported_entity:
+            raise ValueError("Pattern recognizer should be initialized with entity")
+
+        if not patterns and not deny_list:
+            raise ValueError(
+                "Pattern recognizer should be initialized with patterns"
+                " or with deny list"
+            )
+
+        super().__init__(
+            supported_entities=[supported_entity],
+            supported_language=supported_language,
+            name=name,
+            version=version,
+        )
+        if patterns is None:
+            self.patterns = []
+        else:
+            self.patterns = patterns
+        self.context = context
+        self.deny_list_score = deny_list_score
+
+        if deny_list:
+            deny_list_pattern = self._deny_list_to_regex(deny_list)
+            self.patterns.append(deny_list_pattern)
+            self.deny_list = deny_list
+        else:
+            self.deny_list = []
+
+    def load(self):  # noqa D102
+        pass
+
+    def analyze(
+        self,
+        text: str,
+        entities: List[str],
+        nlp_artifacts: NlpArtifacts = None,
+        regex_flags: int = None,
+    ) -> List[RecognizerResult]:
+        """
+        Analyze text to detect PII using regular expressions or deny-lists.
+
+        :param text: Text to be analyzed
+        :param entities: Entities this recognizer can detect
+        :param nlp_artifacts: Output values from the NLP engine
+        :param regex_flags: Regex flags to use when matching patterns
+        :return: A list of RecognizerResult
+        """
+        results = []
+
+        if self.patterns:
+            pattern_result = self.__analyze_patterns(text, regex_flags)
+            results.extend(pattern_result)
+
+        return results
+
+    def _deny_list_to_regex(self, deny_list: List[str]) -> Pattern:
+        """
+        Convert a list of words to a matching regex.
+
+        To be analyzed by the analyze method as any other regex patterns.
+
+        :param deny_list: the list of words to detect
+        :return: the regex of the words for detection
+        """
+        # Escape deny list elements as preparation for regex
+        escaped_deny_list = [re.escape(element) for element in deny_list]
+        regex = r"(?:^|(?<=\W))(" + "|".join(escaped_deny_list) + r")(?:(?=\W)|$)"
+        return Pattern(name="deny_list", regex=regex, score=self.deny_list_score)
+
+    def validate_result(self, pattern_text: str) -> Optional[bool]:
+        """
+        Validate the pattern logic, e.g., by running a checksum on a detected pattern.
+
+        :param pattern_text: the text to be validated.
+        Only the part of the text that was detected by the regex engine
+        :return: A bool indicating whether the validation was successful.
+        """
+        return None
+
+    def invalidate_result(self, pattern_text: str) -> Optional[bool]:
+        """
+        Logic to check for result invalidation by running pruning logic.
+
+        For example, each SSN number group should not consist of all the same digits.
+
+        :param pattern_text: the text to be validated.
+        Only the part of the text that was detected by the regex engine
+        :return: A bool indicating whether the result is invalidated
+        """
+        return None
+
+    @staticmethod
+    def build_regex_explanation(
+        recognizer_name: str,
+        pattern_name: str,
+        pattern: str,
+        original_score: float,
+        validation_result: bool,
+    ) -> AnalysisExplanation:
+        """
+        Construct an explanation for why this entity was detected.
+
+        :param recognizer_name: Name of recognizer detecting the entity
+        :param pattern_name: Regex pattern name which detected the entity
+        :param pattern: Regex pattern logic
+        :param original_score: Score given by the recognizer
+        :param validation_result: Whether validation was used and its result
+        :return: Analysis explanation
+        """
+        explanation = AnalysisExplanation(
+            recognizer=recognizer_name,
+            original_score=original_score,
+            pattern_name=pattern_name,
+            pattern=pattern,
+            validation_result=validation_result,
+        )
+        return explanation
+
+    def __analyze_patterns(
+        self, text: str, flags: int = None
+    ) -> List[RecognizerResult]:
+        """
+        Evaluate all patterns in the provided text.
+
+        Including words in the provided deny-list
+
+        :param text: text to analyze
+        :param flags: regex flags
+        :return: A list of RecognizerResult
+        """
+        flags = flags if flags else re.DOTALL | re.MULTILINE
+        results = []
+        for pattern in self.patterns:
+            match_start_time = datetime.datetime.now()
+            matches = re.finditer(pattern.regex, text, flags=flags)
+            match_time = datetime.datetime.now() - match_start_time
+            logger.debug(
+                "--- match_time[%s]: %s.%s seconds",
+                pattern.name,
+                match_time.seconds,
+                match_time.microseconds,
+            )
+
+            for match in matches:
+                start, end = match.span()
+                current_match = text[start:end]
+
+                # Skip empty results
+                if current_match == "":
+                    continue
+
+                score = pattern.score
+
+                validation_result = self.validate_result(current_match)
+                description = self.build_regex_explanation(
+                    self.name, pattern.name, pattern.regex, score, validation_result
+                )
+                pattern_result = RecognizerResult(
+                    entity_type=self.supported_entities[0],
+                    start=start,
+                    end=end,
+                    score=score,
+                    analysis_explanation=description,
+                    recognition_metadata={
+                        RecognizerResult.RECOGNIZER_NAME_KEY: self.name,
+                        RecognizerResult.RECOGNIZER_IDENTIFIER_KEY: self.id,
+                    },
+                )
+
+                if validation_result is not None:
+                    if validation_result:
+                        pattern_result.score = EntityRecognizer.MAX_SCORE
+                    else:
+                        pattern_result.score = EntityRecognizer.MIN_SCORE
+
+                invalidation_result = self.invalidate_result(current_match)
+                if invalidation_result is not None and invalidation_result:
+                    pattern_result.score = EntityRecognizer.MIN_SCORE
+
+                if pattern_result.score > EntityRecognizer.MIN_SCORE:
+                    results.append(pattern_result)
+
+                # Update analysis explanation score following validation or invalidation
+                description.score = pattern_result.score
+
+        results = EntityRecognizer.remove_duplicates(results)
+        return results
+
+    def to_dict(self) -> Dict:
+        """Serialize instance into a dictionary."""
+        return_dict = super().to_dict()
+
+        return_dict["patterns"] = [pat.to_dict() for pat in self.patterns]
+        return_dict["deny_list"] = self.deny_list
+        return_dict["context"] = self.context
+        return_dict["supported_entity"] = return_dict["supported_entities"][0]
+        del return_dict["supported_entities"]
+
+        return return_dict
+
+    @classmethod
+    def from_dict(cls, entity_recognizer_dict: Dict) -> "PatternRecognizer":
+        """Create instance from a serialized dict."""
+        patterns = entity_recognizer_dict.get("patterns")
+        if patterns:
+            patterns_list = [Pattern.from_dict(pat) for pat in patterns]
+            entity_recognizer_dict["patterns"] = patterns_list
+
+        return cls(**entity_recognizer_dict)
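
Both construction paths in a short sketch (entity names, deny-list words and regexes are illustrative):

from presidio_analyzer import Pattern, PatternRecognizer

# Deny-list path: exact words, escaped and wrapped in word boundaries.
titles = PatternRecognizer(supported_entity="TITLE", deny_list=["Mr.", "Mrs.", "Dr."])
print(titles.analyze("Dr. Smith will see you", entities=["TITLE"]))

# Regex path: one Pattern object carrying its own score.
zip_rec = PatternRecognizer(
    supported_entity="US_ZIP_CODE",
    patterns=[Pattern(name="zip", regex=r"\b\d{5}\b", score=0.4)],
)
print(zip_rec.analyze("Mail it to 98052", entities=["US_ZIP_CODE"]))
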
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/recognizer_registry/__init__.py
ADDED
@@ -0,0 +1,4 @@
+"""Recognizer registry init."""
+from .recognizer_registry import RecognizerRegistry
+
+__all__ = ["RecognizerRegistry"]
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/recognizer_result.py
ADDED
@@ -0,0 +1,189 @@
+import logging
+from typing import Dict
+
+from presidio_analyzer import AnalysisExplanation
+
+
+class RecognizerResult:
+    """
+    RecognizerResult represents the findings of a detected entity.
+
+    Result of a recognizer analyzing the text.
+
+    :param entity_type: the type of the entity
+    :param start: the start location of the detected entity
+    :param end: the end location of the detected entity
+    :param score: the score of the detection
+    :param analysis_explanation: contains the explanation of why this
+    entity was identified
+    :param recognition_metadata: a dictionary of metadata to be used in
+    recognizer specific cases, for example specific recognized context words
+    and recognizer name
+    """
+
+    # Keys for recognizer metadata
+    RECOGNIZER_NAME_KEY = "recognizer_name"
+    RECOGNIZER_IDENTIFIER_KEY = "recognizer_identifier"
+
+    # Key of a flag inside the recognition_metadata dictionary
+    # which is set to true if the result was enhanced by context
+    IS_SCORE_ENHANCED_BY_CONTEXT_KEY = "is_score_enhanced_by_context"
+
+    logger = logging.getLogger("presidio_analyzer")
+
+    def __init__(
+        self,
+        entity_type: str,
+        start: int,
+        end: int,
+        score: float,
+        analysis_explanation: AnalysisExplanation = None,
+        recognition_metadata: Dict = None,
+    ):
+        self.entity_type = entity_type
+        self.start = start
+        self.end = end
+        self.score = score
+        self.analysis_explanation = analysis_explanation
+
+        if not recognition_metadata:
+            self.logger.debug(
+                "recognition_metadata should be passed, "
+                "containing a recognizer_name value"
+            )
+
+        self.recognition_metadata = recognition_metadata
+
+    def append_analysis_explanation_text(self, text: str) -> None:
+        """Add text to the analysis explanation."""
+        if self.analysis_explanation:
+            self.analysis_explanation.append_textual_explanation_line(text)
+
+    def to_dict(self) -> Dict:
+        """
+        Serialize self to dictionary.
+
+        :return: a dictionary
+        """
+        return self.__dict__
+
+    @classmethod
+    def from_json(cls, data: Dict) -> "RecognizerResult":
+        """
+        Create RecognizerResult from json.
+
+        :param data: e.g. {
+            "start": 24,
+            "end": 32,
+            "score": 0.8,
+            "entity_type": "NAME"
+        }
+        :return: RecognizerResult
+        """
+        score = data.get("score")
+        entity_type = data.get("entity_type")
+        start = data.get("start")
+        end = data.get("end")
+        return cls(entity_type, start, end, score)
+
+    def __repr__(self) -> str:
+        """Return a string representation of the instance."""
+        return self.__str__()
+
+    def intersects(self, other: "RecognizerResult") -> int:
+        """
+        Check if self intersects with a different RecognizerResult.
+
+        :return: If intersecting, returns the number of
+        intersecting characters.
+        If not, returns 0
+        """
+        # if they do not overlap the intersection is 0
+        if self.end < other.start or other.end < self.start:
+            return 0
+
+        # otherwise the intersection is min(end) - max(start)
+        return min(self.end, other.end) - max(self.start, other.start)
+
+    def contained_in(self, other: "RecognizerResult") -> bool:
+        """
+        Check if self is contained in a different RecognizerResult.
+
+        :return: true if contained
+        """
+        return self.start >= other.start and self.end <= other.end
+
+    def contains(self, other: "RecognizerResult") -> bool:
+        """
+        Check if one result is contained or equal to another result.
+
+        :param other: another RecognizerResult
+        :return: bool
+        """
+        return self.start <= other.start and self.end >= other.end
+
+    def equal_indices(self, other: "RecognizerResult") -> bool:
+        """
+        Check if the indices are equal between two results.
+
+        :param other: another RecognizerResult
+        :return: bool
+        """
+        return self.start == other.start and self.end == other.end
+
+    def __gt__(self, other: "RecognizerResult") -> bool:
+        """
+        Check if one result is greater by using the results' indices in the text.
+
+        :param other: another RecognizerResult
+        :return: bool
+        """
+        if self.start == other.start:
+            return self.end > other.end
+        return self.start > other.start
+
+    def __eq__(self, other: "RecognizerResult") -> bool:
+        """
+        Check if two results are equal by using all class fields.
+
+        :param other: another RecognizerResult
+        :return: bool
+        """
+        equal_type = self.entity_type == other.entity_type
+        equal_score = self.score == other.score
+        return self.equal_indices(other) and equal_type and equal_score
+
+    def __hash__(self):
+        """
+        Hash the result data by using all class fields.
+
+        :return: int
+        """
+        return hash(
+            f"{str(self.start)} {str(self.end)} {str(self.score)} {self.entity_type}"
+        )
+
+    def __str__(self) -> str:
+        """Return a string representation of the instance."""
+        return (
+            f"type: {self.entity_type}, "
+            f"start: {self.start}, "
+            f"end: {self.end}, "
+            f"score: {self.score}"
+        )
+
+    def has_conflict(self, other: "RecognizerResult") -> bool:
+        """
+        Check if two recognizer results are conflicted or not.
+
+        A result has a conflict if:
+        1. Its indices are the same as the other's and its score is lower.
+        2. Its indices are contained in the other's.
+
+        :param other: RecognizerResult
+        :return: bool
+        """
+        if self.equal_indices(other):
+            return self.score <= other.score
+        return other.contains(self)
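
The comparison helpers compose as follows (offsets and scores are illustrative):

from presidio_analyzer import RecognizerResult

a = RecognizerResult(entity_type="PERSON", start=0, end=10, score=0.85)
b = RecognizerResult(entity_type="PERSON", start=5, end=15, score=0.6)

print(a.intersects(b))    # 5 overlapping characters
print(b.contained_in(a))  # False: b extends past a's end
print(a.has_conflict(b))  # False: indices differ and a is not inside b
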
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/presidio_analyzer/presidio_analyzer/remote_recognizer.py
ADDED
@@ -0,0 +1,57 @@
+from abc import ABC, abstractmethod
+from typing import List, Optional
+
+from presidio_analyzer import EntityRecognizer
+from presidio_analyzer.nlp_engine import NlpArtifacts
+
+
+class RemoteRecognizer(ABC, EntityRecognizer):
+    """
+    A configuration for a recognizer that runs on a different process / remote machine.
+
+    :param supported_entities: A list of entities this recognizer can identify
+    :param name: name of recognizer
+    :param supported_language: The language this recognizer can detect entities in
+    :param version: Version of this recognizer
+    """
+
+    def __init__(
+        self,
+        supported_entities: List[str],
+        name: Optional[str],
+        supported_language: str,
+        version: str,
+        context: Optional[List[str]] = None,
+    ):
+        super().__init__(
+            supported_entities=supported_entities,
+            name=name,
+            supported_language=supported_language,
+            version=version,
+            context=context,
+        )
+
+    @abstractmethod
+    def load(self):  # noqa D102
+        pass
+
+    @abstractmethod
+    def analyze(
+        self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
+    ):  # noqa ANN201
+        """
+        Call an external service for PII detection.
+
+        :param text: text to be analyzed
+        :param entities: Entities that should be looked for
+        :param nlp_artifacts: Additional metadata from the NLP engine
+        :return: List of identified PII entities
+        """
+        # 1. Call the external service.
+        # 2. Translate results into List[RecognizerResult]
+        pass
+
+    @abstractmethod
+    def get_supported_entities(self) -> List[str]:  # noqa D102
+        pass
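
A hypothetical concrete subclass, sketched to show the two steps the analyze docstring names; the HTTP endpoint, response shape and requests usage are all invented for illustration and not part of this package:

from typing import List

import requests  # assumed available; any HTTP client works

from presidio_analyzer import RecognizerResult, RemoteRecognizer
from presidio_analyzer.nlp_engine import NlpArtifacts


class HttpPiiRecognizer(RemoteRecognizer):
    """Hypothetical recognizer delegating detection to a REST service."""

    def __init__(self, url: str):
        self.url = url  # e.g. "http://localhost:8080/detect" (illustrative)
        super().__init__(
            supported_entities=["PERSON"],
            name="HttpPiiRecognizer",
            supported_language="en",
            version="0.0.1",
        )

    def load(self):
        pass

    def get_supported_entities(self) -> List[str]:
        return self.supported_entities

    def analyze(self, text, entities, nlp_artifacts: NlpArtifacts = None):
        # 1. Call the external service; 2. translate to RecognizerResult.
        response = requests.post(self.url, json={"text": text}).json()
        return [
            RecognizerResult(r["entity_type"], r["start"], r["end"], r["score"])
            for r in response  # assumed response: a list of finding dicts
        ]
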
presidio_analyzer/presidio_analyzer/Infosys_presidio_analyzer/setup.py
ADDED
@@ -0,0 +1 @@
+import setuptools
+
+setuptools.setup(
+    name='presidio_analyzer',
+    version='4.1.0',
+    author='Amit Hegde',
+    author_email='amitumamaheshwar.h@infosys.com',
+    description='Infosys Intelligent Assistant',
+    long_description='Infosys Intelligent Assistant',
+    classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent'],
+    package_dir={'': 'presidio_analyzer'},
+    packages=setuptools.find_packages(where='presidio_analyzer'),
+    python_requires='>=3.6',
+)
presidio_analyzer/presidio_analyzer/Package_to_wheel.txt
ADDED
@@ -0,0 +1,5 @@
+1. Make the necessary changes in the build_config file, such as the package name and version.
+
+2. pip install pyc_wheel build
+
+3. python create_wheel_file.py  --> creates the wheel file