Commit f0f4f2b committed by ooggss · 1 Parent(s): 20b61db
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. Dataset_Construction/__pycache__/generation.cpython-312.pyc +0 -0
  3. Dataset_Construction/__pycache__/generation.cpython-38.pyc +0 -0
  4. Dataset_Construction/extract_function.py +174 -0
  5. Dataset_Construction/generation.py +4 -0
  6. Dataset_Construction/match_function_throughBm25.py +104 -0
  7. Dataset_Construction/match_function_throughLLM.py +70 -0
  8. Dataset_Construction/projects/charset-normalizer/python/__init__.py +46 -0
  9. Dataset_Construction/projects/charset-normalizer/python/__main__.py +4 -0
  10. Dataset_Construction/projects/charset-normalizer/python/api.py +626 -0
  11. Dataset_Construction/projects/charset-normalizer/python/cd.py +395 -0
  12. Dataset_Construction/projects/charset-normalizer/python/cli/__init__.py +6 -0
  13. Dataset_Construction/projects/charset-normalizer/python/cli/__main__.py +296 -0
  14. Dataset_Construction/projects/charset-normalizer/python/constant.py +1995 -0
  15. Dataset_Construction/projects/charset-normalizer/python/legacy.py +65 -0
  16. Dataset_Construction/projects/charset-normalizer/python/md.py +615 -0
  17. Dataset_Construction/projects/charset-normalizer/python/models.py +338 -0
  18. Dataset_Construction/projects/charset-normalizer/python/py.typed +0 -0
  19. Dataset_Construction/projects/charset-normalizer/python/utils.py +421 -0
  20. Dataset_Construction/projects/charset-normalizer/python/version.py +6 -0
  21. Dataset_Construction/projects/charset-normalizer/rust/CODE_OF_CONDUCT.md +76 -0
  22. Dataset_Construction/projects/charset-normalizer/rust/CONTRIBUTING.md +73 -0
  23. Dataset_Construction/projects/charset-normalizer/rust/Cargo.lock +1795 -0
  24. Dataset_Construction/projects/charset-normalizer/rust/Cargo.toml +70 -0
  25. Dataset_Construction/projects/charset-normalizer/rust/LICENSE +22 -0
  26. Dataset_Construction/projects/charset-normalizer/rust/README.md +199 -0
  27. Dataset_Construction/projects/charset-normalizer/rust/benches/large_datasets.rs +31 -0
  28. Dataset_Construction/projects/charset-normalizer/rust/benches/large_payload.rs +18 -0
  29. Dataset_Construction/projects/charset-normalizer/rust/rust-toolchain.toml +2 -0
  30. Dataset_Construction/projects/charset-normalizer/rust/src/assets.rs +66 -0
  31. Dataset_Construction/projects/charset-normalizer/rust/src/cd.rs +252 -0
  32. Dataset_Construction/projects/charset-normalizer/rust/src/consts.rs +752 -0
  33. Dataset_Construction/projects/charset-normalizer/rust/src/entity.rs +518 -0
  34. Dataset_Construction/projects/charset-normalizer/rust/src/lib.rs +597 -0
  35. Dataset_Construction/projects/charset-normalizer/rust/src/md.rs +82 -0
  36. Dataset_Construction/projects/charset-normalizer/rust/src/md/plugins.rs +448 -0
  37. Dataset_Construction/projects/charset-normalizer/rust/src/md/structs.rs +197 -0
  38. Dataset_Construction/projects/charset-normalizer/rust/src/normalizer.rs +171 -0
  39. Dataset_Construction/projects/charset-normalizer/rust/src/performance.rs +219 -0
  40. Dataset_Construction/projects/charset-normalizer/rust/src/tests/cd.rs +195 -0
  41. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.gif +3 -0
  42. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.jpg +3 -0
  43. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.mp4 +3 -0
  44. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.png +3 -0
  45. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.webp +3 -0
  46. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.xlsx +0 -0
  47. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-2.png +3 -0
  48. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-3.png +3 -0
  49. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/ascii/CHANGELOG.md +200 -0
  50. Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/ascii/_chromium_iso-8859-1_with_no_encoding_specified.html +10 -0
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
+ *.csv filter=lfs diff=lfs merge=lfs -text
Dataset_Construction/__pycache__/generation.cpython-312.pyc ADDED
Binary file (1.74 kB).
 
Dataset_Construction/__pycache__/generation.cpython-38.pyc ADDED
Binary file (1.11 kB).
 
Dataset_Construction/extract_function.py ADDED
@@ -0,0 +1,174 @@
+ import re
+ import os
+ import sys
+
+
+ total_functions = set()
+
+ def extract_functions_from_code(code, pattern):
+
+     function_head_pattern = re.compile(pattern)
+
+     lines = code.split('\n')
+     functions = []
+     brace_count = 0
+     function_code = []
+     inside_function = False
+     key = False
+     for i, line in enumerate(lines):
+         if not inside_function and function_head_pattern.search(line):
+             inside_function = True
+
+         if inside_function:
+             function_code.append(line)
+             if not line.lstrip(" ").startswith("//"):
+                 brace_count += line.count('{')
+                 brace_count -= line.count('}')
+             if brace_count == 0:
+                 if line.strip().endswith('}'):
+                     inside_function = False
+                     functions.append('\n'.join(function_code))
+                     function_code = []
+                 elif line.strip().endswith(';'):
+                     inside_function = False
+                     function_code = []
+     return functions
+
+ def extract_functions_from_code_py(code):
+     lines = code.split('\n')
+     functions = []
+     function_code = []
+     inside_function = False
+
+     for line in lines:
+         if not inside_function and line.lstrip().startswith("def "):
+             # Record the indentation of the function definition line
+             pre_cnt = len(line) - len(line.lstrip())
+             function_code.append(line)
+             inside_function = True
+             # Skip the def line itself
+             continue
+
+         if inside_function:
+             # A blank line, or a line indented deeper than the def line, is still inside the function
+             if len(line) == 0 or len(line) - len(line.lstrip()) >= pre_cnt + 4:
+                 function_code.append(line)
+             else:
+                 functions.append('\n'.join(function_code))
+                 function_code = []
+                 # The current line may itself be the next function's definition; handle it so that function is not skipped
+                 if line.lstrip().startswith("def "):
+                     pre_cnt = len(line) - len(line.lstrip())
+                     function_code.append(line)
+                 else:
+                     inside_function = False
+
+     # Handle a function defined at the very end of the file
+     if function_code:
+         functions.append('\n'.join(function_code))
+
+     return functions
+
+ def extract_functions_from_code_rb(code):
+     lines = code.split('\n')
+     functions = []
+     function_code = []
+     inside_function = False
+
+     for line in lines:
+         if not inside_function and line.lstrip().startswith("def "):
+             # Record the indentation of the function definition line
+             pre_cnt = len(line) - len(line.lstrip())
+             function_code.append(line)
+             inside_function = True
+             # Skip the def line itself
+             continue
+
+         if inside_function:
+             if len(line) - len(line.lstrip()) == pre_cnt and line.lstrip().startswith("end"):
+                 inside_function = False
+                 functions.append('\n'.join(function_code))
+                 function_code = []
+             else:
+                 function_code.append(line)
+
+     return functions
+
+ def save_functions_to_files(functions, output_dir, output_file_name):
+     if not os.path.exists(output_dir):
+         os.makedirs(output_dir)
+     try:
+         for i, func in enumerate(functions):
+             # influxdb-1.8\client\influxdb_test.go -> influxdb-1.8__client__influxdb_test
+             output_file = os.path.splitext(output_file_name)
+             output_file = output_file[0].replace("/", "__") + "__" + output_file[1]
+             file_path = os.path.join(output_dir, f'{output_file}__function__{i + 1}.txt')
+             with open(file_path, 'w', encoding='utf-8') as file:
+                 file.write(f"<path>\n{output_file_name}\n</path>\n")
+                 file.write(f"<function>\n{func}\n</function>")
+     except Exception as e:
+         print(e)
+
+ def process_file(input_file, lang, output_dir, pattern):
+
+     with open(input_file, 'r', encoding='utf-8', errors='ignore') as file:
+         code = file.read()
+
+     if lang == "py":
+         functions = extract_functions_from_code_py(code)
+     elif lang == "rb":
+         functions = extract_functions_from_code_rb(code)
+     else:
+         functions = extract_functions_from_code(code, pattern)
+
+     save_functions_to_files(functions, output_dir, input_file)
+
+ def main():
+     project_dir = "projects"
+     target_project = sys.argv[1]
+
+     patterns = {
+         'cpp': r'^\s*[\w\s\*\[\]\<\>\:]+\s+[\w\s\*\[\]\<\>\:]+\s*\(',
+         'cxx': r'^\s*[\w\s\*\[\]\<\>\:]+\s+[\w\s\*\[\]\<\>\:]+\s*\(',
+         'h': r'^\s*[\w\s\*\[\]\<\>\:]+\s+[\w\s\*\[\]\<\>\:]+\s*\(',
+         'java': r'^\s*(public|protected|private|static|final|synchronized|native|abstract|strictfp|default)?\s*(public|protected|private|static|final|synchronized|native|abstract|strictfp|default)?\s*[\w\<\>\[\] ]+\s+[\w\<\>\[\]]+\s*\(',
+         'rs': r'^\s*(unsafe)?\s*(pub(\(crate\))?)?\s*(async)?\s*fn\s',
+         'c': r'^\s*[\w\s\*\[\]]*\s*\w+\s*\(',
+         'py': r''
+     }
+     lang_to_fileType = {
+         'cpp': ['cpp', 'cxx', 'h'],
+         'c': ['c', 'h'],
+         'java': ['java'],
+         'rust': ['rs'],
+         'python': ['py']
+     }
+
+     projects = os.listdir(project_dir)
+     for project in projects:
+         if project != target_project:
+             continue
+         project_pair_path = os.path.join(project_dir, project)
+         langs = os.listdir(project_pair_path)
+         for lang in langs:
+             root_dir = os.path.join(project_pair_path, lang)
+             # Walk the whole project tree
+             for current_path, dirs, files in os.walk(root_dir):
+                 dirs[:] = [d for d in dirs if not d.startswith('.')]
+
+                 for file in files:
+                     try:
+                         file_lang = file.split('.')[-1]
+                     except:
+                         continue
+                     if file_lang in lang_to_fileType[lang]:
+                         file_path = os.path.join(current_path, file)
+                         if "test" in file_path or "Test" in file_path:
+                             continue
+                         process_file(file_path, file_lang, root_dir.replace("projects", "functions"), patterns[file_lang])
+
+
+ if __name__ == '__main__':
+     main()
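As a quick sanity check of the Python extractor above, here is a minimal sketch; the direct import and the sample snippet are illustrative assumptions (it assumes the script is run from the Dataset_Construction directory so that extract_function.py is importable), not part of the pipeline itself.

```python
# Illustration only: feed a small made-up snippet through the Python extractor.
from extract_function import extract_functions_from_code_py

sample = '''
def add(a, b):
    return a + b

def scale(values, factor=2):
    return [v * factor for v in values]
'''

# Expect two extracted function bodies, printed one after the other.
for func in extract_functions_from_code_py(sample):
    print(func)
    print("-" * 20)
```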
Dataset_Construction/generation.py ADDED
@@ -0,0 +1,4 @@
+
+ def generation(message):
+     # todo : complete the generation function with target LLM
+     return None
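One possible way to fill in this stub, sketched with the OpenAI Python client purely as an illustration; the openai package, the model name, and the environment variable are assumptions of this sketch and are not part of the commit.

```python
# Hypothetical completion of generation(); any OpenAI-compatible chat client would work similarly.
import os
from openai import OpenAI  # assumes `pip install openai` and OPENAI_API_KEY being set

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def generation(message):
    # Send the prompt as a single user message and return the raw text reply.
    response = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": message}],
        temperature=0,
    )
    return response.choices[0].message.content
```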
Dataset_Construction/match_function_throughBm25.py ADDED
@@ -0,0 +1,104 @@
+ from rank_bm25 import BM25Plus
+ import os
+ import sys
+ import re
+ from nltk.corpus import stopwords
+ from nltk.stem import PorterStemmer, WordNetLemmatizer
+
+
+ def read_corpus(corpus_files_path):
+     corpus = []
+
+     corpus_files = os.listdir(corpus_files_path)
+     for corpus_file in corpus_files:
+         with open(os.path.join(corpus_files_path, corpus_file), 'r') as input_file:
+             tmp = input_file.read()
+
+         corpus.append(tmp)
+
+     return corpus
+
+ def normalize_text(text):
+     # Lowercase everything
+     text = text.lower()
+
+     # Tokenize
+     words = re.findall(r'\w+|[^\s\w]+', text)
+
+     # Remove stopwords
+     stop_words = set(stopwords.words('english'))
+     words = [word for word in words if word not in stop_words]
+
+     # Stemming
+     stemmer = PorterStemmer()
+     words = [stemmer.stem(word) for word in words]
+
+     # Lemmatization
+     lemmatizer = WordNetLemmatizer()
+     words = [lemmatizer.lemmatize(word) for word in words]
+
+     return words
+
+
+ # Tokenize source code with a regular expression
+ def tokenize_code(code):
+     # Apply the normalization pipeline above
+     return normalize_text(code)
+
+ def main():
+     corpus_files_path = "functions"
+     query_files_path = "functions_with_unitTest"
+     match_results_path = "potential_function_pair"
+
+     project = sys.argv[1]
+     corpus_lang = sys.argv[2]
+     query_lang = sys.argv[3]
+
+     corpus_files_path = os.path.join(corpus_files_path, project, corpus_lang)
+     query_files_path = os.path.join(query_files_path, project, query_lang)
+     match_results_path = os.path.join(match_results_path, project, f"{query_lang}__{corpus_lang}")
+     query_files = os.listdir(query_files_path)
+
+     # Build the matching pool
+     corpus = read_corpus(corpus_files_path)
+     tokenized_corpus = [tokenize_code(doc) for doc in corpus]
+     bm25 = BM25Plus(tokenized_corpus)
+
+     # Process each query
+     for query_file in query_files:
+         with open(os.path.join(query_files_path, query_file), 'r') as input_file:
+             query = input_file.read()
+
+         # Compute the top-n matches for this query
+         # (the weight of the function name could be boosted here)
+         tokenized_query = tokenize_code(query)
+
+         # Relevance scores against the whole corpus
+         scores = bm25.get_scores(tokenized_query)
+         # Keep the most relevant function definitions
+         top_n = 10
+         match_results_index = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:top_n]
+
+         # Create the output directory if it does not exist
+         if not os.path.exists(match_results_path):
+             os.makedirs(match_results_path)
+
+         # Record the match results
+         with open(os.path.join(match_results_path, query_file), 'w') as output_file:
+             output_file.write("<Target function>\n")
+             output_file.write(query)
+             output_file.write("\n</Target function>\n\n")
+             output_file.write("<Possible matching functions>\n")
+             i = 1
+             for index in match_results_index:
+                 output_file.write("<Function {}> \n{}\n</Function {}>\n\n".format(i, corpus[index], i))
+                 i += 1
+             output_file.write("</Possible matching functions>\n")
+
+
+ if __name__ == "__main__":
+     main()
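For reference, a tiny self-contained illustration of the rank_bm25 calls used above; the toy corpus is made up and stands in for the tokenized function documents produced by tokenize_code().

```python
from rank_bm25 import BM25Plus

# Two toy "function documents", already tokenized the way the script tokenizes code.
tokenized_corpus = [["def", "add", "a", "b", "return"], ["fn", "mul", "x", "y"]]
bm25 = BM25Plus(tokenized_corpus)

# Score a query against every document and pick the best-ranked index.
scores = bm25.get_scores(["add", "a", "b"])
best = max(range(len(scores)), key=lambda i: scores[i])
print(best, scores[best])  # expected: index 0 ranks highest
```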
Dataset_Construction/match_function_throughLLM.py ADDED
@@ -0,0 +1,70 @@
+ import os
+ import logging
+ import time
+ import sys
+ from generation import generation
+
+ source_dir = sys.argv[1]
+ target_dir = sys.argv[2]
+ project = sys.argv[3]
+
+ # Logging configuration
+ logging.basicConfig(filename=f"match_function_throughLLM_{project}.log", level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+
+ def process(projects_path, project, target_dir):
+
+     lang_pairs = os.listdir(projects_path)
+     for lang_pair in lang_pairs:
+         if not os.path.exists(os.path.join(target_dir, project, lang_pair)):
+             os.makedirs(os.path.join(target_dir, project, lang_pair))
+         query_lang = lang_pair.split("__")[0]
+         corpus_lang = lang_pair.split("__")[1]
+
+         questions_path = os.listdir(os.path.join(projects_path, lang_pair))
+         for question_path in questions_path:
+
+             if os.path.exists(os.path.join(target_dir, project, lang_pair, question_path)):
+                 logging.info(f"{lang_pair}'s {question_path} already exists")
+                 continue
+
+             # Read the candidate-match question
+             with open(os.path.join(projects_path, lang_pair, question_path), 'r', encoding='utf-8') as input_file:
+                 question = input_file.read()
+             query_func = question[len("<Target function>\n"):question.find("</Target function>")]
+
+             while True:
+
+                 try:
+                     # Ask the LLM to pick the matching function
+                     message = f"You are a professional who is expert in programming language {query_lang} and programming language {corpus_lang}. You will be provided with 1 Target function written in {query_lang} and 10 Possible matching functions written in {corpus_lang} (delimited with XML tags). Please select a function that has the same functionality as the Target function from the 10 Possible matching functions. You should only respond with the serial number of the matching function, or \"None\" if it doesn't exist.\n{question}"
+
+                     response = generation(message)
+
+                     with open(os.path.join(target_dir, project, lang_pair, question_path), 'w', encoding='utf-8', errors='ignore') as output_file:
+                         # Record the matched pair (or "None")
+                         if response == "None":
+                             output_file.write("None")
+                         else:
+                             start = question.find(f"<Function {response}>") + len(f"<Function {response}>\n")
+                             end = question.find(f"</Function {response}>")
+                             output_file.write(query_func)
+                             output_file.write("------\n")
+                             output_file.write(question[start:end])
+                     break
+
+                 except Exception as e:
+                     logging.error(f"error with {lang_pair} {question_path}, detail is: {e}")
+                     print(f"error with {lang_pair} {question_path}, detail is: {e}")
+                     time.sleep(10)
+                     break
+
+
+ if __name__ == "__main__":
+     projects_path = os.path.join(source_dir, project)  # replace with the actual file path
+     process(projects_path, project, target_dir)
Dataset_Construction/projects/charset-normalizer/python/__init__.py ADDED
@@ -0,0 +1,46 @@
+ # -*- coding: utf-8 -*-
+ """
+ Charset-Normalizer
+ ~~~~~~~~~~~~~~
+ The Real First Universal Charset Detector.
+ A library that helps you read text from an unknown charset encoding.
+ Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
+ All IANA character set names for which the Python core library provides codecs are supported.
+
+ Basic usage:
+    >>> from charset_normalizer import from_bytes
+    >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
+    >>> best_guess = results.best()
+    >>> str(best_guess)
+    'Bсеки човек има право на образование. Oбразованието!'
+
+ Others methods and usages are available - see the full documentation
+ at <https://github.com/Ousret/charset_normalizer>.
+ :copyright: (c) 2021 by Ahmed TAHRI
+ :license: MIT, see LICENSE for more details.
+ """
+ import logging
+
+ from .api import from_bytes, from_fp, from_path, is_binary
+ from .legacy import detect
+ from .models import CharsetMatch, CharsetMatches
+ from .utils import set_logging_handler
+ from .version import VERSION, __version__
+
+ __all__ = (
+     "from_fp",
+     "from_path",
+     "from_bytes",
+     "is_binary",
+     "detect",
+     "CharsetMatch",
+     "CharsetMatches",
+     "__version__",
+     "VERSION",
+     "set_logging_handler",
+ )
+
+ # Attach a NullHandler to the top level logger by default
+ # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
+
+ logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
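For reference, a short usage sketch of the public API exported above; the input path is illustrative, while from_path, is_binary and CharsetMatches.best() are the names this package exposes.

```python
from charset_normalizer import from_path, is_binary

# Guess the encoding of a file on disk (the path is a placeholder).
if not is_binary("./legacy_document.txt"):
    best_guess = from_path("./legacy_document.txt").best()
    if best_guess is not None:
        print(best_guess.encoding)   # e.g. "cp1252"
        print(str(best_guess)[:80])  # decoded, Unicode text
```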
Dataset_Construction/projects/charset-normalizer/python/__main__.py ADDED
@@ -0,0 +1,4 @@
+ from .cli import cli_detect
+
+ if __name__ == "__main__":
+     cli_detect()
Dataset_Construction/projects/charset-normalizer/python/api.py ADDED
@@ -0,0 +1,626 @@
1
+ import logging
2
+ from os import PathLike
3
+ from typing import BinaryIO, List, Optional, Set, Union
4
+
5
+ from .cd import (
6
+ coherence_ratio,
7
+ encoding_languages,
8
+ mb_encoding_languages,
9
+ merge_coherence_ratios,
10
+ )
11
+ from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
12
+ from .md import mess_ratio
13
+ from .models import CharsetMatch, CharsetMatches
14
+ from .utils import (
15
+ any_specified_encoding,
16
+ cut_sequence_chunks,
17
+ iana_name,
18
+ identify_sig_or_bom,
19
+ is_cp_similar,
20
+ is_multi_byte_encoding,
21
+ should_strip_sig_or_bom,
22
+ )
23
+
24
+ # Will most likely be controversial
25
+ # logging.addLevelName(TRACE, "TRACE")
26
+ logger = logging.getLogger("charset_normalizer")
27
+ explain_handler = logging.StreamHandler()
28
+ explain_handler.setFormatter(
29
+ logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
30
+ )
31
+
32
+
33
+ def from_bytes(
34
+ sequences: Union[bytes, bytearray],
35
+ steps: int = 5,
36
+ chunk_size: int = 512,
37
+ threshold: float = 0.2,
38
+ cp_isolation: Optional[List[str]] = None,
39
+ cp_exclusion: Optional[List[str]] = None,
40
+ preemptive_behaviour: bool = True,
41
+ explain: bool = False,
42
+ language_threshold: float = 0.1,
43
+ enable_fallback: bool = True,
44
+ ) -> CharsetMatches:
45
+ """
46
+ Given a raw bytes sequence, return the best possibles charset usable to render str objects.
47
+ If there is no results, it is a strong indicator that the source is binary/not text.
48
+ By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
49
+ And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
50
+
51
+ The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
52
+ but never take it for granted. Can improve the performance.
53
+
54
+ You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
55
+ purpose.
56
+
57
+ This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
58
+ By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
59
+ toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
60
+ Custom logging format and handler can be set manually.
61
+ """
62
+
63
+ if not isinstance(sequences, (bytearray, bytes)):
64
+ raise TypeError(
65
+ "Expected object of type bytes or bytearray, got: {0}".format(
66
+ type(sequences)
67
+ )
68
+ )
69
+
70
+ if explain:
71
+ previous_logger_level: int = logger.level
72
+ logger.addHandler(explain_handler)
73
+ logger.setLevel(TRACE)
74
+
75
+ length: int = len(sequences)
76
+
77
+ if length == 0:
78
+ logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
79
+ if explain:
80
+ logger.removeHandler(explain_handler)
81
+ logger.setLevel(previous_logger_level or logging.WARNING)
82
+ return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
83
+
84
+ if cp_isolation is not None:
85
+ logger.log(
86
+ TRACE,
87
+ "cp_isolation is set. use this flag for debugging purpose. "
88
+ "limited list of encoding allowed : %s.",
89
+ ", ".join(cp_isolation),
90
+ )
91
+ cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
92
+ else:
93
+ cp_isolation = []
94
+
95
+ if cp_exclusion is not None:
96
+ logger.log(
97
+ TRACE,
98
+ "cp_exclusion is set. use this flag for debugging purpose. "
99
+ "limited list of encoding excluded : %s.",
100
+ ", ".join(cp_exclusion),
101
+ )
102
+ cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
103
+ else:
104
+ cp_exclusion = []
105
+
106
+ if length <= (chunk_size * steps):
107
+ logger.log(
108
+ TRACE,
109
+ "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
110
+ steps,
111
+ chunk_size,
112
+ length,
113
+ )
114
+ steps = 1
115
+ chunk_size = length
116
+
117
+ if steps > 1 and length / steps < chunk_size:
118
+ chunk_size = int(length / steps)
119
+
120
+ is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
121
+ is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
122
+
123
+ if is_too_small_sequence:
124
+ logger.log(
125
+ TRACE,
126
+ "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
127
+ length
128
+ ),
129
+ )
130
+ elif is_too_large_sequence:
131
+ logger.log(
132
+ TRACE,
133
+ "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
134
+ length
135
+ ),
136
+ )
137
+
138
+ prioritized_encodings: List[str] = []
139
+
140
+ specified_encoding: Optional[str] = (
141
+ any_specified_encoding(sequences) if preemptive_behaviour else None
142
+ )
143
+
144
+ if specified_encoding is not None:
145
+ prioritized_encodings.append(specified_encoding)
146
+ logger.log(
147
+ TRACE,
148
+ "Detected declarative mark in sequence. Priority +1 given for %s.",
149
+ specified_encoding,
150
+ )
151
+
152
+ tested: Set[str] = set()
153
+ tested_but_hard_failure: List[str] = []
154
+ tested_but_soft_failure: List[str] = []
155
+
156
+ fallback_ascii: Optional[CharsetMatch] = None
157
+ fallback_u8: Optional[CharsetMatch] = None
158
+ fallback_specified: Optional[CharsetMatch] = None
159
+
160
+ results: CharsetMatches = CharsetMatches()
161
+
162
+ sig_encoding, sig_payload = identify_sig_or_bom(sequences)
163
+
164
+ if sig_encoding is not None:
165
+ prioritized_encodings.append(sig_encoding)
166
+ logger.log(
167
+ TRACE,
168
+ "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
169
+ len(sig_payload),
170
+ sig_encoding,
171
+ )
172
+
173
+ prioritized_encodings.append("ascii")
174
+
175
+ if "utf_8" not in prioritized_encodings:
176
+ prioritized_encodings.append("utf_8")
177
+
178
+ for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
179
+ if cp_isolation and encoding_iana not in cp_isolation:
180
+ continue
181
+
182
+ if cp_exclusion and encoding_iana in cp_exclusion:
183
+ continue
184
+
185
+ if encoding_iana in tested:
186
+ continue
187
+
188
+ tested.add(encoding_iana)
189
+
190
+ decoded_payload: Optional[str] = None
191
+ bom_or_sig_available: bool = sig_encoding == encoding_iana
192
+ strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
193
+ encoding_iana
194
+ )
195
+
196
+ if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
197
+ logger.log(
198
+ TRACE,
199
+ "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
200
+ encoding_iana,
201
+ )
202
+ continue
203
+ if encoding_iana in {"utf_7"} and not bom_or_sig_available:
204
+ logger.log(
205
+ TRACE,
206
+ "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
207
+ encoding_iana,
208
+ )
209
+ continue
210
+
211
+ try:
212
+ is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
213
+ except (ModuleNotFoundError, ImportError):
214
+ logger.log(
215
+ TRACE,
216
+ "Encoding %s does not provide an IncrementalDecoder",
217
+ encoding_iana,
218
+ )
219
+ continue
220
+
221
+ try:
222
+ if is_too_large_sequence and is_multi_byte_decoder is False:
223
+ str(
224
+ sequences[: int(50e4)]
225
+ if strip_sig_or_bom is False
226
+ else sequences[len(sig_payload) : int(50e4)],
227
+ encoding=encoding_iana,
228
+ )
229
+ else:
230
+ decoded_payload = str(
231
+ sequences
232
+ if strip_sig_or_bom is False
233
+ else sequences[len(sig_payload) :],
234
+ encoding=encoding_iana,
235
+ )
236
+ except (UnicodeDecodeError, LookupError) as e:
237
+ if not isinstance(e, LookupError):
238
+ logger.log(
239
+ TRACE,
240
+ "Code page %s does not fit given bytes sequence at ALL. %s",
241
+ encoding_iana,
242
+ str(e),
243
+ )
244
+ tested_but_hard_failure.append(encoding_iana)
245
+ continue
246
+
247
+ similar_soft_failure_test: bool = False
248
+
249
+ for encoding_soft_failed in tested_but_soft_failure:
250
+ if is_cp_similar(encoding_iana, encoding_soft_failed):
251
+ similar_soft_failure_test = True
252
+ break
253
+
254
+ if similar_soft_failure_test:
255
+ logger.log(
256
+ TRACE,
257
+ "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
258
+ encoding_iana,
259
+ encoding_soft_failed,
260
+ )
261
+ continue
262
+
263
+ r_ = range(
264
+ 0 if not bom_or_sig_available else len(sig_payload),
265
+ length,
266
+ int(length / steps),
267
+ )
268
+
269
+ multi_byte_bonus: bool = (
270
+ is_multi_byte_decoder
271
+ and decoded_payload is not None
272
+ and len(decoded_payload) < length
273
+ )
274
+
275
+ if multi_byte_bonus:
276
+ logger.log(
277
+ TRACE,
278
+ "Code page %s is a multi byte encoding table and it appear that at least one character "
279
+ "was encoded using n-bytes.",
280
+ encoding_iana,
281
+ )
282
+
283
+ max_chunk_gave_up: int = int(len(r_) / 4)
284
+
285
+ max_chunk_gave_up = max(max_chunk_gave_up, 2)
286
+ early_stop_count: int = 0
287
+ lazy_str_hard_failure = False
288
+
289
+ md_chunks: List[str] = []
290
+ md_ratios = []
291
+
292
+ try:
293
+ for chunk in cut_sequence_chunks(
294
+ sequences,
295
+ encoding_iana,
296
+ r_,
297
+ chunk_size,
298
+ bom_or_sig_available,
299
+ strip_sig_or_bom,
300
+ sig_payload,
301
+ is_multi_byte_decoder,
302
+ decoded_payload,
303
+ ):
304
+ md_chunks.append(chunk)
305
+
306
+ md_ratios.append(
307
+ mess_ratio(
308
+ chunk,
309
+ threshold,
310
+ explain is True and 1 <= len(cp_isolation) <= 2,
311
+ )
312
+ )
313
+
314
+ if md_ratios[-1] >= threshold:
315
+ early_stop_count += 1
316
+
317
+ if (early_stop_count >= max_chunk_gave_up) or (
318
+ bom_or_sig_available and strip_sig_or_bom is False
319
+ ):
320
+ break
321
+ except (
322
+ UnicodeDecodeError
323
+ ) as e: # Lazy str loading may have missed something there
324
+ logger.log(
325
+ TRACE,
326
+ "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
327
+ encoding_iana,
328
+ str(e),
329
+ )
330
+ early_stop_count = max_chunk_gave_up
331
+ lazy_str_hard_failure = True
332
+
333
+ # We might want to check the sequence again with the whole content
334
+ # Only if initial MD tests passes
335
+ if (
336
+ not lazy_str_hard_failure
337
+ and is_too_large_sequence
338
+ and not is_multi_byte_decoder
339
+ ):
340
+ try:
341
+ sequences[int(50e3) :].decode(encoding_iana, errors="strict")
342
+ except UnicodeDecodeError as e:
343
+ logger.log(
344
+ TRACE,
345
+ "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
346
+ encoding_iana,
347
+ str(e),
348
+ )
349
+ tested_but_hard_failure.append(encoding_iana)
350
+ continue
351
+
352
+ mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
353
+ if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
354
+ tested_but_soft_failure.append(encoding_iana)
355
+ logger.log(
356
+ TRACE,
357
+ "%s was excluded because of initial chaos probing. Gave up %i time(s). "
358
+ "Computed mean chaos is %f %%.",
359
+ encoding_iana,
360
+ early_stop_count,
361
+ round(mean_mess_ratio * 100, ndigits=3),
362
+ )
363
+ # Preparing those fallbacks in case we got nothing.
364
+ if (
365
+ enable_fallback
366
+ and encoding_iana in ["ascii", "utf_8", specified_encoding]
367
+ and not lazy_str_hard_failure
368
+ ):
369
+ fallback_entry = CharsetMatch(
370
+ sequences, encoding_iana, threshold, False, [], decoded_payload
371
+ )
372
+ if encoding_iana == specified_encoding:
373
+ fallback_specified = fallback_entry
374
+ elif encoding_iana == "ascii":
375
+ fallback_ascii = fallback_entry
376
+ else:
377
+ fallback_u8 = fallback_entry
378
+ continue
379
+
380
+ logger.log(
381
+ TRACE,
382
+ "%s passed initial chaos probing. Mean measured chaos is %f %%",
383
+ encoding_iana,
384
+ round(mean_mess_ratio * 100, ndigits=3),
385
+ )
386
+
387
+ if not is_multi_byte_decoder:
388
+ target_languages: List[str] = encoding_languages(encoding_iana)
389
+ else:
390
+ target_languages = mb_encoding_languages(encoding_iana)
391
+
392
+ if target_languages:
393
+ logger.log(
394
+ TRACE,
395
+ "{} should target any language(s) of {}".format(
396
+ encoding_iana, str(target_languages)
397
+ ),
398
+ )
399
+
400
+ cd_ratios = []
401
+
402
+ # We shall skip the CD when its about ASCII
403
+ # Most of the time its not relevant to run "language-detection" on it.
404
+ if encoding_iana != "ascii":
405
+ for chunk in md_chunks:
406
+ chunk_languages = coherence_ratio(
407
+ chunk,
408
+ language_threshold,
409
+ ",".join(target_languages) if target_languages else None,
410
+ )
411
+
412
+ cd_ratios.append(chunk_languages)
413
+
414
+ cd_ratios_merged = merge_coherence_ratios(cd_ratios)
415
+
416
+ if cd_ratios_merged:
417
+ logger.log(
418
+ TRACE,
419
+ "We detected language {} using {}".format(
420
+ cd_ratios_merged, encoding_iana
421
+ ),
422
+ )
423
+
424
+ results.append(
425
+ CharsetMatch(
426
+ sequences,
427
+ encoding_iana,
428
+ mean_mess_ratio,
429
+ bom_or_sig_available,
430
+ cd_ratios_merged,
431
+ decoded_payload,
432
+ )
433
+ )
434
+
435
+ if (
436
+ encoding_iana in [specified_encoding, "ascii", "utf_8"]
437
+ and mean_mess_ratio < 0.1
438
+ ):
439
+ logger.debug(
440
+ "Encoding detection: %s is most likely the one.", encoding_iana
441
+ )
442
+ if explain:
443
+ logger.removeHandler(explain_handler)
444
+ logger.setLevel(previous_logger_level)
445
+ return CharsetMatches([results[encoding_iana]])
446
+
447
+ if encoding_iana == sig_encoding:
448
+ logger.debug(
449
+ "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
450
+ "the beginning of the sequence.",
451
+ encoding_iana,
452
+ )
453
+ if explain:
454
+ logger.removeHandler(explain_handler)
455
+ logger.setLevel(previous_logger_level)
456
+ return CharsetMatches([results[encoding_iana]])
457
+
458
+ if len(results) == 0:
459
+ if fallback_u8 or fallback_ascii or fallback_specified:
460
+ logger.log(
461
+ TRACE,
462
+ "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
463
+ )
464
+
465
+ if fallback_specified:
466
+ logger.debug(
467
+ "Encoding detection: %s will be used as a fallback match",
468
+ fallback_specified.encoding,
469
+ )
470
+ results.append(fallback_specified)
471
+ elif (
472
+ (fallback_u8 and fallback_ascii is None)
473
+ or (
474
+ fallback_u8
475
+ and fallback_ascii
476
+ and fallback_u8.fingerprint != fallback_ascii.fingerprint
477
+ )
478
+ or (fallback_u8 is not None)
479
+ ):
480
+ logger.debug("Encoding detection: utf_8 will be used as a fallback match")
481
+ results.append(fallback_u8)
482
+ elif fallback_ascii:
483
+ logger.debug("Encoding detection: ascii will be used as a fallback match")
484
+ results.append(fallback_ascii)
485
+
486
+ if results:
487
+ logger.debug(
488
+ "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
489
+ results.best().encoding, # type: ignore
490
+ len(results) - 1,
491
+ )
492
+ else:
493
+ logger.debug("Encoding detection: Unable to determine any suitable charset.")
494
+
495
+ if explain:
496
+ logger.removeHandler(explain_handler)
497
+ logger.setLevel(previous_logger_level)
498
+
499
+ return results
500
+
501
+
502
+ def from_fp(
503
+ fp: BinaryIO,
504
+ steps: int = 5,
505
+ chunk_size: int = 512,
506
+ threshold: float = 0.20,
507
+ cp_isolation: Optional[List[str]] = None,
508
+ cp_exclusion: Optional[List[str]] = None,
509
+ preemptive_behaviour: bool = True,
510
+ explain: bool = False,
511
+ language_threshold: float = 0.1,
512
+ enable_fallback: bool = True,
513
+ ) -> CharsetMatches:
514
+ """
515
+ Same thing than the function from_bytes but using a file pointer that is already ready.
516
+ Will not close the file pointer.
517
+ """
518
+ return from_bytes(
519
+ fp.read(),
520
+ steps,
521
+ chunk_size,
522
+ threshold,
523
+ cp_isolation,
524
+ cp_exclusion,
525
+ preemptive_behaviour,
526
+ explain,
527
+ language_threshold,
528
+ enable_fallback,
529
+ )
530
+
531
+
532
+ def from_path(
533
+ path: Union[str, bytes, PathLike], # type: ignore[type-arg]
534
+ steps: int = 5,
535
+ chunk_size: int = 512,
536
+ threshold: float = 0.20,
537
+ cp_isolation: Optional[List[str]] = None,
538
+ cp_exclusion: Optional[List[str]] = None,
539
+ preemptive_behaviour: bool = True,
540
+ explain: bool = False,
541
+ language_threshold: float = 0.1,
542
+ enable_fallback: bool = True,
543
+ ) -> CharsetMatches:
544
+ """
545
+ Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
546
+ Can raise IOError.
547
+ """
548
+ with open(path, "rb") as fp:
549
+ return from_fp(
550
+ fp,
551
+ steps,
552
+ chunk_size,
553
+ threshold,
554
+ cp_isolation,
555
+ cp_exclusion,
556
+ preemptive_behaviour,
557
+ explain,
558
+ language_threshold,
559
+ enable_fallback,
560
+ )
561
+
562
+
563
+ def is_binary(
564
+ fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
565
+ steps: int = 5,
566
+ chunk_size: int = 512,
567
+ threshold: float = 0.20,
568
+ cp_isolation: Optional[List[str]] = None,
569
+ cp_exclusion: Optional[List[str]] = None,
570
+ preemptive_behaviour: bool = True,
571
+ explain: bool = False,
572
+ language_threshold: float = 0.1,
573
+ enable_fallback: bool = False,
574
+ ) -> bool:
575
+ """
576
+ Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
577
+ Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
578
+ are disabled to be stricter around ASCII-compatible but unlikely to be a string.
579
+ """
580
+ if isinstance(fp_or_path_or_payload, (str, PathLike)):
581
+ guesses = from_path(
582
+ fp_or_path_or_payload,
583
+ steps=steps,
584
+ chunk_size=chunk_size,
585
+ threshold=threshold,
586
+ cp_isolation=cp_isolation,
587
+ cp_exclusion=cp_exclusion,
588
+ preemptive_behaviour=preemptive_behaviour,
589
+ explain=explain,
590
+ language_threshold=language_threshold,
591
+ enable_fallback=enable_fallback,
592
+ )
593
+ elif isinstance(
594
+ fp_or_path_or_payload,
595
+ (
596
+ bytes,
597
+ bytearray,
598
+ ),
599
+ ):
600
+ guesses = from_bytes(
601
+ fp_or_path_or_payload,
602
+ steps=steps,
603
+ chunk_size=chunk_size,
604
+ threshold=threshold,
605
+ cp_isolation=cp_isolation,
606
+ cp_exclusion=cp_exclusion,
607
+ preemptive_behaviour=preemptive_behaviour,
608
+ explain=explain,
609
+ language_threshold=language_threshold,
610
+ enable_fallback=enable_fallback,
611
+ )
612
+ else:
613
+ guesses = from_fp(
614
+ fp_or_path_or_payload,
615
+ steps=steps,
616
+ chunk_size=chunk_size,
617
+ threshold=threshold,
618
+ cp_isolation=cp_isolation,
619
+ cp_exclusion=cp_exclusion,
620
+ preemptive_behaviour=preemptive_behaviour,
621
+ explain=explain,
622
+ language_threshold=language_threshold,
623
+ enable_fallback=enable_fallback,
624
+ )
625
+
626
+ return not guesses
Dataset_Construction/projects/charset-normalizer/python/cd.py ADDED
@@ -0,0 +1,395 @@
1
+ import importlib
2
+ from codecs import IncrementalDecoder
3
+ from collections import Counter
4
+ from functools import lru_cache
5
+ from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
6
+
7
+ from .constant import (
8
+ FREQUENCIES,
9
+ KO_NAMES,
10
+ LANGUAGE_SUPPORTED_COUNT,
11
+ TOO_SMALL_SEQUENCE,
12
+ ZH_NAMES,
13
+ )
14
+ from .md import is_suspiciously_successive_range
15
+ from .models import CoherenceMatches
16
+ from .utils import (
17
+ is_accentuated,
18
+ is_latin,
19
+ is_multi_byte_encoding,
20
+ is_unicode_range_secondary,
21
+ unicode_range,
22
+ )
23
+
24
+
25
+ def encoding_unicode_range(iana_name: str) -> List[str]:
26
+ """
27
+ Return associated unicode ranges in a single byte code page.
28
+ """
29
+ if is_multi_byte_encoding(iana_name):
30
+ raise IOError("Function not supported on multi-byte code page")
31
+
32
+ decoder = importlib.import_module(
33
+ "encodings.{}".format(iana_name)
34
+ ).IncrementalDecoder
35
+
36
+ p: IncrementalDecoder = decoder(errors="ignore")
37
+ seen_ranges: Dict[str, int] = {}
38
+ character_count: int = 0
39
+
40
+ for i in range(0x40, 0xFF):
41
+ chunk: str = p.decode(bytes([i]))
42
+
43
+ if chunk:
44
+ character_range: Optional[str] = unicode_range(chunk)
45
+
46
+ if character_range is None:
47
+ continue
48
+
49
+ if is_unicode_range_secondary(character_range) is False:
50
+ if character_range not in seen_ranges:
51
+ seen_ranges[character_range] = 0
52
+ seen_ranges[character_range] += 1
53
+ character_count += 1
54
+
55
+ return sorted(
56
+ [
57
+ character_range
58
+ for character_range in seen_ranges
59
+ if seen_ranges[character_range] / character_count >= 0.15
60
+ ]
61
+ )
62
+
63
+
64
+ def unicode_range_languages(primary_range: str) -> List[str]:
65
+ """
66
+ Return inferred languages used with a unicode range.
67
+ """
68
+ languages: List[str] = []
69
+
70
+ for language, characters in FREQUENCIES.items():
71
+ for character in characters:
72
+ if unicode_range(character) == primary_range:
73
+ languages.append(language)
74
+ break
75
+
76
+ return languages
77
+
78
+
79
+ @lru_cache()
80
+ def encoding_languages(iana_name: str) -> List[str]:
81
+ """
82
+ Single-byte encoding language association. Some code page are heavily linked to particular language(s).
83
+ This function does the correspondence.
84
+ """
85
+ unicode_ranges: List[str] = encoding_unicode_range(iana_name)
86
+ primary_range: Optional[str] = None
87
+
88
+ for specified_range in unicode_ranges:
89
+ if "Latin" not in specified_range:
90
+ primary_range = specified_range
91
+ break
92
+
93
+ if primary_range is None:
94
+ return ["Latin Based"]
95
+
96
+ return unicode_range_languages(primary_range)
97
+
98
+
99
+ @lru_cache()
100
+ def mb_encoding_languages(iana_name: str) -> List[str]:
101
+ """
102
+ Multi-byte encoding language association. Some code page are heavily linked to particular language(s).
103
+ This function does the correspondence.
104
+ """
105
+ if (
106
+ iana_name.startswith("shift_")
107
+ or iana_name.startswith("iso2022_jp")
108
+ or iana_name.startswith("euc_j")
109
+ or iana_name == "cp932"
110
+ ):
111
+ return ["Japanese"]
112
+ if iana_name.startswith("gb") or iana_name in ZH_NAMES:
113
+ return ["Chinese"]
114
+ if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
115
+ return ["Korean"]
116
+
117
+ return []
118
+
119
+
120
+ @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
121
+ def get_target_features(language: str) -> Tuple[bool, bool]:
122
+ """
123
+ Determine main aspects from a supported language if it contains accents and if is pure Latin.
124
+ """
125
+ target_have_accents: bool = False
126
+ target_pure_latin: bool = True
127
+
128
+ for character in FREQUENCIES[language]:
129
+ if not target_have_accents and is_accentuated(character):
130
+ target_have_accents = True
131
+ if target_pure_latin and is_latin(character) is False:
132
+ target_pure_latin = False
133
+
134
+ return target_have_accents, target_pure_latin
135
+
136
+
137
+ def alphabet_languages(
138
+ characters: List[str], ignore_non_latin: bool = False
139
+ ) -> List[str]:
140
+ """
141
+ Return associated languages associated to given characters.
142
+ """
143
+ languages: List[Tuple[str, float]] = []
144
+
145
+ source_have_accents = any(is_accentuated(character) for character in characters)
146
+
147
+ for language, language_characters in FREQUENCIES.items():
148
+ target_have_accents, target_pure_latin = get_target_features(language)
149
+
150
+ if ignore_non_latin and target_pure_latin is False:
151
+ continue
152
+
153
+ if target_have_accents is False and source_have_accents:
154
+ continue
155
+
156
+ character_count: int = len(language_characters)
157
+
158
+ character_match_count: int = len(
159
+ [c for c in language_characters if c in characters]
160
+ )
161
+
162
+ ratio: float = character_match_count / character_count
163
+
164
+ if ratio >= 0.2:
165
+ languages.append((language, ratio))
166
+
167
+ languages = sorted(languages, key=lambda x: x[1], reverse=True)
168
+
169
+ return [compatible_language[0] for compatible_language in languages]
170
+
171
+
172
+ def characters_popularity_compare(
173
+ language: str, ordered_characters: List[str]
174
+ ) -> float:
175
+ """
176
+ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
177
+ The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
178
+ Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)
179
+ """
180
+ if language not in FREQUENCIES:
181
+ raise ValueError("{} not available".format(language))
182
+
183
+ character_approved_count: int = 0
184
+ FREQUENCIES_language_set = set(FREQUENCIES[language])
185
+
186
+ ordered_characters_count: int = len(ordered_characters)
187
+ target_language_characters_count: int = len(FREQUENCIES[language])
188
+
189
+ large_alphabet: bool = target_language_characters_count > 26
190
+
191
+ for character, character_rank in zip(
192
+ ordered_characters, range(0, ordered_characters_count)
193
+ ):
194
+ if character not in FREQUENCIES_language_set:
195
+ continue
196
+
197
+ character_rank_in_language: int = FREQUENCIES[language].index(character)
198
+ expected_projection_ratio: float = (
199
+ target_language_characters_count / ordered_characters_count
200
+ )
201
+ character_rank_projection: int = int(character_rank * expected_projection_ratio)
202
+
203
+ if (
204
+ large_alphabet is False
205
+ and abs(character_rank_projection - character_rank_in_language) > 4
206
+ ):
207
+ continue
208
+
209
+ if (
210
+ large_alphabet is True
211
+ and abs(character_rank_projection - character_rank_in_language)
212
+ < target_language_characters_count / 3
213
+ ):
214
+ character_approved_count += 1
215
+ continue
216
+
217
+ characters_before_source: List[str] = FREQUENCIES[language][
218
+ 0:character_rank_in_language
219
+ ]
220
+ characters_after_source: List[str] = FREQUENCIES[language][
221
+ character_rank_in_language:
222
+ ]
223
+ characters_before: List[str] = ordered_characters[0:character_rank]
224
+ characters_after: List[str] = ordered_characters[character_rank:]
225
+
226
+ before_match_count: int = len(
227
+ set(characters_before) & set(characters_before_source)
228
+ )
229
+
230
+ after_match_count: int = len(
231
+ set(characters_after) & set(characters_after_source)
232
+ )
233
+
234
+ if len(characters_before_source) == 0 and before_match_count <= 4:
235
+ character_approved_count += 1
236
+ continue
237
+
238
+ if len(characters_after_source) == 0 and after_match_count <= 4:
239
+ character_approved_count += 1
240
+ continue
241
+
242
+ if (
243
+ before_match_count / len(characters_before_source) >= 0.4
244
+ or after_match_count / len(characters_after_source) >= 0.4
245
+ ):
246
+ character_approved_count += 1
247
+ continue
248
+
249
+ return character_approved_count / len(ordered_characters)
250
+
251
+
252
+ def alpha_unicode_split(decoded_sequence: str) -> List[str]:
253
+ """
254
+ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
255
+ Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list;
256
+ One containing the latin letters and the other hebrew.
257
+ """
258
+ layers: Dict[str, str] = {}
259
+
260
+ for character in decoded_sequence:
261
+ if character.isalpha() is False:
262
+ continue
263
+
264
+ character_range: Optional[str] = unicode_range(character)
265
+
266
+ if character_range is None:
267
+ continue
268
+
269
+ layer_target_range: Optional[str] = None
270
+
271
+ for discovered_range in layers:
272
+ if (
273
+ is_suspiciously_successive_range(discovered_range, character_range)
274
+ is False
275
+ ):
276
+ layer_target_range = discovered_range
277
+ break
278
+
279
+ if layer_target_range is None:
280
+ layer_target_range = character_range
281
+
282
+ if layer_target_range not in layers:
283
+ layers[layer_target_range] = character.lower()
284
+ continue
285
+
286
+ layers[layer_target_range] += character.lower()
287
+
288
+ return list(layers.values())
289
+
290
+
291
+ def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
292
+ """
293
+ This function merge results previously given by the function coherence_ratio.
294
+ The return type is the same as coherence_ratio.
295
+ """
296
+ per_language_ratios: Dict[str, List[float]] = {}
297
+ for result in results:
298
+ for sub_result in result:
299
+ language, ratio = sub_result
300
+ if language not in per_language_ratios:
301
+ per_language_ratios[language] = [ratio]
302
+ continue
303
+ per_language_ratios[language].append(ratio)
304
+
305
+ merge = [
306
+ (
307
+ language,
308
+ round(
309
+ sum(per_language_ratios[language]) / len(per_language_ratios[language]),
310
+ 4,
311
+ ),
312
+ )
313
+ for language in per_language_ratios
314
+ ]
315
+
316
+ return sorted(merge, key=lambda x: x[1], reverse=True)
317
+
318
+
319
+ def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
320
+ """
321
+ We shall NOT return "English—" in CoherenceMatches because it is an alternative
322
+ of "English". This function only keeps the best match and remove the em-dash in it.
323
+ """
324
+ index_results: Dict[str, List[float]] = dict()
325
+
326
+ for result in results:
327
+ language, ratio = result
328
+ no_em_name: str = language.replace("—", "")
329
+
330
+ if no_em_name not in index_results:
331
+ index_results[no_em_name] = []
332
+
333
+ index_results[no_em_name].append(ratio)
334
+
335
+ if any(len(index_results[e]) > 1 for e in index_results):
336
+ filtered_results: CoherenceMatches = []
337
+
338
+ for language in index_results:
339
+ filtered_results.append((language, max(index_results[language])))
340
+
341
+ return filtered_results
342
+
343
+ return results
344
+
345
+
346
+ @lru_cache(maxsize=2048)
347
+ def coherence_ratio(
348
+ decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
349
+ ) -> CoherenceMatches:
350
+ """
351
+ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
352
+ A layer = Character extraction by alphabets/ranges.
353
+ """
354
+
355
+ results: List[Tuple[str, float]] = []
356
+ ignore_non_latin: bool = False
357
+
358
+ sufficient_match_count: int = 0
359
+
360
+ lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
361
+ if "Latin Based" in lg_inclusion_list:
362
+ ignore_non_latin = True
363
+ lg_inclusion_list.remove("Latin Based")
364
+
365
+ for layer in alpha_unicode_split(decoded_sequence):
366
+ sequence_frequencies: TypeCounter[str] = Counter(layer)
367
+ most_common = sequence_frequencies.most_common()
368
+
369
+ character_count: int = sum(o for c, o in most_common)
370
+
371
+ if character_count <= TOO_SMALL_SEQUENCE:
372
+ continue
373
+
374
+ popular_character_ordered: List[str] = [c for c, o in most_common]
375
+
376
+ for language in lg_inclusion_list or alphabet_languages(
377
+ popular_character_ordered, ignore_non_latin
378
+ ):
379
+ ratio: float = characters_popularity_compare(
380
+ language, popular_character_ordered
381
+ )
382
+
383
+ if ratio < threshold:
384
+ continue
385
+ elif ratio >= 0.8:
386
+ sufficient_match_count += 1
387
+
388
+ results.append((language, round(ratio, 4)))
389
+
390
+ if sufficient_match_count >= 3:
391
+ break
392
+
393
+ return sorted(
394
+ filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
395
+ )
Dataset_Construction/projects/charset-normalizer/python/cli/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from .__main__ import cli_detect, query_yes_no
+
+ __all__ = (
+     "cli_detect",
+     "query_yes_no",
+ )
Dataset_Construction/projects/charset-normalizer/python/cli/__main__.py ADDED
@@ -0,0 +1,296 @@
1
+ import argparse
2
+ import sys
3
+ from json import dumps
4
+ from os.path import abspath, basename, dirname, join, realpath
5
+ from platform import python_version
6
+ from typing import List, Optional
7
+ from unicodedata import unidata_version
8
+
9
+ import charset_normalizer.md as md_module
10
+ from charset_normalizer import from_fp
11
+ from charset_normalizer.models import CliDetectionResult
12
+ from charset_normalizer.version import __version__
13
+
14
+
15
+ def query_yes_no(question: str, default: str = "yes") -> bool:
16
+ """Ask a yes/no question via input() and return their answer.
17
+
18
+ "question" is a string that is presented to the user.
19
+ "default" is the presumed answer if the user just hits <Enter>.
20
+ It must be "yes" (the default), "no" or None (meaning
21
+ an answer is required of the user).
22
+
23
+ The "answer" return value is True for "yes" or False for "no".
24
+
25
+ Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input
26
+ """
27
+ valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
28
+ if default is None:
29
+ prompt = " [y/n] "
30
+ elif default == "yes":
31
+ prompt = " [Y/n] "
32
+ elif default == "no":
33
+ prompt = " [y/N] "
34
+ else:
35
+ raise ValueError("invalid default answer: '%s'" % default)
36
+
37
+ while True:
38
+ sys.stdout.write(question + prompt)
39
+ choice = input().lower()
40
+ if default is not None and choice == "":
41
+ return valid[default]
42
+ elif choice in valid:
43
+ return valid[choice]
44
+ else:
45
+ sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n")
46
+
47
+
48
+ def cli_detect(argv: Optional[List[str]] = None) -> int:
49
+ """
50
+ CLI assistant using ARGV and ArgumentParser
51
+ :param argv:
52
+ :return: 0 if everything is fine, anything else equal trouble
53
+ """
54
+ parser = argparse.ArgumentParser(
55
+ description="The Real First Universal Charset Detector. "
56
+ "Discover originating encoding used on text file. "
57
+ "Normalize text to unicode."
58
+ )
59
+
60
+ parser.add_argument(
61
+ "files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed"
62
+ )
63
+ parser.add_argument(
64
+ "-v",
65
+ "--verbose",
66
+ action="store_true",
67
+ default=False,
68
+ dest="verbose",
69
+ help="Display complementary information about file if any. "
70
+ "Stdout will contain logs about the detection process.",
71
+ )
72
+ parser.add_argument(
73
+ "-a",
74
+ "--with-alternative",
75
+ action="store_true",
76
+ default=False,
77
+ dest="alternatives",
78
+ help="Output complementary possibilities if any. Top-level JSON WILL be a list.",
79
+ )
80
+ parser.add_argument(
81
+ "-n",
82
+ "--normalize",
83
+ action="store_true",
84
+ default=False,
85
+ dest="normalize",
86
+ help="Permit to normalize input file. If not set, program does not write anything.",
87
+ )
88
+ parser.add_argument(
89
+ "-m",
90
+ "--minimal",
91
+ action="store_true",
92
+ default=False,
93
+ dest="minimal",
94
+ help="Only output the charset detected to STDOUT. Disabling JSON output.",
95
+ )
96
+ parser.add_argument(
97
+ "-r",
98
+ "--replace",
99
+ action="store_true",
100
+ default=False,
101
+ dest="replace",
102
+ help="Replace file when trying to normalize it instead of creating a new one.",
103
+ )
104
+ parser.add_argument(
105
+ "-f",
106
+ "--force",
107
+ action="store_true",
108
+ default=False,
109
+ dest="force",
110
+ help="Replace file without asking if you are sure, use this flag with caution.",
111
+ )
112
+ parser.add_argument(
113
+ "-t",
114
+ "--threshold",
115
+ action="store",
116
+ default=0.2,
117
+ type=float,
118
+ dest="threshold",
119
+ help="Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.",
120
+ )
121
+ parser.add_argument(
122
+ "--version",
123
+ action="version",
124
+ version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format(
125
+ __version__,
126
+ python_version(),
127
+ unidata_version,
128
+ "OFF" if md_module.__file__.lower().endswith(".py") else "ON",
129
+ ),
130
+ help="Show version information and exit.",
131
+ )
132
+
133
+ args = parser.parse_args(argv)
134
+
135
+ if args.replace is True and args.normalize is False:
136
+ print("Use --replace in addition of --normalize only.", file=sys.stderr)
137
+ return 1
138
+
139
+ if args.force is True and args.replace is False:
140
+ print("Use --force in addition of --replace only.", file=sys.stderr)
141
+ return 1
142
+
143
+ if args.threshold < 0.0 or args.threshold > 1.0:
144
+ print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr)
145
+ return 1
146
+
147
+ x_ = []
148
+
149
+ for my_file in args.files:
150
+ matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)
151
+
152
+ best_guess = matches.best()
153
+
154
+ if best_guess is None:
155
+ print(
156
+ 'Unable to identify originating encoding for "{}". {}'.format(
157
+ my_file.name,
158
+ "Maybe try increasing maximum amount of chaos."
159
+ if args.threshold < 1.0
160
+ else "",
161
+ ),
162
+ file=sys.stderr,
163
+ )
164
+ x_.append(
165
+ CliDetectionResult(
166
+ abspath(my_file.name),
167
+ None,
168
+ [],
169
+ [],
170
+ "Unknown",
171
+ [],
172
+ False,
173
+ 1.0,
174
+ 0.0,
175
+ None,
176
+ True,
177
+ )
178
+ )
179
+ else:
180
+ x_.append(
181
+ CliDetectionResult(
182
+ abspath(my_file.name),
183
+ best_guess.encoding,
184
+ best_guess.encoding_aliases,
185
+ [
186
+ cp
187
+ for cp in best_guess.could_be_from_charset
188
+ if cp != best_guess.encoding
189
+ ],
190
+ best_guess.language,
191
+ best_guess.alphabets,
192
+ best_guess.bom,
193
+ best_guess.percent_chaos,
194
+ best_guess.percent_coherence,
195
+ None,
196
+ True,
197
+ )
198
+ )
199
+
200
+ if len(matches) > 1 and args.alternatives:
201
+ for el in matches:
202
+ if el != best_guess:
203
+ x_.append(
204
+ CliDetectionResult(
205
+ abspath(my_file.name),
206
+ el.encoding,
207
+ el.encoding_aliases,
208
+ [
209
+ cp
210
+ for cp in el.could_be_from_charset
211
+ if cp != el.encoding
212
+ ],
213
+ el.language,
214
+ el.alphabets,
215
+ el.bom,
216
+ el.percent_chaos,
217
+ el.percent_coherence,
218
+ None,
219
+ False,
220
+ )
221
+ )
222
+
223
+ if args.normalize is True:
224
+ if best_guess.encoding.startswith("utf") is True:
225
+ print(
226
+ '"{}" file does not need to be normalized, as it already came from unicode.'.format(
227
+ my_file.name
228
+ ),
229
+ file=sys.stderr,
230
+ )
231
+ if my_file.closed is False:
232
+ my_file.close()
233
+ continue
234
+
235
+ dir_path = dirname(realpath(my_file.name))
236
+ file_name = basename(realpath(my_file.name))
237
+
238
+ o_: List[str] = file_name.split(".")
239
+
240
+ if args.replace is False:
241
+ o_.insert(-1, best_guess.encoding)
242
+ if my_file.closed is False:
243
+ my_file.close()
244
+ elif (
245
+ args.force is False
246
+ and query_yes_no(
247
+ 'Are you sure to normalize "{}" by replacing it ?'.format(
248
+ my_file.name
249
+ ),
250
+ "no",
251
+ )
252
+ is False
253
+ ):
254
+ if my_file.closed is False:
255
+ my_file.close()
256
+ continue
257
+
258
+ try:
259
+ x_[0].unicode_path = join(dir_path, ".".join(o_))
260
+
261
+ with open(x_[0].unicode_path, "w", encoding="utf-8") as fp:
262
+ fp.write(str(best_guess))
263
+ except IOError as e:
264
+ print(str(e), file=sys.stderr)
265
+ if my_file.closed is False:
266
+ my_file.close()
267
+ return 2
268
+
269
+ if my_file.closed is False:
270
+ my_file.close()
271
+
272
+ if args.minimal is False:
273
+ print(
274
+ dumps(
275
+ [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__,
276
+ ensure_ascii=True,
277
+ indent=4,
278
+ )
279
+ )
280
+ else:
281
+ for my_file in args.files:
282
+ print(
283
+ ", ".join(
284
+ [
285
+ el.encoding or "undefined"
286
+ for el in x_
287
+ if el.path == abspath(my_file.name)
288
+ ]
289
+ )
290
+ )
291
+
292
+ return 0
293
+
294
+
295
+ if __name__ == "__main__":
296
+ cli_detect()
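In its default (non-minimal) mode the CLI above prints one JSON report per run. A hedged sketch of capturing that report in-process rather than shelling out; the exact fields come from CliDetectionResult's attributes and the file path is a placeholder:

import io
import json
from contextlib import redirect_stdout

from charset_normalizer.cli import cli_detect

# Capture the JSON printed to stdout for a single file, then read fields back.
buffer = io.StringIO()
with redirect_stdout(buffer):
    exit_code = cli_detect(["some_file.txt"])  # placeholder path

if exit_code == 0:
    report = json.loads(buffer.getvalue())
    print(report["encoding"], report["language"])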
Dataset_Construction/projects/charset-normalizer/python/constant.py ADDED
@@ -0,0 +1,1995 @@
1
+ # -*- coding: utf-8 -*-
2
+ from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
3
+ from encodings.aliases import aliases
4
+ from re import IGNORECASE, compile as re_compile
5
+ from typing import Dict, List, Set, Union
6
+
7
+ # Contain for each eligible encoding a list of/item bytes SIG/BOM
8
+ ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = {
9
+ "utf_8": BOM_UTF8,
10
+ "utf_7": [
11
+ b"\x2b\x2f\x76\x38",
12
+ b"\x2b\x2f\x76\x39",
13
+ b"\x2b\x2f\x76\x2b",
14
+ b"\x2b\x2f\x76\x2f",
15
+ b"\x2b\x2f\x76\x38\x2d",
16
+ ],
17
+ "gb18030": b"\x84\x31\x95\x33",
18
+ "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE],
19
+ "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE],
20
+ }
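(Illustrative aside, not part of constant.py:) a BOM/SIG table like ENCODING_MARKS is typically probed by checking whether the raw payload starts with one of the listed byte sequences; upstream charset-normalizer has a helper of this shape in its utils module. A standalone sketch under that assumption:

from codecs import BOM_UTF8
from typing import Dict, List, Union

def sniff_bom(payload: bytes, marks: Dict[str, Union[bytes, List[bytes]]]) -> str:
    # Return the first encoding whose BOM/SIG prefixes the payload.
    for encoding, candidates in marks.items():
        if isinstance(candidates, bytes):
            candidates = [candidates]
        if any(payload.startswith(mark) for mark in candidates):
            return encoding
    return "unknown"

print(sniff_bom(BOM_UTF8 + "héllo".encode("utf-8"), {"utf_8": BOM_UTF8}))  # utf_8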
21
+
22
+ TOO_SMALL_SEQUENCE: int = 32
23
+ TOO_BIG_SEQUENCE: int = int(10e6)
24
+
25
+ UTF8_MAXIMAL_ALLOCATION: int = 1_112_064
26
+
27
+ # Up-to-date Unicode ucd/15.0.0
28
+ UNICODE_RANGES_COMBINED: Dict[str, range] = {
29
+ "Control character": range(32),
30
+ "Basic Latin": range(32, 128),
31
+ "Latin-1 Supplement": range(128, 256),
32
+ "Latin Extended-A": range(256, 384),
33
+ "Latin Extended-B": range(384, 592),
34
+ "IPA Extensions": range(592, 688),
35
+ "Spacing Modifier Letters": range(688, 768),
36
+ "Combining Diacritical Marks": range(768, 880),
37
+ "Greek and Coptic": range(880, 1024),
38
+ "Cyrillic": range(1024, 1280),
39
+ "Cyrillic Supplement": range(1280, 1328),
40
+ "Armenian": range(1328, 1424),
41
+ "Hebrew": range(1424, 1536),
42
+ "Arabic": range(1536, 1792),
43
+ "Syriac": range(1792, 1872),
44
+ "Arabic Supplement": range(1872, 1920),
45
+ "Thaana": range(1920, 1984),
46
+ "NKo": range(1984, 2048),
47
+ "Samaritan": range(2048, 2112),
48
+ "Mandaic": range(2112, 2144),
49
+ "Syriac Supplement": range(2144, 2160),
50
+ "Arabic Extended-B": range(2160, 2208),
51
+ "Arabic Extended-A": range(2208, 2304),
52
+ "Devanagari": range(2304, 2432),
53
+ "Bengali": range(2432, 2560),
54
+ "Gurmukhi": range(2560, 2688),
55
+ "Gujarati": range(2688, 2816),
56
+ "Oriya": range(2816, 2944),
57
+ "Tamil": range(2944, 3072),
58
+ "Telugu": range(3072, 3200),
59
+ "Kannada": range(3200, 3328),
60
+ "Malayalam": range(3328, 3456),
61
+ "Sinhala": range(3456, 3584),
62
+ "Thai": range(3584, 3712),
63
+ "Lao": range(3712, 3840),
64
+ "Tibetan": range(3840, 4096),
65
+ "Myanmar": range(4096, 4256),
66
+ "Georgian": range(4256, 4352),
67
+ "Hangul Jamo": range(4352, 4608),
68
+ "Ethiopic": range(4608, 4992),
69
+ "Ethiopic Supplement": range(4992, 5024),
70
+ "Cherokee": range(5024, 5120),
71
+ "Unified Canadian Aboriginal Syllabics": range(5120, 5760),
72
+ "Ogham": range(5760, 5792),
73
+ "Runic": range(5792, 5888),
74
+ "Tagalog": range(5888, 5920),
75
+ "Hanunoo": range(5920, 5952),
76
+ "Buhid": range(5952, 5984),
77
+ "Tagbanwa": range(5984, 6016),
78
+ "Khmer": range(6016, 6144),
79
+ "Mongolian": range(6144, 6320),
80
+ "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6400),
81
+ "Limbu": range(6400, 6480),
82
+ "Tai Le": range(6480, 6528),
83
+ "New Tai Lue": range(6528, 6624),
84
+ "Khmer Symbols": range(6624, 6656),
85
+ "Buginese": range(6656, 6688),
86
+ "Tai Tham": range(6688, 6832),
87
+ "Combining Diacritical Marks Extended": range(6832, 6912),
88
+ "Balinese": range(6912, 7040),
89
+ "Sundanese": range(7040, 7104),
90
+ "Batak": range(7104, 7168),
91
+ "Lepcha": range(7168, 7248),
92
+ "Ol Chiki": range(7248, 7296),
93
+ "Cyrillic Extended-C": range(7296, 7312),
94
+ "Georgian Extended": range(7312, 7360),
95
+ "Sundanese Supplement": range(7360, 7376),
96
+ "Vedic Extensions": range(7376, 7424),
97
+ "Phonetic Extensions": range(7424, 7552),
98
+ "Phonetic Extensions Supplement": range(7552, 7616),
99
+ "Combining Diacritical Marks Supplement": range(7616, 7680),
100
+ "Latin Extended Additional": range(7680, 7936),
101
+ "Greek Extended": range(7936, 8192),
102
+ "General Punctuation": range(8192, 8304),
103
+ "Superscripts and Subscripts": range(8304, 8352),
104
+ "Currency Symbols": range(8352, 8400),
105
+ "Combining Diacritical Marks for Symbols": range(8400, 8448),
106
+ "Letterlike Symbols": range(8448, 8528),
107
+ "Number Forms": range(8528, 8592),
108
+ "Arrows": range(8592, 8704),
109
+ "Mathematical Operators": range(8704, 8960),
110
+ "Miscellaneous Technical": range(8960, 9216),
111
+ "Control Pictures": range(9216, 9280),
112
+ "Optical Character Recognition": range(9280, 9312),
113
+ "Enclosed Alphanumerics": range(9312, 9472),
114
+ "Box Drawing": range(9472, 9600),
115
+ "Block Elements": range(9600, 9632),
116
+ "Geometric Shapes": range(9632, 9728),
117
+ "Miscellaneous Symbols": range(9728, 9984),
118
+ "Dingbats": range(9984, 10176),
119
+ "Miscellaneous Mathematical Symbols-A": range(10176, 10224),
120
+ "Supplemental Arrows-A": range(10224, 10240),
121
+ "Braille Patterns": range(10240, 10496),
122
+ "Supplemental Arrows-B": range(10496, 10624),
123
+ "Miscellaneous Mathematical Symbols-B": range(10624, 10752),
124
+ "Supplemental Mathematical Operators": range(10752, 11008),
125
+ "Miscellaneous Symbols and Arrows": range(11008, 11264),
126
+ "Glagolitic": range(11264, 11360),
127
+ "Latin Extended-C": range(11360, 11392),
128
+ "Coptic": range(11392, 11520),
129
+ "Georgian Supplement": range(11520, 11568),
130
+ "Tifinagh": range(11568, 11648),
131
+ "Ethiopic Extended": range(11648, 11744),
132
+ "Cyrillic Extended-A": range(11744, 11776),
133
+ "Supplemental Punctuation": range(11776, 11904),
134
+ "CJK Radicals Supplement": range(11904, 12032),
135
+ "Kangxi Radicals": range(12032, 12256),
136
+ "Ideographic Description Characters": range(12272, 12288),
137
+ "CJK Symbols and Punctuation": range(12288, 12352),
138
+ "Hiragana": range(12352, 12448),
139
+ "Katakana": range(12448, 12544),
140
+ "Bopomofo": range(12544, 12592),
141
+ "Hangul Compatibility Jamo": range(12592, 12688),
142
+ "Kanbun": range(12688, 12704),
143
+ "Bopomofo Extended": range(12704, 12736),
144
+ "CJK Strokes": range(12736, 12784),
145
+ "Katakana Phonetic Extensions": range(12784, 12800),
146
+ "Enclosed CJK Letters and Months": range(12800, 13056),
147
+ "CJK Compatibility": range(13056, 13312),
148
+ "CJK Unified Ideographs Extension A": range(13312, 19904),
149
+ "Yijing Hexagram Symbols": range(19904, 19968),
150
+ "CJK Unified Ideographs": range(19968, 40960),
151
+ "Yi Syllables": range(40960, 42128),
152
+ "Yi Radicals": range(42128, 42192),
153
+ "Lisu": range(42192, 42240),
154
+ "Vai": range(42240, 42560),
155
+ "Cyrillic Extended-B": range(42560, 42656),
156
+ "Bamum": range(42656, 42752),
157
+ "Modifier Tone Letters": range(42752, 42784),
158
+ "Latin Extended-D": range(42784, 43008),
159
+ "Syloti Nagri": range(43008, 43056),
160
+ "Common Indic Number Forms": range(43056, 43072),
161
+ "Phags-pa": range(43072, 43136),
162
+ "Saurashtra": range(43136, 43232),
163
+ "Devanagari Extended": range(43232, 43264),
164
+ "Kayah Li": range(43264, 43312),
165
+ "Rejang": range(43312, 43360),
166
+ "Hangul Jamo Extended-A": range(43360, 43392),
167
+ "Javanese": range(43392, 43488),
168
+ "Myanmar Extended-B": range(43488, 43520),
169
+ "Cham": range(43520, 43616),
170
+ "Myanmar Extended-A": range(43616, 43648),
171
+ "Tai Viet": range(43648, 43744),
172
+ "Meetei Mayek Extensions": range(43744, 43776),
173
+ "Ethiopic Extended-A": range(43776, 43824),
174
+ "Latin Extended-E": range(43824, 43888),
175
+ "Cherokee Supplement": range(43888, 43968),
176
+ "Meetei Mayek": range(43968, 44032),
177
+ "Hangul Syllables": range(44032, 55216),
178
+ "Hangul Jamo Extended-B": range(55216, 55296),
179
+ "High Surrogates": range(55296, 56192),
180
+ "High Private Use Surrogates": range(56192, 56320),
181
+ "Low Surrogates": range(56320, 57344),
182
+ "Private Use Area": range(57344, 63744),
183
+ "CJK Compatibility Ideographs": range(63744, 64256),
184
+ "Alphabetic Presentation Forms": range(64256, 64336),
185
+ "Arabic Presentation Forms-A": range(64336, 65024),
186
+ "Variation Selectors": range(65024, 65040),
187
+ "Vertical Forms": range(65040, 65056),
188
+ "Combining Half Marks": range(65056, 65072),
189
+ "CJK Compatibility Forms": range(65072, 65104),
190
+ "Small Form Variants": range(65104, 65136),
191
+ "Arabic Presentation Forms-B": range(65136, 65280),
192
+ "Halfwidth and Fullwidth Forms": range(65280, 65520),
193
+ "Specials": range(65520, 65536),
194
+ "Linear B Syllabary": range(65536, 65664),
195
+ "Linear B Ideograms": range(65664, 65792),
196
+ "Aegean Numbers": range(65792, 65856),
197
+ "Ancient Greek Numbers": range(65856, 65936),
198
+ "Ancient Symbols": range(65936, 66000),
199
+ "Phaistos Disc": range(66000, 66048),
200
+ "Lycian": range(66176, 66208),
201
+ "Carian": range(66208, 66272),
202
+ "Coptic Epact Numbers": range(66272, 66304),
203
+ "Old Italic": range(66304, 66352),
204
+ "Gothic": range(66352, 66384),
205
+ "Old Permic": range(66384, 66432),
206
+ "Ugaritic": range(66432, 66464),
207
+ "Old Persian": range(66464, 66528),
208
+ "Deseret": range(66560, 66640),
209
+ "Shavian": range(66640, 66688),
210
+ "Osmanya": range(66688, 66736),
211
+ "Osage": range(66736, 66816),
212
+ "Elbasan": range(66816, 66864),
213
+ "Caucasian Albanian": range(66864, 66928),
214
+ "Vithkuqi": range(66928, 67008),
215
+ "Linear A": range(67072, 67456),
216
+ "Latin Extended-F": range(67456, 67520),
217
+ "Cypriot Syllabary": range(67584, 67648),
218
+ "Imperial Aramaic": range(67648, 67680),
219
+ "Palmyrene": range(67680, 67712),
220
+ "Nabataean": range(67712, 67760),
221
+ "Hatran": range(67808, 67840),
222
+ "Phoenician": range(67840, 67872),
223
+ "Lydian": range(67872, 67904),
224
+ "Meroitic Hieroglyphs": range(67968, 68000),
225
+ "Meroitic Cursive": range(68000, 68096),
226
+ "Kharoshthi": range(68096, 68192),
227
+ "Old South Arabian": range(68192, 68224),
228
+ "Old North Arabian": range(68224, 68256),
229
+ "Manichaean": range(68288, 68352),
230
+ "Avestan": range(68352, 68416),
231
+ "Inscriptional Parthian": range(68416, 68448),
232
+ "Inscriptional Pahlavi": range(68448, 68480),
233
+ "Psalter Pahlavi": range(68480, 68528),
234
+ "Old Turkic": range(68608, 68688),
235
+ "Old Hungarian": range(68736, 68864),
236
+ "Hanifi Rohingya": range(68864, 68928),
237
+ "Rumi Numeral Symbols": range(69216, 69248),
238
+ "Yezidi": range(69248, 69312),
239
+ "Arabic Extended-C": range(69312, 69376),
240
+ "Old Sogdian": range(69376, 69424),
241
+ "Sogdian": range(69424, 69488),
242
+ "Old Uyghur": range(69488, 69552),
243
+ "Chorasmian": range(69552, 69600),
244
+ "Elymaic": range(69600, 69632),
245
+ "Brahmi": range(69632, 69760),
246
+ "Kaithi": range(69760, 69840),
247
+ "Sora Sompeng": range(69840, 69888),
248
+ "Chakma": range(69888, 69968),
249
+ "Mahajani": range(69968, 70016),
250
+ "Sharada": range(70016, 70112),
251
+ "Sinhala Archaic Numbers": range(70112, 70144),
252
+ "Khojki": range(70144, 70224),
253
+ "Multani": range(70272, 70320),
254
+ "Khudawadi": range(70320, 70400),
255
+ "Grantha": range(70400, 70528),
256
+ "Newa": range(70656, 70784),
257
+ "Tirhuta": range(70784, 70880),
258
+ "Siddham": range(71040, 71168),
259
+ "Modi": range(71168, 71264),
260
+ "Mongolian Supplement": range(71264, 71296),
261
+ "Takri": range(71296, 71376),
262
+ "Ahom": range(71424, 71504),
263
+ "Dogra": range(71680, 71760),
264
+ "Warang Citi": range(71840, 71936),
265
+ "Dives Akuru": range(71936, 72032),
266
+ "Nandinagari": range(72096, 72192),
267
+ "Zanabazar Square": range(72192, 72272),
268
+ "Soyombo": range(72272, 72368),
269
+ "Unified Canadian Aboriginal Syllabics Extended-A": range(72368, 72384),
270
+ "Pau Cin Hau": range(72384, 72448),
271
+ "Devanagari Extended-A": range(72448, 72544),
272
+ "Bhaiksuki": range(72704, 72816),
273
+ "Marchen": range(72816, 72896),
274
+ "Masaram Gondi": range(72960, 73056),
275
+ "Gunjala Gondi": range(73056, 73136),
276
+ "Makasar": range(73440, 73472),
277
+ "Kawi": range(73472, 73568),
278
+ "Lisu Supplement": range(73648, 73664),
279
+ "Tamil Supplement": range(73664, 73728),
280
+ "Cuneiform": range(73728, 74752),
281
+ "Cuneiform Numbers and Punctuation": range(74752, 74880),
282
+ "Early Dynastic Cuneiform": range(74880, 75088),
283
+ "Cypro-Minoan": range(77712, 77824),
284
+ "Egyptian Hieroglyphs": range(77824, 78896),
285
+ "Egyptian Hieroglyph Format Controls": range(78896, 78944),
286
+ "Anatolian Hieroglyphs": range(82944, 83584),
287
+ "Bamum Supplement": range(92160, 92736),
288
+ "Mro": range(92736, 92784),
289
+ "Tangsa": range(92784, 92880),
290
+ "Bassa Vah": range(92880, 92928),
291
+ "Pahawh Hmong": range(92928, 93072),
292
+ "Medefaidrin": range(93760, 93856),
293
+ "Miao": range(93952, 94112),
294
+ "Ideographic Symbols and Punctuation": range(94176, 94208),
295
+ "Tangut": range(94208, 100352),
296
+ "Tangut Components": range(100352, 101120),
297
+ "Khitan Small Script": range(101120, 101632),
298
+ "Tangut Supplement": range(101632, 101760),
299
+ "Kana Extended-B": range(110576, 110592),
300
+ "Kana Supplement": range(110592, 110848),
301
+ "Kana Extended-A": range(110848, 110896),
302
+ "Small Kana Extension": range(110896, 110960),
303
+ "Nushu": range(110960, 111360),
304
+ "Duployan": range(113664, 113824),
305
+ "Shorthand Format Controls": range(113824, 113840),
306
+ "Znamenny Musical Notation": range(118528, 118736),
307
+ "Byzantine Musical Symbols": range(118784, 119040),
308
+ "Musical Symbols": range(119040, 119296),
309
+ "Ancient Greek Musical Notation": range(119296, 119376),
310
+ "Kaktovik Numerals": range(119488, 119520),
311
+ "Mayan Numerals": range(119520, 119552),
312
+ "Tai Xuan Jing Symbols": range(119552, 119648),
313
+ "Counting Rod Numerals": range(119648, 119680),
314
+ "Mathematical Alphanumeric Symbols": range(119808, 120832),
315
+ "Sutton SignWriting": range(120832, 121520),
316
+ "Latin Extended-G": range(122624, 122880),
317
+ "Glagolitic Supplement": range(122880, 122928),
318
+ "Cyrillic Extended-D": range(122928, 123024),
319
+ "Nyiakeng Puachue Hmong": range(123136, 123216),
320
+ "Toto": range(123536, 123584),
321
+ "Wancho": range(123584, 123648),
322
+ "Nag Mundari": range(124112, 124160),
323
+ "Ethiopic Extended-B": range(124896, 124928),
324
+ "Mende Kikakui": range(124928, 125152),
325
+ "Adlam": range(125184, 125280),
326
+ "Indic Siyaq Numbers": range(126064, 126144),
327
+ "Ottoman Siyaq Numbers": range(126208, 126288),
328
+ "Arabic Mathematical Alphabetic Symbols": range(126464, 126720),
329
+ "Mahjong Tiles": range(126976, 127024),
330
+ "Domino Tiles": range(127024, 127136),
331
+ "Playing Cards": range(127136, 127232),
332
+ "Enclosed Alphanumeric Supplement": range(127232, 127488),
333
+ "Enclosed Ideographic Supplement": range(127488, 127744),
334
+ "Miscellaneous Symbols and Pictographs": range(127744, 128512),
335
+ "Emoticons range(Emoji)": range(128512, 128592),
336
+ "Ornamental Dingbats": range(128592, 128640),
337
+ "Transport and Map Symbols": range(128640, 128768),
338
+ "Alchemical Symbols": range(128768, 128896),
339
+ "Geometric Shapes Extended": range(128896, 129024),
340
+ "Supplemental Arrows-C": range(129024, 129280),
341
+ "Supplemental Symbols and Pictographs": range(129280, 129536),
342
+ "Chess Symbols": range(129536, 129648),
343
+ "Symbols and Pictographs Extended-A": range(129648, 129792),
344
+ "Symbols for Legacy Computing": range(129792, 130048),
345
+ "CJK Unified Ideographs Extension B": range(131072, 173792),
346
+ "CJK Unified Ideographs Extension C": range(173824, 177984),
347
+ "CJK Unified Ideographs Extension D": range(177984, 178208),
348
+ "CJK Unified Ideographs Extension E": range(178208, 183984),
349
+ "CJK Unified Ideographs Extension F": range(183984, 191472),
350
+ "CJK Compatibility Ideographs Supplement": range(194560, 195104),
351
+ "CJK Unified Ideographs Extension G": range(196608, 201552),
352
+ "CJK Unified Ideographs Extension H": range(201552, 205744),
353
+ "Tags": range(917504, 917632),
354
+ "Variation Selectors Supplement": range(917760, 918000),
355
+ "Supplementary Private Use Area-A": range(983040, 1048576),
356
+ "Supplementary Private Use Area-B": range(1048576, 1114112),
357
+ }
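(Illustrative aside, not part of constant.py:) a block table like UNICODE_RANGES_COMBINED is consumed by mapping a character's code point to the first range that contains it; upstream charset-normalizer exposes a cached helper of this shape as utils.unicode_range. A minimal standalone sketch using a two-entry sample table:

from typing import Dict, Optional

def lookup_unicode_block(character: str, table: Dict[str, range]) -> Optional[str]:
    # Return the name of the first block whose range contains the code point.
    code_point = ord(character)
    for block_name, span in table.items():
        if code_point in span:
            return block_name
    return None

sample_table = {"Basic Latin": range(32, 128), "Latin-1 Supplement": range(128, 256)}
print(lookup_unicode_block("é", sample_table))  # Latin-1 Supplement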
358
+
359
+
360
+ UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [
361
+ "Supplement",
362
+ "Extended",
363
+ "Extensions",
364
+ "Modifier",
365
+ "Marks",
366
+ "Punctuation",
367
+ "Symbols",
368
+ "Forms",
369
+ "Operators",
370
+ "Miscellaneous",
371
+ "Drawing",
372
+ "Block",
373
+ "Shapes",
374
+ "Supplemental",
375
+ "Tags",
376
+ ]
377
+
378
+ RE_POSSIBLE_ENCODING_INDICATION = re_compile(
379
+ r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
380
+ IGNORECASE,
381
+ )
382
+
383
+ IANA_NO_ALIASES = [
384
+ "cp720",
385
+ "cp737",
386
+ "cp856",
387
+ "cp874",
388
+ "cp875",
389
+ "cp1006",
390
+ "koi8_r",
391
+ "koi8_t",
392
+ "koi8_u",
393
+ ]
394
+
395
+ IANA_SUPPORTED: List[str] = sorted(
396
+ filter(
397
+ lambda x: x.endswith("_codec") is False
398
+ and x not in {"rot_13", "tactis", "mbcs"},
399
+ list(set(aliases.values())) + IANA_NO_ALIASES,
400
+ )
401
+ )
402
+
403
+ IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED)
404
+
405
+ # pre-computed code page that are similar using the function cp_similarity.
406
+ IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = {
407
+ "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
408
+ "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
409
+ "cp1125": ["cp866"],
410
+ "cp1140": ["cp037", "cp1026", "cp273", "cp500"],
411
+ "cp1250": ["iso8859_2"],
412
+ "cp1251": ["kz1048", "ptcp154"],
413
+ "cp1252": ["iso8859_15", "iso8859_9", "latin_1"],
414
+ "cp1253": ["iso8859_7"],
415
+ "cp1254": ["iso8859_15", "iso8859_9", "latin_1"],
416
+ "cp1257": ["iso8859_13"],
417
+ "cp273": ["cp037", "cp1026", "cp1140", "cp500"],
418
+ "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"],
419
+ "cp500": ["cp037", "cp1026", "cp1140", "cp273"],
420
+ "cp850": ["cp437", "cp857", "cp858", "cp865"],
421
+ "cp857": ["cp850", "cp858", "cp865"],
422
+ "cp858": ["cp437", "cp850", "cp857", "cp865"],
423
+ "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"],
424
+ "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"],
425
+ "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"],
426
+ "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"],
427
+ "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"],
428
+ "cp866": ["cp1125"],
429
+ "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"],
430
+ "iso8859_11": ["tis_620"],
431
+ "iso8859_13": ["cp1257"],
432
+ "iso8859_14": [
433
+ "iso8859_10",
434
+ "iso8859_15",
435
+ "iso8859_16",
436
+ "iso8859_3",
437
+ "iso8859_9",
438
+ "latin_1",
439
+ ],
440
+ "iso8859_15": [
441
+ "cp1252",
442
+ "cp1254",
443
+ "iso8859_10",
444
+ "iso8859_14",
445
+ "iso8859_16",
446
+ "iso8859_3",
447
+ "iso8859_9",
448
+ "latin_1",
449
+ ],
450
+ "iso8859_16": [
451
+ "iso8859_14",
452
+ "iso8859_15",
453
+ "iso8859_2",
454
+ "iso8859_3",
455
+ "iso8859_9",
456
+ "latin_1",
457
+ ],
458
+ "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"],
459
+ "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"],
460
+ "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"],
461
+ "iso8859_7": ["cp1253"],
462
+ "iso8859_9": [
463
+ "cp1252",
464
+ "cp1254",
465
+ "cp1258",
466
+ "iso8859_10",
467
+ "iso8859_14",
468
+ "iso8859_15",
469
+ "iso8859_16",
470
+ "iso8859_3",
471
+ "iso8859_4",
472
+ "latin_1",
473
+ ],
474
+ "kz1048": ["cp1251", "ptcp154"],
475
+ "latin_1": [
476
+ "cp1252",
477
+ "cp1254",
478
+ "cp1258",
479
+ "iso8859_10",
480
+ "iso8859_14",
481
+ "iso8859_15",
482
+ "iso8859_16",
483
+ "iso8859_3",
484
+ "iso8859_4",
485
+ "iso8859_9",
486
+ ],
487
+ "mac_iceland": ["mac_roman", "mac_turkish"],
488
+ "mac_roman": ["mac_iceland", "mac_turkish"],
489
+ "mac_turkish": ["mac_iceland", "mac_roman"],
490
+ "ptcp154": ["cp1251", "kz1048"],
491
+ "tis_620": ["iso8859_11"],
492
+ }
493
+
494
+
495
+ CHARDET_CORRESPONDENCE: Dict[str, str] = {
496
+ "iso2022_kr": "ISO-2022-KR",
497
+ "iso2022_jp": "ISO-2022-JP",
498
+ "euc_kr": "EUC-KR",
499
+ "tis_620": "TIS-620",
500
+ "utf_32": "UTF-32",
501
+ "euc_jp": "EUC-JP",
502
+ "koi8_r": "KOI8-R",
503
+ "iso8859_1": "ISO-8859-1",
504
+ "iso8859_2": "ISO-8859-2",
505
+ "iso8859_5": "ISO-8859-5",
506
+ "iso8859_6": "ISO-8859-6",
507
+ "iso8859_7": "ISO-8859-7",
508
+ "iso8859_8": "ISO-8859-8",
509
+ "utf_16": "UTF-16",
510
+ "cp855": "IBM855",
511
+ "mac_cyrillic": "MacCyrillic",
512
+ "gb2312": "GB2312",
513
+ "gb18030": "GB18030",
514
+ "cp932": "CP932",
515
+ "cp866": "IBM866",
516
+ "utf_8": "utf-8",
517
+ "utf_8_sig": "UTF-8-SIG",
518
+ "shift_jis": "SHIFT_JIS",
519
+ "big5": "Big5",
520
+ "cp1250": "windows-1250",
521
+ "cp1251": "windows-1251",
522
+ "cp1252": "Windows-1252",
523
+ "cp1253": "windows-1253",
524
+ "cp1255": "windows-1255",
525
+ "cp1256": "windows-1256",
526
+ "cp1254": "Windows-1254",
527
+ "cp949": "CP949",
528
+ }
529
+
530
+
531
+ COMMON_SAFE_ASCII_CHARACTERS: Set[str] = {
532
+ "<",
533
+ ">",
534
+ "=",
535
+ ":",
536
+ "/",
537
+ "&",
538
+ ";",
539
+ "{",
540
+ "}",
541
+ "[",
542
+ "]",
543
+ ",",
544
+ "|",
545
+ '"',
546
+ "-",
547
+ }
548
+
549
+
550
+ KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"}
551
+ ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"}
552
+
553
+ # Logging LEVEL below DEBUG
554
+ TRACE: int = 5
555
+
556
+
557
+ # Language label that contain the em dash "—"
558
+ # character are to be considered alternative seq to origin
559
+ FREQUENCIES: Dict[str, List[str]] = {
560
+ "English": [
561
+ "e",
562
+ "a",
563
+ "t",
564
+ "i",
565
+ "o",
566
+ "n",
567
+ "s",
568
+ "r",
569
+ "h",
570
+ "l",
571
+ "d",
572
+ "c",
573
+ "u",
574
+ "m",
575
+ "f",
576
+ "p",
577
+ "g",
578
+ "w",
579
+ "y",
580
+ "b",
581
+ "v",
582
+ "k",
583
+ "x",
584
+ "j",
585
+ "z",
586
+ "q",
587
+ ],
588
+ "English—": [
589
+ "e",
590
+ "a",
591
+ "t",
592
+ "i",
593
+ "o",
594
+ "n",
595
+ "s",
596
+ "r",
597
+ "h",
598
+ "l",
599
+ "d",
600
+ "c",
601
+ "m",
602
+ "u",
603
+ "f",
604
+ "p",
605
+ "g",
606
+ "w",
607
+ "b",
608
+ "y",
609
+ "v",
610
+ "k",
611
+ "j",
612
+ "x",
613
+ "z",
614
+ "q",
615
+ ],
616
+ "German": [
617
+ "e",
618
+ "n",
619
+ "i",
620
+ "r",
621
+ "s",
622
+ "t",
623
+ "a",
624
+ "d",
625
+ "h",
626
+ "u",
627
+ "l",
628
+ "g",
629
+ "o",
630
+ "c",
631
+ "m",
632
+ "b",
633
+ "f",
634
+ "k",
635
+ "w",
636
+ "z",
637
+ "p",
638
+ "v",
639
+ "ü",
640
+ "ä",
641
+ "ö",
642
+ "j",
643
+ ],
644
+ "French": [
645
+ "e",
646
+ "a",
647
+ "s",
648
+ "n",
649
+ "i",
650
+ "t",
651
+ "r",
652
+ "l",
653
+ "u",
654
+ "o",
655
+ "d",
656
+ "c",
657
+ "p",
658
+ "m",
659
+ "é",
660
+ "v",
661
+ "g",
662
+ "f",
663
+ "b",
664
+ "h",
665
+ "q",
666
+ "à",
667
+ "x",
668
+ "è",
669
+ "y",
670
+ "j",
671
+ ],
672
+ "Dutch": [
673
+ "e",
674
+ "n",
675
+ "a",
676
+ "i",
677
+ "r",
678
+ "t",
679
+ "o",
680
+ "d",
681
+ "s",
682
+ "l",
683
+ "g",
684
+ "h",
685
+ "v",
686
+ "m",
687
+ "u",
688
+ "k",
689
+ "c",
690
+ "p",
691
+ "b",
692
+ "w",
693
+ "j",
694
+ "z",
695
+ "f",
696
+ "y",
697
+ "x",
698
+ "ë",
699
+ ],
700
+ "Italian": [
701
+ "e",
702
+ "i",
703
+ "a",
704
+ "o",
705
+ "n",
706
+ "l",
707
+ "t",
708
+ "r",
709
+ "s",
710
+ "c",
711
+ "d",
712
+ "u",
713
+ "p",
714
+ "m",
715
+ "g",
716
+ "v",
717
+ "f",
718
+ "b",
719
+ "z",
720
+ "h",
721
+ "q",
722
+ "è",
723
+ "à",
724
+ "k",
725
+ "y",
726
+ "ò",
727
+ ],
728
+ "Polish": [
729
+ "a",
730
+ "i",
731
+ "o",
732
+ "e",
733
+ "n",
734
+ "r",
735
+ "z",
736
+ "w",
737
+ "s",
738
+ "c",
739
+ "t",
740
+ "k",
741
+ "y",
742
+ "d",
743
+ "p",
744
+ "m",
745
+ "u",
746
+ "l",
747
+ "j",
748
+ "ł",
749
+ "g",
750
+ "b",
751
+ "h",
752
+ "ą",
753
+ "ę",
754
+ "ó",
755
+ ],
756
+ "Spanish": [
757
+ "e",
758
+ "a",
759
+ "o",
760
+ "n",
761
+ "s",
762
+ "r",
763
+ "i",
764
+ "l",
765
+ "d",
766
+ "t",
767
+ "c",
768
+ "u",
769
+ "m",
770
+ "p",
771
+ "b",
772
+ "g",
773
+ "v",
774
+ "f",
775
+ "y",
776
+ "ó",
777
+ "h",
778
+ "q",
779
+ "í",
780
+ "j",
781
+ "z",
782
+ "á",
783
+ ],
784
+ "Russian": [
785
+ "о",
786
+ "а",
787
+ "е",
788
+ "и",
789
+ "н",
790
+ "с",
791
+ "т",
792
+ "р",
793
+ "в",
794
+ "л",
795
+ "к",
796
+ "м",
797
+ "д",
798
+ "п",
799
+ "у",
800
+ "г",
801
+ "я",
802
+ "ы",
803
+ "з",
804
+ "б",
805
+ "й",
806
+ "ь",
807
+ "ч",
808
+ "х",
809
+ "ж",
810
+ "ц",
811
+ ],
812
+ # Jap-Kanji
813
+ "Japanese": [
814
+ "人",
815
+ "一",
816
+ "大",
817
+ "亅",
818
+ "丁",
819
+ "丨",
820
+ "竹",
821
+ "笑",
822
+ "口",
823
+ "日",
824
+ "今",
825
+ "二",
826
+ "彳",
827
+ "行",
828
+ "十",
829
+ "土",
830
+ "丶",
831
+ "寸",
832
+ "寺",
833
+ "時",
834
+ "乙",
835
+ "丿",
836
+ "乂",
837
+ "气",
838
+ "気",
839
+ "冂",
840
+ "巾",
841
+ "亠",
842
+ "市",
843
+ "目",
844
+ "儿",
845
+ "見",
846
+ "八",
847
+ "小",
848
+ "凵",
849
+ "県",
850
+ "月",
851
+ "彐",
852
+ "門",
853
+ "間",
854
+ "木",
855
+ "東",
856
+ "山",
857
+ "出",
858
+ "本",
859
+ "中",
860
+ "刀",
861
+ "分",
862
+ "耳",
863
+ "又",
864
+ "取",
865
+ "最",
866
+ "言",
867
+ "田",
868
+ "心",
869
+ "思",
870
+ "刂",
871
+ "前",
872
+ "京",
873
+ "尹",
874
+ "事",
875
+ "生",
876
+ "厶",
877
+ "云",
878
+ "会",
879
+ "未",
880
+ "来",
881
+ "白",
882
+ "冫",
883
+ "楽",
884
+ "灬",
885
+ "馬",
886
+ "尸",
887
+ "尺",
888
+ "駅",
889
+ "明",
890
+ "耂",
891
+ "者",
892
+ "了",
893
+ "阝",
894
+ "都",
895
+ "高",
896
+ "卜",
897
+ "占",
898
+ "厂",
899
+ "广",
900
+ "店",
901
+ "子",
902
+ "申",
903
+ "奄",
904
+ "亻",
905
+ "俺",
906
+ "上",
907
+ "方",
908
+ "冖",
909
+ "学",
910
+ "衣",
911
+ "艮",
912
+ "食",
913
+ "自",
914
+ ],
915
+ # Jap-Katakana
916
+ "Japanese—": [
917
+ "ー",
918
+ "ン",
919
+ "ス",
920
+ "・",
921
+ "ル",
922
+ "ト",
923
+ "リ",
924
+ "イ",
925
+ "ア",
926
+ "ラ",
927
+ "ッ",
928
+ "ク",
929
+ "ド",
930
+ "シ",
931
+ "レ",
932
+ "ジ",
933
+ "タ",
934
+ "フ",
935
+ "ロ",
936
+ "カ",
937
+ "テ",
938
+ "マ",
939
+ "ィ",
940
+ "グ",
941
+ "バ",
942
+ "ム",
943
+ "プ",
944
+ "オ",
945
+ "コ",
946
+ "デ",
947
+ "ニ",
948
+ "ウ",
949
+ "メ",
950
+ "サ",
951
+ "ビ",
952
+ "ナ",
953
+ "ブ",
954
+ "ャ",
955
+ "エ",
956
+ "ュ",
957
+ "チ",
958
+ "キ",
959
+ "ズ",
960
+ "ダ",
961
+ "パ",
962
+ "ミ",
963
+ "ェ",
964
+ "ョ",
965
+ "ハ",
966
+ "セ",
967
+ "ベ",
968
+ "ガ",
969
+ "モ",
970
+ "ツ",
971
+ "ネ",
972
+ "ボ",
973
+ "ソ",
974
+ "ノ",
975
+ "ァ",
976
+ "ヴ",
977
+ "ワ",
978
+ "ポ",
979
+ "ペ",
980
+ "ピ",
981
+ "ケ",
982
+ "ゴ",
983
+ "ギ",
984
+ "ザ",
985
+ "ホ",
986
+ "ゲ",
987
+ "ォ",
988
+ "ヤ",
989
+ "ヒ",
990
+ "ユ",
991
+ "ヨ",
992
+ "ヘ",
993
+ "ゼ",
994
+ "ヌ",
995
+ "ゥ",
996
+ "ゾ",
997
+ "ヶ",
998
+ "ヂ",
999
+ "ヲ",
1000
+ "ヅ",
1001
+ "ヵ",
1002
+ "ヱ",
1003
+ "ヰ",
1004
+ "ヮ",
1005
+ "ヽ",
1006
+ "゠",
1007
+ "ヾ",
1008
+ "ヷ",
1009
+ "ヿ",
1010
+ "ヸ",
1011
+ "ヹ",
1012
+ "ヺ",
1013
+ ],
1014
+ # Jap-Hiragana
1015
+ "Japanese——": [
1016
+ "の",
1017
+ "に",
1018
+ "る",
1019
+ "た",
1020
+ "と",
1021
+ "は",
1022
+ "し",
1023
+ "い",
1024
+ "を",
1025
+ "で",
1026
+ "て",
1027
+ "が",
1028
+ "な",
1029
+ "れ",
1030
+ "か",
1031
+ "ら",
1032
+ "さ",
1033
+ "っ",
1034
+ "り",
1035
+ "す",
1036
+ "あ",
1037
+ "も",
1038
+ "こ",
1039
+ "ま",
1040
+ "う",
1041
+ "く",
1042
+ "よ",
1043
+ "き",
1044
+ "ん",
1045
+ "め",
1046
+ "お",
1047
+ "け",
1048
+ "そ",
1049
+ "つ",
1050
+ "だ",
1051
+ "や",
1052
+ "え",
1053
+ "ど",
1054
+ "わ",
1055
+ "ち",
1056
+ "み",
1057
+ "せ",
1058
+ "じ",
1059
+ "ば",
1060
+ "へ",
1061
+ "び",
1062
+ "ず",
1063
+ "ろ",
1064
+ "ほ",
1065
+ "げ",
1066
+ "む",
1067
+ "べ",
1068
+ "ひ",
1069
+ "ょ",
1070
+ "ゆ",
1071
+ "ぶ",
1072
+ "ご",
1073
+ "ゃ",
1074
+ "ね",
1075
+ "ふ",
1076
+ "ぐ",
1077
+ "ぎ",
1078
+ "ぼ",
1079
+ "ゅ",
1080
+ "づ",
1081
+ "ざ",
1082
+ "ぞ",
1083
+ "ぬ",
1084
+ "ぜ",
1085
+ "ぱ",
1086
+ "ぽ",
1087
+ "ぷ",
1088
+ "ぴ",
1089
+ "ぃ",
1090
+ "ぁ",
1091
+ "ぇ",
1092
+ "ぺ",
1093
+ "ゞ",
1094
+ "ぢ",
1095
+ "ぉ",
1096
+ "ぅ",
1097
+ "ゐ",
1098
+ "ゝ",
1099
+ "ゑ",
1100
+ "゛",
1101
+ "゜",
1102
+ "ゎ",
1103
+ "ゔ",
1104
+ "゚",
1105
+ "ゟ",
1106
+ "゙",
1107
+ "ゕ",
1108
+ "ゖ",
1109
+ ],
1110
+ "Portuguese": [
1111
+ "a",
1112
+ "e",
1113
+ "o",
1114
+ "s",
1115
+ "i",
1116
+ "r",
1117
+ "d",
1118
+ "n",
1119
+ "t",
1120
+ "m",
1121
+ "u",
1122
+ "c",
1123
+ "l",
1124
+ "p",
1125
+ "g",
1126
+ "v",
1127
+ "b",
1128
+ "f",
1129
+ "h",
1130
+ "ã",
1131
+ "q",
1132
+ "é",
1133
+ "ç",
1134
+ "á",
1135
+ "z",
1136
+ "í",
1137
+ ],
1138
+ "Swedish": [
1139
+ "e",
1140
+ "a",
1141
+ "n",
1142
+ "r",
1143
+ "t",
1144
+ "s",
1145
+ "i",
1146
+ "l",
1147
+ "d",
1148
+ "o",
1149
+ "m",
1150
+ "k",
1151
+ "g",
1152
+ "v",
1153
+ "h",
1154
+ "f",
1155
+ "u",
1156
+ "p",
1157
+ "ä",
1158
+ "c",
1159
+ "b",
1160
+ "ö",
1161
+ "å",
1162
+ "y",
1163
+ "j",
1164
+ "x",
1165
+ ],
1166
+ "Chinese": [
1167
+ "的",
1168
+ "一",
1169
+ "是",
1170
+ "不",
1171
+ "了",
1172
+ "在",
1173
+ "人",
1174
+ "有",
1175
+ "我",
1176
+ "他",
1177
+ "这",
1178
+ "个",
1179
+ "们",
1180
+ "中",
1181
+ "来",
1182
+ "上",
1183
+ "大",
1184
+ "为",
1185
+ "和",
1186
+ "国",
1187
+ "地",
1188
+ "到",
1189
+ "以",
1190
+ "说",
1191
+ "时",
1192
+ "要",
1193
+ "就",
1194
+ "出",
1195
+ "会",
1196
+ "可",
1197
+ "也",
1198
+ "你",
1199
+ "对",
1200
+ "生",
1201
+ "能",
1202
+ "而",
1203
+ "子",
1204
+ "那",
1205
+ "得",
1206
+ "于",
1207
+ "着",
1208
+ "下",
1209
+ "自",
1210
+ "之",
1211
+ "年",
1212
+ "过",
1213
+ "发",
1214
+ "后",
1215
+ "作",
1216
+ "里",
1217
+ "用",
1218
+ "道",
1219
+ "行",
1220
+ "所",
1221
+ "然",
1222
+ "家",
1223
+ "种",
1224
+ "事",
1225
+ "成",
1226
+ "方",
1227
+ "多",
1228
+ "经",
1229
+ "么",
1230
+ "去",
1231
+ "法",
1232
+ "学",
1233
+ "如",
1234
+ "都",
1235
+ "同",
1236
+ "现",
1237
+ "当",
1238
+ "没",
1239
+ "动",
1240
+ "面",
1241
+ "起",
1242
+ "看",
1243
+ "定",
1244
+ "天",
1245
+ "分",
1246
+ "还",
1247
+ "进",
1248
+ "好",
1249
+ "小",
1250
+ "部",
1251
+ "其",
1252
+ "些",
1253
+ "主",
1254
+ "样",
1255
+ "理",
1256
+ "心",
1257
+ "她",
1258
+ "本",
1259
+ "前",
1260
+ "开",
1261
+ "但",
1262
+ "因",
1263
+ "只",
1264
+ "从",
1265
+ "想",
1266
+ "实",
1267
+ ],
1268
+ "Ukrainian": [
1269
+ "о",
1270
+ "а",
1271
+ "н",
1272
+ "і",
1273
+ "и",
1274
+ "р",
1275
+ "в",
1276
+ "т",
1277
+ "е",
1278
+ "с",
1279
+ "к",
1280
+ "л",
1281
+ "у",
1282
+ "д",
1283
+ "м",
1284
+ "п",
1285
+ "з",
1286
+ "я",
1287
+ "ь",
1288
+ "б",
1289
+ "г",
1290
+ "й",
1291
+ "ч",
1292
+ "х",
1293
+ "ц",
1294
+ "ї",
1295
+ ],
1296
+ "Norwegian": [
1297
+ "e",
1298
+ "r",
1299
+ "n",
1300
+ "t",
1301
+ "a",
1302
+ "s",
1303
+ "i",
1304
+ "o",
1305
+ "l",
1306
+ "d",
1307
+ "g",
1308
+ "k",
1309
+ "m",
1310
+ "v",
1311
+ "f",
1312
+ "p",
1313
+ "u",
1314
+ "b",
1315
+ "h",
1316
+ "å",
1317
+ "y",
1318
+ "j",
1319
+ "ø",
1320
+ "c",
1321
+ "æ",
1322
+ "w",
1323
+ ],
1324
+ "Finnish": [
1325
+ "a",
1326
+ "i",
1327
+ "n",
1328
+ "t",
1329
+ "e",
1330
+ "s",
1331
+ "l",
1332
+ "o",
1333
+ "u",
1334
+ "k",
1335
+ "ä",
1336
+ "m",
1337
+ "r",
1338
+ "v",
1339
+ "j",
1340
+ "h",
1341
+ "p",
1342
+ "y",
1343
+ "d",
1344
+ "ö",
1345
+ "g",
1346
+ "c",
1347
+ "b",
1348
+ "f",
1349
+ "w",
1350
+ "z",
1351
+ ],
1352
+ "Vietnamese": [
1353
+ "n",
1354
+ "h",
1355
+ "t",
1356
+ "i",
1357
+ "c",
1358
+ "g",
1359
+ "a",
1360
+ "o",
1361
+ "u",
1362
+ "m",
1363
+ "l",
1364
+ "r",
1365
+ "à",
1366
+ "đ",
1367
+ "s",
1368
+ "e",
1369
+ "v",
1370
+ "p",
1371
+ "b",
1372
+ "y",
1373
+ "ư",
1374
+ "d",
1375
+ "á",
1376
+ "k",
1377
+ "ộ",
1378
+ "ế",
1379
+ ],
1380
+ "Czech": [
1381
+ "o",
1382
+ "e",
1383
+ "a",
1384
+ "n",
1385
+ "t",
1386
+ "s",
1387
+ "i",
1388
+ "l",
1389
+ "v",
1390
+ "r",
1391
+ "k",
1392
+ "d",
1393
+ "u",
1394
+ "m",
1395
+ "p",
1396
+ "í",
1397
+ "c",
1398
+ "h",
1399
+ "z",
1400
+ "á",
1401
+ "y",
1402
+ "j",
1403
+ "b",
1404
+ "ě",
1405
+ "é",
1406
+ "ř",
1407
+ ],
1408
+ "Hungarian": [
1409
+ "e",
1410
+ "a",
1411
+ "t",
1412
+ "l",
1413
+ "s",
1414
+ "n",
1415
+ "k",
1416
+ "r",
1417
+ "i",
1418
+ "o",
1419
+ "z",
1420
+ "á",
1421
+ "é",
1422
+ "g",
1423
+ "m",
1424
+ "b",
1425
+ "y",
1426
+ "v",
1427
+ "d",
1428
+ "h",
1429
+ "u",
1430
+ "p",
1431
+ "j",
1432
+ "ö",
1433
+ "f",
1434
+ "c",
1435
+ ],
1436
+ "Korean": [
1437
+ "이",
1438
+ "다",
1439
+ "에",
1440
+ "의",
1441
+ "는",
1442
+ "로",
1443
+ "하",
1444
+ "을",
1445
+ "가",
1446
+ "고",
1447
+ "지",
1448
+ "서",
1449
+ "한",
1450
+ "은",
1451
+ "기",
1452
+ "으",
1453
+ "년",
1454
+ "대",
1455
+ "사",
1456
+ "시",
1457
+ "를",
1458
+ "리",
1459
+ "도",
1460
+ "인",
1461
+ "스",
1462
+ "일",
1463
+ ],
1464
+ "Indonesian": [
1465
+ "a",
1466
+ "n",
1467
+ "e",
1468
+ "i",
1469
+ "r",
1470
+ "t",
1471
+ "u",
1472
+ "s",
1473
+ "d",
1474
+ "k",
1475
+ "m",
1476
+ "l",
1477
+ "g",
1478
+ "p",
1479
+ "b",
1480
+ "o",
1481
+ "h",
1482
+ "y",
1483
+ "j",
1484
+ "c",
1485
+ "w",
1486
+ "f",
1487
+ "v",
1488
+ "z",
1489
+ "x",
1490
+ "q",
1491
+ ],
1492
+ "Turkish": [
1493
+ "a",
1494
+ "e",
1495
+ "i",
1496
+ "n",
1497
+ "r",
1498
+ "l",
1499
+ "ı",
1500
+ "k",
1501
+ "d",
1502
+ "t",
1503
+ "s",
1504
+ "m",
1505
+ "y",
1506
+ "u",
1507
+ "o",
1508
+ "b",
1509
+ "ü",
1510
+ "ş",
1511
+ "v",
1512
+ "g",
1513
+ "z",
1514
+ "h",
1515
+ "c",
1516
+ "p",
1517
+ "ç",
1518
+ "ğ",
1519
+ ],
1520
+ "Romanian": [
1521
+ "e",
1522
+ "i",
1523
+ "a",
1524
+ "r",
1525
+ "n",
1526
+ "t",
1527
+ "u",
1528
+ "l",
1529
+ "o",
1530
+ "c",
1531
+ "s",
1532
+ "d",
1533
+ "p",
1534
+ "m",
1535
+ "ă",
1536
+ "f",
1537
+ "v",
1538
+ "î",
1539
+ "g",
1540
+ "b",
1541
+ "ș",
1542
+ "ț",
1543
+ "z",
1544
+ "h",
1545
+ "â",
1546
+ "j",
1547
+ ],
1548
+ "Farsi": [
1549
+ "ا",
1550
+ "ی",
1551
+ "ر",
1552
+ "د",
1553
+ "ن",
1554
+ "ه",
1555
+ "و",
1556
+ "م",
1557
+ "ت",
1558
+ "ب",
1559
+ "س",
1560
+ "ل",
1561
+ "ک",
1562
+ "ش",
1563
+ "ز",
1564
+ "ف",
1565
+ "گ",
1566
+ "ع",
1567
+ "خ",
1568
+ "ق",
1569
+ "ج",
1570
+ "آ",
1571
+ "پ",
1572
+ "ح",
1573
+ "ط",
1574
+ "ص",
1575
+ ],
1576
+ "Arabic": [
1577
+ "ا",
1578
+ "ل",
1579
+ "ي",
1580
+ "م",
1581
+ "و",
1582
+ "ن",
1583
+ "ر",
1584
+ "ت",
1585
+ "ب",
1586
+ "ة",
1587
+ "ع",
1588
+ "د",
1589
+ "س",
1590
+ "ف",
1591
+ "ه",
1592
+ "ك",
1593
+ "ق",
1594
+ "أ",
1595
+ "ح",
1596
+ "ج",
1597
+ "ش",
1598
+ "ط",
1599
+ "ص",
1600
+ "ى",
1601
+ "خ",
1602
+ "إ",
1603
+ ],
1604
+ "Danish": [
1605
+ "e",
1606
+ "r",
1607
+ "n",
1608
+ "t",
1609
+ "a",
1610
+ "i",
1611
+ "s",
1612
+ "d",
1613
+ "l",
1614
+ "o",
1615
+ "g",
1616
+ "m",
1617
+ "k",
1618
+ "f",
1619
+ "v",
1620
+ "u",
1621
+ "b",
1622
+ "h",
1623
+ "p",
1624
+ "å",
1625
+ "y",
1626
+ "ø",
1627
+ "æ",
1628
+ "c",
1629
+ "j",
1630
+ "w",
1631
+ ],
1632
+ "Serbian": [
1633
+ "а",
1634
+ "и",
1635
+ "о",
1636
+ "е",
1637
+ "н",
1638
+ "р",
1639
+ "с",
1640
+ "у",
1641
+ "т",
1642
+ "к",
1643
+ "ј",
1644
+ "в",
1645
+ "д",
1646
+ "м",
1647
+ "п",
1648
+ "л",
1649
+ "г",
1650
+ "з",
1651
+ "б",
1652
+ "a",
1653
+ "i",
1654
+ "e",
1655
+ "o",
1656
+ "n",
1657
+ "ц",
1658
+ "ш",
1659
+ ],
1660
+ "Lithuanian": [
1661
+ "i",
1662
+ "a",
1663
+ "s",
1664
+ "o",
1665
+ "r",
1666
+ "e",
1667
+ "t",
1668
+ "n",
1669
+ "u",
1670
+ "k",
1671
+ "m",
1672
+ "l",
1673
+ "p",
1674
+ "v",
1675
+ "d",
1676
+ "j",
1677
+ "g",
1678
+ "ė",
1679
+ "b",
1680
+ "y",
1681
+ "ų",
1682
+ "š",
1683
+ "ž",
1684
+ "c",
1685
+ "ą",
1686
+ "į",
1687
+ ],
1688
+ "Slovene": [
1689
+ "e",
1690
+ "a",
1691
+ "i",
1692
+ "o",
1693
+ "n",
1694
+ "r",
1695
+ "s",
1696
+ "l",
1697
+ "t",
1698
+ "j",
1699
+ "v",
1700
+ "k",
1701
+ "d",
1702
+ "p",
1703
+ "m",
1704
+ "u",
1705
+ "z",
1706
+ "b",
1707
+ "g",
1708
+ "h",
1709
+ "č",
1710
+ "c",
1711
+ "š",
1712
+ "ž",
1713
+ "f",
1714
+ "y",
1715
+ ],
1716
+ "Slovak": [
1717
+ "o",
1718
+ "a",
1719
+ "e",
1720
+ "n",
1721
+ "i",
1722
+ "r",
1723
+ "v",
1724
+ "t",
1725
+ "s",
1726
+ "l",
1727
+ "k",
1728
+ "d",
1729
+ "m",
1730
+ "p",
1731
+ "u",
1732
+ "c",
1733
+ "h",
1734
+ "j",
1735
+ "b",
1736
+ "z",
1737
+ "á",
1738
+ "y",
1739
+ "ý",
1740
+ "í",
1741
+ "č",
1742
+ "é",
1743
+ ],
1744
+ "Hebrew": [
1745
+ "י",
1746
+ "ו",
1747
+ "ה",
1748
+ "ל",
1749
+ "ר",
1750
+ "ב",
1751
+ "ת",
1752
+ "מ",
1753
+ "א",
1754
+ "ש",
1755
+ "נ",
1756
+ "ע",
1757
+ "ם",
1758
+ "ד",
1759
+ "ק",
1760
+ "ח",
1761
+ "פ",
1762
+ "ס",
1763
+ "כ",
1764
+ "ג",
1765
+ "ט",
1766
+ "צ",
1767
+ "ן",
1768
+ "ז",
1769
+ "ך",
1770
+ ],
1771
+ "Bulgarian": [
1772
+ "а",
1773
+ "и",
1774
+ "о",
1775
+ "е",
1776
+ "н",
1777
+ "т",
1778
+ "р",
1779
+ "с",
1780
+ "в",
1781
+ "л",
1782
+ "к",
1783
+ "д",
1784
+ "п",
1785
+ "м",
1786
+ "з",
1787
+ "г",
1788
+ "я",
1789
+ "ъ",
1790
+ "у",
1791
+ "б",
1792
+ "ч",
1793
+ "ц",
1794
+ "й",
1795
+ "ж",
1796
+ "щ",
1797
+ "х",
1798
+ ],
1799
+ "Croatian": [
1800
+ "a",
1801
+ "i",
1802
+ "o",
1803
+ "e",
1804
+ "n",
1805
+ "r",
1806
+ "j",
1807
+ "s",
1808
+ "t",
1809
+ "u",
1810
+ "k",
1811
+ "l",
1812
+ "v",
1813
+ "d",
1814
+ "m",
1815
+ "p",
1816
+ "g",
1817
+ "z",
1818
+ "b",
1819
+ "c",
1820
+ "č",
1821
+ "h",
1822
+ "š",
1823
+ "ž",
1824
+ "ć",
1825
+ "f",
1826
+ ],
1827
+ "Hindi": [
1828
+ "क",
1829
+ "र",
1830
+ "स",
1831
+ "न",
1832
+ "त",
1833
+ "म",
1834
+ "ह",
1835
+ "प",
1836
+ "य",
1837
+ "ल",
1838
+ "व",
1839
+ "ज",
1840
+ "द",
1841
+ "ग",
1842
+ "ब",
1843
+ "श",
1844
+ "ट",
1845
+ "अ",
1846
+ "ए",
1847
+ "थ",
1848
+ "भ",
1849
+ "ड",
1850
+ "च",
1851
+ "ध",
1852
+ "ष",
1853
+ "इ",
1854
+ ],
1855
+ "Estonian": [
1856
+ "a",
1857
+ "i",
1858
+ "e",
1859
+ "s",
1860
+ "t",
1861
+ "l",
1862
+ "u",
1863
+ "n",
1864
+ "o",
1865
+ "k",
1866
+ "r",
1867
+ "d",
1868
+ "m",
1869
+ "v",
1870
+ "g",
1871
+ "p",
1872
+ "j",
1873
+ "h",
1874
+ "ä",
1875
+ "b",
1876
+ "õ",
1877
+ "ü",
1878
+ "f",
1879
+ "c",
1880
+ "ö",
1881
+ "y",
1882
+ ],
1883
+ "Thai": [
1884
+ "า",
1885
+ "น",
1886
+ "ร",
1887
+ "อ",
1888
+ "ก",
1889
+ "เ",
1890
+ "ง",
1891
+ "ม",
1892
+ "ย",
1893
+ "ล",
1894
+ "ว",
1895
+ "ด",
1896
+ "ท",
1897
+ "ส",
1898
+ "ต",
1899
+ "ะ",
1900
+ "ป",
1901
+ "บ",
1902
+ "ค",
1903
+ "ห",
1904
+ "แ",
1905
+ "จ",
1906
+ "พ",
1907
+ "ช",
1908
+ "ข",
1909
+ "ใ",
1910
+ ],
1911
+ "Greek": [
1912
+ "α",
1913
+ "τ",
1914
+ "ο",
1915
+ "ι",
1916
+ "ε",
1917
+ "ν",
1918
+ "ρ",
1919
+ "σ",
1920
+ "κ",
1921
+ "η",
1922
+ "π",
1923
+ "ς",
1924
+ "υ",
1925
+ "μ",
1926
+ "λ",
1927
+ "ί",
1928
+ "ό",
1929
+ "ά",
1930
+ "γ",
1931
+ "έ",
1932
+ "δ",
1933
+ "ή",
1934
+ "ω",
1935
+ "χ",
1936
+ "θ",
1937
+ "ύ",
1938
+ ],
1939
+ "Tamil": [
1940
+ "க",
1941
+ "த",
1942
+ "ப",
1943
+ "ட",
1944
+ "ர",
1945
+ "ம",
1946
+ "ல",
1947
+ "ன",
1948
+ "வ",
1949
+ "ற",
1950
+ "ய",
1951
+ "ள",
1952
+ "ச",
1953
+ "ந",
1954
+ "இ",
1955
+ "ண",
1956
+ "அ",
1957
+ "ஆ",
1958
+ "ழ",
1959
+ "ங",
1960
+ "எ",
1961
+ "உ",
1962
+ "ஒ",
1963
+ "ஸ",
1964
+ ],
1965
+ "Kazakh": [
1966
+ "а",
1967
+ "ы",
1968
+ "е",
1969
+ "н",
1970
+ "т",
1971
+ "р",
1972
+ "л",
1973
+ "і",
1974
+ "д",
1975
+ "с",
1976
+ "м",
1977
+ "қ",
1978
+ "к",
1979
+ "о",
1980
+ "б",
1981
+ "и",
1982
+ "у",
1983
+ "ғ",
1984
+ "ж",
1985
+ "ң",
1986
+ "з",
1987
+ "ш",
1988
+ "й",
1989
+ "п",
1990
+ "г",
1991
+ "ө",
1992
+ ],
1993
+ }
1994
+
1995
+ LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)
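The FREQUENCIES table lists, per language, its most common characters in rough order of popularity, and LANGUAGE_SUPPORTED_COUNT simply counts the entries. A hedged sketch of the kind of scoring such a table enables; this is a simplification for illustration, not the library's cd.py algorithm:

from collections import Counter
from typing import List

def crude_language_score(text: str, expected_letters: List[str]) -> float:
    # Fraction of the text's ten most frequent letters that also appear
    # in the language's expected character list.
    counts = Counter(ch for ch in text.lower() if ch.isalpha())
    most_common = [ch for ch, _ in counts.most_common(10)]
    if not most_common:
        return 0.0
    return sum(ch in expected_letters for ch in most_common) / len(most_common)

english_head = ["e", "a", "t", "i", "o", "n", "s", "r", "h", "l"]  # head of FREQUENCIES["English"]
print(crude_language_score("The quick brown fox jumps over the lazy dog", english_head))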
Dataset_Construction/projects/charset-normalizer/python/legacy.py ADDED
@@ -0,0 +1,65 @@
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Optional
+ from warnings import warn
+
+ from .api import from_bytes
+ from .constant import CHARDET_CORRESPONDENCE
+
+ # TODO: remove this check when dropping Python 3.7 support
+ if TYPE_CHECKING:
+     from typing_extensions import TypedDict
+
+     class ResultDict(TypedDict):
+         encoding: Optional[str]
+         language: str
+         confidence: Optional[float]
+
+
+ def detect(
+     byte_str: bytes, should_rename_legacy: bool = False, **kwargs: Any
+ ) -> ResultDict:
+     """
+     chardet legacy method
+     Detect the encoding of the given byte string. It should be mostly backward-compatible.
+     Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it)
+     This function is deprecated and should be used to migrate your project easily, consult the documentation for
+     further information. Not planned for removal.
+
+     :param byte_str: The byte sequence to examine.
+     :param should_rename_legacy: Should we rename legacy encodings
+                                  to their more modern equivalents?
+     """
+     if len(kwargs):
+         warn(
+             f"charset-normalizer disregard arguments '{','.join(list(kwargs.keys()))}' in legacy function detect()"
+         )
+
+     if not isinstance(byte_str, (bytearray, bytes)):
+         raise TypeError(  # pragma: nocover
+             "Expected object of type bytes or bytearray, got: "
+             "{0}".format(type(byte_str))
+         )
+
+     if isinstance(byte_str, bytearray):
+         byte_str = bytes(byte_str)
+
+     r = from_bytes(byte_str).best()
+
+     encoding = r.encoding if r is not None else None
+     language = r.language if r is not None and r.language != "Unknown" else ""
+     confidence = 1.0 - r.chaos if r is not None else None
+
+     # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
+     # but chardet does return 'utf-8-sig' and it is a valid codec name.
+     if r is not None and encoding == "utf_8" and r.bom:
+         encoding += "_sig"
+
+     if should_rename_legacy is False and encoding in CHARDET_CORRESPONDENCE:
+         encoding = CHARDET_CORRESPONDENCE[encoding]
+
+     return {
+         "encoding": encoding,
+         "language": language,
+         "confidence": confidence,
+     }
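legacy.py keeps the chardet-style entry point. Assuming the package re-exports detect at top level (as upstream charset-normalizer does), a minimal usage sketch; the sample payload is illustrative:

from charset_normalizer import detect  # chardet-compatible legacy API

payload = "Bonjour, ceci est un texte accentué.".encode("cp1252")
result = detect(payload)

# result mirrors chardet's return shape: a dict with "encoding",
# "language" and "confidence" keys.
print(result["encoding"], result["language"], result["confidence"])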
Dataset_Construction/projects/charset-normalizer/python/md.py ADDED
@@ -0,0 +1,615 @@
1
+ from functools import lru_cache
2
+ from logging import getLogger
3
+ from typing import List, Optional
4
+
5
+ from .constant import (
6
+ COMMON_SAFE_ASCII_CHARACTERS,
7
+ TRACE,
8
+ UNICODE_SECONDARY_RANGE_KEYWORD,
9
+ )
10
+ from .utils import (
11
+ is_accentuated,
12
+ is_arabic,
13
+ is_arabic_isolated_form,
14
+ is_case_variable,
15
+ is_cjk,
16
+ is_emoticon,
17
+ is_hangul,
18
+ is_hiragana,
19
+ is_katakana,
20
+ is_latin,
21
+ is_punctuation,
22
+ is_separator,
23
+ is_symbol,
24
+ is_thai,
25
+ is_unprintable,
26
+ remove_accent,
27
+ unicode_range,
28
+ )
29
+
30
+
31
+ class MessDetectorPlugin:
32
+ """
33
+ Base abstract class used for mess detection plugins.
34
+ All detectors MUST extend and implement given methods.
35
+ """
36
+
37
+ def eligible(self, character: str) -> bool:
38
+ """
39
+ Determine if given character should be fed in.
40
+ """
41
+ raise NotImplementedError # pragma: nocover
42
+
43
+ def feed(self, character: str) -> None:
44
+ """
45
+ The main routine to be executed upon character.
46
+ Insert the logic in witch the text would be considered chaotic.
47
+ """
48
+ raise NotImplementedError # pragma: nocover
49
+
50
+ def reset(self) -> None: # pragma: no cover
51
+ """
52
+ Permit to reset the plugin to the initial state.
53
+ """
54
+ raise NotImplementedError
55
+
56
+ @property
57
+ def ratio(self) -> float:
58
+ """
59
+ Compute the chaos ratio based on what your feed() has seen.
60
+ Must NOT be lower than 0.; No restriction gt 0.
61
+ """
62
+ raise NotImplementedError # pragma: nocover
63
+
64
+
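(Illustrative aside, not part of md.py:) the abstract interface above is what every mess-detection plugin implements; a toy subclass makes the contract concrete. It is not one of the library's plugins, and its 0.8 threshold is an arbitrary example value:

class TooManyDigitsPlugin(MessDetectorPlugin):
    # Toy detector: flags content whose printable characters are mostly digits.
    def __init__(self) -> None:
        self._character_count: int = 0
        self._digit_count: int = 0

    def eligible(self, character: str) -> bool:
        return character.isprintable()

    def feed(self, character: str) -> None:
        self._character_count += 1
        if character.isdigit():
            self._digit_count += 1

    def reset(self) -> None:
        self._character_count = 0
        self._digit_count = 0

    @property
    def ratio(self) -> float:
        if self._character_count == 0:
            return 0.0
        digit_ratio = self._digit_count / self._character_count
        return digit_ratio if digit_ratio >= 0.8 else 0.0  # illustrative threshold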
65
+ class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
66
+ def __init__(self) -> None:
67
+ self._punctuation_count: int = 0
68
+ self._symbol_count: int = 0
69
+ self._character_count: int = 0
70
+
71
+ self._last_printable_char: Optional[str] = None
72
+ self._frenzy_symbol_in_word: bool = False
73
+
74
+ def eligible(self, character: str) -> bool:
75
+ return character.isprintable()
76
+
77
+ def feed(self, character: str) -> None:
78
+ self._character_count += 1
79
+
80
+ if (
81
+ character != self._last_printable_char
82
+ and character not in COMMON_SAFE_ASCII_CHARACTERS
83
+ ):
84
+ if is_punctuation(character):
85
+ self._punctuation_count += 1
86
+ elif (
87
+ character.isdigit() is False
88
+ and is_symbol(character)
89
+ and is_emoticon(character) is False
90
+ ):
91
+ self._symbol_count += 2
92
+
93
+ self._last_printable_char = character
94
+
95
+ def reset(self) -> None: # pragma: no cover
96
+ self._punctuation_count = 0
97
+ self._character_count = 0
98
+ self._symbol_count = 0
99
+
100
+ @property
101
+ def ratio(self) -> float:
102
+ if self._character_count == 0:
103
+ return 0.0
104
+
105
+ ratio_of_punctuation: float = (
106
+ self._punctuation_count + self._symbol_count
107
+ ) / self._character_count
108
+
109
+ return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
110
+
111
+
112
+ class TooManyAccentuatedPlugin(MessDetectorPlugin):
113
+ def __init__(self) -> None:
114
+ self._character_count: int = 0
115
+ self._accentuated_count: int = 0
116
+
117
+ def eligible(self, character: str) -> bool:
118
+ return character.isalpha()
119
+
120
+ def feed(self, character: str) -> None:
121
+ self._character_count += 1
122
+
123
+ if is_accentuated(character):
124
+ self._accentuated_count += 1
125
+
126
+ def reset(self) -> None: # pragma: no cover
127
+ self._character_count = 0
128
+ self._accentuated_count = 0
129
+
130
+ @property
131
+ def ratio(self) -> float:
132
+ if self._character_count < 8:
133
+ return 0.0
134
+
135
+ ratio_of_accentuation: float = self._accentuated_count / self._character_count
136
+ return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
137
+
138
+
139
+ class UnprintablePlugin(MessDetectorPlugin):
140
+ def __init__(self) -> None:
141
+ self._unprintable_count: int = 0
142
+ self._character_count: int = 0
143
+
144
+ def eligible(self, character: str) -> bool:
145
+ return True
146
+
147
+ def feed(self, character: str) -> None:
148
+ if is_unprintable(character):
149
+ self._unprintable_count += 1
150
+ self._character_count += 1
151
+
152
+ def reset(self) -> None: # pragma: no cover
153
+ self._unprintable_count = 0
154
+
155
+ @property
156
+ def ratio(self) -> float:
157
+ if self._character_count == 0:
158
+ return 0.0
159
+
160
+ return (self._unprintable_count * 8) / self._character_count
161
+
162
+
163
+ class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
164
+ def __init__(self) -> None:
165
+ self._successive_count: int = 0
166
+ self._character_count: int = 0
167
+
168
+ self._last_latin_character: Optional[str] = None
169
+
170
+ def eligible(self, character: str) -> bool:
171
+ return character.isalpha() and is_latin(character)
172
+
173
+ def feed(self, character: str) -> None:
174
+ self._character_count += 1
175
+ if (
176
+ self._last_latin_character is not None
177
+ and is_accentuated(character)
178
+ and is_accentuated(self._last_latin_character)
179
+ ):
180
+ if character.isupper() and self._last_latin_character.isupper():
181
+ self._successive_count += 1
182
+ # Worse if it's the same char duplicated with a different accent.
183
+ if remove_accent(character) == remove_accent(self._last_latin_character):
184
+ self._successive_count += 1
185
+ self._last_latin_character = character
186
+
187
+ def reset(self) -> None: # pragma: no cover
188
+ self._successive_count = 0
189
+ self._character_count = 0
190
+ self._last_latin_character = None
191
+
192
+ @property
193
+ def ratio(self) -> float:
194
+ if self._character_count == 0:
195
+ return 0.0
196
+
197
+ return (self._successive_count * 2) / self._character_count
198
+
199
+
200
+ class SuspiciousRange(MessDetectorPlugin):
201
+ def __init__(self) -> None:
202
+ self._suspicious_successive_range_count: int = 0
203
+ self._character_count: int = 0
204
+ self._last_printable_seen: Optional[str] = None
205
+
206
+ def eligible(self, character: str) -> bool:
207
+ return character.isprintable()
208
+
209
+ def feed(self, character: str) -> None:
210
+ self._character_count += 1
211
+
212
+ if (
213
+ character.isspace()
214
+ or is_punctuation(character)
215
+ or character in COMMON_SAFE_ASCII_CHARACTERS
216
+ ):
217
+ self._last_printable_seen = None
218
+ return
219
+
220
+ if self._last_printable_seen is None:
221
+ self._last_printable_seen = character
222
+ return
223
+
224
+ unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen)
225
+ unicode_range_b: Optional[str] = unicode_range(character)
226
+
227
+ if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
228
+ self._suspicious_successive_range_count += 1
229
+
230
+ self._last_printable_seen = character
231
+
232
+ def reset(self) -> None: # pragma: no cover
233
+ self._character_count = 0
234
+ self._suspicious_successive_range_count = 0
235
+ self._last_printable_seen = None
236
+
237
+ @property
238
+ def ratio(self) -> float:
239
+ if self._character_count <= 24:
240
+ return 0.0
241
+
242
+ ratio_of_suspicious_range_usage: float = (
243
+ self._suspicious_successive_range_count * 2
244
+ ) / self._character_count
245
+
246
+ return ratio_of_suspicious_range_usage
247
+
248
+
249
+ class SuperWeirdWordPlugin(MessDetectorPlugin):
250
+ def __init__(self) -> None:
251
+ self._word_count: int = 0
252
+ self._bad_word_count: int = 0
253
+ self._foreign_long_count: int = 0
254
+
255
+ self._is_current_word_bad: bool = False
256
+ self._foreign_long_watch: bool = False
257
+
258
+ self._character_count: int = 0
259
+ self._bad_character_count: int = 0
260
+
261
+ self._buffer: str = ""
262
+ self._buffer_accent_count: int = 0
263
+
264
+ def eligible(self, character: str) -> bool:
265
+ return True
266
+
267
+ def feed(self, character: str) -> None:
268
+ if character.isalpha():
269
+ self._buffer += character
270
+ if is_accentuated(character):
271
+ self._buffer_accent_count += 1
272
+ if (
273
+ self._foreign_long_watch is False
274
+ and (is_latin(character) is False or is_accentuated(character))
275
+ and is_cjk(character) is False
276
+ and is_hangul(character) is False
277
+ and is_katakana(character) is False
278
+ and is_hiragana(character) is False
279
+ and is_thai(character) is False
280
+ ):
281
+ self._foreign_long_watch = True
282
+ return
283
+ if not self._buffer:
284
+ return
285
+ if (
286
+ character.isspace() or is_punctuation(character) or is_separator(character)
287
+ ) and self._buffer:
288
+ self._word_count += 1
289
+ buffer_length: int = len(self._buffer)
290
+
291
+ self._character_count += buffer_length
292
+
293
+ if buffer_length >= 4:
294
+ if self._buffer_accent_count / buffer_length > 0.34:
295
+ self._is_current_word_bad = True
296
+ # Words/buffers ending with an upper-case accentuated letter are so rare
297
+ # that we consider them all suspicious. Same weight as the foreign_long suspicion.
298
+ if (
299
+ is_accentuated(self._buffer[-1])
300
+ and self._buffer[-1].isupper()
301
+ and all(_.isupper() for _ in self._buffer) is False
302
+ ):
303
+ self._foreign_long_count += 1
304
+ self._is_current_word_bad = True
305
+ if buffer_length >= 24 and self._foreign_long_watch:
306
+ camel_case_dst = [
307
+ i
308
+ for c, i in zip(self._buffer, range(0, buffer_length))
309
+ if c.isupper()
310
+ ]
311
+ probable_camel_cased: bool = False
312
+
313
+ if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3):
314
+ probable_camel_cased = True
315
+
316
+ if not probable_camel_cased:
317
+ self._foreign_long_count += 1
318
+ self._is_current_word_bad = True
319
+
320
+ if self._is_current_word_bad:
321
+ self._bad_word_count += 1
322
+ self._bad_character_count += len(self._buffer)
323
+ self._is_current_word_bad = False
324
+
325
+ self._foreign_long_watch = False
326
+ self._buffer = ""
327
+ self._buffer_accent_count = 0
328
+ elif (
329
+ character not in {"<", ">", "-", "=", "~", "|", "_"}
330
+ and character.isdigit() is False
331
+ and is_symbol(character)
332
+ ):
333
+ self._is_current_word_bad = True
334
+ self._buffer += character
335
+
336
+ def reset(self) -> None: # pragma: no cover
337
+ self._buffer = ""
338
+ self._is_current_word_bad = False
339
+ self._foreign_long_watch = False
340
+ self._bad_word_count = 0
341
+ self._word_count = 0
342
+ self._character_count = 0
343
+ self._bad_character_count = 0
344
+ self._foreign_long_count = 0
345
+
346
+ @property
347
+ def ratio(self) -> float:
348
+ if self._word_count <= 10 and self._foreign_long_count == 0:
349
+ return 0.0
350
+
351
+ return self._bad_character_count / self._character_count
352
+
353
+
354
+ class CjkInvalidStopPlugin(MessDetectorPlugin):
355
+ """
356
+ GB (Chinese) based encodings often render the stop incorrectly when the content does not fit, and
357
+ this can be easily detected by searching for the overuse of '丅' and '丄'.
358
+ """
359
+
360
+ def __init__(self) -> None:
361
+ self._wrong_stop_count: int = 0
362
+ self._cjk_character_count: int = 0
363
+
364
+ def eligible(self, character: str) -> bool:
365
+ return True
366
+
367
+ def feed(self, character: str) -> None:
368
+ if character in {"丅", "丄"}:
369
+ self._wrong_stop_count += 1
370
+ return
371
+ if is_cjk(character):
372
+ self._cjk_character_count += 1
373
+
374
+ def reset(self) -> None: # pragma: no cover
375
+ self._wrong_stop_count = 0
376
+ self._cjk_character_count = 0
377
+
378
+ @property
379
+ def ratio(self) -> float:
380
+ if self._cjk_character_count < 16:
381
+ return 0.0
382
+ return self._wrong_stop_count / self._cjk_character_count
383
+
384
+
385
+ class ArchaicUpperLowerPlugin(MessDetectorPlugin):
386
+ def __init__(self) -> None:
387
+ self._buf: bool = False
388
+
389
+ self._character_count_since_last_sep: int = 0
390
+
391
+ self._successive_upper_lower_count: int = 0
392
+ self._successive_upper_lower_count_final: int = 0
393
+
394
+ self._character_count: int = 0
395
+
396
+ self._last_alpha_seen: Optional[str] = None
397
+ self._current_ascii_only: bool = True
398
+
399
+ def eligible(self, character: str) -> bool:
400
+ return True
401
+
402
+ def feed(self, character: str) -> None:
403
+ is_concerned = character.isalpha() and is_case_variable(character)
404
+ chunk_sep = is_concerned is False
405
+
406
+ if chunk_sep and self._character_count_since_last_sep > 0:
407
+ if (
408
+ self._character_count_since_last_sep <= 64
409
+ and character.isdigit() is False
410
+ and self._current_ascii_only is False
411
+ ):
412
+ self._successive_upper_lower_count_final += (
413
+ self._successive_upper_lower_count
414
+ )
415
+
416
+ self._successive_upper_lower_count = 0
417
+ self._character_count_since_last_sep = 0
418
+ self._last_alpha_seen = None
419
+ self._buf = False
420
+ self._character_count += 1
421
+ self._current_ascii_only = True
422
+
423
+ return
424
+
425
+ if self._current_ascii_only is True and character.isascii() is False:
426
+ self._current_ascii_only = False
427
+
428
+ if self._last_alpha_seen is not None:
429
+ if (character.isupper() and self._last_alpha_seen.islower()) or (
430
+ character.islower() and self._last_alpha_seen.isupper()
431
+ ):
432
+ if self._buf is True:
433
+ self._successive_upper_lower_count += 2
434
+ self._buf = False
435
+ else:
436
+ self._buf = True
437
+ else:
438
+ self._buf = False
439
+
440
+ self._character_count += 1
441
+ self._character_count_since_last_sep += 1
442
+ self._last_alpha_seen = character
443
+
444
+ def reset(self) -> None: # pragma: no cover
445
+ self._character_count = 0
446
+ self._character_count_since_last_sep = 0
447
+ self._successive_upper_lower_count = 0
448
+ self._successive_upper_lower_count_final = 0
449
+ self._last_alpha_seen = None
450
+ self._buf = False
451
+ self._current_ascii_only = True
452
+
453
+ @property
454
+ def ratio(self) -> float:
455
+ if self._character_count == 0:
456
+ return 0.0
457
+
458
+ return self._successive_upper_lower_count_final / self._character_count
459
+
460
+
461
+ class ArabicIsolatedFormPlugin(MessDetectorPlugin):
462
+ def __init__(self) -> None:
463
+ self._character_count: int = 0
464
+ self._isolated_form_count: int = 0
465
+
466
+ def reset(self) -> None: # pragma: no cover
467
+ self._character_count = 0
468
+ self._isolated_form_count = 0
469
+
470
+ def eligible(self, character: str) -> bool:
471
+ return is_arabic(character)
472
+
473
+ def feed(self, character: str) -> None:
474
+ self._character_count += 1
475
+
476
+ if is_arabic_isolated_form(character):
477
+ self._isolated_form_count += 1
478
+
479
+ @property
480
+ def ratio(self) -> float:
481
+ if self._character_count < 8:
482
+ return 0.0
483
+
484
+ isolated_form_usage: float = self._isolated_form_count / self._character_count
485
+
486
+ return isolated_form_usage
487
+
488
+
489
+ @lru_cache(maxsize=1024)
490
+ def is_suspiciously_successive_range(
491
+ unicode_range_a: Optional[str], unicode_range_b: Optional[str]
492
+ ) -> bool:
493
+ """
494
+ Determine if two Unicode ranges seen next to each other can be considered as suspicious.
495
+ """
496
+ if unicode_range_a is None or unicode_range_b is None:
497
+ return True
498
+
499
+ if unicode_range_a == unicode_range_b:
500
+ return False
501
+
502
+ if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
503
+ return False
504
+
505
+ if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
506
+ return False
507
+
508
+ # Latin characters can be accompanied with a combining diacritical mark
509
+ # eg. Vietnamese.
510
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
511
+ "Combining" in unicode_range_a or "Combining" in unicode_range_b
512
+ ):
513
+ return False
514
+
515
+ keywords_range_a, keywords_range_b = unicode_range_a.split(
516
+ " "
517
+ ), unicode_range_b.split(" ")
518
+
519
+ for el in keywords_range_a:
520
+ if el in UNICODE_SECONDARY_RANGE_KEYWORD:
521
+ continue
522
+ if el in keywords_range_b:
523
+ return False
524
+
525
+ # Japanese Exception
526
+ range_a_jp_chars, range_b_jp_chars = (
527
+ unicode_range_a
528
+ in (
529
+ "Hiragana",
530
+ "Katakana",
531
+ ),
532
+ unicode_range_b in ("Hiragana", "Katakana"),
533
+ )
534
+ if (range_a_jp_chars or range_b_jp_chars) and (
535
+ "CJK" in unicode_range_a or "CJK" in unicode_range_b
536
+ ):
537
+ return False
538
+ if range_a_jp_chars and range_b_jp_chars:
539
+ return False
540
+
541
+ if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
542
+ if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
543
+ return False
544
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
545
+ return False
546
+
547
+ # Chinese/Japanese use dedicated range for punctuation and/or separators.
548
+ if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
549
+ unicode_range_a in ["Katakana", "Hiragana"]
550
+ and unicode_range_b in ["Katakana", "Hiragana"]
551
+ ):
552
+ if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
553
+ return False
554
+ if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
555
+ return False
556
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
557
+ return False
558
+
559
+ return True
560
+
561
+
562
+ @lru_cache(maxsize=2048)
563
+ def mess_ratio(
564
+ decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
565
+ ) -> float:
566
+ """
567
+ Compute a mess ratio given a decoded byte sequence. The maximum threshold stops the computation early.
568
+ """
569
+
570
+ detectors: List[MessDetectorPlugin] = [
571
+ md_class() for md_class in MessDetectorPlugin.__subclasses__()
572
+ ]
573
+
574
+ length: int = len(decoded_sequence) + 1
575
+
576
+ mean_mess_ratio: float = 0.0
577
+
578
+ if length < 512:
579
+ intermediary_mean_mess_ratio_calc: int = 32
580
+ elif length <= 1024:
581
+ intermediary_mean_mess_ratio_calc = 64
582
+ else:
583
+ intermediary_mean_mess_ratio_calc = 128
584
+
585
+ for character, index in zip(decoded_sequence + "\n", range(length)):
586
+ for detector in detectors:
587
+ if detector.eligible(character):
588
+ detector.feed(character)
589
+
590
+ if (
591
+ index > 0 and index % intermediary_mean_mess_ratio_calc == 0
592
+ ) or index == length - 1:
593
+ mean_mess_ratio = sum(dt.ratio for dt in detectors)
594
+
595
+ if mean_mess_ratio >= maximum_threshold:
596
+ break
597
+
598
+ if debug:
599
+ logger = getLogger("charset_normalizer")
600
+
601
+ logger.log(
602
+ TRACE,
603
+ "Mess-detector extended-analysis start. "
604
+ f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} "
605
+ f"maximum_threshold={maximum_threshold}",
606
+ )
607
+
608
+ if len(decoded_sequence) > 16:
609
+ logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}")
610
+ logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}")
611
+
612
+ for dt in detectors: # pragma: nocover
613
+ logger.log(TRACE, f"{dt.__class__}: {dt.ratio}")
614
+
615
+ return round(mean_mess_ratio, 3)
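
The plugin interface driven by mess_ratio() above (eligible/feed/reset/ratio, with every MessDetectorPlugin subclass instantiated automatically) can be illustrated with a small, hedged sketch. The import path charset_normalizer.md and the TooManyReplacementCharPlugin name are illustrative assumptions, not part of the diff itself.

```python
# Minimal sketch, assuming the files in this diff are importable as the
# charset_normalizer package (import path is an assumption).
from charset_normalizer.md import MessDetectorPlugin, mess_ratio


class TooManyReplacementCharPlugin(MessDetectorPlugin):
    """Hypothetical plugin counting U+FFFD replacement characters."""

    def __init__(self) -> None:
        self._replacement_count: int = 0
        self._character_count: int = 0

    def eligible(self, character: str) -> bool:
        # Look at every character.
        return True

    def feed(self, character: str) -> None:
        self._character_count += 1
        if character == "\ufffd":
            self._replacement_count += 1

    def reset(self) -> None:
        self._replacement_count = 0
        self._character_count = 0

    @property
    def ratio(self) -> float:
        if self._character_count == 0:
            return 0.0
        return self._replacement_count / self._character_count


# mess_ratio() instantiates every MessDetectorPlugin subclass, so the
# plugin above is picked up automatically.
print(mess_ratio("A plain English sentence."))        # likely low
print(mess_ratio("ÃƒÂ©ÃƒÂ¨ garbled mojibake text"))   # likely higher
```

The exact ratio values depend on the full set of registered plugins; the comments only indicate the expected direction.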
Dataset_Construction/projects/charset-normalizer/python/models.py ADDED
@@ -0,0 +1,338 @@
1
+ from encodings.aliases import aliases
2
+ from hashlib import sha256
3
+ from json import dumps
4
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
5
+
6
+ from .constant import TOO_BIG_SEQUENCE
7
+ from .utils import iana_name, is_multi_byte_encoding, unicode_range
8
+
9
+
10
+ class CharsetMatch:
11
+ def __init__(
12
+ self,
13
+ payload: bytes,
14
+ guessed_encoding: str,
15
+ mean_mess_ratio: float,
16
+ has_sig_or_bom: bool,
17
+ languages: "CoherenceMatches",
18
+ decoded_payload: Optional[str] = None,
19
+ ):
20
+ self._payload: bytes = payload
21
+
22
+ self._encoding: str = guessed_encoding
23
+ self._mean_mess_ratio: float = mean_mess_ratio
24
+ self._languages: CoherenceMatches = languages
25
+ self._has_sig_or_bom: bool = has_sig_or_bom
26
+ self._unicode_ranges: Optional[List[str]] = None
27
+
28
+ self._leaves: List[CharsetMatch] = []
29
+ self._mean_coherence_ratio: float = 0.0
30
+
31
+ self._output_payload: Optional[bytes] = None
32
+ self._output_encoding: Optional[str] = None
33
+
34
+ self._string: Optional[str] = decoded_payload
35
+
36
+ def __eq__(self, other: object) -> bool:
37
+ if not isinstance(other, CharsetMatch):
38
+ if isinstance(other, str):
39
+ return iana_name(other) == self.encoding
40
+ return False
41
+ return self.encoding == other.encoding and self.fingerprint == other.fingerprint
42
+
43
+ def __lt__(self, other: object) -> bool:
44
+ """
45
+ Implemented to make sorted available upon CharsetMatches items.
46
+ """
47
+ if not isinstance(other, CharsetMatch):
48
+ raise ValueError
49
+
50
+ chaos_difference: float = abs(self.chaos - other.chaos)
51
+ coherence_difference: float = abs(self.coherence - other.coherence)
52
+
53
+ # Below 1% difference --> Use Coherence
54
+ if chaos_difference < 0.01 and coherence_difference > 0.02:
55
+ return self.coherence > other.coherence
56
+ elif chaos_difference < 0.01 and coherence_difference <= 0.02:
57
+ # When having a difficult decision, use the result that decoded as many multi-byte as possible.
58
+ # preserve RAM usage!
59
+ if len(self._payload) >= TOO_BIG_SEQUENCE:
60
+ return self.chaos < other.chaos
61
+ return self.multi_byte_usage > other.multi_byte_usage
62
+
63
+ return self.chaos < other.chaos
64
+
65
+ @property
66
+ def multi_byte_usage(self) -> float:
67
+ return 1.0 - (len(str(self)) / len(self.raw))
68
+
69
+ def __str__(self) -> str:
70
+ # Lazy Str Loading
71
+ if self._string is None:
72
+ self._string = str(self._payload, self._encoding, "strict")
73
+ return self._string
74
+
75
+ def __repr__(self) -> str:
76
+ return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
77
+
78
+ def add_submatch(self, other: "CharsetMatch") -> None:
79
+ if not isinstance(other, CharsetMatch) or other == self:
80
+ raise ValueError(
81
+ "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
82
+ other.__class__
83
+ )
84
+ )
85
+
86
+ other._string = None # Unload RAM usage; dirty trick.
87
+ self._leaves.append(other)
88
+
89
+ @property
90
+ def encoding(self) -> str:
91
+ return self._encoding
92
+
93
+ @property
94
+ def encoding_aliases(self) -> List[str]:
95
+ """
96
+ Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
97
+ """
98
+ also_known_as: List[str] = []
99
+ for u, p in aliases.items():
100
+ if self.encoding == u:
101
+ also_known_as.append(p)
102
+ elif self.encoding == p:
103
+ also_known_as.append(u)
104
+ return also_known_as
105
+
106
+ @property
107
+ def bom(self) -> bool:
108
+ return self._has_sig_or_bom
109
+
110
+ @property
111
+ def byte_order_mark(self) -> bool:
112
+ return self._has_sig_or_bom
113
+
114
+ @property
115
+ def languages(self) -> List[str]:
116
+ """
117
+ Return the complete list of possible languages found in decoded sequence.
118
+ Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
119
+ """
120
+ return [e[0] for e in self._languages]
121
+
122
+ @property
123
+ def language(self) -> str:
124
+ """
125
+ Most probable language found in decoded sequence. If none were detected or inferred, the property will return
126
+ "Unknown".
127
+ """
128
+ if not self._languages:
129
+ # Trying to infer the language based on the given encoding
130
+ # Its either English or we should not pronounce ourselves in certain cases.
131
+ if "ascii" in self.could_be_from_charset:
132
+ return "English"
133
+
134
+ # doing it there to avoid circular import
135
+ from charset_normalizer.cd import encoding_languages, mb_encoding_languages
136
+
137
+ languages = (
138
+ mb_encoding_languages(self.encoding)
139
+ if is_multi_byte_encoding(self.encoding)
140
+ else encoding_languages(self.encoding)
141
+ )
142
+
143
+ if len(languages) == 0 or "Latin Based" in languages:
144
+ return "Unknown"
145
+
146
+ return languages[0]
147
+
148
+ return self._languages[0][0]
149
+
150
+ @property
151
+ def chaos(self) -> float:
152
+ return self._mean_mess_ratio
153
+
154
+ @property
155
+ def coherence(self) -> float:
156
+ if not self._languages:
157
+ return 0.0
158
+ return self._languages[0][1]
159
+
160
+ @property
161
+ def percent_chaos(self) -> float:
162
+ return round(self.chaos * 100, ndigits=3)
163
+
164
+ @property
165
+ def percent_coherence(self) -> float:
166
+ return round(self.coherence * 100, ndigits=3)
167
+
168
+ @property
169
+ def raw(self) -> bytes:
170
+ """
171
+ Original untouched bytes.
172
+ """
173
+ return self._payload
174
+
175
+ @property
176
+ def submatch(self) -> List["CharsetMatch"]:
177
+ return self._leaves
178
+
179
+ @property
180
+ def has_submatch(self) -> bool:
181
+ return len(self._leaves) > 0
182
+
183
+ @property
184
+ def alphabets(self) -> List[str]:
185
+ if self._unicode_ranges is not None:
186
+ return self._unicode_ranges
187
+ # list detected ranges
188
+ detected_ranges: List[Optional[str]] = [
189
+ unicode_range(char) for char in str(self)
190
+ ]
191
+ # filter and sort
192
+ self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
193
+ return self._unicode_ranges
194
+
195
+ @property
196
+ def could_be_from_charset(self) -> List[str]:
197
+ """
198
+ The complete list of encoding that output the exact SAME str result and therefore could be the originating
199
+ encoding.
200
+ This list does include the encoding available in property 'encoding'.
201
+ """
202
+ return [self._encoding] + [m.encoding for m in self._leaves]
203
+
204
+ def output(self, encoding: str = "utf_8") -> bytes:
205
+ """
206
+ Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
207
+ Any errors will be simply ignored by the encoder NOT replaced.
208
+ """
209
+ if self._output_encoding is None or self._output_encoding != encoding:
210
+ self._output_encoding = encoding
211
+ self._output_payload = str(self).encode(encoding, "replace")
212
+
213
+ return self._output_payload # type: ignore
214
+
215
+ @property
216
+ def fingerprint(self) -> str:
217
+ """
218
+ Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
219
+ """
220
+ return sha256(self.output()).hexdigest()
221
+
222
+
223
+ class CharsetMatches:
224
+ """
225
+ Container with every CharsetMatch items ordered by default from most probable to the less one.
226
+ Act like a list(iterable) but does not implements all related methods.
227
+ """
228
+
229
+ def __init__(self, results: Optional[List[CharsetMatch]] = None):
230
+ self._results: List[CharsetMatch] = sorted(results) if results else []
231
+
232
+ def __iter__(self) -> Iterator[CharsetMatch]:
233
+ yield from self._results
234
+
235
+ def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
236
+ """
237
+ Retrieve a single item either by its position or encoding name (alias may be used here).
238
+ Raise KeyError upon invalid index or encoding not present in results.
239
+ """
240
+ if isinstance(item, int):
241
+ return self._results[item]
242
+ if isinstance(item, str):
243
+ item = iana_name(item, False)
244
+ for result in self._results:
245
+ if item in result.could_be_from_charset:
246
+ return result
247
+ raise KeyError
248
+
249
+ def __len__(self) -> int:
250
+ return len(self._results)
251
+
252
+ def __bool__(self) -> bool:
253
+ return len(self._results) > 0
254
+
255
+ def append(self, item: CharsetMatch) -> None:
256
+ """
257
+ Insert a single match. Will be inserted accordingly to preserve sort.
258
+ Can be inserted as a submatch.
259
+ """
260
+ if not isinstance(item, CharsetMatch):
261
+ raise ValueError(
262
+ "Cannot append instance '{}' to CharsetMatches".format(
263
+ str(item.__class__)
264
+ )
265
+ )
266
+ # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
267
+ if len(item.raw) <= TOO_BIG_SEQUENCE:
268
+ for match in self._results:
269
+ if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
270
+ match.add_submatch(item)
271
+ return
272
+ self._results.append(item)
273
+ self._results = sorted(self._results)
274
+
275
+ def best(self) -> Optional["CharsetMatch"]:
276
+ """
277
+ Simply return the first match. Strict equivalent to matches[0].
278
+ """
279
+ if not self._results:
280
+ return None
281
+ return self._results[0]
282
+
283
+ def first(self) -> Optional["CharsetMatch"]:
284
+ """
285
+ Redundant method, call the method best(). Kept for BC reasons.
286
+ """
287
+ return self.best()
288
+
289
+
290
+ CoherenceMatch = Tuple[str, float]
291
+ CoherenceMatches = List[CoherenceMatch]
292
+
293
+
294
+ class CliDetectionResult:
295
+ def __init__(
296
+ self,
297
+ path: str,
298
+ encoding: Optional[str],
299
+ encoding_aliases: List[str],
300
+ alternative_encodings: List[str],
301
+ language: str,
302
+ alphabets: List[str],
303
+ has_sig_or_bom: bool,
304
+ chaos: float,
305
+ coherence: float,
306
+ unicode_path: Optional[str],
307
+ is_preferred: bool,
308
+ ):
309
+ self.path: str = path
310
+ self.unicode_path: Optional[str] = unicode_path
311
+ self.encoding: Optional[str] = encoding
312
+ self.encoding_aliases: List[str] = encoding_aliases
313
+ self.alternative_encodings: List[str] = alternative_encodings
314
+ self.language: str = language
315
+ self.alphabets: List[str] = alphabets
316
+ self.has_sig_or_bom: bool = has_sig_or_bom
317
+ self.chaos: float = chaos
318
+ self.coherence: float = coherence
319
+ self.is_preferred: bool = is_preferred
320
+
321
+ @property
322
+ def __dict__(self) -> Dict[str, Any]: # type: ignore
323
+ return {
324
+ "path": self.path,
325
+ "encoding": self.encoding,
326
+ "encoding_aliases": self.encoding_aliases,
327
+ "alternative_encodings": self.alternative_encodings,
328
+ "language": self.language,
329
+ "alphabets": self.alphabets,
330
+ "has_sig_or_bom": self.has_sig_or_bom,
331
+ "chaos": self.chaos,
332
+ "coherence": self.coherence,
333
+ "unicode_path": self.unicode_path,
334
+ "is_preferred": self.is_preferred,
335
+ }
336
+
337
+ def to_json(self) -> str:
338
+ return dumps(self.__dict__, ensure_ascii=True, indent=4)
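
To make the CharsetMatch/CharsetMatches semantics above concrete, here is a small hedged sketch. The payloads, mess ratios and language tuples are invented purely for illustration, and the import path charset_normalizer.models is an assumption about how the package is installed.

```python
# Minimal sketch, assuming the models.py above is importable as
# charset_normalizer.models; all values are illustrative.
from charset_normalizer.models import CharsetMatch, CharsetMatches

utf8_match = CharsetMatch(
    payload="héllo".encode("utf_8"),
    guessed_encoding="utf_8",
    mean_mess_ratio=0.0,
    has_sig_or_bom=False,
    languages=[("English", 0.8)],
)
cp1252_match = CharsetMatch(
    payload="héllo".encode("cp1252"),
    guessed_encoding="cp1252",
    mean_mess_ratio=0.1,
    has_sig_or_bom=False,
    languages=[],
)

matches = CharsetMatches()
matches.append(cp1252_match)
matches.append(utf8_match)

best = matches.best()  # ordering follows __lt__: lower chaos wins here
print(best.encoding, best.percent_chaos, best.language)
print(matches["utf_8"].fingerprint)  # lookup by encoding name also works
```

Because the chaos difference between the two matches exceeds 1%, the container orders them by chaos alone; the coherence and multi-byte tie-breakers described in __lt__ would only apply for closer scores.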
Dataset_Construction/projects/charset-normalizer/python/py.typed ADDED
File without changes
Dataset_Construction/projects/charset-normalizer/python/utils.py ADDED
@@ -0,0 +1,421 @@
1
+ import importlib
2
+ import logging
3
+ import unicodedata
4
+ from codecs import IncrementalDecoder
5
+ from encodings.aliases import aliases
6
+ from functools import lru_cache
7
+ from re import findall
8
+ from typing import Generator, List, Optional, Set, Tuple, Union
9
+
10
+ from _multibytecodec import MultibyteIncrementalDecoder
11
+
12
+ from .constant import (
13
+ ENCODING_MARKS,
14
+ IANA_SUPPORTED_SIMILAR,
15
+ RE_POSSIBLE_ENCODING_INDICATION,
16
+ UNICODE_RANGES_COMBINED,
17
+ UNICODE_SECONDARY_RANGE_KEYWORD,
18
+ UTF8_MAXIMAL_ALLOCATION,
19
+ )
20
+
21
+
22
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
23
+ def is_accentuated(character: str) -> bool:
24
+ try:
25
+ description: str = unicodedata.name(character)
26
+ except ValueError:
27
+ return False
28
+ return (
29
+ "WITH GRAVE" in description
30
+ or "WITH ACUTE" in description
31
+ or "WITH CEDILLA" in description
32
+ or "WITH DIAERESIS" in description
33
+ or "WITH CIRCUMFLEX" in description
34
+ or "WITH TILDE" in description
35
+ or "WITH MACRON" in description
36
+ or "WITH RING ABOVE" in description
37
+ )
38
+
39
+
40
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
41
+ def remove_accent(character: str) -> str:
42
+ decomposed: str = unicodedata.decomposition(character)
43
+ if not decomposed:
44
+ return character
45
+
46
+ codes: List[str] = decomposed.split(" ")
47
+
48
+ return chr(int(codes[0], 16))
49
+
50
+
51
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
52
+ def unicode_range(character: str) -> Optional[str]:
53
+ """
54
+ Retrieve the Unicode range official name from a single character.
55
+ """
56
+ character_ord: int = ord(character)
57
+
58
+ for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
59
+ if character_ord in ord_range:
60
+ return range_name
61
+
62
+ return None
63
+
64
+
65
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
66
+ def is_latin(character: str) -> bool:
67
+ try:
68
+ description: str = unicodedata.name(character)
69
+ except ValueError:
70
+ return False
71
+ return "LATIN" in description
72
+
73
+
74
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
75
+ def is_punctuation(character: str) -> bool:
76
+ character_category: str = unicodedata.category(character)
77
+
78
+ if "P" in character_category:
79
+ return True
80
+
81
+ character_range: Optional[str] = unicode_range(character)
82
+
83
+ if character_range is None:
84
+ return False
85
+
86
+ return "Punctuation" in character_range
87
+
88
+
89
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
90
+ def is_symbol(character: str) -> bool:
91
+ character_category: str = unicodedata.category(character)
92
+
93
+ if "S" in character_category or "N" in character_category:
94
+ return True
95
+
96
+ character_range: Optional[str] = unicode_range(character)
97
+
98
+ if character_range is None:
99
+ return False
100
+
101
+ return "Forms" in character_range and character_category != "Lo"
102
+
103
+
104
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
105
+ def is_emoticon(character: str) -> bool:
106
+ character_range: Optional[str] = unicode_range(character)
107
+
108
+ if character_range is None:
109
+ return False
110
+
111
+ return "Emoticons" in character_range or "Pictographs" in character_range
112
+
113
+
114
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
115
+ def is_separator(character: str) -> bool:
116
+ if character.isspace() or character in {"|", "+", "<", ">"}:
117
+ return True
118
+
119
+ character_category: str = unicodedata.category(character)
120
+
121
+ return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
122
+
123
+
124
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
125
+ def is_case_variable(character: str) -> bool:
126
+ return character.islower() != character.isupper()
127
+
128
+
129
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
130
+ def is_cjk(character: str) -> bool:
131
+ try:
132
+ character_name = unicodedata.name(character)
133
+ except ValueError:
134
+ return False
135
+
136
+ return "CJK" in character_name
137
+
138
+
139
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
140
+ def is_hiragana(character: str) -> bool:
141
+ try:
142
+ character_name = unicodedata.name(character)
143
+ except ValueError:
144
+ return False
145
+
146
+ return "HIRAGANA" in character_name
147
+
148
+
149
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
150
+ def is_katakana(character: str) -> bool:
151
+ try:
152
+ character_name = unicodedata.name(character)
153
+ except ValueError:
154
+ return False
155
+
156
+ return "KATAKANA" in character_name
157
+
158
+
159
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
160
+ def is_hangul(character: str) -> bool:
161
+ try:
162
+ character_name = unicodedata.name(character)
163
+ except ValueError:
164
+ return False
165
+
166
+ return "HANGUL" in character_name
167
+
168
+
169
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
170
+ def is_thai(character: str) -> bool:
171
+ try:
172
+ character_name = unicodedata.name(character)
173
+ except ValueError:
174
+ return False
175
+
176
+ return "THAI" in character_name
177
+
178
+
179
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
180
+ def is_arabic(character: str) -> bool:
181
+ try:
182
+ character_name = unicodedata.name(character)
183
+ except ValueError:
184
+ return False
185
+
186
+ return "ARABIC" in character_name
187
+
188
+
189
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
190
+ def is_arabic_isolated_form(character: str) -> bool:
191
+ try:
192
+ character_name = unicodedata.name(character)
193
+ except ValueError:
194
+ return False
195
+
196
+ return "ARABIC" in character_name and "ISOLATED FORM" in character_name
197
+
198
+
199
+ @lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
200
+ def is_unicode_range_secondary(range_name: str) -> bool:
201
+ return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
202
+
203
+
204
+ @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
205
+ def is_unprintable(character: str) -> bool:
206
+ return (
207
+ character.isspace() is False # includes \n \t \r \v
208
+ and character.isprintable() is False
209
+ and character != "\x1A" # Why? Its the ASCII substitute character.
210
+ and character != "\ufeff" # bug discovered in Python,
211
+ # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
212
+ )
213
+
214
+
215
+ def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]:
216
+ """
217
+ Extract using ASCII-only decoder any specified encoding in the first n-bytes.
218
+ """
219
+ if not isinstance(sequence, bytes):
220
+ raise TypeError
221
+
222
+ seq_len: int = len(sequence)
223
+
224
+ results: List[str] = findall(
225
+ RE_POSSIBLE_ENCODING_INDICATION,
226
+ sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
227
+ )
228
+
229
+ if len(results) == 0:
230
+ return None
231
+
232
+ for specified_encoding in results:
233
+ specified_encoding = specified_encoding.lower().replace("-", "_")
234
+
235
+ encoding_alias: str
236
+ encoding_iana: str
237
+
238
+ for encoding_alias, encoding_iana in aliases.items():
239
+ if encoding_alias == specified_encoding:
240
+ return encoding_iana
241
+ if encoding_iana == specified_encoding:
242
+ return encoding_iana
243
+
244
+ return None
245
+
246
+
247
+ @lru_cache(maxsize=128)
248
+ def is_multi_byte_encoding(name: str) -> bool:
249
+ """
250
+ Verify is a specific encoding is a multi byte one based on it IANA name
251
+ """
252
+ return name in {
253
+ "utf_8",
254
+ "utf_8_sig",
255
+ "utf_16",
256
+ "utf_16_be",
257
+ "utf_16_le",
258
+ "utf_32",
259
+ "utf_32_le",
260
+ "utf_32_be",
261
+ "utf_7",
262
+ } or issubclass(
263
+ importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
264
+ MultibyteIncrementalDecoder,
265
+ )
266
+
267
+
268
+ def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
269
+ """
270
+ Identify and extract SIG/BOM in given sequence.
271
+ """
272
+
273
+ for iana_encoding in ENCODING_MARKS:
274
+ marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]
275
+
276
+ if isinstance(marks, bytes):
277
+ marks = [marks]
278
+
279
+ for mark in marks:
280
+ if sequence.startswith(mark):
281
+ return iana_encoding, mark
282
+
283
+ return None, b""
284
+
285
+
286
+ def should_strip_sig_or_bom(iana_encoding: str) -> bool:
287
+ return iana_encoding not in {"utf_16", "utf_32"}
288
+
289
+
290
+ def iana_name(cp_name: str, strict: bool = True) -> str:
291
+ cp_name = cp_name.lower().replace("-", "_")
292
+
293
+ encoding_alias: str
294
+ encoding_iana: str
295
+
296
+ for encoding_alias, encoding_iana in aliases.items():
297
+ if cp_name in [encoding_alias, encoding_iana]:
298
+ return encoding_iana
299
+
300
+ if strict:
301
+ raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
302
+
303
+ return cp_name
304
+
305
+
306
+ def range_scan(decoded_sequence: str) -> List[str]:
307
+ ranges: Set[str] = set()
308
+
309
+ for character in decoded_sequence:
310
+ character_range: Optional[str] = unicode_range(character)
311
+
312
+ if character_range is None:
313
+ continue
314
+
315
+ ranges.add(character_range)
316
+
317
+ return list(ranges)
318
+
319
+
320
+ def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
321
+ if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
322
+ return 0.0
323
+
324
+ decoder_a = importlib.import_module(
325
+ "encodings.{}".format(iana_name_a)
326
+ ).IncrementalDecoder
327
+ decoder_b = importlib.import_module(
328
+ "encodings.{}".format(iana_name_b)
329
+ ).IncrementalDecoder
330
+
331
+ id_a: IncrementalDecoder = decoder_a(errors="ignore")
332
+ id_b: IncrementalDecoder = decoder_b(errors="ignore")
333
+
334
+ character_match_count: int = 0
335
+
336
+ for i in range(255):
337
+ to_be_decoded: bytes = bytes([i])
338
+ if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
339
+ character_match_count += 1
340
+
341
+ return character_match_count / 254
342
+
343
+
344
+ def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
345
+ """
346
+ Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
347
+ the function cp_similarity.
348
+ """
349
+ return (
350
+ iana_name_a in IANA_SUPPORTED_SIMILAR
351
+ and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
352
+ )
353
+
354
+
355
+ def set_logging_handler(
356
+ name: str = "charset_normalizer",
357
+ level: int = logging.INFO,
358
+ format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
359
+ ) -> None:
360
+ logger = logging.getLogger(name)
361
+ logger.setLevel(level)
362
+
363
+ handler = logging.StreamHandler()
364
+ handler.setFormatter(logging.Formatter(format_string))
365
+ logger.addHandler(handler)
366
+
367
+
368
+ def cut_sequence_chunks(
369
+ sequences: bytes,
370
+ encoding_iana: str,
371
+ offsets: range,
372
+ chunk_size: int,
373
+ bom_or_sig_available: bool,
374
+ strip_sig_or_bom: bool,
375
+ sig_payload: bytes,
376
+ is_multi_byte_decoder: bool,
377
+ decoded_payload: Optional[str] = None,
378
+ ) -> Generator[str, None, None]:
379
+ if decoded_payload and is_multi_byte_decoder is False:
380
+ for i in offsets:
381
+ chunk = decoded_payload[i : i + chunk_size]
382
+ if not chunk:
383
+ break
384
+ yield chunk
385
+ else:
386
+ for i in offsets:
387
+ chunk_end = i + chunk_size
388
+ if chunk_end > len(sequences) + 8:
389
+ continue
390
+
391
+ cut_sequence = sequences[i : i + chunk_size]
392
+
393
+ if bom_or_sig_available and strip_sig_or_bom is False:
394
+ cut_sequence = sig_payload + cut_sequence
395
+
396
+ chunk = cut_sequence.decode(
397
+ encoding_iana,
398
+ errors="ignore" if is_multi_byte_decoder else "strict",
399
+ )
400
+
401
+ # multi-byte bad cutting detector and adjustment
402
+ # not the cleanest way to perform that fix but clever enough for now.
403
+ if is_multi_byte_decoder and i > 0:
404
+ chunk_partial_size_chk: int = min(chunk_size, 16)
405
+
406
+ if (
407
+ decoded_payload
408
+ and chunk[:chunk_partial_size_chk] not in decoded_payload
409
+ ):
410
+ for j in range(i, i - 4, -1):
411
+ cut_sequence = sequences[j:chunk_end]
412
+
413
+ if bom_or_sig_available and strip_sig_or_bom is False:
414
+ cut_sequence = sig_payload + cut_sequence
415
+
416
+ chunk = cut_sequence.decode(encoding_iana, errors="ignore")
417
+
418
+ if chunk[:chunk_partial_size_chk] in decoded_payload:
419
+ break
420
+
421
+ yield chunk
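
As a quick illustration of the character and encoding helpers defined above, the hedged sketch below calls a handful of them on trivial inputs. The expected outputs in the comments are indicative only, and the import path charset_normalizer.utils is an assumption about the installed package.

```python
# Minimal sketch exercising a few helpers from utils.py above.
from charset_normalizer.utils import (
    iana_name,
    identify_sig_or_bom,
    is_accentuated,
    is_multi_byte_encoding,
    remove_accent,
    unicode_range,
)

print(unicode_range("é"))                        # e.g. "Latin-1 Supplement"
print(is_accentuated("é"), remove_accent("é"))   # True 'e'
print(iana_name("UTF-8"))                        # normalised to "utf_8"
print(is_multi_byte_encoding("utf_8"), is_multi_byte_encoding("cp1252"))  # True False
# ("utf_8", b"\xef\xbb\xbf") if the UTF-8 BOM is registered in ENCODING_MARKS
print(identify_sig_or_bom(b"\xef\xbb\xbfhello"))
```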
Dataset_Construction/projects/charset-normalizer/python/version.py ADDED
@@ -0,0 +1,6 @@
1
+ """
2
+ Expose version
3
+ """
4
+
5
+ __version__ = "3.3.2"
6
+ VERSION = __version__.split(".")
Dataset_Construction/projects/charset-normalizer/rust/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,76 @@
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ In the interest of fostering an open and welcoming environment, we as
6
+ contributors and maintainers pledge to making participation in our project and
7
+ our community a harassment-free experience for everyone, regardless of age, body
8
+ size, disability, ethnicity, sex characteristics, gender identity and expression,
9
+ level of experience, education, socio-economic status, nationality, personal
10
+ appearance, race, religion, or sexual identity and orientation.
11
+
12
+ ## Our Standards
13
+
14
+ Examples of behavior that contributes to creating a positive environment
15
+ include:
16
+
17
+ * Using welcoming and inclusive language
18
+ * Being respectful of differing viewpoints and experiences
19
+ * Gracefully accepting constructive criticism
20
+ * Focusing on what is best for the community
21
+ * Showing empathy towards other community members
22
+
23
+ Examples of unacceptable behavior by participants include:
24
+
25
+ * The use of sexualized language or imagery and unwelcome sexual attention or
26
+ advances
27
+ * Trolling, insulting/derogatory comments, and personal or political attacks
28
+ * Public or private harassment
29
+ * Publishing others' private information, such as a physical or electronic
30
+ address, without explicit permission
31
+ * Other conduct which could reasonably be considered inappropriate in a
32
+ professional setting
33
+
34
+ ## Our Responsibilities
35
+
36
+ Project maintainers are responsible for clarifying the standards of acceptable
37
+ behavior and are expected to take appropriate and fair corrective action in
38
+ response to any instances of unacceptable behavior.
39
+
40
+ Project maintainers have the right and responsibility to remove, edit, or
41
+ reject comments, commits, code, wiki edits, issues, and other contributions
42
+ that are not aligned to this Code of Conduct, or to ban temporarily or
43
+ permanently any contributor for other behaviors that they deem inappropriate,
44
+ threatening, offensive, or harmful.
45
+
46
+ ## Scope
47
+
48
+ This Code of Conduct applies both within project spaces and in public spaces
49
+ when an individual is representing the project or its community. Examples of
50
+ representing a project or community include using an official project e-mail
51
+ address, posting via an official social media account, or acting as an appointed
52
+ representative at an online or offline event. Representation of a project may be
53
+ further defined and clarified by project maintainers.
54
+
55
+ ## Enforcement
56
+
57
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
58
+ reported by contacting the project team at ahmed.tahri@cloudnursery.dev. All
59
+ complaints will be reviewed and investigated and will result in a response that
60
+ is deemed necessary and appropriate to the circumstances. The project team is
61
+ obligated to maintain confidentiality with regard to the reporter of an incident.
62
+ Further details of specific enforcement policies may be posted separately.
63
+
64
+ Project maintainers who do not follow or enforce the Code of Conduct in good
65
+ faith may face temporary or permanent repercussions as determined by other
66
+ members of the project's leadership.
67
+
68
+ ## Attribution
69
+
70
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71
+ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72
+
73
+ [homepage]: https://www.contributor-covenant.org
74
+
75
+ For answers to common questions about this code of conduct, see
76
+ https://www.contributor-covenant.org/faq
Dataset_Construction/projects/charset-normalizer/rust/CONTRIBUTING.md ADDED
@@ -0,0 +1,73 @@
1
+ # Contribution Guidelines
2
+
3
+ If you’re reading this, you’re probably interested in contributing to Charset Normalizer.
4
+ Thank you very much! Open source projects live-and-die based on the support they receive from others,
5
+ and the fact that you’re even considering contributing to this project is very generous of you.
6
+
7
+ ## Questions
8
+
9
+ The GitHub issue tracker is for *bug reports* and *feature requests*.
10
+ Questions are allowed only when no answer are provided in docs.
11
+
12
+ ## Good Bug Reports
13
+
14
+ Please be aware of the following things when filing bug reports:
15
+
16
+ 1. Avoid raising duplicate issues. *Please* use the GitHub issue search feature
17
+ to check whether your bug report or feature request has been mentioned in
18
+ the past. Duplicate bug reports and feature requests are a huge maintenance
19
+ burden on the limited resources of the project. If it is clear from your
20
+ report that you would have struggled to find the original, that's ok, but
21
+ if searching for a selection of words in your issue title would have found
22
+ the duplicate then the issue will likely be closed extremely abruptly.
23
+ 2. When filing bug reports about exceptions or tracebacks, please include the
24
+ *complete* traceback. Partial tracebacks, or just the exception text, are
25
+ not helpful. Issues that do not contain complete tracebacks may be closed
26
+ without warning.
27
+ 3. Make sure you provide a suitable amount of information to work with. This
28
+ means you should provide:
29
+
30
+ - Guidance on **how to reproduce the issue**. Ideally, this should be a
31
+ *small* code sample that can be run immediately by the maintainers.
32
+ Failing that, let us know what you're doing, how often it happens, what
33
+ environment you're using, etc. Be thorough: it prevents us needing to ask
34
+ further questions.
35
+ - Tell us **what you expected to happen**. When we run your example code,
36
+ what are we expecting to happen? What does "success" look like for your
37
+ code?
38
+ - Tell us **what actually happens**. It's not helpful for you to say "it
39
+ doesn't work" or "it fails". Tell us *how* it fails: do you get an
40
+ exception? A None answer? How was the actual result
41
+ different from your expected result?
42
+ - Tell us **what version of Charset Normalizer you're using**, and
43
+ **how you installed it**. Different versions of Charset Normalizer behave
44
+ differently and have different bugs.
45
+
46
+ If you do not provide all of these things, it will take us much longer to
47
+ fix your problem. If we ask you to clarify these, and you never respond, we
48
+ will close your issue without fixing it.
49
+
50
+
51
+ ## What PR are we accepting?
52
+
53
+ Mostly anything, from cosmetic to the detection-mechanism improvement at the solo condition that you do not break
54
+ the backward-compatibility.
55
+
56
+ ## What PR may be doomed?
57
+
58
+ - Add support for a charset/encoding unsupported by the Rust encoding library
59
+ > If you looked carefully at the project, you would see that it aims to be generic whenever possible. So adding a specific prober is out of the question.
60
+
61
+ - Of course, if the CI/CD is failing
62
+ > Getting the discussion started often means doing the minimum effort to get it green! (Be reassured, maintainers will look into it, given a reasonable amount of time.)
63
+
64
+ - Submitting a PR without any description OR viable commit description
65
+ > This is obvious: maintainers need to understand as quickly as possible what you are trying to submit, without having to put in too much effort.
66
+
67
+ ## How to run tests locally?
68
+
69
+ It is essential that you run the mandatory checks prior to any submission.
70
+ Run:
71
+ * `cargo fmt` to check and auto-fix formatting,
72
+ * `cargo clippy` to lint your code,
73
+ * `cargo test` to run all tests.
Dataset_Construction/projects/charset-normalizer/rust/Cargo.lock ADDED
@@ -0,0 +1,1795 @@
1
+ # This file is automatically @generated by Cargo.
2
+ # It is not intended for manual editing.
3
+ version = 3
4
+
5
+ [[package]]
6
+ name = "ahash"
7
+ version = "0.8.3"
8
+ source = "registry+https://github.com/rust-lang/crates.io-index"
9
+ checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
10
+ dependencies = [
11
+ "cfg-if",
12
+ "getrandom",
13
+ "once_cell",
14
+ "version_check",
15
+ ]
16
+
17
+ [[package]]
18
+ name = "aho-corasick"
19
+ version = "1.0.5"
20
+ source = "registry+https://github.com/rust-lang/crates.io-index"
21
+ checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783"
22
+ dependencies = [
23
+ "memchr",
24
+ ]
25
+
26
+ [[package]]
27
+ name = "allocator-api2"
28
+ version = "0.2.16"
29
+ source = "registry+https://github.com/rust-lang/crates.io-index"
30
+ checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
31
+
32
+ [[package]]
33
+ name = "anstream"
34
+ version = "0.5.0"
35
+ source = "registry+https://github.com/rust-lang/crates.io-index"
36
+ checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c"
37
+ dependencies = [
38
+ "anstyle",
39
+ "anstyle-parse",
40
+ "anstyle-query",
41
+ "anstyle-wincon",
42
+ "colorchoice",
43
+ "utf8parse",
44
+ ]
45
+
46
+ [[package]]
47
+ name = "anstyle"
48
+ version = "1.0.2"
49
+ source = "registry+https://github.com/rust-lang/crates.io-index"
50
+ checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea"
51
+
52
+ [[package]]
53
+ name = "anstyle-parse"
54
+ version = "0.2.1"
55
+ source = "registry+https://github.com/rust-lang/crates.io-index"
56
+ checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
57
+ dependencies = [
58
+ "utf8parse",
59
+ ]
60
+
61
+ [[package]]
62
+ name = "anstyle-query"
63
+ version = "1.0.0"
64
+ source = "registry+https://github.com/rust-lang/crates.io-index"
65
+ checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
66
+ dependencies = [
67
+ "windows-sys 0.48.0",
68
+ ]
69
+
70
+ [[package]]
71
+ name = "anstyle-wincon"
72
+ version = "2.1.0"
73
+ source = "registry+https://github.com/rust-lang/crates.io-index"
74
+ checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd"
75
+ dependencies = [
76
+ "anstyle",
77
+ "windows-sys 0.48.0",
78
+ ]
79
+
80
+ [[package]]
81
+ name = "assert_cmd"
82
+ version = "2.0.12"
83
+ source = "registry+https://github.com/rust-lang/crates.io-index"
84
+ checksum = "88903cb14723e4d4003335bb7f8a14f27691649105346a0f0957466c096adfe6"
85
+ dependencies = [
86
+ "anstyle",
87
+ "bstr",
88
+ "doc-comment",
89
+ "predicates",
90
+ "predicates-core",
91
+ "predicates-tree",
92
+ "wait-timeout",
93
+ ]
94
+
95
+ [[package]]
96
+ name = "atty"
97
+ version = "0.2.14"
98
+ source = "registry+https://github.com/rust-lang/crates.io-index"
99
+ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
100
+ dependencies = [
101
+ "hermit-abi 0.1.19",
102
+ "libc",
103
+ "winapi",
104
+ ]
105
+
106
+ [[package]]
107
+ name = "autocfg"
108
+ version = "1.1.0"
109
+ source = "registry+https://github.com/rust-lang/crates.io-index"
110
+ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
111
+
112
+ [[package]]
113
+ name = "bitflags"
114
+ version = "1.3.2"
115
+ source = "registry+https://github.com/rust-lang/crates.io-index"
116
+ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
117
+
118
+ [[package]]
119
+ name = "bitflags"
120
+ version = "2.4.0"
121
+ source = "registry+https://github.com/rust-lang/crates.io-index"
122
+ checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
123
+
124
+ [[package]]
125
+ name = "bstr"
126
+ version = "1.6.2"
127
+ source = "registry+https://github.com/rust-lang/crates.io-index"
128
+ checksum = "4c2f7349907b712260e64b0afe2f84692af14a454be26187d9df565c7f69266a"
129
+ dependencies = [
130
+ "memchr",
131
+ "regex-automata",
132
+ "serde",
133
+ ]
134
+
135
+ [[package]]
136
+ name = "bumpalo"
137
+ version = "3.13.0"
138
+ source = "registry+https://github.com/rust-lang/crates.io-index"
139
+ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
140
+
141
+ [[package]]
142
+ name = "cached"
143
+ version = "0.46.0"
144
+ source = "registry+https://github.com/rust-lang/crates.io-index"
145
+ checksum = "8cead8ece0da6b744b2ad8ef9c58a4cdc7ef2921e60a6ddfb9eaaa86839b5fc5"
146
+ dependencies = [
147
+ "ahash",
148
+ "cached_proc_macro",
149
+ "cached_proc_macro_types",
150
+ "hashbrown",
151
+ "instant",
152
+ "once_cell",
153
+ "thiserror",
154
+ ]
155
+
156
+ [[package]]
157
+ name = "cached_proc_macro"
158
+ version = "0.18.0"
159
+ source = "registry+https://github.com/rust-lang/crates.io-index"
160
+ checksum = "7da8245dd5f576a41c3b76247b54c15b0e43139ceeb4f732033e15be7c005176"
161
+ dependencies = [
162
+ "darling",
163
+ "proc-macro2",
164
+ "quote",
165
+ "syn 1.0.109",
166
+ ]
167
+
168
+ [[package]]
169
+ name = "cached_proc_macro_types"
170
+ version = "0.1.0"
171
+ source = "registry+https://github.com/rust-lang/crates.io-index"
172
+ checksum = "3a4f925191b4367301851c6d99b09890311d74b0d43f274c0b34c86d308a3663"
173
+
174
+ [[package]]
175
+ name = "cast"
176
+ version = "0.3.0"
177
+ source = "registry+https://github.com/rust-lang/crates.io-index"
178
+ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
179
+
180
+ [[package]]
181
+ name = "cc"
182
+ version = "1.0.83"
183
+ source = "registry+https://github.com/rust-lang/crates.io-index"
184
+ checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
185
+ dependencies = [
186
+ "libc",
187
+ ]
188
+
189
+ [[package]]
190
+ name = "cfg-if"
191
+ version = "1.0.0"
192
+ source = "registry+https://github.com/rust-lang/crates.io-index"
193
+ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
194
+
195
+ [[package]]
196
+ name = "chardet"
197
+ version = "0.2.4"
198
+ source = "registry+https://github.com/rust-lang/crates.io-index"
199
+ checksum = "1a48563284b67c003ba0fb7243c87fab68885e1532c605704228a80238512e31"
200
+
201
+ [[package]]
202
+ name = "chardetng"
203
+ version = "0.1.17"
204
+ source = "registry+https://github.com/rust-lang/crates.io-index"
205
+ checksum = "14b8f0b65b7b08ae3c8187e8d77174de20cb6777864c6b832d8ad365999cf1ea"
206
+ dependencies = [
207
+ "cfg-if",
208
+ "encoding_rs",
209
+ "memchr",
210
+ ]
211
+
212
+ [[package]]
213
+ name = "charset-normalizer-rs"
214
+ version = "1.0.6"
215
+ dependencies = [
216
+ "ahash",
217
+ "assert_cmd",
218
+ "bitflags 2.4.0",
219
+ "cached",
220
+ "chardet",
221
+ "chardetng",
222
+ "clap 4.4.2",
223
+ "counter",
224
+ "criterion",
225
+ "dialoguer",
226
+ "encoding",
227
+ "env_logger",
228
+ "icu_normalizer",
229
+ "icu_properties",
230
+ "log",
231
+ "once_cell",
232
+ "ordered-float",
233
+ "predicates",
234
+ "regex",
235
+ "serde",
236
+ "serde_json",
237
+ "strsim",
238
+ "unicode_names2",
239
+ ]
240
+
241
+ [[package]]
242
+ name = "clap"
243
+ version = "2.34.0"
244
+ source = "registry+https://github.com/rust-lang/crates.io-index"
245
+ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
246
+ dependencies = [
247
+ "bitflags 1.3.2",
248
+ "textwrap",
249
+ "unicode-width",
250
+ ]
251
+
252
+ [[package]]
253
+ name = "clap"
254
+ version = "4.4.2"
255
+ source = "registry+https://github.com/rust-lang/crates.io-index"
256
+ checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6"
257
+ dependencies = [
258
+ "clap_builder",
259
+ "clap_derive",
260
+ ]
261
+
262
+ [[package]]
263
+ name = "clap_builder"
264
+ version = "4.4.2"
265
+ source = "registry+https://github.com/rust-lang/crates.io-index"
266
+ checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08"
267
+ dependencies = [
268
+ "anstream",
269
+ "anstyle",
270
+ "clap_lex",
271
+ "strsim",
272
+ ]
273
+
274
+ [[package]]
275
+ name = "clap_derive"
276
+ version = "4.4.2"
277
+ source = "registry+https://github.com/rust-lang/crates.io-index"
278
+ checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873"
279
+ dependencies = [
280
+ "heck",
281
+ "proc-macro2",
282
+ "quote",
283
+ "syn 2.0.31",
284
+ ]
285
+
286
+ [[package]]
287
+ name = "clap_lex"
288
+ version = "0.5.1"
289
+ source = "registry+https://github.com/rust-lang/crates.io-index"
290
+ checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961"
291
+
292
+ [[package]]
293
+ name = "colorchoice"
294
+ version = "1.0.0"
295
+ source = "registry+https://github.com/rust-lang/crates.io-index"
296
+ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
297
+
298
+ [[package]]
299
+ name = "console"
300
+ version = "0.15.7"
301
+ source = "registry+https://github.com/rust-lang/crates.io-index"
302
+ checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
303
+ dependencies = [
304
+ "encode_unicode",
305
+ "lazy_static",
306
+ "libc",
307
+ "unicode-width",
308
+ "windows-sys 0.45.0",
309
+ ]
310
+
311
+ [[package]]
312
+ name = "counter"
313
+ version = "0.5.7"
314
+ source = "registry+https://github.com/rust-lang/crates.io-index"
315
+ checksum = "2d458e66999348f56fd3ffcfbb7f7951542075ca8359687c703de6500c1ddccd"
316
+ dependencies = [
317
+ "num-traits",
318
+ ]
319
+
320
+ [[package]]
321
+ name = "criterion"
322
+ version = "0.3.6"
323
+ source = "registry+https://github.com/rust-lang/crates.io-index"
324
+ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
325
+ dependencies = [
326
+ "atty",
327
+ "cast",
328
+ "clap 2.34.0",
329
+ "criterion-plot",
330
+ "csv",
331
+ "itertools",
332
+ "lazy_static",
333
+ "num-traits",
334
+ "oorandom",
335
+ "plotters",
336
+ "rayon",
337
+ "regex",
338
+ "serde",
339
+ "serde_cbor",
340
+ "serde_derive",
341
+ "serde_json",
342
+ "tinytemplate",
343
+ "walkdir",
344
+ ]
345
+
346
+ [[package]]
347
+ name = "criterion-plot"
348
+ version = "0.4.5"
349
+ source = "registry+https://github.com/rust-lang/crates.io-index"
350
+ checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
351
+ dependencies = [
352
+ "cast",
353
+ "itertools",
354
+ ]
355
+
356
+ [[package]]
357
+ name = "crossbeam-channel"
358
+ version = "0.5.8"
359
+ source = "registry+https://github.com/rust-lang/crates.io-index"
360
+ checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
361
+ dependencies = [
362
+ "cfg-if",
363
+ "crossbeam-utils",
364
+ ]
365
+
366
+ [[package]]
367
+ name = "crossbeam-deque"
368
+ version = "0.8.3"
369
+ source = "registry+https://github.com/rust-lang/crates.io-index"
370
+ checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
371
+ dependencies = [
372
+ "cfg-if",
373
+ "crossbeam-epoch",
374
+ "crossbeam-utils",
375
+ ]
376
+
377
+ [[package]]
378
+ name = "crossbeam-epoch"
379
+ version = "0.9.15"
380
+ source = "registry+https://github.com/rust-lang/crates.io-index"
381
+ checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
382
+ dependencies = [
383
+ "autocfg",
384
+ "cfg-if",
385
+ "crossbeam-utils",
386
+ "memoffset",
387
+ "scopeguard",
388
+ ]
389
+
390
+ [[package]]
391
+ name = "crossbeam-utils"
392
+ version = "0.8.16"
393
+ source = "registry+https://github.com/rust-lang/crates.io-index"
394
+ checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
395
+ dependencies = [
396
+ "cfg-if",
397
+ ]
398
+
399
+ [[package]]
400
+ name = "csv"
401
+ version = "1.2.2"
402
+ source = "registry+https://github.com/rust-lang/crates.io-index"
403
+ checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086"
404
+ dependencies = [
405
+ "csv-core",
406
+ "itoa",
407
+ "ryu",
408
+ "serde",
409
+ ]
410
+
411
+ [[package]]
412
+ name = "csv-core"
413
+ version = "0.1.10"
414
+ source = "registry+https://github.com/rust-lang/crates.io-index"
415
+ checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
416
+ dependencies = [
417
+ "memchr",
418
+ ]
419
+
420
+ [[package]]
421
+ name = "darling"
422
+ version = "0.14.4"
423
+ source = "registry+https://github.com/rust-lang/crates.io-index"
424
+ checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
425
+ dependencies = [
426
+ "darling_core",
427
+ "darling_macro",
428
+ ]
429
+
430
+ [[package]]
431
+ name = "darling_core"
432
+ version = "0.14.4"
433
+ source = "registry+https://github.com/rust-lang/crates.io-index"
434
+ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
435
+ dependencies = [
436
+ "fnv",
437
+ "ident_case",
438
+ "proc-macro2",
439
+ "quote",
440
+ "strsim",
441
+ "syn 1.0.109",
442
+ ]
443
+
444
+ [[package]]
445
+ name = "darling_macro"
446
+ version = "0.14.4"
447
+ source = "registry+https://github.com/rust-lang/crates.io-index"
448
+ checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
449
+ dependencies = [
450
+ "darling_core",
451
+ "quote",
452
+ "syn 1.0.109",
453
+ ]
454
+
455
+ [[package]]
456
+ name = "dialoguer"
457
+ version = "0.10.4"
458
+ source = "registry+https://github.com/rust-lang/crates.io-index"
459
+ checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87"
460
+ dependencies = [
461
+ "console",
462
+ "shell-words",
463
+ "tempfile",
464
+ "zeroize",
465
+ ]
466
+
467
+ [[package]]
468
+ name = "difflib"
469
+ version = "0.4.0"
470
+ source = "registry+https://github.com/rust-lang/crates.io-index"
471
+ checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
472
+
473
+ [[package]]
474
+ name = "displaydoc"
475
+ version = "0.2.4"
476
+ source = "registry+https://github.com/rust-lang/crates.io-index"
477
+ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
478
+ dependencies = [
479
+ "proc-macro2",
480
+ "quote",
481
+ "syn 2.0.31",
482
+ ]
483
+
484
+ [[package]]
485
+ name = "doc-comment"
486
+ version = "0.3.3"
487
+ source = "registry+https://github.com/rust-lang/crates.io-index"
488
+ checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
489
+
490
+ [[package]]
491
+ name = "either"
492
+ version = "1.9.0"
493
+ source = "registry+https://github.com/rust-lang/crates.io-index"
494
+ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
495
+
496
+ [[package]]
497
+ name = "encode_unicode"
498
+ version = "0.3.6"
499
+ source = "registry+https://github.com/rust-lang/crates.io-index"
500
+ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
501
+
502
+ [[package]]
503
+ name = "encoding"
504
+ version = "0.2.33"
505
+ source = "registry+https://github.com/rust-lang/crates.io-index"
506
+ checksum = "6b0d943856b990d12d3b55b359144ff341533e516d94098b1d3fc1ac666d36ec"
507
+ dependencies = [
508
+ "encoding-index-japanese",
509
+ "encoding-index-korean",
510
+ "encoding-index-simpchinese",
511
+ "encoding-index-singlebyte",
512
+ "encoding-index-tradchinese",
513
+ ]
514
+
515
+ [[package]]
516
+ name = "encoding-index-japanese"
517
+ version = "1.20141219.5"
518
+ source = "registry+https://github.com/rust-lang/crates.io-index"
519
+ checksum = "04e8b2ff42e9a05335dbf8b5c6f7567e5591d0d916ccef4e0b1710d32a0d0c91"
520
+ dependencies = [
521
+ "encoding_index_tests",
522
+ ]
523
+
524
+ [[package]]
525
+ name = "encoding-index-korean"
526
+ version = "1.20141219.5"
527
+ source = "registry+https://github.com/rust-lang/crates.io-index"
528
+ checksum = "4dc33fb8e6bcba213fe2f14275f0963fd16f0a02c878e3095ecfdf5bee529d81"
529
+ dependencies = [
530
+ "encoding_index_tests",
531
+ ]
532
+
533
+ [[package]]
534
+ name = "encoding-index-simpchinese"
535
+ version = "1.20141219.5"
536
+ source = "registry+https://github.com/rust-lang/crates.io-index"
537
+ checksum = "d87a7194909b9118fc707194baa434a4e3b0fb6a5a757c73c3adb07aa25031f7"
538
+ dependencies = [
539
+ "encoding_index_tests",
540
+ ]
541
+
542
+ [[package]]
543
+ name = "encoding-index-singlebyte"
544
+ version = "1.20141219.5"
545
+ source = "registry+https://github.com/rust-lang/crates.io-index"
546
+ checksum = "3351d5acffb224af9ca265f435b859c7c01537c0849754d3db3fdf2bfe2ae84a"
547
+ dependencies = [
548
+ "encoding_index_tests",
549
+ ]
550
+
551
+ [[package]]
552
+ name = "encoding-index-tradchinese"
553
+ version = "1.20141219.5"
554
+ source = "registry+https://github.com/rust-lang/crates.io-index"
555
+ checksum = "fd0e20d5688ce3cab59eb3ef3a2083a5c77bf496cb798dc6fcdb75f323890c18"
556
+ dependencies = [
557
+ "encoding_index_tests",
558
+ ]
559
+
560
+ [[package]]
561
+ name = "encoding_index_tests"
562
+ version = "0.1.4"
563
+ source = "registry+https://github.com/rust-lang/crates.io-index"
564
+ checksum = "a246d82be1c9d791c5dfde9a2bd045fc3cbba3fa2b11ad558f27d01712f00569"
565
+
566
+ [[package]]
567
+ name = "encoding_rs"
568
+ version = "0.8.33"
569
+ source = "registry+https://github.com/rust-lang/crates.io-index"
570
+ checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1"
571
+ dependencies = [
572
+ "cfg-if",
573
+ ]
574
+
575
+ [[package]]
576
+ name = "env_logger"
577
+ version = "0.10.0"
578
+ source = "registry+https://github.com/rust-lang/crates.io-index"
579
+ checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
580
+ dependencies = [
581
+ "humantime",
582
+ "is-terminal",
583
+ "log",
584
+ "regex",
585
+ "termcolor",
586
+ ]
587
+
588
+ [[package]]
589
+ name = "errno"
590
+ version = "0.3.3"
591
+ source = "registry+https://github.com/rust-lang/crates.io-index"
592
+ checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd"
593
+ dependencies = [
594
+ "errno-dragonfly",
595
+ "libc",
596
+ "windows-sys 0.48.0",
597
+ ]
598
+
599
+ [[package]]
600
+ name = "errno-dragonfly"
601
+ version = "0.1.2"
602
+ source = "registry+https://github.com/rust-lang/crates.io-index"
603
+ checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
604
+ dependencies = [
605
+ "cc",
606
+ "libc",
607
+ ]
608
+
609
+ [[package]]
610
+ name = "fastrand"
611
+ version = "2.0.0"
612
+ source = "registry+https://github.com/rust-lang/crates.io-index"
613
+ checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764"
614
+
615
+ [[package]]
616
+ name = "float-cmp"
617
+ version = "0.9.0"
618
+ source = "registry+https://github.com/rust-lang/crates.io-index"
619
+ checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
620
+ dependencies = [
621
+ "num-traits",
622
+ ]
623
+
624
+ [[package]]
625
+ name = "fnv"
626
+ version = "1.0.7"
627
+ source = "registry+https://github.com/rust-lang/crates.io-index"
628
+ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
629
+
630
+ [[package]]
631
+ name = "getopts"
632
+ version = "0.2.21"
633
+ source = "registry+https://github.com/rust-lang/crates.io-index"
634
+ checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
635
+ dependencies = [
636
+ "unicode-width",
637
+ ]
638
+
639
+ [[package]]
640
+ name = "getrandom"
641
+ version = "0.2.10"
642
+ source = "registry+https://github.com/rust-lang/crates.io-index"
643
+ checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
644
+ dependencies = [
645
+ "cfg-if",
646
+ "libc",
647
+ "wasi",
648
+ ]
649
+
650
+ [[package]]
651
+ name = "half"
652
+ version = "1.8.2"
653
+ source = "registry+https://github.com/rust-lang/crates.io-index"
654
+ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
655
+
656
+ [[package]]
657
+ name = "hashbrown"
658
+ version = "0.14.0"
659
+ source = "registry+https://github.com/rust-lang/crates.io-index"
660
+ checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
661
+ dependencies = [
662
+ "ahash",
663
+ "allocator-api2",
664
+ ]
665
+
666
+ [[package]]
667
+ name = "heck"
668
+ version = "0.4.1"
669
+ source = "registry+https://github.com/rust-lang/crates.io-index"
670
+ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
671
+
672
+ [[package]]
673
+ name = "hermit-abi"
674
+ version = "0.1.19"
675
+ source = "registry+https://github.com/rust-lang/crates.io-index"
676
+ checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
677
+ dependencies = [
678
+ "libc",
679
+ ]
680
+
681
+ [[package]]
682
+ name = "hermit-abi"
683
+ version = "0.3.2"
684
+ source = "registry+https://github.com/rust-lang/crates.io-index"
685
+ checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
686
+
687
+ [[package]]
688
+ name = "humantime"
689
+ version = "2.1.0"
690
+ source = "registry+https://github.com/rust-lang/crates.io-index"
691
+ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
692
+
693
+ [[package]]
694
+ name = "icu_collections"
695
+ version = "1.3.2"
696
+ source = "registry+https://github.com/rust-lang/crates.io-index"
697
+ checksum = "3907b2246e8dd5a29ead8a965e7c0c8a90e9b928e614a4279257d45c5e553e91"
698
+ dependencies = [
699
+ "displaydoc",
700
+ "yoke",
701
+ "zerofrom",
702
+ "zerovec",
703
+ ]
704
+
705
+ [[package]]
706
+ name = "icu_locid"
707
+ version = "1.3.2"
708
+ source = "registry+https://github.com/rust-lang/crates.io-index"
709
+ checksum = "f284eb342dc49d3e9d9f3b188489d76b5d22dfb1d1a5e0d1941811253bac625c"
710
+ dependencies = [
711
+ "displaydoc",
712
+ "litemap",
713
+ "tinystr",
714
+ "writeable",
715
+ "zerovec",
716
+ ]
717
+
718
+ [[package]]
719
+ name = "icu_locid_transform"
720
+ version = "1.3.2"
721
+ source = "registry+https://github.com/rust-lang/crates.io-index"
722
+ checksum = "6551daf80882d8e68eee186cc19e132d8bde1b1f059a79b93384a5ca0e8fc5e7"
723
+ dependencies = [
724
+ "displaydoc",
725
+ "icu_locid",
726
+ "icu_locid_transform_data",
727
+ "icu_provider",
728
+ "tinystr",
729
+ "zerovec",
730
+ ]
731
+
732
+ [[package]]
733
+ name = "icu_locid_transform_data"
734
+ version = "1.3.2"
735
+ source = "registry+https://github.com/rust-lang/crates.io-index"
736
+ checksum = "2a741eba5431f75eb2f1f9022d3cffabcadda6771e54fb4e77c8ba8653e4da44"
737
+
738
+ [[package]]
739
+ name = "icu_normalizer"
740
+ version = "1.3.2"
741
+ source = "registry+https://github.com/rust-lang/crates.io-index"
742
+ checksum = "080fc33a720d50a7342b0c58df010fbcfb842d6f78ef81555f8b1ac6bba57d3c"
743
+ dependencies = [
744
+ "displaydoc",
745
+ "icu_collections",
746
+ "icu_normalizer_data",
747
+ "icu_properties",
748
+ "icu_provider",
749
+ "smallvec",
750
+ "utf16_iter",
751
+ "utf8_iter",
752
+ "write16",
753
+ "zerovec",
754
+ ]
755
+
756
+ [[package]]
757
+ name = "icu_normalizer_data"
758
+ version = "1.3.2"
759
+ source = "registry+https://github.com/rust-lang/crates.io-index"
760
+ checksum = "6f8d22f74066c2e6442db2a9aa14950278e86719e811e304e48bae03094b369d"
761
+
762
+ [[package]]
763
+ name = "icu_properties"
764
+ version = "1.3.2"
765
+ source = "registry+https://github.com/rust-lang/crates.io-index"
766
+ checksum = "3477ae70f8ca8dc08ff7574b5398ed0a2f2e4e6b66bdff2558a92ed67e262be1"
767
+ dependencies = [
768
+ "displaydoc",
769
+ "icu_collections",
770
+ "icu_locid_transform",
771
+ "icu_properties_data",
772
+ "icu_provider",
773
+ "tinystr",
774
+ "zerovec",
775
+ ]
776
+
777
+ [[package]]
778
+ name = "icu_properties_data"
779
+ version = "1.3.2"
780
+ source = "registry+https://github.com/rust-lang/crates.io-index"
781
+ checksum = "7c8bb3b67a8347e94d580434369e5c7ee89999b9309d04b7cfc88dfaa0f31b59"
782
+
783
+ [[package]]
784
+ name = "icu_provider"
785
+ version = "1.3.2"
786
+ source = "registry+https://github.com/rust-lang/crates.io-index"
787
+ checksum = "68acdef80034b5e35d8524e9817479d389a4f9774f3f0cbe1bf3884d80fd5934"
788
+ dependencies = [
789
+ "displaydoc",
790
+ "icu_locid",
791
+ "icu_provider_macros",
792
+ "stable_deref_trait",
793
+ "tinystr",
794
+ "writeable",
795
+ "yoke",
796
+ "zerofrom",
797
+ "zerovec",
798
+ ]
799
+
800
+ [[package]]
801
+ name = "icu_provider_macros"
802
+ version = "1.3.2"
803
+ source = "registry+https://github.com/rust-lang/crates.io-index"
804
+ checksum = "2060258edfcfe32ca7058849bf0f146cb5c59aadbedf480333c0d0002f97bc99"
805
+ dependencies = [
806
+ "proc-macro2",
807
+ "quote",
808
+ "syn 2.0.31",
809
+ ]
810
+
811
+ [[package]]
812
+ name = "ident_case"
813
+ version = "1.0.1"
814
+ source = "registry+https://github.com/rust-lang/crates.io-index"
815
+ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
816
+
817
+ [[package]]
818
+ name = "instant"
819
+ version = "0.1.12"
820
+ source = "registry+https://github.com/rust-lang/crates.io-index"
821
+ checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
822
+ dependencies = [
823
+ "cfg-if",
824
+ ]
825
+
826
+ [[package]]
827
+ name = "is-terminal"
828
+ version = "0.4.9"
829
+ source = "registry+https://github.com/rust-lang/crates.io-index"
830
+ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
831
+ dependencies = [
832
+ "hermit-abi 0.3.2",
833
+ "rustix",
834
+ "windows-sys 0.48.0",
835
+ ]
836
+
837
+ [[package]]
838
+ name = "itertools"
839
+ version = "0.10.5"
840
+ source = "registry+https://github.com/rust-lang/crates.io-index"
841
+ checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
842
+ dependencies = [
843
+ "either",
844
+ ]
845
+
846
+ [[package]]
847
+ name = "itoa"
848
+ version = "1.0.9"
849
+ source = "registry+https://github.com/rust-lang/crates.io-index"
850
+ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
851
+
852
+ [[package]]
853
+ name = "js-sys"
854
+ version = "0.3.64"
855
+ source = "registry+https://github.com/rust-lang/crates.io-index"
856
+ checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
857
+ dependencies = [
858
+ "wasm-bindgen",
859
+ ]
860
+
861
+ [[package]]
862
+ name = "lazy_static"
863
+ version = "1.4.0"
864
+ source = "registry+https://github.com/rust-lang/crates.io-index"
865
+ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
866
+
867
+ [[package]]
868
+ name = "libc"
869
+ version = "0.2.147"
870
+ source = "registry+https://github.com/rust-lang/crates.io-index"
871
+ checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
872
+
873
+ [[package]]
874
+ name = "linux-raw-sys"
875
+ version = "0.4.7"
876
+ source = "registry+https://github.com/rust-lang/crates.io-index"
877
+ checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128"
878
+
879
+ [[package]]
880
+ name = "litemap"
881
+ version = "0.7.1"
882
+ source = "registry+https://github.com/rust-lang/crates.io-index"
883
+ checksum = "77a1a2647d5b7134127971a6de0d533c49de2159167e7f259c427195f87168a1"
884
+
885
+ [[package]]
886
+ name = "log"
887
+ version = "0.4.20"
888
+ source = "registry+https://github.com/rust-lang/crates.io-index"
889
+ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
890
+
891
+ [[package]]
892
+ name = "memchr"
893
+ version = "2.6.3"
894
+ source = "registry+https://github.com/rust-lang/crates.io-index"
895
+ checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c"
896
+
897
+ [[package]]
898
+ name = "memoffset"
899
+ version = "0.9.0"
900
+ source = "registry+https://github.com/rust-lang/crates.io-index"
901
+ checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
902
+ dependencies = [
903
+ "autocfg",
904
+ ]
905
+
906
+ [[package]]
907
+ name = "normalize-line-endings"
908
+ version = "0.3.0"
909
+ source = "registry+https://github.com/rust-lang/crates.io-index"
910
+ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
911
+
912
+ [[package]]
913
+ name = "num-traits"
914
+ version = "0.2.16"
915
+ source = "registry+https://github.com/rust-lang/crates.io-index"
916
+ checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2"
917
+ dependencies = [
918
+ "autocfg",
919
+ ]
920
+
921
+ [[package]]
922
+ name = "num_cpus"
923
+ version = "1.16.0"
924
+ source = "registry+https://github.com/rust-lang/crates.io-index"
925
+ checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
926
+ dependencies = [
927
+ "hermit-abi 0.3.2",
928
+ "libc",
929
+ ]
930
+
931
+ [[package]]
932
+ name = "once_cell"
933
+ version = "1.18.0"
934
+ source = "registry+https://github.com/rust-lang/crates.io-index"
935
+ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
936
+
937
+ [[package]]
938
+ name = "oorandom"
939
+ version = "11.1.3"
940
+ source = "registry+https://github.com/rust-lang/crates.io-index"
941
+ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
942
+
943
+ [[package]]
944
+ name = "ordered-float"
945
+ version = "3.9.1"
946
+ source = "registry+https://github.com/rust-lang/crates.io-index"
947
+ checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06"
948
+ dependencies = [
949
+ "num-traits",
950
+ ]
951
+
952
+ [[package]]
953
+ name = "phf"
954
+ version = "0.11.2"
955
+ source = "registry+https://github.com/rust-lang/crates.io-index"
956
+ checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
957
+ dependencies = [
958
+ "phf_shared",
959
+ ]
960
+
961
+ [[package]]
962
+ name = "phf_codegen"
963
+ version = "0.11.2"
964
+ source = "registry+https://github.com/rust-lang/crates.io-index"
965
+ checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a"
966
+ dependencies = [
967
+ "phf_generator",
968
+ "phf_shared",
969
+ ]
970
+
971
+ [[package]]
972
+ name = "phf_generator"
973
+ version = "0.11.2"
974
+ source = "registry+https://github.com/rust-lang/crates.io-index"
975
+ checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0"
976
+ dependencies = [
977
+ "phf_shared",
978
+ "rand",
979
+ ]
980
+
981
+ [[package]]
982
+ name = "phf_shared"
983
+ version = "0.11.2"
984
+ source = "registry+https://github.com/rust-lang/crates.io-index"
985
+ checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b"
986
+ dependencies = [
987
+ "siphasher",
988
+ ]
989
+
990
+ [[package]]
991
+ name = "plotters"
992
+ version = "0.3.5"
993
+ source = "registry+https://github.com/rust-lang/crates.io-index"
994
+ checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
995
+ dependencies = [
996
+ "num-traits",
997
+ "plotters-backend",
998
+ "plotters-svg",
999
+ "wasm-bindgen",
1000
+ "web-sys",
1001
+ ]
1002
+
1003
+ [[package]]
1004
+ name = "plotters-backend"
1005
+ version = "0.3.5"
1006
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1007
+ checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609"
1008
+
1009
+ [[package]]
1010
+ name = "plotters-svg"
1011
+ version = "0.3.5"
1012
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1013
+ checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab"
1014
+ dependencies = [
1015
+ "plotters-backend",
1016
+ ]
1017
+
1018
+ [[package]]
1019
+ name = "ppv-lite86"
1020
+ version = "0.2.17"
1021
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1022
+ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
1023
+
1024
+ [[package]]
1025
+ name = "predicates"
1026
+ version = "3.0.3"
1027
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1028
+ checksum = "09963355b9f467184c04017ced4a2ba2d75cbcb4e7462690d388233253d4b1a9"
1029
+ dependencies = [
1030
+ "anstyle",
1031
+ "difflib",
1032
+ "float-cmp",
1033
+ "itertools",
1034
+ "normalize-line-endings",
1035
+ "predicates-core",
1036
+ "regex",
1037
+ ]
1038
+
1039
+ [[package]]
1040
+ name = "predicates-core"
1041
+ version = "1.0.6"
1042
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1043
+ checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174"
1044
+
1045
+ [[package]]
1046
+ name = "predicates-tree"
1047
+ version = "1.0.9"
1048
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1049
+ checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf"
1050
+ dependencies = [
1051
+ "predicates-core",
1052
+ "termtree",
1053
+ ]
1054
+
1055
+ [[package]]
1056
+ name = "proc-macro2"
1057
+ version = "1.0.66"
1058
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1059
+ checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
1060
+ dependencies = [
1061
+ "unicode-ident",
1062
+ ]
1063
+
1064
+ [[package]]
1065
+ name = "quote"
1066
+ version = "1.0.33"
1067
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1068
+ checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
1069
+ dependencies = [
1070
+ "proc-macro2",
1071
+ ]
1072
+
1073
+ [[package]]
1074
+ name = "rand"
1075
+ version = "0.8.5"
1076
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1077
+ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
1078
+ dependencies = [
1079
+ "libc",
1080
+ "rand_chacha",
1081
+ "rand_core",
1082
+ ]
1083
+
1084
+ [[package]]
1085
+ name = "rand_chacha"
1086
+ version = "0.3.1"
1087
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1088
+ checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
1089
+ dependencies = [
1090
+ "ppv-lite86",
1091
+ "rand_core",
1092
+ ]
1093
+
1094
+ [[package]]
1095
+ name = "rand_core"
1096
+ version = "0.6.4"
1097
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1098
+ checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
1099
+ dependencies = [
1100
+ "getrandom",
1101
+ ]
1102
+
1103
+ [[package]]
1104
+ name = "rayon"
1105
+ version = "1.7.0"
1106
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1107
+ checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
1108
+ dependencies = [
1109
+ "either",
1110
+ "rayon-core",
1111
+ ]
1112
+
1113
+ [[package]]
1114
+ name = "rayon-core"
1115
+ version = "1.11.0"
1116
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1117
+ checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
1118
+ dependencies = [
1119
+ "crossbeam-channel",
1120
+ "crossbeam-deque",
1121
+ "crossbeam-utils",
1122
+ "num_cpus",
1123
+ ]
1124
+
1125
+ [[package]]
1126
+ name = "redox_syscall"
1127
+ version = "0.3.5"
1128
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1129
+ checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
1130
+ dependencies = [
1131
+ "bitflags 1.3.2",
1132
+ ]
1133
+
1134
+ [[package]]
1135
+ name = "regex"
1136
+ version = "1.9.5"
1137
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1138
+ checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47"
1139
+ dependencies = [
1140
+ "aho-corasick",
1141
+ "memchr",
1142
+ "regex-automata",
1143
+ "regex-syntax",
1144
+ ]
1145
+
1146
+ [[package]]
1147
+ name = "regex-automata"
1148
+ version = "0.3.8"
1149
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1150
+ checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795"
1151
+ dependencies = [
1152
+ "aho-corasick",
1153
+ "memchr",
1154
+ "regex-syntax",
1155
+ ]
1156
+
1157
+ [[package]]
1158
+ name = "regex-syntax"
1159
+ version = "0.7.5"
1160
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1161
+ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
1162
+
1163
+ [[package]]
1164
+ name = "rustix"
1165
+ version = "0.38.12"
1166
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1167
+ checksum = "bdf14a7a466ce88b5eac3da815b53aefc208ce7e74d1c263aabb04d88c4abeb1"
1168
+ dependencies = [
1169
+ "bitflags 2.4.0",
1170
+ "errno",
1171
+ "libc",
1172
+ "linux-raw-sys",
1173
+ "windows-sys 0.48.0",
1174
+ ]
1175
+
1176
+ [[package]]
1177
+ name = "ryu"
1178
+ version = "1.0.15"
1179
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1180
+ checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
1181
+
1182
+ [[package]]
1183
+ name = "same-file"
1184
+ version = "1.0.6"
1185
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1186
+ checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
1187
+ dependencies = [
1188
+ "winapi-util",
1189
+ ]
1190
+
1191
+ [[package]]
1192
+ name = "scopeguard"
1193
+ version = "1.2.0"
1194
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1195
+ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
1196
+
1197
+ [[package]]
1198
+ name = "serde"
1199
+ version = "1.0.188"
1200
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1201
+ checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e"
1202
+ dependencies = [
1203
+ "serde_derive",
1204
+ ]
1205
+
1206
+ [[package]]
1207
+ name = "serde_cbor"
1208
+ version = "0.11.2"
1209
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1210
+ checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
1211
+ dependencies = [
1212
+ "half",
1213
+ "serde",
1214
+ ]
1215
+
1216
+ [[package]]
1217
+ name = "serde_derive"
1218
+ version = "1.0.188"
1219
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1220
+ checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2"
1221
+ dependencies = [
1222
+ "proc-macro2",
1223
+ "quote",
1224
+ "syn 2.0.31",
1225
+ ]
1226
+
1227
+ [[package]]
1228
+ name = "serde_json"
1229
+ version = "1.0.107"
1230
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1231
+ checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
1232
+ dependencies = [
1233
+ "itoa",
1234
+ "ryu",
1235
+ "serde",
1236
+ ]
1237
+
1238
+ [[package]]
1239
+ name = "shell-words"
1240
+ version = "1.1.0"
1241
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1242
+ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"
1243
+
1244
+ [[package]]
1245
+ name = "siphasher"
1246
+ version = "0.3.11"
1247
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1248
+ checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d"
1249
+
1250
+ [[package]]
1251
+ name = "smallvec"
1252
+ version = "1.11.1"
1253
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1254
+ checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a"
1255
+
1256
+ [[package]]
1257
+ name = "stable_deref_trait"
1258
+ version = "1.2.0"
1259
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1260
+ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
1261
+
1262
+ [[package]]
1263
+ name = "strsim"
1264
+ version = "0.10.0"
1265
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1266
+ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
1267
+
1268
+ [[package]]
1269
+ name = "syn"
1270
+ version = "1.0.109"
1271
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1272
+ checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
1273
+ dependencies = [
1274
+ "proc-macro2",
1275
+ "quote",
1276
+ "unicode-ident",
1277
+ ]
1278
+
1279
+ [[package]]
1280
+ name = "syn"
1281
+ version = "2.0.31"
1282
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1283
+ checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398"
1284
+ dependencies = [
1285
+ "proc-macro2",
1286
+ "quote",
1287
+ "unicode-ident",
1288
+ ]
1289
+
1290
+ [[package]]
1291
+ name = "synstructure"
1292
+ version = "0.13.0"
1293
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1294
+ checksum = "285ba80e733fac80aa4270fbcdf83772a79b80aa35c97075320abfee4a915b06"
1295
+ dependencies = [
1296
+ "proc-macro2",
1297
+ "quote",
1298
+ "syn 2.0.31",
1299
+ "unicode-xid",
1300
+ ]
1301
+
1302
+ [[package]]
1303
+ name = "tempfile"
1304
+ version = "3.8.0"
1305
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1306
+ checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef"
1307
+ dependencies = [
1308
+ "cfg-if",
1309
+ "fastrand",
1310
+ "redox_syscall",
1311
+ "rustix",
1312
+ "windows-sys 0.48.0",
1313
+ ]
1314
+
1315
+ [[package]]
1316
+ name = "termcolor"
1317
+ version = "1.2.0"
1318
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1319
+ checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
1320
+ dependencies = [
1321
+ "winapi-util",
1322
+ ]
1323
+
1324
+ [[package]]
1325
+ name = "termtree"
1326
+ version = "0.4.1"
1327
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1328
+ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
1329
+
1330
+ [[package]]
1331
+ name = "textwrap"
1332
+ version = "0.11.0"
1333
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1334
+ checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
1335
+ dependencies = [
1336
+ "unicode-width",
1337
+ ]
1338
+
1339
+ [[package]]
1340
+ name = "thiserror"
1341
+ version = "1.0.49"
1342
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1343
+ checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4"
1344
+ dependencies = [
1345
+ "thiserror-impl",
1346
+ ]
1347
+
1348
+ [[package]]
1349
+ name = "thiserror-impl"
1350
+ version = "1.0.49"
1351
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1352
+ checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc"
1353
+ dependencies = [
1354
+ "proc-macro2",
1355
+ "quote",
1356
+ "syn 2.0.31",
1357
+ ]
1358
+
1359
+ [[package]]
1360
+ name = "time"
1361
+ version = "0.3.20"
1362
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1363
+ checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890"
1364
+ dependencies = [
1365
+ "serde",
1366
+ "time-core",
1367
+ ]
1368
+
1369
+ [[package]]
1370
+ name = "time-core"
1371
+ version = "0.1.0"
1372
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1373
+ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
1374
+
1375
+ [[package]]
1376
+ name = "tinystr"
1377
+ version = "0.7.4"
1378
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1379
+ checksum = "d5d0e245e80bdc9b4e5356fc45a72184abbc3861992603f515270e9340f5a219"
1380
+ dependencies = [
1381
+ "displaydoc",
1382
+ "zerovec",
1383
+ ]
1384
+
1385
+ [[package]]
1386
+ name = "tinytemplate"
1387
+ version = "1.2.1"
1388
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1389
+ checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
1390
+ dependencies = [
1391
+ "serde",
1392
+ "serde_json",
1393
+ ]
1394
+
1395
+ [[package]]
1396
+ name = "unicode-ident"
1397
+ version = "1.0.11"
1398
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1399
+ checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
1400
+
1401
+ [[package]]
1402
+ name = "unicode-width"
1403
+ version = "0.1.10"
1404
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1405
+ checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
1406
+
1407
+ [[package]]
1408
+ name = "unicode-xid"
1409
+ version = "0.2.4"
1410
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1411
+ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
1412
+
1413
+ [[package]]
1414
+ name = "unicode_names2"
1415
+ version = "1.1.0"
1416
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1417
+ checksum = "38b2c0942619ae1797f999a0ce7efc6c09592ad30e68e16cdbfdcd48a98c3579"
1418
+ dependencies = [
1419
+ "phf",
1420
+ "unicode_names2_generator",
1421
+ ]
1422
+
1423
+ [[package]]
1424
+ name = "unicode_names2_generator"
1425
+ version = "1.1.0"
1426
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1427
+ checksum = "4d0d66ab60be9799a70f8eb227ea43da7dcc47561dd9102cbadacfe0930113f7"
1428
+ dependencies = [
1429
+ "getopts",
1430
+ "log",
1431
+ "phf_codegen",
1432
+ "rand",
1433
+ "time",
1434
+ ]
1435
+
1436
+ [[package]]
1437
+ name = "utf16_iter"
1438
+ version = "1.0.4"
1439
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1440
+ checksum = "52df8b7fb78e7910d776fccf2e42ceaf3604d55e8e7eb2dbd183cb1441d8a692"
1441
+
1442
+ [[package]]
1443
+ name = "utf8_iter"
1444
+ version = "1.0.3"
1445
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1446
+ checksum = "64a8922555b9500e3d865caed19330172cd67cbf82203f1a3311d8c305cc9f33"
1447
+
1448
+ [[package]]
1449
+ name = "utf8parse"
1450
+ version = "0.2.1"
1451
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1452
+ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
1453
+
1454
+ [[package]]
1455
+ name = "version_check"
1456
+ version = "0.9.4"
1457
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1458
+ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
1459
+
1460
+ [[package]]
1461
+ name = "wait-timeout"
1462
+ version = "0.2.0"
1463
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1464
+ checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
1465
+ dependencies = [
1466
+ "libc",
1467
+ ]
1468
+
1469
+ [[package]]
1470
+ name = "walkdir"
1471
+ version = "2.4.0"
1472
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1473
+ checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee"
1474
+ dependencies = [
1475
+ "same-file",
1476
+ "winapi-util",
1477
+ ]
1478
+
1479
+ [[package]]
1480
+ name = "wasi"
1481
+ version = "0.11.0+wasi-snapshot-preview1"
1482
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1483
+ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
1484
+
1485
+ [[package]]
1486
+ name = "wasm-bindgen"
1487
+ version = "0.2.87"
1488
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1489
+ checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
1490
+ dependencies = [
1491
+ "cfg-if",
1492
+ "wasm-bindgen-macro",
1493
+ ]
1494
+
1495
+ [[package]]
1496
+ name = "wasm-bindgen-backend"
1497
+ version = "0.2.87"
1498
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1499
+ checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
1500
+ dependencies = [
1501
+ "bumpalo",
1502
+ "log",
1503
+ "once_cell",
1504
+ "proc-macro2",
1505
+ "quote",
1506
+ "syn 2.0.31",
1507
+ "wasm-bindgen-shared",
1508
+ ]
1509
+
1510
+ [[package]]
1511
+ name = "wasm-bindgen-macro"
1512
+ version = "0.2.87"
1513
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1514
+ checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
1515
+ dependencies = [
1516
+ "quote",
1517
+ "wasm-bindgen-macro-support",
1518
+ ]
1519
+
1520
+ [[package]]
1521
+ name = "wasm-bindgen-macro-support"
1522
+ version = "0.2.87"
1523
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1524
+ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
1525
+ dependencies = [
1526
+ "proc-macro2",
1527
+ "quote",
1528
+ "syn 2.0.31",
1529
+ "wasm-bindgen-backend",
1530
+ "wasm-bindgen-shared",
1531
+ ]
1532
+
1533
+ [[package]]
1534
+ name = "wasm-bindgen-shared"
1535
+ version = "0.2.87"
1536
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1537
+ checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
1538
+
1539
+ [[package]]
1540
+ name = "web-sys"
1541
+ version = "0.3.64"
1542
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1543
+ checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b"
1544
+ dependencies = [
1545
+ "js-sys",
1546
+ "wasm-bindgen",
1547
+ ]
1548
+
1549
+ [[package]]
1550
+ name = "winapi"
1551
+ version = "0.3.9"
1552
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1553
+ checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
1554
+ dependencies = [
1555
+ "winapi-i686-pc-windows-gnu",
1556
+ "winapi-x86_64-pc-windows-gnu",
1557
+ ]
1558
+
1559
+ [[package]]
1560
+ name = "winapi-i686-pc-windows-gnu"
1561
+ version = "0.4.0"
1562
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1563
+ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
1564
+
1565
+ [[package]]
1566
+ name = "winapi-util"
1567
+ version = "0.1.5"
1568
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1569
+ checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
1570
+ dependencies = [
1571
+ "winapi",
1572
+ ]
1573
+
1574
+ [[package]]
1575
+ name = "winapi-x86_64-pc-windows-gnu"
1576
+ version = "0.4.0"
1577
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1578
+ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
1579
+
1580
+ [[package]]
1581
+ name = "windows-sys"
1582
+ version = "0.45.0"
1583
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1584
+ checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
1585
+ dependencies = [
1586
+ "windows-targets 0.42.2",
1587
+ ]
1588
+
1589
+ [[package]]
1590
+ name = "windows-sys"
1591
+ version = "0.48.0"
1592
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1593
+ checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
1594
+ dependencies = [
1595
+ "windows-targets 0.48.5",
1596
+ ]
1597
+
1598
+ [[package]]
1599
+ name = "windows-targets"
1600
+ version = "0.42.2"
1601
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1602
+ checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
1603
+ dependencies = [
1604
+ "windows_aarch64_gnullvm 0.42.2",
1605
+ "windows_aarch64_msvc 0.42.2",
1606
+ "windows_i686_gnu 0.42.2",
1607
+ "windows_i686_msvc 0.42.2",
1608
+ "windows_x86_64_gnu 0.42.2",
1609
+ "windows_x86_64_gnullvm 0.42.2",
1610
+ "windows_x86_64_msvc 0.42.2",
1611
+ ]
1612
+
1613
+ [[package]]
1614
+ name = "windows-targets"
1615
+ version = "0.48.5"
1616
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1617
+ checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
1618
+ dependencies = [
1619
+ "windows_aarch64_gnullvm 0.48.5",
1620
+ "windows_aarch64_msvc 0.48.5",
1621
+ "windows_i686_gnu 0.48.5",
1622
+ "windows_i686_msvc 0.48.5",
1623
+ "windows_x86_64_gnu 0.48.5",
1624
+ "windows_x86_64_gnullvm 0.48.5",
1625
+ "windows_x86_64_msvc 0.48.5",
1626
+ ]
1627
+
1628
+ [[package]]
1629
+ name = "windows_aarch64_gnullvm"
1630
+ version = "0.42.2"
1631
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1632
+ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
1633
+
1634
+ [[package]]
1635
+ name = "windows_aarch64_gnullvm"
1636
+ version = "0.48.5"
1637
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1638
+ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
1639
+
1640
+ [[package]]
1641
+ name = "windows_aarch64_msvc"
1642
+ version = "0.42.2"
1643
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1644
+ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
1645
+
1646
+ [[package]]
1647
+ name = "windows_aarch64_msvc"
1648
+ version = "0.48.5"
1649
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1650
+ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
1651
+
1652
+ [[package]]
1653
+ name = "windows_i686_gnu"
1654
+ version = "0.42.2"
1655
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1656
+ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
1657
+
1658
+ [[package]]
1659
+ name = "windows_i686_gnu"
1660
+ version = "0.48.5"
1661
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1662
+ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
1663
+
1664
+ [[package]]
1665
+ name = "windows_i686_msvc"
1666
+ version = "0.42.2"
1667
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1668
+ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
1669
+
1670
+ [[package]]
1671
+ name = "windows_i686_msvc"
1672
+ version = "0.48.5"
1673
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1674
+ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
1675
+
1676
+ [[package]]
1677
+ name = "windows_x86_64_gnu"
1678
+ version = "0.42.2"
1679
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1680
+ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
1681
+
1682
+ [[package]]
1683
+ name = "windows_x86_64_gnu"
1684
+ version = "0.48.5"
1685
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1686
+ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
1687
+
1688
+ [[package]]
1689
+ name = "windows_x86_64_gnullvm"
1690
+ version = "0.42.2"
1691
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1692
+ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
1693
+
1694
+ [[package]]
1695
+ name = "windows_x86_64_gnullvm"
1696
+ version = "0.48.5"
1697
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1698
+ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
1699
+
1700
+ [[package]]
1701
+ name = "windows_x86_64_msvc"
1702
+ version = "0.42.2"
1703
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1704
+ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
1705
+
1706
+ [[package]]
1707
+ name = "windows_x86_64_msvc"
1708
+ version = "0.48.5"
1709
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1710
+ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
1711
+
1712
+ [[package]]
1713
+ name = "write16"
1714
+ version = "1.0.0"
1715
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1716
+ checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"
1717
+
1718
+ [[package]]
1719
+ name = "writeable"
1720
+ version = "0.5.3"
1721
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1722
+ checksum = "c0af0c3d13faebf8dda0b5256fa7096a2d5ccb662f7b9f54a40fe201077ab1c2"
1723
+
1724
+ [[package]]
1725
+ name = "yoke"
1726
+ version = "0.7.2"
1727
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1728
+ checksum = "61e38c508604d6bbbd292dadb3c02559aa7fff6b654a078a36217cad871636e4"
1729
+ dependencies = [
1730
+ "serde",
1731
+ "stable_deref_trait",
1732
+ "yoke-derive",
1733
+ "zerofrom",
1734
+ ]
1735
+
1736
+ [[package]]
1737
+ name = "yoke-derive"
1738
+ version = "0.7.2"
1739
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1740
+ checksum = "d5e19fb6ed40002bab5403ffa37e53e0e56f914a4450c8765f533018db1db35f"
1741
+ dependencies = [
1742
+ "proc-macro2",
1743
+ "quote",
1744
+ "syn 2.0.31",
1745
+ "synstructure",
1746
+ ]
1747
+
1748
+ [[package]]
1749
+ name = "zerofrom"
1750
+ version = "0.1.3"
1751
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1752
+ checksum = "655b0814c5c0b19ade497851070c640773304939a6c0fd5f5fb43da0696d05b7"
1753
+ dependencies = [
1754
+ "zerofrom-derive",
1755
+ ]
1756
+
1757
+ [[package]]
1758
+ name = "zerofrom-derive"
1759
+ version = "0.1.3"
1760
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1761
+ checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3"
1762
+ dependencies = [
1763
+ "proc-macro2",
1764
+ "quote",
1765
+ "syn 2.0.31",
1766
+ "synstructure",
1767
+ ]
1768
+
1769
+ [[package]]
1770
+ name = "zeroize"
1771
+ version = "1.6.0"
1772
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1773
+ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
1774
+
1775
+ [[package]]
1776
+ name = "zerovec"
1777
+ version = "0.10.0"
1778
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1779
+ checksum = "1194130c5b155bf8ae50ab16c86ab758cd695cf9ad176d2f870b744cbdbb572e"
1780
+ dependencies = [
1781
+ "yoke",
1782
+ "zerofrom",
1783
+ "zerovec-derive",
1784
+ ]
1785
+
1786
+ [[package]]
1787
+ name = "zerovec-derive"
1788
+ version = "0.10.0"
1789
+ source = "registry+https://github.com/rust-lang/crates.io-index"
1790
+ checksum = "acabf549809064225ff8878baedc4ce3732ac3b07e7c7ce6e5c2ccdbc485c324"
1791
+ dependencies = [
1792
+ "proc-macro2",
1793
+ "quote",
1794
+ "syn 2.0.31",
1795
+ ]
Dataset_Construction/projects/charset-normalizer/rust/Cargo.toml ADDED
@@ -0,0 +1,70 @@
1
+ [package]
2
+ name = "charset-normalizer-rs"
3
+ version = "1.0.6"
4
+ authors = ["Nikolay Yarovoy <nikolay.yarovoy@gmail.com>"]
5
+ edition = "2021"
6
+ description = "Truly universal encoding detector in pure Rust - port of Python version"
7
+ license-file = "LICENSE"
8
+ documentation = "https://docs.rs/charset-normalizer-rs"
9
+ readme = "README.md"
10
+ repository = "https://github.com/nickspring/charset-normalizer-rs"
11
+ keywords = ["encoding", "charset", "detector", "conversion", "normalizer"]
12
+ categories = ["encoding", "internationalization", "localization"]
13
+ exclude = [
14
+ "/src/tests/data/**",
15
+ "/CONTRIBUTING.md",
16
+ "/CODE_OF_CONDUCT.md",
17
+ "/.github/**",
18
+ ".gitattributes"
19
+ ]
20
+
21
+ [dependencies]
22
+ ahash = "0.8.3"
23
+ bitflags = "2.4.0"
24
+ cached = "0.46.0"
25
+ chardet = { version = "0.2.4", optional = true }
26
+ chardetng = { version = "0.1.17", optional = true }
27
+ clap = { version = "4.4.2", features = ["derive"] }
28
+ counter = "0.5.7"
29
+ dialoguer = "0.10.4"
30
+ encoding = "0.2.33"
31
+ env_logger = "0.10.0"
32
+ icu_normalizer = "1.3.2"
33
+ icu_properties = "1.3.2"
34
+ log = "0.4.20"
35
+ once_cell = "1.18.0"
36
+ ordered-float = "3.9.1"
37
+ regex = "1.9.3"
38
+ serde = { version = "1.0.188", features = ["derive"] }
39
+ serde_json = "1.0.107"
40
+ strsim = "0.10.0"
41
+ unicode_names2 = "1.1.0"
42
+
43
+ [dev-dependencies]
44
+ assert_cmd = "2.0.12"
45
+ criterion = "0.3"
46
+ predicates = "3.0.3"
47
+
48
+ [[bench]]
49
+ name = "large_payload"
50
+ harness = false
51
+
52
+ [[bench]]
53
+ name = "large_datasets"
54
+ harness = false
55
+
56
+ [features]
57
+ performance = ["chardet", "chardetng"]
58
+
59
+ [[bin]]
60
+ name = "performance"
61
+ path = "src/performance.rs"
62
+ required-features = ["performance"]
63
+
64
+ [[bin]]
65
+ name = "normalizer"
66
+ path = "src/normalizer.rs"
67
+
68
+ [profile.release]
69
+ opt-level = 3
70
+ lto = "fat"
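Given the `[[bin]]` and `[features]` entries in this manifest, the optional performance-comparison binary is gated behind the `performance` feature, which in turn enables the optional `chardet` and `chardetng` dependencies. A sketch of how it would be built using standard Cargo flags (the exact invocation is an assumption, not something stated in the manifest itself):

```console
# Build and run the optional benchmark binary in release mode;
# `--features performance` pulls in the optional chardet / chardetng crates.
cargo run --release --bin performance --features performance
```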
Dataset_Construction/projects/charset-normalizer/rust/LICENSE ADDED
@@ -0,0 +1,22 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2023 YAROVOY Nikolay (Rust version).
4
+ Copyright (c) 2019 TAHRI Ahmed R. (author of original Python version)
5
+
6
+ Permission is hereby granted, free of charge, to any person obtaining a copy
7
+ of this software and associated documentation files (the "Software"), to deal
8
+ in the Software without restriction, including without limitation the rights
9
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
+ copies of the Software, and to permit persons to whom the Software is
11
+ furnished to do so, subject to the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be included in all
14
+ copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
+ SOFTWARE.
Dataset_Construction/projects/charset-normalizer/rust/README.md ADDED
@@ -0,0 +1,199 @@
1
+ # Charset Normalizer
2
+ [![charset-normalizer-rs on docs.rs][docsrs-image]][docsrs]
3
+ [![charset-normalizer-rs on crates.io][crates-image]][crates]
4
+
5
+ [docsrs-image]: https://docs.rs/charset-normalizer-rs/badge.svg
6
+ [docsrs]: https://docs.rs/charset-normalizer-rs
7
+ [crates-image]: https://img.shields.io/crates/v/charset-normalizer-rs.svg
8
+ [crates]: https://crates.io/crates/charset-normalizer-rs/
9
+
10
+ A library that helps you read text from an unknown charset encoding.<br /> Motivated by the original Python version of `charset-normalizer`,
+ I am trying to resolve the same problem by taking a new approach.
12
+ All IANA character set names for which the Rust `encoding` library provides codecs are supported.
13
+
14
+ This project is a port of the original Python version of [Charset Normalizer](https://github.com/Ousret/charset_normalizer).
15
+ The biggest difference between the Python and Rust versions is the number of supported encodings, as each language has its own encoding / decoding library.
16
+ In the Rust version, only encodings from the [WhatWG standard](https://encoding.spec.whatwg.org) are supported.
17
+ The Python version supports more encodings, but many of them are old and almost unused.
18
+
19
+ ## ⚡ Performance
20
+
21
+ This package offers better performance than the Python version (4 times faster than the MYPYC-compiled version of charset-normalizer, 8 times faster than the plain Python version).
22
+ Compared with the `chardet` and `chardetng` packages it has approximately the same speed but is more accurate.
23
+ Here are some numbers.
24
+
25
+ | Package | Accuracy | Mean per file (ms) | File per sec (est) |
26
+ |---------------------------------------------------------------------------------------------|:----------:|:------------------:|:------------------:|
27
+ | [chardet](https://crates.io/crates/chardet) | 82.6 % | 3 ms | 333 file/sec |
28
+ | [chardetng](https://crates.io/crates/chardetng) | 90.7 % | 1.6 ms | 625 file/sec |
29
+ | charset-normalizer-rs | **97.1 %** | **1.5 ms** | 666 file/sec |
30
+ | [charset-normalizer](https://github.com/Ousret/charset_normalizer) (Python + MYPYC version) | **98 %** | **8 ms** | 125 file/sec |
31
+
32
+ | Package | 99th percentile | 95th percentile | 50th percentile |
33
+ |---------------------------------------------------------------------------------------------|:---------------:|:---------------:|:---------------:|
34
+ | [chardet](https://crates.io/crates/chardet) | 8 ms | 2 ms | 0.2 ms |
35
+ | [chardetng](https://crates.io/crates/chardetng) | 14 ms | 5 ms | 0.5 ms |
36
+ | charset-normalizer-rs | 12 ms | 5 ms | 0.7 ms |
37
+ | [charset-normalizer](https://github.com/Ousret/charset_normalizer) (Python + MYPYC version) | 94 ms | 37 ms | 3 ms |
38
+
39
+ Stats are generated from 400+ files using default parameters. These results might change at any time.
40
+ The dataset can be updated to include more files. The actual delays heavily depend on your CPU capabilities.
41
+ The factors should remain the same. The Rust version's dataset has been reduced, as the number of supported encodings is lower than in the Python version.
42
+
43
+ There is still room to speed up the library, so I'll appreciate any contributions.
44
+
45
+ ## ✨ Installation
46
+
47
+ Library installation:
48
+
49
+ ```console
50
+ cargo add charset-normalizer-rs
51
+ ```
52
+
53
+ Binary CLI tool installation:
54
+ ```console
55
+ cargo install charset-normalizer-rs
56
+ ```
57
+
58
+ ## 🚀 Basic Usage
59
+
60
+ ### CLI
61
+ This package comes with a CLI, which is meant to be compatible with the Python version's CLI tool.
62
+
63
+ ```console
64
+ normalizer -h
65
+ Usage: normalizer [OPTIONS] <FILES>...
66
+
67
+ Arguments:
68
+ <FILES>... File(s) to be analysed
69
+
70
+ Options:
71
+ -v, --verbose Display complementary information about file if any. Stdout will contain logs about the detection process
72
+ -a, --with-alternative Output complementary possibilities if any. Top-level JSON WILL be a list
73
+ -n, --normalize Permit to normalize input file. If not set, program does not write anything
74
+ -m, --minimal Only output the charset detected to STDOUT. Disabling JSON output
75
+ -r, --replace Replace file when trying to normalize it instead of creating a new one
76
+ -f, --force Replace file without asking if you are sure, use this flag with caution
77
+ -t, --threshold <THRESHOLD> Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1 [default: 0.2]
78
+ -h, --help Print help
79
+ -V, --version Print version
80
+ ```
81
+
82
+ ```bash
83
+ normalizer ./data/sample.1.fr.srt
84
+ ```
85
+
86
+ 🎉 The CLI produces an easily usable stdout result in JSON format (it should be the same as in the Python version).
87
+
88
+ ```json
89
+ {
90
+ "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
91
+ "encoding": "cp1252",
92
+ "encoding_aliases": [
93
+ "1252",
94
+ "windows_1252"
95
+ ],
96
+ "alternative_encodings": [
97
+ "cp1254",
98
+ "cp1256",
99
+ "cp1258",
100
+ "iso8859_14",
101
+ "iso8859_15",
102
+ "iso8859_16",
103
+ "iso8859_3",
104
+ "iso8859_9",
105
+ "latin_1",
106
+ "mbcs"
107
+ ],
108
+ "language": "French",
109
+ "alphabets": [
110
+ "Basic Latin",
111
+ "Latin-1 Supplement"
112
+ ],
113
+ "has_sig_or_bom": false,
114
+ "chaos": 0.149,
115
+ "coherence": 97.152,
116
+ "unicode_path": null,
117
+ "is_preferred": true
118
+ }
119
+ ```
120
+
121
+ ### Rust
122
+
123
+ The library offers two main functions. The first one is `from_bytes`, which processes text using bytes as the input parameter:
124
+ ```rust
125
+ use charset_normalizer_rs::from_bytes;
126
+
127
+ fn test_from_bytes() {
128
+ let result = from_bytes(&vec![0x84, 0x31, 0x95, 0x33], None);
129
+ let best_guess = result.get_best();
130
+ assert_eq!(
131
+ best_guess.unwrap().encoding(),
132
+ "gb18030",
133
+ );
134
+ }
135
+ test_from_bytes();
136
+ ```
137
+
138
+ `from_path` processes text using a file path as the input parameter:
139
+ ```rust
140
+ use std::path::PathBuf;
141
+ use charset_normalizer_rs::from_path;
142
+
143
+ fn test_from_path() {
144
+ let result = from_path(&PathBuf::from("src/tests/data/samples/sample-chinese.txt"), None).unwrap();
145
+ let best_guess = result.get_best();
146
+ assert_eq!(
147
+ best_guess.unwrap().encoding(),
148
+ "big5",
149
+ );
150
+ }
151
+ test_from_path();
152
+ ```
153
+
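+ If you need the actual text instead of just the encoding name, each match also exposes the decoded payload and the inferred language. This is a minimal sketch based on the `CharsetMatch` accessors defined in `src/entity.rs`, not an example taken from the upstream documentation:
+ ```rust
+ use std::path::PathBuf;
+ use charset_normalizer_rs::from_path;
+
+ fn print_decoded(path: &str) {
+     // Run detection on the file and take the most probable match, if any
+     let result = from_path(&PathBuf::from(path), None).unwrap();
+     if let Some(best_guess) = result.get_best() {
+         println!("encoding: {}", best_guess.encoding());
+         println!("language: {}", best_guess.most_probably_language());
+         // decoded_payload() returns the text re-decoded with the detected encoding
+         println!("{}", best_guess.decoded_payload().unwrap_or_default());
+     }
+ }
+ ```
+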
154
+ ## 😇 Why
155
+
156
+ When I started using Chardet (Python version), I noticed that it did not meet my expectations, and I wanted to propose a
157
+ reliable alternative using a completely different method. Also! I never back down on a good challenge!
158
+
159
+ I **don't care** about the **originating charset** encoding, because **two different tables** can
160
+ produce **two identical rendered strings.**
161
+ What I want is to get readable text, the best I can.
162
+
163
+ In a way, **I'm brute forcing text decoding.** How cool is that? 😎
164
+
165
+ ## 🍰 How
166
+
167
+ - Discard all charset encoding tables that could not fit the binary content.
168
+ - Measure noise, or the mess once opened (by chunks) with a corresponding charset encoding.
169
+ - Extract matches with the lowest mess detected.
170
+ - Additionally, we measure coherence / probe for a language.
171
+
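+ The whole pipeline can be condensed into a few lines. The sketch below is a deliberately simplified illustration with a toy noise metric and no coherence step; it is not the crate's actual mess/coherence machinery:
+ ```rust
+ use encoding::label::encoding_from_whatwg_label;
+ use encoding::DecoderTrap;
+
+ // Toy noise metric: share of control characters left after decoding.
+ fn mess_ratio(text: &str) -> f32 {
+     let total = text.chars().count().max(1) as f32;
+     let noisy = text
+         .chars()
+         .filter(|c| c.is_control() && !c.is_whitespace())
+         .count() as f32;
+     noisy / total
+ }
+
+ // Discard encodings that cannot decode the payload at all, then keep the
+ // candidate whose decoded output produces the least mess.
+ fn naive_detect<'a>(payload: &[u8], candidates: &[&'a str]) -> Option<&'a str> {
+     let mut best: Option<(&'a str, f32)> = None;
+     for &label in candidates {
+         let Some(codec) = encoding_from_whatwg_label(label) else { continue };
+         let Ok(decoded) = codec.decode(payload, DecoderTrap::Strict) else { continue };
+         let mess = mess_ratio(&decoded);
+         if best.map_or(true, |(_, m)| mess < m) {
+             best = Some((label, mess));
+         }
+     }
+     best.map(|(label, _)| label)
+ }
+ ```
+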
172
+ **Wait a minute**, what is noise/mess and coherence according to **YOU?**
173
+
174
+ *Noise :* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
175
+ **I established** some ground rules about **what is obvious** when **it seems like** a mess.
176
+ I know that my interpretation of what is noise is probably incomplete; feel free to contribute in order to
177
+ improve or rewrite it.
178
+
179
+ *Coherence :* For each language on Earth, we have computed ranked letter appearance occurrences (as best we can). So I thought
180
+ that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
181
+
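+ In the Rust port this comparison boils down to a string similarity between the observed character ranking and a per-language reference ranking (see `characters_popularity_compare` in `src/cd.rs`). A stripped-down illustration, reusing the English row from `src/assets.rs`:
+ ```rust
+ use counter::Counter;
+ use strsim::jaro;
+
+ // Rank the letters of the decoded text by frequency and compare that ranking
+ // with the reference ranking recorded for English.
+ fn english_coherence(decoded: &str) -> f32 {
+     let reference = "eationsrhldcmufpgwbyvkjxzq";
+     let ranked: String = decoded
+         .chars()
+         .filter(|c| c.is_alphabetic())
+         .flat_map(|c| c.to_lowercase())
+         .collect::<Counter<_>>()
+         .most_common_ordered()
+         .into_iter()
+         .map(|(ch, _)| ch)
+         .collect();
+     jaro(&ranked, reference) as f32
+ }
+ ```
+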
182
+ ## ⚡ Known limitations
183
+
184
+ - Language detection is unreliable when the text contains two or more languages sharing identical letters (e.g. HTML with English tags + Turkish content, both sharing Latin characters).
185
+ - Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.
186
+
187
+ ## 👤 Contributing
188
+
189
+ Contributions, issues and feature requests are very much welcome.<br />
190
+ Feel free to check [issues page](https://github.com/nickspring/charset-normalizer-rs/issues) if you want to contribute.
191
+
192
+ ## 📝 License
193
+
194
+ Copyright © [Nikolay Yarovoy @nickspring](https://github.com/nickspring) - porting to Rust. <br />
195
+ Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret) - original Python version and some parts of this document.<br />
196
+ This project is [MIT](https://github.com/nickspring/charset-normalizer-rs/blob/master/LICENSE) licensed.
197
+
198
+ Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
199
+
Dataset_Construction/projects/charset-normalizer/rust/benches/large_datasets.rs ADDED
@@ -0,0 +1,31 @@
1
+ use charset_normalizer_rs::from_path;
2
+ use charset_normalizer_rs::utils::get_large_test_datasets;
3
+ use criterion::BenchmarkId;
4
+ use criterion::{black_box, criterion_group, criterion_main, Criterion};
5
+ use std::path::PathBuf;
6
+
7
+ fn bench_foo(paths: &[String]) {
8
+ for path in paths {
9
+ let _ = from_path(&PathBuf::from(path), None);
10
+ }
11
+ }
12
+
13
+ pub fn large_datasets(c: &mut Criterion) {
14
+ let paths: Vec<String> = get_large_test_datasets()
15
+ .unwrap()
16
+ .iter()
17
+ .map(|v| v.0.clone())
18
+ .collect();
19
+
20
+ let mut group = c.benchmark_group("sample-size-example");
21
+ group.significance_level(0.1).sample_size(10);
22
+ group.bench_with_input(BenchmarkId::new("large_datasets", ""), &paths, |b, s| {
23
+ b.iter(|| {
24
+ bench_foo(s);
25
+ black_box(())
26
+ });
27
+ });
28
+ }
29
+
30
+ criterion_group!(benches, large_datasets);
31
+ criterion_main!(benches);
Dataset_Construction/projects/charset-normalizer/rust/benches/large_payload.rs ADDED
@@ -0,0 +1,18 @@
1
+ use charset_normalizer_rs::consts::TOO_BIG_SEQUENCE;
2
+ use charset_normalizer_rs::from_bytes;
3
+ use criterion::BenchmarkId;
4
+ use criterion::{black_box, criterion_group, criterion_main, Criterion};
5
+
6
+ pub fn large_payload(c: &mut Criterion) {
7
+ let mut payload = b"hello simple ascii "
8
+ .repeat(TOO_BIG_SEQUENCE)
9
+ .as_slice()
10
+ .to_vec();
11
+ payload.extend("我没有埋怨,磋砣的只是一些时间。 磋砣的只是一些时间。".as_bytes());
12
+ c.bench_with_input(BenchmarkId::new("large_payload", ""), &payload, |b, s| {
13
+ b.iter(|| black_box(from_bytes(s, None)));
14
+ });
15
+ }
16
+
17
+ criterion_group!(benches, large_payload);
18
+ criterion_main!(benches);
Dataset_Construction/projects/charset-normalizer/rust/rust-toolchain.toml ADDED
@@ -0,0 +1,2 @@
1
+ [toolchain]
2
+ channel = "1.77.1"
Dataset_Construction/projects/charset-normalizer/rust/src/assets.rs ADDED
@@ -0,0 +1,66 @@
1
+ use crate::entity::Language;
2
+ use ahash::HashMap;
3
+
4
+ use once_cell::sync::Lazy;
5
+ use std::iter::FromIterator;
6
+
7
+ pub(crate) static LANGUAGES: Lazy<[(Language, &'static str, bool, bool); 41]> = Lazy::new(|| {
8
+ [
9
+ // language, alphabet, have_accents, pure_latin
10
+ (Language::English, "eationsrhldcmufpgwbyvkjxzq", false, true, ),
11
+ (Language::English, "eationsrhldcumfpgwybvkxjzq", false, true, ),
12
+ (Language::German, "enirstadhulgocmbfkwzpvüäöj", true, true, ),
13
+ (Language::French, "easnitrluodcpmévgfbhqàxèyj", true, true, ),
14
+ (Language::Dutch, "enairtodslghvmukcpbwjzfyxë", true, true, ),
15
+ (Language::Italian, "eiaonltrscdupmgvfbzhqèàkyò", true, true, ),
16
+ (Language::Polish, "aioenrzwsctkydpmuljłgbhąęó", true, true, ),
17
+ (Language::Spanish, "eaonsrildtcumpbgvfyóhqíjzá", true, true, ),
18
+ (Language::Russian, "оаеинстрвлкмдпугяызбйьчхжц", false, false, ),
19
+ (Language::Japanese, "人一大亅丁丨竹笑口日今二彳行十土丶寸寺時乙丿乂气気冂巾亠市目儿見八小凵県月彐門間木東山出本中刀分耳又取最言田心思刂前京尹事生厶云会未来白冫楽灬馬尸尺駅明耂者了阝都高卜占厂广店子申奄亻俺上方冖学衣艮食自", false, false, ),
20
+ (Language::Japanese, "ーンス・ルトリイアラックドシレジタフロカテマィグバムプオコデニウメサビナブャエュチキズダパミェョハセベガモツネボソノァヴワポペピケゴギザホゲォヤヒユヨヘゼヌゥゾヶヂヲヅヵヱヰヮヽ゠ヾヷヿヸヹヺ", false, false, ),
21
+ (Language::Japanese, "のにるたとはしいをでてがなれからさっりすあもこまうくよきんめおけそつだやえどわちみせじばへびずろほげむべひょゆぶごゃねふぐぎぼゅづざぞぬぜぱぽぷぴぃぁぇぺゞぢぉぅゐゝゑ゛゜ゎゔ゚ゟ゙ゕゖ", false, false, ),
22
+ (Language::Portuguese, "aeosirdntmuclpgvbfhãqéçází", true, true, ),
23
+ (Language::Swedish, "eanrtsildomkgvhfupäcböåyjx", true, true, ),
24
+ (Language::Chinese, "的一是不了在人有我他这个们中来上大为和国地到以说时要就出会可也你对生能而子那得于着下自之年过发后作里用道行所然家种事成方多经么去法学如都同现当没动面起看定天分还进好小部其些主样理心她本前开但因只从想实", false, false, ),
25
+ (Language::Ukrainian, "оаніирвтесклудмпзяьбгйчхцї", false, false, ),
26
+ (Language::Norwegian, "erntasioldgkmvfpubhåyjøcæw", false, true, ),
27
+ (Language::Finnish, "aintesloukämrvjhpydögcbfwz", true, true, ),
28
+ (Language::Vietnamese, "nhticgaoumlràđsevpbyưdákộế", true, true, ),
29
+ (Language::Czech, "oeantsilvrkdumpíchzáyjběéř", true, true, ),
30
+ (Language::Hungarian, "eatlsnkriozáégmbyvdhupjöfc", true, true, ),
31
+ (Language::Korean, "이다에의는로하을가고지서한은기으년대사시를리도인스일", false, false, ),
32
+ (Language::Indonesian, "aneirtusdkmlgpbohyjcwfvzxq", false, true, ),
33
+ (Language::Turkish, "aeinrlıkdtsmyuobüşvgzhcpçğ", true, true, ),
34
+ (Language::Romanian, "eiarntulocsdpmăfvîgbșțzhâj", true, true, ),
35
+ (Language::Farsi, "ایردنهومتبسلکشزفگعخقجآپحطص", false, false, ),
36
+ (Language::Arabic, "اليمونرتبةعدسفهكقأحجشطصىخإ", false, false, ),
37
+ (Language::Danish, "erntaisdlogmkfvubhpåyøæcjw", false, true, ),
38
+ (Language::Serbian, "аиоенрсуткјвдмплгзбaieonцш", false, false, ),
39
+ (Language::Lithuanian, "iasoretnukmlpvdjgėbyųšžcąį", false, true, ),
40
+ (Language::Slovene, "eaionrsltjvkdpmuzbghčcšžfy", false, true, ),
41
+ (Language::Slovak, "oaenirvtslkdmpuchjbzáyýíčé", true, true, ),
42
+ (Language::Hebrew, "יוהלרבתמאשנעםדקחפסכגטצןזך", false, false, ),
43
+ (Language::Bulgarian, "аиоентрсвлкдпмзгяъубчцйжщх", false, false, ),
44
+ (Language::Croatian, "aioenrjstuklvdmpgzbcčhšžćf", true, true, ),
45
+ (Language::Hindi, "करसनतमहपयलवजदगबशटअएथभडचधषइ", false, false, ),
46
+ (Language::Estonian, "aiestlunokrdmvgpjhäbõüfcöy", true, true, ),
47
+ (Language::Thai, "านรอกเงมยลวดทสตะปบคหแจพชขใ", false, false, ),
48
+ (Language::Greek, "ατοιενρσκηπςυμλίόάγέδήωχθύ", false, false, ),
49
+ (Language::Tamil, "கதபடரமலனவறயளசநஇணஅஆழஙஎஉஒஸ", false, false, ),
50
+ (Language::Kazakh, "аыентрлідсмқкобиуғжңзшйпгө", false, false, ),
51
+ ]
52
+ });
53
+ pub(crate) static LANGUAGE_SUPPORTED_COUNT: Lazy<usize> = Lazy::new(|| LANGUAGES.len()); // 41
54
+
55
+ pub(crate) static ENCODING_TO_LANGUAGE: Lazy<HashMap<&'static str, Language>> = Lazy::new(|| {
56
+ HashMap::from_iter([
57
+ ("euc-kr", Language::Korean),
58
+ ("big5", Language::Chinese),
59
+ ("hz", Language::Chinese),
60
+ ("gbk", Language::Chinese),
61
+ ("gb18030", Language::Chinese),
62
+ ("euc-jp", Language::Japanese),
63
+ ("iso-2022-jp", Language::Japanese),
64
+ ("shift_jis", Language::Japanese),
65
+ ])
66
+ });
Dataset_Construction/projects/charset-normalizer/rust/src/cd.rs ADDED
@@ -0,0 +1,252 @@
1
+ #![allow(unused_variables)]
2
+ use crate::assets::{ENCODING_TO_LANGUAGE, LANGUAGES, LANGUAGE_SUPPORTED_COUNT};
3
+ use crate::consts::TOO_SMALL_SEQUENCE;
4
+ use crate::entity::{CoherenceMatch, CoherenceMatches, Language};
5
+ use crate::utils::{
6
+ get_language_data, is_accentuated, is_multi_byte_encoding, is_suspiciously_successive_range,
7
+ is_unicode_range_secondary, unicode_range,
8
+ };
9
+ use ahash::{HashMap, HashMapExt, HashSet};
10
+ use cached::proc_macro::cached;
11
+ use counter::Counter;
12
+ use encoding::label::encoding_from_whatwg_label;
13
+ use encoding::DecoderTrap;
14
+ use ordered_float::OrderedFloat;
15
+ use strsim::jaro;
16
+
17
+ //
18
+ // Coherence detection module
19
+ //
20
+
21
+ // Return the associated Unicode ranges of a single-byte code page.
22
+ pub(crate) fn encoding_unicode_range(iana_name: &str) -> Result<Vec<&str>, String> {
23
+ if is_multi_byte_encoding(iana_name) {
24
+ return Err("Function not supported on multi-byte code page".to_string());
25
+ }
26
+ let encoder = encoding_from_whatwg_label(iana_name)
27
+ .ok_or("No decoder found for this encoding".to_string())?;
28
+
29
+ let byte_range = 0x40..0xFF; // utf8 range. range.len()==191
30
+ let mut result: HashMap<&str, u8> = HashMap::with_capacity(byte_range.len());
31
+
32
+ byte_range.for_each(|i| {
33
+ if let Some(range) = encoder
34
+ .decode(&[i], DecoderTrap::Ignore)
35
+ .ok()
36
+ .and_then(|chunk| chunk.chars().next())
37
+ .and_then(unicode_range)
38
+ .filter(|&range| !is_unicode_range_secondary(range))
39
+ {
40
+ *result.entry(range).or_insert(0) += 1;
41
+ }
42
+ });
43
+ let character_count: u8 = result.values().sum();
44
+ let threshold = 0.15;
45
+ let mut result: Vec<&str> = result
46
+ .iter()
47
+ .filter(|(_, &value)| (value as f32 / character_count as f32) >= threshold)
48
+ .map(|(&name, _)| name)
49
+ .collect();
50
+ result.sort_unstable();
51
+ Ok(result)
52
+ }
53
+
54
+ // Return inferred languages used with a unicode range.
55
+ pub(crate) fn unicode_range_languages(primary_range: &str) -> Vec<&'static Language> {
56
+ LANGUAGES
57
+ .iter()
58
+ .filter_map(|(language, characters, _, _)| {
59
+ characters
60
+ .chars()
61
+ .find(|char| unicode_range(*char).unwrap_or_default() == primary_range)
62
+ .map(|_| language)
63
+ })
64
+ .collect::<Vec<&Language>>()
65
+ }
66
+
67
+ // Single-byte encoding language association.
68
+ // Some code pages are heavily linked to particular language(s).
69
+ // This function does the correspondence.
70
+ #[cached(size = 128)]
71
+ pub(crate) fn encoding_languages(iana_name: String) -> Vec<&'static Language> {
72
+ match encoding_unicode_range(&iana_name)
73
+ .unwrap_or_default()
74
+ .iter()
75
+ .find(|&&range| !range.contains("Latin"))
76
+ {
77
+ Some(&range) => unicode_range_languages(range),
78
+ None => vec![&Language::Unknown],
79
+ }
80
+ }
81
+
82
+ // Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
83
+ // This function does the correspondence.
84
+ pub(crate) fn mb_encoding_languages(iana_name: &str) -> Vec<&'static Language> {
85
+ ENCODING_TO_LANGUAGE
86
+ .get(iana_name)
87
+ .map_or(vec![], |found| vec![found])
88
+ }
89
+
90
+ // Return the languages associated with the given characters
91
+ #[allow(clippy::ptr_arg)]
92
+ pub(crate) fn alphabet_languages(
93
+ characters: &[char],
94
+ ignore_non_latin: bool,
95
+ ) -> Vec<&'static Language> {
96
+ let mut languages: Vec<(&Language, f32)> = Vec::with_capacity(*LANGUAGE_SUPPORTED_COUNT);
97
+ let source_characters_set: HashSet<char> = characters.iter().copied().collect();
98
+ let source_has_accents = source_characters_set
99
+ .iter()
100
+ .any(|&char| is_accentuated(char));
101
+
102
+ for (language, language_characters, target_have_accents, target_pure_latin) in LANGUAGES.iter()
103
+ {
104
+ if (ignore_non_latin && !target_pure_latin) || (!target_have_accents && source_has_accents)
105
+ {
106
+ continue;
107
+ }
108
+
109
+ let language_characters_set: HashSet<char> = language_characters.chars().collect();
110
+ let intersection: HashSet<char> = language_characters_set
111
+ .intersection(&source_characters_set)
112
+ .copied()
113
+ .collect();
114
+
115
+ let ratio: f32 = intersection.len() as f32 / language_characters_set.len() as f32;
116
+ if ratio >= 0.2 {
117
+ languages.push((language, ratio));
118
+ }
119
+ }
120
+ // reverse sort
121
+ languages.sort_unstable_by(|&a, &b| b.1.partial_cmp(&a.1).unwrap());
122
+ languages.iter().map(|&lang| lang.0).collect()
123
+ }
124
+
125
+ // Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
126
+ // Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
127
+ // one containing the Latin letters and the other the Hebrew ones.
128
+ pub(crate) fn alpha_unicode_split(decoded_sequence: &str) -> Vec<String> {
129
+ let mut layers: HashMap<&str, String> = HashMap::new();
130
+
131
+ for ch in decoded_sequence.chars().filter(|c| c.is_alphabetic()) {
132
+ if let Some(character_range) = unicode_range(ch) {
133
+ let layer_key: &str = layers
134
+ .keys()
135
+ .find(|key| !is_suspiciously_successive_range(Some(key), Some(character_range)))
136
+ .copied()
137
+ .unwrap_or(character_range);
138
+ let layer = layers.entry(layer_key).or_default();
139
+ layer.extend(ch.to_lowercase());
140
+ }
141
+ }
142
+ layers.into_values().collect()
143
+ }
144
+
145
+ // Determine if an ordered characters list (by occurrence, from most frequent to rarest) matches a particular language.
146
+ // The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
147
+ // Beware that this function is not strict on the match in order to ease the detection. (Meaning a close match is 1.)
148
+ // Remark: ordered_characters is string here, with chars ordered by popularity.
149
+ // Original function in Python was more complicated and slower
150
+ pub(crate) fn characters_popularity_compare(
151
+ language: &Language,
152
+ ordered_characters: &str,
153
+ ) -> Result<f32, String> {
154
+ let language_data = get_language_data(language)?;
155
+ Ok(jaro(ordered_characters, language_data.0) as f32)
156
+ }
157
+
158
+ // We shall NOT return more than one "English" in CoherenceMatches because it is an alternative
159
+ // of "English" (the same for Japan language). This function only keeps the best match.
160
+ pub(crate) fn filter_alt_coherence_matches(results: &CoherenceMatches) -> CoherenceMatches {
161
+ let mut index: HashMap<&Language, f32> = HashMap::with_capacity(results.len());
162
+ for result in results {
163
+ let score = index.entry(result.language).or_default();
164
+ *score = result.score.max(*score);
165
+ }
166
+ index
167
+ .into_iter()
168
+ .map(|(language, score)| CoherenceMatch { language, score })
169
+ .collect()
170
+ }
171
+
172
+ // This function merges results previously given by the function coherence_ratio.
173
+ // The return type is the same as coherence_ratio.
174
+ pub(crate) fn merge_coherence_ratios(results: &Vec<CoherenceMatches>) -> CoherenceMatches {
175
+ let mut index: HashMap<&Language, Vec<f32>> = HashMap::with_capacity(results.len());
176
+ results
177
+ .iter()
178
+ .flatten()
179
+ .for_each(|result| index.entry(result.language).or_default().push(result.score));
180
+
181
+ let mut merge: Vec<CoherenceMatch> = index
182
+ .iter()
183
+ .map(|(&lang, scores)| CoherenceMatch {
184
+ language: lang,
185
+ score: scores.iter().sum::<f32>() / (scores.len() as f32),
186
+ })
187
+ .collect();
188
+
189
+ merge.sort_unstable_by(|a, b| b.score.partial_cmp(&a.score).unwrap());
190
+ merge
191
+ }
192
+
193
+ // The main function. Detect ANY language that can be identified in given sequence.
194
+ // The sequence will be analysed by layers.
195
+ // A layer = Character extraction by alphabets/ranges.
196
+ #[cached(size = 2048)]
197
+ pub(crate) fn coherence_ratio(
198
+ decoded_sequence: String,
199
+ threshold: Option<OrderedFloat<f32>>,
200
+ include_languages: Option<Vec<&'static Language>>,
201
+ ) -> Result<CoherenceMatches, String> {
202
+ let threshold = f32::from(threshold.unwrap_or(OrderedFloat(0.1)));
203
+ let mut include_languages: Vec<&Language> = include_languages.unwrap_or_default();
204
+ let ignore_non_latin = include_languages == vec![&Language::Unknown];
205
+ if ignore_non_latin {
206
+ include_languages.clear();
207
+ }
208
+
209
+ let mut results: CoherenceMatches = vec![];
210
+ let mut sufficient_match_count: u64 = 0;
211
+
212
+ for layer in alpha_unicode_split(&decoded_sequence) {
213
+ if layer.chars().count() <= TOO_SMALL_SEQUENCE {
214
+ continue;
215
+ }
216
+ let most_common = layer.chars().collect::<Counter<_>>().most_common_ordered();
217
+ let popular_character_ordered: Vec<char> = most_common.iter().map(|(ch, _)| *ch).collect();
218
+
219
+ let languages = if include_languages.is_empty() {
220
+ alphabet_languages(&popular_character_ordered, ignore_non_latin)
221
+ } else {
222
+ include_languages.clone()
223
+ };
224
+
225
+ let popular_character_ordered_as_string: String =
226
+ popular_character_ordered.iter().collect();
227
+
228
+ // Convert the String into a &str
229
+ for language in languages {
230
+ let ratio: f32 =
231
+ characters_popularity_compare(language, &popular_character_ordered_as_string)?;
232
+
233
+ match ratio {
234
+ r if r < threshold => continue,
235
+ r if r >= 0.8 => sufficient_match_count += 1,
236
+ _ => {}
237
+ }
238
+
239
+ results.push(CoherenceMatch {
240
+ language,
241
+ score: ratio,
242
+ });
243
+
244
+ if sufficient_match_count >= 3 {
245
+ break;
246
+ }
247
+ }
248
+ }
249
+ results = filter_alt_coherence_matches(&results);
250
+ results.sort_unstable_by(|a, b| b.score.partial_cmp(&a.score).unwrap());
251
+ Ok(results)
252
+ }
Dataset_Construction/projects/charset-normalizer/rust/src/consts.rs ADDED
@@ -0,0 +1,752 @@
1
+ use ahash::{HashMap, HashSet};
2
+ use core::ops::RangeInclusive;
3
+ use encoding::all::encodings;
4
+ use once_cell::sync::Lazy;
5
+ use regex::Regex;
6
+
7
+ pub static TOO_BIG_SEQUENCE: usize = 1_000_000; // 1E6
8
+ pub(crate) static MAX_PROCESSED_BYTES: usize = 500_000;
9
+ pub(crate) static TOO_SMALL_SEQUENCE: usize = 32;
10
+ pub(crate) static UTF8_MAXIMAL_ALLOCATION: usize = 1_112_064;
11
+ pub(crate) static COMMON_SAFE_ASCII_CHARACTERS: &'static str = "<>=:/&;{}[],|\"-";
12
+
13
+ // For each eligible encoding, the bytes of its SIG/BOM
14
+ pub(crate) static ENCODING_MARKS: Lazy<HashMap<&'static str, &'static [u8]>> = Lazy::new(|| {
15
+ HashMap::from_iter([
16
+ ("utf-8", b"\xef\xbb\xbf".as_slice()),
17
+ ("gb18030", b"\x84\x31\x95\x33".as_slice()),
18
+ ("utf-16le", b"\xff\xfe".as_slice()),
19
+ ("utf-16be", b"\xfe\xff".as_slice()),
20
+ ])
21
+ });
22
+
23
+ pub(crate) static UNICODE_RANGES_COMBINED: Lazy<[(&'static str, RangeInclusive<u32>); 279]> =
24
+ Lazy::new(|| {
25
+ [
26
+ ("Control character", 0..=31),
27
+ ("Basic Latin", 32..=127),
28
+ ("Latin-1 Supplement", 128..=255),
29
+ ("Latin Extended-A", 256..=383),
30
+ ("Latin Extended-B", 384..=591),
31
+ ("IPA Extensions", 592..=687),
32
+ ("Spacing Modifier Letters", 688..=767),
33
+ ("Combining Diacritical Marks", 768..=879),
34
+ ("Greek and Coptic", 880..=1023),
35
+ ("Cyrillic", 1024..=1279),
36
+ ("Cyrillic Supplement", 1280..=1327),
37
+ ("Armenian", 1328..=1423),
38
+ ("Hebrew", 1424..=1535),
39
+ ("Arabic", 1536..=1791),
40
+ ("Syriac", 1792..=1871),
41
+ ("Arabic Supplement", 1872..=1919),
42
+ ("Thaana", 1920..=1983),
43
+ ("NKo", 1984..=2047),
44
+ ("Samaritan", 2048..=2111),
45
+ ("Mandaic", 2112..=2143),
46
+ ("Syriac Supplement", 2144..=2159),
47
+ ("Arabic Extended-A", 2208..=2303),
48
+ ("Devanagari", 2304..=2431),
49
+ ("Bengali", 2432..=2559),
50
+ ("Gurmukhi", 2560..=2687),
51
+ ("Gujarati", 2688..=2815),
52
+ ("Oriya", 2816..=2943),
53
+ ("Tamil", 2944..=3071),
54
+ ("Telugu", 3072..=3199),
55
+ ("Kannada", 3200..=3327),
56
+ ("Malayalam", 3328..=3455),
57
+ ("Sinhala", 3456..=3583),
58
+ ("Thai", 3584..=3711),
59
+ ("Lao", 3712..=3839),
60
+ ("Tibetan", 3840..=4095),
61
+ ("Myanmar", 4096..=4255),
62
+ ("Georgian", 4256..=4351),
63
+ ("Hangul Jamo", 4352..=4607),
64
+ ("Ethiopic", 4608..=4991),
65
+ ("Ethiopic Supplement", 4992..=5023),
66
+ ("Cherokee", 5024..=5119),
67
+ ("Unified Canadian Aboriginal Syllabics", 5120..=5759),
68
+ ("Ogham", 5760..=5791),
69
+ ("Runic", 5792..=5887),
70
+ ("Tagalog", 5888..=5919),
71
+ ("Hanunoo", 5920..=5951),
72
+ ("Buhid", 5952..=5983),
73
+ ("Tagbanwa", 5984..=6015),
74
+ ("Khmer", 6016..=6143),
75
+ ("Mongolian", 6144..=6319),
76
+ (
77
+ "Unified Canadian Aboriginal Syllabics Extended",
78
+ 6320..=6399,
79
+ ),
80
+ ("Limbu", 6400..=6479),
81
+ ("Tai Le", 6480..=6527),
82
+ ("New Tai Lue", 6528..=6623),
83
+ ("Khmer Symbols", 6624..=6655),
84
+ ("Buginese", 6656..=6687),
85
+ ("Tai Tham", 6688..=6831),
86
+ ("Combining Diacritical Marks Extended", 6832..=6911),
87
+ ("Balinese", 6912..=7039),
88
+ ("Sundanese", 7040..=7103),
89
+ ("Batak", 7104..=7167),
90
+ ("Lepcha", 7168..=7247),
91
+ ("Ol Chiki", 7248..=7295),
92
+ ("Cyrillic Extended C", 7296..=7311),
93
+ ("Sundanese Supplement", 7360..=7375),
94
+ ("Vedic Extensions", 7376..=7423),
95
+ ("Phonetic Extensions", 7424..=7551),
96
+ ("Phonetic Extensions Supplement", 7552..=7615),
97
+ ("Combining Diacritical Marks Supplement", 7616..=7679),
98
+ ("Latin Extended Additional", 7680..=7935),
99
+ ("Greek Extended", 7936..=8191),
100
+ ("General Punctuation", 8192..=8303),
101
+ ("Superscripts and Subscripts", 8304..=8351),
102
+ ("Currency Symbols", 8352..=8399),
103
+ ("Combining Diacritical Marks for Symbols", 8400..=8447),
104
+ ("Letterlike Symbols", 8448..=8527),
105
+ ("Number Forms", 8528..=8591),
106
+ ("Arrows", 8592..=8703),
107
+ ("Mathematical Operators", 8704..=8959),
108
+ ("Miscellaneous Technical", 8960..=9215),
109
+ ("Control Pictures", 9216..=9279),
110
+ ("Optical Character Recognition", 9280..=9311),
111
+ ("Enclosed Alphanumerics", 9312..=9471),
112
+ ("Box Drawing", 9472..=9599),
113
+ ("Block Elements", 9600..=9631),
114
+ ("Geometric Shapes", 9632..=9727),
115
+ ("Miscellaneous Symbols", 9728..=9983),
116
+ ("Dingbats", 9984..=10175),
117
+ ("Miscellaneous Mathematical Symbols-A", 10176..=10223),
118
+ ("Supplemental Arrows-A", 10224..=10239),
119
+ ("Braille Patterns", 10240..=10495),
120
+ ("Supplemental Arrows-B", 10496..=10623),
121
+ ("Miscellaneous Mathematical Symbols-B", 10624..=10751),
122
+ ("Supplemental Mathematical Operators", 10752..=11007),
123
+ ("Miscellaneous Symbols and Arrows", 11008..=11263),
124
+ ("Glagolitic", 11264..=11359),
125
+ ("Latin Extended-C", 11360..=11391),
126
+ ("Coptic", 11392..=11519),
127
+ ("Georgian Supplement", 11520..=11567),
128
+ ("Tifinagh", 11568..=11647),
129
+ ("Ethiopic Extended", 11648..=11743),
130
+ ("Cyrillic Extended-A", 11744..=11775),
131
+ ("Supplemental Punctuation", 11776..=11903),
132
+ ("CJK Radicals Supplement", 11904..=12031),
133
+ ("Kangxi Radicals", 12032..=12255),
134
+ ("Ideographic Description Characters", 12272..=12287),
135
+ ("CJK Symbols and Punctuation", 12288..=12351),
136
+ ("Hiragana", 12352..=12447),
137
+ ("Katakana", 12448..=12543),
138
+ ("Bopomofo", 12544..=12591),
139
+ ("Hangul Compatibility Jamo", 12592..=12687),
140
+ ("Kanbun", 12688..=12703),
141
+ ("Bopomofo Extended", 12704..=12735),
142
+ ("CJK Strokes", 12736..=12783),
143
+ ("Katakana Phonetic Extensions", 12784..=12799),
144
+ ("Enclosed CJK Letters and Months", 12800..=13055),
145
+ ("CJK Compatibility", 13056..=13311),
146
+ ("CJK Unified Ideographs Extension A", 13312..=19903),
147
+ ("Yijing Hexagram Symbols", 19904..=19967),
148
+ ("CJK Unified Ideographs", 19968..=40959),
149
+ ("Yi Syllables", 40960..=42127),
150
+ ("Yi Radicals", 42128..=42191),
151
+ ("Lisu", 42192..=42239),
152
+ ("Vai", 42240..=42559),
153
+ ("Cyrillic Extended-B", 42560..=42655),
154
+ ("Bamum", 42656..=42751),
155
+ ("Modifier Tone Letters", 42752..=42783),
156
+ ("Latin Extended-D", 42784..=43007),
157
+ ("Syloti Nagri", 43008..=43055),
158
+ ("Common Indic Number Forms", 43056..=43071),
159
+ ("Phags-pa", 43072..=43135),
160
+ ("Saurashtra", 43136..=43231),
161
+ ("Devanagari Extended", 43232..=43263),
162
+ ("Kayah Li", 43264..=43311),
163
+ ("Rejang", 43312..=43359),
164
+ ("Hangul Jamo Extended-A", 43360..=43391),
165
+ ("Javanese", 43392..=43487),
166
+ ("Myanmar Extended-B", 43488..=43519),
167
+ ("Cham", 43520..=43615),
168
+ ("Myanmar Extended-A", 43616..=43647),
169
+ ("Tai Viet", 43648..=43743),
170
+ ("Meetei Mayek Extensions", 43744..=43775),
171
+ ("Ethiopic Extended-A", 43776..=43823),
172
+ ("Latin Extended-E", 43824..=43887),
173
+ ("Cherokee Supplement", 43888..=43967),
174
+ ("Meetei Mayek", 43968..=44031),
175
+ ("Hangul Syllables", 44032..=55215),
176
+ ("Hangul Jamo Extended-B", 55216..=55295),
177
+ ("High Surrogates", 55296..=56191),
178
+ ("High Private Use Surrogates", 56192..=56319),
179
+ ("Low Surrogates", 56320..=57343),
180
+ ("Private Use Area", 57344..=63743),
181
+ ("CJK Compatibility Ideographs", 63744..=64255),
182
+ ("Alphabetic Presentation Forms", 64256..=64335),
183
+ ("Arabic Presentation Forms-A", 64336..=65023),
184
+ ("Variation Selectors", 65024..=65039),
185
+ ("Vertical Forms", 65040..=65055),
186
+ ("Combining Half Marks", 65056..=65071),
187
+ ("CJK Compatibility Forms", 65072..=65103),
188
+ ("Small Form Variants", 65104..=65135),
189
+ ("Arabic Presentation Forms-B", 65136..=65279),
190
+ ("Halfwidth and Fullwidth Forms", 65280..=65519),
191
+ ("Specials", 65520..=65535),
192
+ ("Linear B Syllabary", 65536..=65663),
193
+ ("Linear B Ideograms", 65664..=65791),
194
+ ("Aegean Numbers", 65792..=65855),
195
+ ("Ancient Greek Numbers", 65856..=65935),
196
+ ("Ancient Symbols", 65936..=65999),
197
+ ("Phaistos Disc", 66000..=66047),
198
+ ("Lycian", 66176..=66207),
199
+ ("Carian", 66208..=66271),
200
+ ("Coptic Epact Numbers", 66272..=66303),
201
+ ("Old Italic", 66304..=66351),
202
+ ("Gothic", 66352..=66383),
203
+ ("Old Permic", 66384..=66431),
204
+ ("Ugaritic", 66432..=66463),
205
+ ("Old Persian", 66464..=66527),
206
+ ("Deseret", 66560..=66639),
207
+ ("Shavian", 66640..=66687),
208
+ ("Osmanya", 66688..=66735),
209
+ ("Osage", 66736..=66815),
210
+ ("Elbasan", 66816..=66863),
211
+ ("Caucasian Albanian", 66864..=66927),
212
+ ("Linear A", 67072..=67455),
213
+ ("Cypriot Syllabary", 67584..=67647),
214
+ ("Imperial Aramaic", 67648..=67679),
215
+ ("Palmyrene", 67680..=67711),
216
+ ("Nabataean", 67712..=67759),
217
+ ("Hatran", 67808..=67839),
218
+ ("Phoenician", 67840..=67871),
219
+ ("Lydian", 67872..=67903),
220
+ ("Meroitic Hieroglyphs", 67968..=67999),
221
+ ("Meroitic Cursive", 68000..=68095),
222
+ ("Kharoshthi", 68096..=68191),
223
+ ("Old South Arabian", 68192..=68223),
224
+ ("Old North Arabian", 68224..=68255),
225
+ ("Manichaean", 68288..=68351),
226
+ ("Avestan", 68352..=68415),
227
+ ("Inscriptional Parthian", 68416..=68447),
228
+ ("Inscriptional Pahlavi", 68448..=68479),
229
+ ("Psalter Pahlavi", 68480..=68527),
230
+ ("Old Turkic", 68608..=68687),
231
+ ("Old Hungarian", 68736..=68863),
232
+ ("Rumi Numeral Symbols", 69216..=69247),
233
+ ("Brahmi", 69632..=69759),
234
+ ("Kaithi", 69760..=69839),
235
+ ("Sora Sompeng", 69840..=69887),
236
+ ("Chakma", 69888..=69967),
237
+ ("Mahajani", 69968..=70015),
238
+ ("Sharada", 70016..=70111),
239
+ ("Sinhala Archaic Numbers", 70112..=70143),
240
+ ("Khojki", 70144..=70223),
241
+ ("Multani", 70272..=70319),
242
+ ("Khudawadi", 70320..=70399),
243
+ ("Grantha", 70400..=70527),
244
+ ("Newa", 70656..=70783),
245
+ ("Tirhuta", 70784..=70879),
246
+ ("Siddham", 71040..=71167),
247
+ ("Modi", 71168..=71263),
248
+ ("Mongolian Supplement", 71264..=71295),
249
+ ("Takri", 71296..=71375),
250
+ ("Ahom", 71424..=71487),
251
+ ("Warang Citi", 71840..=71935),
252
+ ("Zanabazar Square", 72192..=72271),
253
+ ("Soyombo", 72272..=72367),
254
+ ("Pau Cin Hau", 72384..=72447),
255
+ ("Bhaiksuki", 72704..=72815),
256
+ ("Marchen", 72816..=72895),
257
+ ("Masaram Gondi", 72960..=73055),
258
+ ("Cuneiform", 73728..=74751),
259
+ ("Cuneiform Numbers and Punctuation", 74752..=74879),
260
+ ("Early Dynastic Cuneiform", 74880..=75087),
261
+ ("Egyptian Hieroglyphs", 77824..=78895),
262
+ ("Anatolian Hieroglyphs", 82944..=83583),
263
+ ("Bamum Supplement", 92160..=92735),
264
+ ("Mro", 92736..=92783),
265
+ ("Bassa Vah", 92880..=92927),
266
+ ("Pahawh Hmong", 92928..=93071),
267
+ ("Miao", 93952..=94111),
268
+ ("Ideographic Symbols and Punctuation", 94176..=94207),
269
+ ("Tangut", 94208..=100_351),
270
+ ("Tangut Components", 100_352..=101_119),
271
+ ("Kana Supplement", 110_592..=110_847),
272
+ ("Kana Extended-A", 110_848..=110_895),
273
+ ("Nushu", 110_960..=111_359),
274
+ ("Duployan", 113_664..=113_823),
275
+ ("Shorthand Format Controls", 113_824..=113_839),
276
+ ("Byzantine Musical Symbols", 118_784..=119_039),
277
+ ("Musical Symbols", 119_040..=119_295),
278
+ ("Ancient Greek Musical Notation", 119_296..=119_375),
279
+ ("Tai Xuan Jing Symbols", 119_552..=119_647),
280
+ ("Counting Rod Numerals", 119_648..=119_679),
281
+ ("Mathematical Alphanumeric Symbols", 119_808..=120_831),
282
+ ("Sutton SignWriting", 120_832..=121_519),
283
+ ("Glagolitic Supplement", 122_880..=122_927),
284
+ ("Mende Kikakui", 124_928..=125_151),
285
+ ("Adlam", 125_184..=125_279),
286
+ ("Arabic Mathematical Alphabetic Symbols", 126_464..=126_719),
287
+ ("Mahjong Tiles", 126_976..=127_023),
288
+ ("Domino Tiles", 127_024..=127_135),
289
+ ("Playing Cards", 127_136..=127_231),
290
+ ("Enclosed Alphanumeric Supplement", 127_232..=127_487),
291
+ ("Enclosed Ideographic Supplement", 127_488..=127_743),
292
+ ("Miscellaneous Symbols and Pictographs", 127_744..=128_511),
293
+ ("Emoticons range(Emoji)", 128_512..=128_591),
294
+ ("Ornamental Dingbats", 128_592..=128_639),
295
+ ("Transport and Map Symbols", 128_640..=128_767),
296
+ ("Alchemical Symbols", 128_768..=128_895),
297
+ ("Geometric Shapes Extended", 128_896..=129_023),
298
+ ("Supplemental Arrows-C", 129_024..=129_279),
299
+ ("Supplemental Symbols and Pictographs", 129_280..=129_535),
300
+ ("CJK Unified Ideographs Extension B", 131_072..=173_791),
301
+ ("CJK Unified Ideographs Extension C", 173_824..=177_983),
302
+ ("CJK Unified Ideographs Extension D", 177_984..=178_207),
303
+ ("CJK Unified Ideographs Extension E", 178_208..=183_983),
304
+ ("CJK Unified Ideographs Extension F", 183_984..=191_471),
305
+ ("CJK Compatibility Ideographs Supplement", 194_560..=195_103),
306
+ ("Tags", 917_504..=917_631),
307
+ ("Variation Selectors Supplement", 917_760..=917_999),
308
+ ]
309
+ });
310
+
311
+ pub(crate) static UNICODE_SECONDARY_RANGE_KEYWORD: Lazy<HashSet<&'static str>> = Lazy::new(|| {
312
+ HashSet::from_iter([
313
+ "Extended",
314
+ "Extensions",
315
+ "Modifier",
316
+ "Marks",
317
+ "Punctuation",
318
+ "Symbols",
319
+ "Forms",
320
+ "Operators",
321
+ "Miscellaneous",
322
+ "Drawing",
323
+ "Block",
324
+ "Shapes",
325
+ "Supplemental",
326
+ "Supplement",
327
+ "Tags",
328
+ ])
329
+ });
330
+
331
+ pub(crate) static RE_POSSIBLE_ENCODING_INDICATION: Lazy<Regex> = Lazy::new(|| {
332
+ Regex::new(
333
+ r#"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:["']?)([a-zA-Z0-9\-_]+)(?:["']?)"#
334
+ ).unwrap()
335
+ });
336
+
337
+ pub static IANA_SUPPORTED: Lazy<Vec<&'static str>> = Lazy::new(|| {
338
+ encodings()
339
+ .iter()
340
+ .filter(|&enc| !["error", "encoder-only-utf-8", "pua-mapped-binary"].contains(&enc.name()))
341
+ .map(|&enc| enc.whatwg_name().unwrap_or(enc.name()))
342
+ .collect()
343
+ });
344
+
345
+ pub static IANA_SUPPORTED_COUNT: Lazy<usize> = Lazy::new(|| IANA_SUPPORTED.len());
346
+
347
+ // chardet encoding names (in lowercase!)
348
+ pub static CHARDET_CORRESPONDENCE: Lazy<HashMap<&'static str, &'static str>> = Lazy::new(|| {
349
+ HashMap::from_iter([
350
+ ("tis-620", "windows-874"),
351
+ ("utf-16", "utf-16le"),
352
+ ("maccyrillic", "x-mac-cyrillic"),
353
+ ("gb2312", "gbk"),
354
+ ("cp949", "euc-kr"),
355
+ ])
356
+ });
357
+
358
+ // aliases (labels) are from https://encoding.spec.whatwg.org/#concept-encoding-get -> as is + lowercased
359
+ pub static IANA_SUPPORTED_ALIASES: Lazy<HashMap<&'static str, Vec<&'static str>>> =
360
+ Lazy::new(|| {
361
+ HashMap::from_iter([
362
+ (
363
+ "utf-8",
364
+ vec![
365
+ "unicode-1-1-utf-8",
366
+ "unicode11utf8",
367
+ "unicode20utf8",
368
+ "utf-8",
369
+ "utf8",
370
+ "x-unicode20utf8",
371
+ ],
372
+ ),
373
+ ("ibm866", vec!["866", "cp866", "csibm866", "ibm866"]),
374
+ (
375
+ "iso-8859-2",
376
+ vec![
377
+ "csisolatin2",
378
+ "iso-8859-2",
379
+ "iso-ir-101",
380
+ "iso8859-2",
381
+ "iso88592",
382
+ "iso_8859-2",
383
+ "iso_8859-2:1987",
384
+ "l2",
385
+ "latin2",
386
+ ],
387
+ ),
388
+ (
389
+ "iso-8859-3",
390
+ vec![
391
+ "csisolatin3",
392
+ "iso-8859-3",
393
+ "iso-ir-109",
394
+ "iso8859-3",
395
+ "iso88593",
396
+ "iso_8859-3",
397
+ "iso_8859-3:1988",
398
+ "l3",
399
+ "latin3",
400
+ ],
401
+ ),
402
+ (
403
+ "iso-8859-4",
404
+ vec![
405
+ "csisolatin4",
406
+ "iso-8859-4",
407
+ "iso-ir-110",
408
+ "iso8859-4",
409
+ "iso88594",
410
+ "iso_8859-4",
411
+ "iso_8859-4:1988",
412
+ "l4",
413
+ "latin4",
414
+ ],
415
+ ),
416
+ (
417
+ "iso-8859-5",
418
+ vec![
419
+ "csisolatincyrillic",
420
+ "cyrillic",
421
+ "iso-8859-5",
422
+ "iso-ir-144",
423
+ "iso8859-5",
424
+ "iso88595",
425
+ "iso_8859-5",
426
+ "iso_8859-5:1988",
427
+ ],
428
+ ),
429
+ (
430
+ "iso-8859-6",
431
+ vec![
432
+ "arabic",
433
+ "asmo-708",
434
+ "csiso88596e",
435
+ "csiso88596i",
436
+ "csisolatinarabic",
437
+ "ecma-114",
438
+ "iso-8859-6",
439
+ "iso-8859-6-e",
440
+ "iso-8859-6-i",
441
+ "iso-ir-127",
442
+ "iso8859-6",
443
+ "iso88596",
444
+ "iso_8859-6",
445
+ "iso_8859-6:1987",
446
+ ],
447
+ ),
448
+ (
449
+ "iso-8859-7",
450
+ vec![
451
+ "csisolatingreek",
452
+ "ecma-118",
453
+ "elot_928",
454
+ "greek",
455
+ "greek8",
456
+ "iso-8859-7",
457
+ "iso-ir-126",
458
+ "iso8859-7",
459
+ "iso88597",
460
+ "iso_8859-7",
461
+ "iso_8859-7:1987",
462
+ "sun_eu_greek",
463
+ ],
464
+ ),
465
+ (
466
+ "iso-8859-8",
467
+ vec![
468
+ "csiso88598e",
469
+ "csisolatinhebrew",
470
+ "hebrew",
471
+ "iso-8859-8",
472
+ "iso-8859-8-e",
473
+ "iso-ir-138",
474
+ "iso8859-8",
475
+ "iso88598",
476
+ "iso_8859-8",
477
+ "iso_8859-8:1988",
478
+ "visual",
479
+ ],
480
+ ),
481
+ (
482
+ "iso-8859-8-i",
483
+ vec!["csiso88598i", "iso-8859-8-i", "logical"],
484
+ ),
485
+ (
486
+ "iso-8859-10",
487
+ vec![
488
+ "csisolatin6",
489
+ "iso-8859-10",
490
+ "iso-ir-157",
491
+ "iso8859-10",
492
+ "iso885910",
493
+ "l6",
494
+ "latin6",
495
+ ],
496
+ ),
497
+ (
498
+ "iso-8859-13",
499
+ vec!["iso-8859-13", "iso8859-13", "iso885913"],
500
+ ),
501
+ (
502
+ "iso-8859-14",
503
+ vec!["iso-8859-14", "iso8859-14", "iso885914"],
504
+ ),
505
+ (
506
+ "iso-8859-15",
507
+ vec![
508
+ "csisolatin9",
509
+ "iso-8859-15",
510
+ "iso8859-15",
511
+ "iso885915",
512
+ "iso_8859-15",
513
+ "l9",
514
+ ],
515
+ ),
516
+ ("iso-8859-16", vec!["iso-8859-16"]),
517
+ ("koi8-r", vec!["cskoi8r", "koi", "koi8", "koi8-r", "koi8_r"]),
518
+ ("koi8-u", vec!["koi8-ru", "koi8-u"]),
519
+ (
520
+ "macintosh",
521
+ vec!["csmacintosh", "mac", "macintosh", "x-mac-roman"],
522
+ ),
523
+ (
524
+ "windows-874",
525
+ vec![
526
+ "dos-874",
527
+ "iso-8859-11",
528
+ "iso8859-11",
529
+ "iso885911",
530
+ "tis-620",
531
+ "windows-874",
532
+ ],
533
+ ),
534
+ ("windows-1250", vec!["cp1250", "windows-1250", "x-cp1250"]),
535
+ ("windows-1251", vec!["cp1251", "windows-1251", "x-cp1251"]),
536
+ (
537
+ "windows-1252",
538
+ vec![
539
+ "ansi_x3.4-1968",
540
+ "ascii",
541
+ "cp1252",
542
+ "cp819",
543
+ "csisolatin1",
544
+ "ibm819",
545
+ "iso-8859-1",
546
+ "iso-ir-100",
547
+ "iso8859-1",
548
+ "iso88591",
549
+ "iso_8859-1",
550
+ "iso_8859-1:1987",
551
+ "l1",
552
+ "latin1",
553
+ "us-ascii",
554
+ "windows-1252",
555
+ "x-cp1252",
556
+ ],
557
+ ),
558
+ ("windows-1253", vec!["cp1253", "windows-1253", "x-cp1253"]),
559
+ (
560
+ "windows-1254",
561
+ vec![
562
+ "cp1254",
563
+ "csisolatin5",
564
+ "iso-8859-9",
565
+ "iso-ir-148",
566
+ "iso8859-9",
567
+ "iso88599",
568
+ "iso_8859-9",
569
+ "iso_8859-9:1989",
570
+ "l5",
571
+ "latin5",
572
+ "windows-1254",
573
+ "x-cp1254",
574
+ ],
575
+ ),
576
+ ("windows-1255", vec!["cp1255", "windows-1255", "x-cp1255"]),
577
+ ("windows-1256", vec!["cp1256", "windows-1256", "x-cp1256"]),
578
+ ("windows-1257", vec!["cp1257", "windows-1257", "x-cp1257"]),
579
+ ("windows-1258", vec!["cp1258", "windows-1258", "x-cp1258"]),
580
+ ("x-mac-cyrillic", vec!["x-mac-cyrillic", "x-mac-ukrainian"]),
581
+ (
582
+ "gbk",
583
+ vec![
584
+ "chinese",
585
+ "csgb2312",
586
+ "csiso58gb231280",
587
+ "gb2312",
588
+ "gb_2312",
589
+ "gb_2312-80",
590
+ "gbk",
591
+ "iso-ir-58",
592
+ "x-gbk",
593
+ ],
594
+ ),
595
+ ("gb18030", vec!["gb18030"]),
596
+ (
597
+ "big5",
598
+ vec!["big5", "big5-hkscs", "cn-big5", "csbig5", "x-x-big5"],
599
+ ),
600
+ ("euc-jp", vec!["cseucpkdfmtjapanese", "euc-jp", "x-euc-jp"]),
601
+ ("iso-2022-jp", vec!["csiso2022jp", "iso-2022-jp"]),
602
+ (
603
+ "shift_jis",
604
+ vec![
605
+ "csshiftjis",
606
+ "ms932",
607
+ "ms_kanji",
608
+ "shift-jis",
609
+ "shift_jis",
610
+ "sjis",
611
+ "windows-31j",
612
+ "x-sjis",
613
+ ],
614
+ ),
615
+ (
616
+ "euc-kr",
617
+ vec![
618
+ "cseuckr",
619
+ "csksc56011987",
620
+ "euc-kr",
621
+ "iso-ir-149",
622
+ "korean",
623
+ "ks_c_5601-1987",
624
+ "ks_c_5601-1989",
625
+ "ksc5601",
626
+ "ksc_5601",
627
+ "windows-949",
628
+ ],
629
+ ),
630
+ (
631
+ "replacement",
632
+ vec![
633
+ "csiso2022kr",
634
+ "hz-gb-2312",
635
+ "iso-2022-cn",
636
+ "iso-2022-cn-ext",
637
+ "iso-2022-kr",
638
+ "replacement",
639
+ ],
640
+ ),
641
+ ("utf-16be", vec!["unicodefffe", "utf-16be"]),
642
+ (
643
+ "utf-16le",
644
+ vec![
645
+ "csunicode",
646
+ "iso-10646-ucs-2",
647
+ "ucs-2",
648
+ "unicode",
649
+ "unicodefeff",
650
+ "utf-16",
651
+ "utf-16le",
652
+ ],
653
+ ),
654
+ ("x-user-defined", vec!["x-user-defined"]),
655
+ ])
656
+ });
657
+
658
+ pub static IANA_SUPPORTED_SIMILAR: Lazy<HashMap<&'static str, Vec<&'static str>>> =
659
+ Lazy::new(|| {
660
+ HashMap::from_iter([
661
+ ("windows-1252", vec!["iso-8859-15", "windows-1254"]),
662
+ ("windows-1253", vec!["iso-8859-7"]),
663
+ ("windows-1254", vec!["iso-8859-15", "windows-1252"]),
664
+ ("windows-1257", vec!["iso-8859-13"]),
665
+ (
666
+ "iso-8859-10",
667
+ vec![
668
+ "iso-8859-14",
669
+ "iso-8859-15",
670
+ "iso-8859-4",
671
+ "windows-1254",
672
+ "windows-1252",
673
+ ],
674
+ ),
675
+ ("iso-8859-13", vec!["windows-1257"]),
676
+ (
677
+ "iso-8859-14",
678
+ vec![
679
+ "iso-8859-10",
680
+ "iso-8859-15",
681
+ "iso-8859-16",
682
+ "iso-8859-3",
683
+ "windows-1254",
684
+ "windows-1252",
685
+ ],
686
+ ),
687
+ (
688
+ "iso-8859-15",
689
+ vec![
690
+ "windows-1252",
691
+ "windows-1254",
692
+ "iso-8859-10",
693
+ "iso-8859-14",
694
+ "iso-8859-16",
695
+ "iso-8859-3",
696
+ ],
697
+ ),
698
+ (
699
+ "iso-8859-16",
700
+ vec![
701
+ "iso-8859-14",
702
+ "iso-8859-15",
703
+ "iso-8859-2",
704
+ "iso-8859-3",
705
+ "windows-1254",
706
+ "windows-1252",
707
+ ],
708
+ ),
709
+ ("iso-8859-2", vec!["iso-8859-16", "iso-8859-4"]),
710
+ (
711
+ "iso-8859-3",
712
+ vec![
713
+ "iso-8859-14",
714
+ "iso-8859-15",
715
+ "iso-8859-16",
716
+ "windows-1254",
717
+ "windows-1252",
718
+ ],
719
+ ),
720
+ (
721
+ "iso-8859-4",
722
+ vec!["iso-8859-10", "iso-8859-2", "windows-1254", "windows-1252"],
723
+ ),
724
+ ("iso-8859-7", vec!["windows-1253"]),
725
+ (
726
+ "windows-1254",
727
+ vec![
728
+ "windows-1252",
729
+ "windows-1258",
730
+ "iso-8859-10",
731
+ "iso-8859-14",
732
+ "iso-8859-15",
733
+ "iso-8859-16",
734
+ "iso-8859-3",
735
+ "iso-8859-4",
736
+ ],
737
+ ),
738
+ (
739
+ "windows-1252",
740
+ vec![
741
+ "windows-1254",
742
+ "windows-1258",
743
+ "iso-8859-10",
744
+ "iso-8859-14",
745
+ "iso-8859-15",
746
+ "iso-8859-16",
747
+ "iso-8859-3",
748
+ "iso-8859-4",
749
+ ],
750
+ ),
751
+ ])
752
+ });
Dataset_Construction/projects/charset-normalizer/rust/src/entity.rs ADDED
@@ -0,0 +1,518 @@
1
+ #![allow(unused_variables)]
2
+
3
+ use crate::cd::{encoding_languages, mb_encoding_languages};
4
+ use crate::consts::{IANA_SUPPORTED_ALIASES, TOO_BIG_SEQUENCE};
5
+ use crate::utils::{decode, iana_name, is_multi_byte_encoding, range_scan};
6
+ use clap::Parser;
7
+ use encoding::DecoderTrap;
8
+ use ordered_float::OrderedFloat;
9
+ use serde::Serialize;
10
+ use std::cmp::Ordering;
11
+ use std::fmt;
12
+ use std::fmt::{Debug, Display, Formatter};
13
+ use std::hash::Hash;
14
+ use std::ops::Index;
15
+ use std::path::PathBuf;
16
+ use std::time::Duration;
17
+
18
+ /////////////////////////////////////////////////////////////////////////////////////
19
+ // Languages
20
+ /////////////////////////////////////////////////////////////////////////////////////
21
+
22
+ #[derive(Debug, PartialEq, Eq, Hash)]
23
+ pub enum Language {
24
+ English,
25
+ German,
26
+ French,
27
+ Dutch,
28
+ Italian,
29
+ Polish,
30
+ Spanish,
31
+ Russian,
32
+ Japanese,
33
+ Portuguese,
34
+ Swedish,
35
+ Chinese,
36
+ Ukrainian,
37
+ Norwegian,
38
+ Finnish,
39
+ Vietnamese,
40
+ Czech,
41
+ Hungarian,
42
+ Korean,
43
+ Indonesian,
44
+ Turkish,
45
+ Romanian,
46
+ Farsi,
47
+ Arabic,
48
+ Danish,
49
+ Serbian,
50
+ Lithuanian,
51
+ Slovene,
52
+ Slovak,
53
+ Hebrew,
54
+ Bulgarian,
55
+ Croatian,
56
+ Hindi,
57
+ Estonian,
58
+ Thai,
59
+ Greek,
60
+ Tamil,
61
+ Kazakh,
62
+ Unknown,
63
+ }
64
+
65
+ impl Display for Language {
66
+ fn fmt(&self, f: &mut Formatter) -> fmt::Result {
67
+ write!(f, "{:?}", self)
68
+ }
69
+ }
70
+
71
+ /////////////////////////////////////////////////////////////////////////////////////
72
+ // CoherenceMatch & CoherenceMatches
73
+ /////////////////////////////////////////////////////////////////////////////////////
74
+
75
+ #[derive(Debug, PartialEq, Clone)]
76
+ pub struct CoherenceMatch {
77
+ pub language: &'static Language,
78
+ pub score: f32,
79
+ }
80
+
81
+ pub type CoherenceMatches = Vec<CoherenceMatch>;
82
+
83
+ /////////////////////////////////////////////////////////////////////////////////////
84
+ // CharsetMatch
85
+ /////////////////////////////////////////////////////////////////////////////////////
86
+
87
+ #[derive(Clone)]
88
+ pub struct CharsetMatch {
89
+ payload: Vec<u8>,
90
+ encoding: String,
91
+
92
+ mean_mess_ratio: f32,
93
+ coherence_matches: CoherenceMatches,
94
+
95
+ has_sig_or_bom: bool,
96
+
97
+ submatch: Vec<CharsetMatch>,
98
+ decoded_payload: Option<String>,
99
+ }
100
+
101
+ impl Display for CharsetMatch {
102
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
103
+ write!(f, "{:?} ({})", self.payload, self.encoding)
104
+ }
105
+ }
106
+
107
+ impl Debug for CharsetMatch {
108
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
109
+ write!(f, "{:?} ({})", self.payload, self.encoding)
110
+ }
111
+ }
112
+
113
+ impl Default for CharsetMatch {
114
+ fn default() -> Self {
115
+ CharsetMatch {
116
+ payload: vec![],
117
+ encoding: "utf-8".to_string(),
118
+ mean_mess_ratio: 0.0,
119
+ coherence_matches: vec![],
120
+ has_sig_or_bom: false,
121
+ submatch: vec![],
122
+ decoded_payload: None,
123
+ }
124
+ }
125
+ }
126
+
127
+ impl PartialEq<Self> for CharsetMatch {
128
+ fn eq(&self, other: &Self) -> bool {
129
+ self.encoding == other.encoding && self.decoded_payload == other.decoded_payload
130
+ }
131
+ }
132
+
133
+ impl PartialOrd<Self> for CharsetMatch {
134
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
135
+ let mess_difference = (self.mean_mess_ratio - other.mean_mess_ratio).abs();
136
+ let coherence_a = self.coherence();
137
+ let coherence_b = other.coherence();
138
+ let coherence_difference = (coherence_a - coherence_b).abs();
139
+
140
+ // Below 1% difference --> Use Coherence
141
+ if mess_difference < 0.01 {
142
+ if coherence_difference > 0.02 {
143
+ return coherence_b.partial_cmp(&coherence_a);
144
+ }
145
+ let multibyte_usage_a = self.multi_byte_usage();
146
+ let multibyte_usage_b = other.multi_byte_usage();
147
+ let multibyte_usage_delta = (multibyte_usage_a - multibyte_usage_b).abs();
148
+ if multibyte_usage_delta > f32::EPSILON {
149
+ return multibyte_usage_b.partial_cmp(&multibyte_usage_a);
150
+ }
151
+ }
152
+ self.mean_mess_ratio.partial_cmp(&other.mean_mess_ratio)
153
+ }
154
+ }
155
+
156
+ impl CharsetMatch {
157
+ // Init function
158
+ pub fn new(
159
+ payload: &[u8],
160
+ encoding: &str,
161
+ mean_mess_ratio: f32,
162
+ has_sig_or_bom: bool,
163
+ coherence_matches: &CoherenceMatches,
164
+ decoded_payload: Option<&str>,
165
+ ) -> Self {
166
+ CharsetMatch {
167
+ payload: Vec::from(payload),
168
+ encoding: String::from(encoding),
169
+ mean_mess_ratio,
170
+ coherence_matches: coherence_matches.clone(),
171
+ has_sig_or_bom,
172
+ submatch: vec![],
173
+ decoded_payload: decoded_payload.map(String::from).or_else(|| {
174
+ decode(payload, encoding, DecoderTrap::Strict, false, true)
175
+ .ok()
176
+ .map(|res| res.strip_prefix('\u{feff}').unwrap_or(&res).to_string())
177
+ }),
178
+ }
179
+ }
180
+
181
+ // Add submatch
182
+ pub fn add_submatch(&mut self, submatch: &CharsetMatch) {
183
+ self.submatch.push(submatch.clone());
184
+ //self.decoded_payload = None;
185
+ }
186
+
187
+ // Get encoding aliases according to https://encoding.spec.whatwg.org/encodings.json
188
+ pub fn encoding_aliases(&self) -> Vec<&'static str> {
189
+ IANA_SUPPORTED_ALIASES
190
+ .get(self.encoding.as_str())
191
+ .cloned()
192
+ .expect("Problem with static HashMap IANA_SUPPORTED_ALIASES")
193
+ }
194
+ // byte_order_mark
195
+ pub fn bom(&self) -> bool {
196
+ self.has_sig_or_bom
197
+ }
198
+ pub fn encoding(&self) -> &str {
199
+ &self.encoding
200
+ }
201
+ pub fn chaos(&self) -> f32 {
202
+ self.mean_mess_ratio
203
+ }
204
+ // Most probable language found in decoded sequence. If none were detected or inferred, the property will return
205
+ // Language::Unknown
206
+ pub fn most_probably_language(&self) -> &'static Language {
207
+ self.coherence_matches.first().map_or_else(
208
+ // Default case: Trying to infer the language based on the given encoding
209
+ || {
210
+ if self.suitable_encodings().contains(&String::from("ascii")) {
211
+ &Language::English
212
+ } else {
213
+ let languages = if is_multi_byte_encoding(&self.encoding) {
214
+ mb_encoding_languages(&self.encoding)
215
+ } else {
216
+ encoding_languages(self.encoding.clone())
217
+ };
218
+ languages.first().copied().unwrap_or(&Language::Unknown)
219
+ }
220
+ },
221
+ |lang| lang.language,
222
+ )
223
+ }
224
+ // Return the complete list of possible languages found in decoded sequence.
225
+ // Usually not really useful. The returned list may be empty even if the 'language' property returns something != 'Unknown'.
226
+ pub fn languages(&self) -> Vec<&'static Language> {
227
+ self.coherence_matches
228
+ .iter()
229
+ .map(|cm| cm.language)
230
+ .collect()
231
+ }
232
+ // Has submatch
233
+ pub fn has_submatch(&self) -> bool {
234
+ !self.submatch.is_empty()
235
+ }
236
+ // Return submatch list
237
+ pub fn submatch(&self) -> &Vec<CharsetMatch> {
238
+ &self.submatch
239
+ }
240
+ // Multibyte usage ratio
241
+ pub fn multi_byte_usage(&self) -> f32 {
242
+ let decoded_chars = self.decoded_payload().unwrap_or_default().chars().count() as f32;
243
+ let payload_len = self.payload.len() as f32;
244
+
245
+ 1.0 - (decoded_chars / payload_len)
246
+ }
247
+ // Original untouched bytes
248
+ pub fn raw(&self) -> &Vec<u8> {
249
+ &self.payload
250
+ }
251
+ // Return chaos as a percentage
252
+ pub fn chaos_percents(&self) -> f32 {
253
+ self.chaos() * 100.0
254
+ }
255
+ // Return coherence as a percentage
256
+ pub fn coherence_percents(&self) -> f32 {
257
+ self.coherence() * 100.0
258
+ }
259
+ // Most relevant language coherence
260
+ pub fn coherence(&self) -> f32 {
261
+ self.coherence_matches
262
+ .first()
263
+ .map(|lang| lang.score)
264
+ .unwrap_or_default()
265
+ }
266
+
267
+ // Return the decoded payload, if any
268
+ pub fn decoded_payload(&self) -> Option<&str> {
269
+ self.decoded_payload.as_deref()
270
+ }
271
+
272
+ // The complete list of encodings that output the exact SAME str result and therefore could be the originating
273
+ // encoding. This list does include the encoding available in property 'encoding'.
274
+ pub fn suitable_encodings(&self) -> Vec<String> {
275
+ std::iter::once(self.encoding.clone())
276
+ .chain(self.submatch.iter().map(|s| s.encoding.clone()))
277
+ .collect()
278
+ }
279
+ // Returns a sorted list of unicode ranges (if any)
280
+ pub fn unicode_ranges(&self) -> Vec<String> {
281
+ let mut ranges: Vec<String> = range_scan(self.decoded_payload().unwrap_or_default())
282
+ .iter()
283
+ .cloned()
284
+ .collect();
285
+ ranges.sort_unstable();
286
+ ranges
287
+ }
288
+ }
289
+
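+ // Illustrative sketch (added, not part of the original source): once a match has been obtained,
+ // e.g. via `from_bytes(...).get_best()`, typical inspection looks like:
+ //     println!("{} - {:.1} % chaos, {:.1} % coherence", m.encoding(), m.chaos_percents(), m.coherence_percents());
+ //     println!("also suitable: {:?}", m.suitable_encodings());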
290
+ /////////////////////////////////////////////////////////////////////////////////////
291
+ // CharsetMatches
292
+ // Container with every CharsetMatch item, ordered by default from the most probable
293
+ // to the least probable one.
294
+ /////////////////////////////////////////////////////////////////////////////////////
295
+
296
+ #[derive(Debug, Default)]
297
+ pub struct CharsetMatches {
298
+ items: Vec<CharsetMatch>,
299
+ }
300
+
301
+ pub struct CharsetMatchesIterMut<'a> {
302
+ items: std::slice::IterMut<'a, CharsetMatch>,
303
+ }
304
+
305
+ pub struct CharsetMatchesIter<'a> {
306
+ items: std::slice::Iter<'a, CharsetMatch>,
307
+ }
308
+
309
+ impl CharsetMatches {
310
+ // Initialization method
311
+ pub fn new(items: Option<Vec<CharsetMatch>>) -> Self {
312
+ let mut items = items.unwrap_or_default();
313
+ CharsetMatches::resort(&mut items);
314
+ CharsetMatches { items }
315
+ }
316
+ pub fn from_single(item: CharsetMatch) -> Self {
317
+ CharsetMatches { items: vec![item] }
318
+ }
319
+ // Insert a single match. It will be inserted so as to preserve the sort order.
320
+ // Can be inserted as a submatch.
321
+ pub fn append(&mut self, item: CharsetMatch) {
322
+ // We should disable the submatch factoring when the input file is too heavy
323
+ // (conserve RAM usage)
324
+ if item.payload.len() <= TOO_BIG_SEQUENCE {
325
+ for m in &mut self.items {
326
+ if m.decoded_payload() == item.decoded_payload()
327
+ && (m.mean_mess_ratio - item.mean_mess_ratio).abs() < f32::EPSILON
328
+ {
329
+ m.add_submatch(&item);
330
+ return;
331
+ }
332
+ }
333
+ }
334
+ self.items.push(item);
335
+ CharsetMatches::resort(&mut self.items);
336
+ }
337
+ // Simply return the first match. Strict equivalent to matches[0].
338
+ pub fn get_best(&self) -> Option<&CharsetMatch> {
339
+ self.items.first()
340
+ }
341
+ // Retrieve a single item by its encoding name (an alias may be used here).
342
+ pub fn get_by_encoding(&self, encoding: &str) -> Option<&CharsetMatch> {
343
+ let encoding = iana_name(encoding)?;
344
+ self.items
345
+ .iter()
346
+ .find(|&i| i.suitable_encodings().contains(&encoding.to_string()))
347
+ }
348
+ // Resort items by relevancy (for internal use)
349
+ fn resort(items: &mut [CharsetMatch]) {
350
+ items.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
351
+ }
352
+ // iterator
353
+ pub fn iter_mut(&mut self) -> CharsetMatchesIterMut {
354
+ CharsetMatchesIterMut {
355
+ items: self.items.iter_mut(),
356
+ }
357
+ }
358
+ pub fn iter(&self) -> CharsetMatchesIter {
359
+ CharsetMatchesIter {
360
+ items: self.items.iter(),
361
+ }
362
+ }
363
+ // len
364
+ pub fn len(&self) -> usize {
365
+ self.items.len()
366
+ }
367
+ // is empty?
368
+ pub fn is_empty(&self) -> bool {
369
+ self.items.is_empty()
370
+ }
371
+ }
372
+
373
+ impl Index<usize> for CharsetMatches {
374
+ type Output = CharsetMatch;
375
+ fn index(&self, index: usize) -> &Self::Output {
376
+ &self.items[index]
377
+ }
378
+ }
379
+
380
+ impl<'a> Iterator for CharsetMatchesIterMut<'a> {
381
+ type Item = &'a mut CharsetMatch;
382
+
383
+ fn next(&mut self) -> Option<Self::Item> {
384
+ self.items.next()
385
+ }
386
+ }
387
+
388
+ impl<'a> Iterator for CharsetMatchesIter<'a> {
389
+ type Item = &'a CharsetMatch;
390
+
391
+ fn next(&mut self) -> Option<Self::Item> {
392
+ self.items.next()
393
+ }
394
+ }
395
+
396
+ #[derive(Clone)]
397
+ pub struct NormalizerSettings {
398
+ /// How many steps (chunks) should be taken from the file
399
+ pub steps: usize,
400
+ /// Each chunk size
401
+ pub chunk_size: usize,
402
+ /// Mess ratio threshold
403
+ pub threshold: OrderedFloat<f32>,
404
+ /// Specify exactly which encodings to probe
405
+ pub include_encodings: Vec<String>,
406
+ /// Exclude these encodings from probing
407
+ pub exclude_encodings: Vec<String>,
408
+ /// Allow trying to find a declared charset inside the text itself
409
+ pub preemptive_behaviour: bool,
410
+ /// Language detector threshold
411
+ pub language_threshold: OrderedFloat<f32>,
412
+ /// Allow fallback to ASCII / UTF-8
413
+ pub enable_fallback: bool,
414
+ }
415
+
416
+ impl Default for NormalizerSettings {
417
+ fn default() -> Self {
418
+ NormalizerSettings {
419
+ steps: 5,
420
+ chunk_size: 512,
421
+ threshold: OrderedFloat(0.2),
422
+ include_encodings: vec![],
423
+ exclude_encodings: vec![],
424
+ preemptive_behaviour: true,
425
+ language_threshold: OrderedFloat(0.1),
426
+ enable_fallback: true,
427
+ }
428
+ }
429
+ }
430
+
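+ // Minimal usage sketch (added, not part of the original source), assuming the crate is consumed
+ // as `charset_normalizer_rs`: override only the fields you care about and keep the remaining
+ // defaults via struct update syntax.
+ //
+ //     use charset_normalizer_rs::from_bytes;
+ //     use charset_normalizer_rs::entity::NormalizerSettings;
+ //     use ordered_float::OrderedFloat;
+ //
+ //     let settings = NormalizerSettings {
+ //         threshold: OrderedFloat(0.1),                      // be stricter about mess
+ //         exclude_encodings: vec!["utf-16le".to_string()],   // skip an encoding entirely
+ //         ..Default::default()
+ //     };
+ //     let result = from_bytes("Bonjour tout le monde".as_bytes(), Some(settings));
+ //     if let Some(best) = result.get_best() {
+ //         println!("{}", best.encoding());
+ //     }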
431
+ /////////////////////////////////////////////////////////////////////////////////////
432
+ // Performance binary application
433
+ /////////////////////////////////////////////////////////////////////////////////////
434
+
435
+ #[derive(Parser, Debug)]
436
+ #[command(name = "Performance check for charset-normalizer-rs vs chardet vs chardetng")]
437
+ #[command(author, version, about, long_about = None)]
438
+ pub struct PerformanceArgs {
439
+ /// Apply artificial size increase to challenge the detection mechanism further
440
+ #[arg(short, long, default_value_t = 1)]
441
+ pub size_increase: u8,
442
+ }
443
+
444
+ // Struct to save result of each test in performance app
445
+ pub struct PerformanceResult {
446
+ /// Performance test duration
447
+ pub duration: Duration,
448
+ /// Is result accurate?
449
+ pub correct: bool,
450
+ }
451
+
452
+ /////////////////////////////////////////////////////////////////////////////////////
453
+ // Normalizer CLI application
454
+ /////////////////////////////////////////////////////////////////////////////////////
455
+
456
+ #[derive(Parser, Debug)]
457
+ #[command(
458
+ name = "The Real First Universal Charset Detector. Discover originating encoding used on text file. Normalize text to unicode."
459
+ )]
460
+ #[command(author, version, about, long_about = None)]
461
+ pub struct CLINormalizerArgs {
462
+ /// File(s) to be analysed
463
+ #[arg(required = true, action = clap::ArgAction::Append)]
464
+ pub files: Vec<PathBuf>,
465
+
466
+ /// Display complementary information about file if any. Stdout will contain logs about the detection process.
467
+ #[arg(short = 'v', long = "verbose", default_value_t = false)]
468
+ pub verbose: bool,
469
+
470
+ /// Output complementary possibilities if any. Top-level JSON WILL be a list.
471
+ #[arg(short = 'a', long = "with-alternative", default_value_t = false)]
472
+ pub alternatives: bool,
473
+
474
+ /// Permit to normalize input file. If not set, program does not write anything.
475
+ #[arg(short, long, default_value_t = false)]
476
+ pub normalize: bool,
477
+
478
+ /// Only output the charset detected to STDOUT. Disabling JSON output.
479
+ #[arg(short, long, default_value_t = false)]
480
+ pub minimal: bool,
481
+
482
+ /// Replace file when trying to normalize it instead of creating a new one.
483
+ #[arg(short, long, default_value_t = false)]
484
+ pub replace: bool,
485
+
486
+ /// Replace file without asking if you are sure, use this flag with caution.
487
+ #[arg(short, long, default_value_t = false)]
488
+ pub force: bool,
489
+
490
+ /// Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.
491
+ #[arg(short, long, default_value_t = 0.2)]
492
+ pub threshold: f32,
493
+ }
494
+
495
+ #[derive(Default, Debug, Serialize)]
496
+ pub struct CLINormalizerResult {
497
+ /// Path to analysed file
498
+ pub path: PathBuf,
499
+ /// Guessed encoding
500
+ pub encoding: Option<String>,
501
+ /// Possible aliases of guessed encoding
502
+ pub encoding_aliases: Vec<String>,
503
+ /// Alternative possible encodings
504
+ pub alternative_encodings: Vec<String>,
505
+ /// Most probable language
506
+ pub language: String,
507
+ /// Found alphabets
508
+ pub alphabets: Vec<String>,
509
+ /// Does it have a SIG or BOM mark?
510
+ pub has_sig_or_bom: bool,
511
+ /// Chaos (mess) level
512
+ pub chaos: String,
513
+ /// Coherence (language detection) level
514
+ pub coherence: String,
515
+ /// Path to decoded data
516
+ pub unicode_path: Option<PathBuf>,
517
+ pub is_preferred: bool,
518
+ }
Dataset_Construction/projects/charset-normalizer/rust/src/lib.rs ADDED
@@ -0,0 +1,597 @@
1
+ //! charset-normalizer-rs
2
+ //! ======================
3
+ //!
4
+ //! The Real First Universal Charset Detector, Rust version.
5
+ //! Motivated by the original Python version of `charset-normalizer`.
6
+ //!
7
+ //! This library helps read text from an unknown charset encoding.
8
+ //! All IANA character set names for which the Rust `encoding` library provides codecs are supported.
9
+ //!
10
+ //! This project is a port of the original Python version of [Charset Normalizer](https://github.com/Ousret/charset_normalizer).
11
+ //! The biggest difference between the Python and Rust versions is the number of supported encodings, as each language has its own encoding/decoding library.
12
+ //! The Rust version only supports encodings from the [WhatWG standard](https://encoding.spec.whatwg.org).
13
+ //! The Python version supports more encodings, but many of them are old, rarely used ones.
14
+ //!
15
+ //! # Performance:
16
+ //!
17
+ //! Compared to the Python version, this library is faster (2-3 times faster than the MYPYC version of charset-normalizer, 4-6 times faster than the plain Python version).
19
+ //! All measurements are approximate.
19
+ //!
20
+ //! # Library:
21
+ //!
22
+ //! The library offers two main functions:
23
+ //!
24
+ //! * `from_bytes` processes text passed in as a byte slice
25
+ //! * `from_path` processes text read from the given file path
26
+ //!
27
+ //! ## Examples:
28
+ //!
29
+ //! ```rust
30
+ //! use charset_normalizer_rs::from_bytes;
31
+ //!
32
+ //! fn test_from_bytes() {
33
+ //! let result = from_bytes(&vec![0x84, 0x31, 0x95, 0x33], None);
34
+ //! let best_guess = result.get_best();
35
+ //! assert_eq!(
36
+ //! best_guess.unwrap().encoding(),
37
+ //! "gb18030",
38
+ //! );
39
+ //! }
40
+ //! test_from_bytes();
41
+ //! ```
42
+ //!
43
+ //! ```rust
44
+ //! use std::path::Path;
45
+ //! use charset_normalizer_rs::from_path;
46
+ //!
47
+ //! fn test_from_path() {
48
+ //! let result = from_path(Path::new("src/tests/data/samples/sample-chinese.txt"), None).unwrap();
49
+ //! let best_guess = result.get_best();
50
+ //! assert_eq!(
51
+ //! best_guess.unwrap().encoding(),
52
+ //! "big5",
53
+ //! );
54
+ //! }
55
+ //! test_from_path();
56
+ //! ```
57
+ //!
58
+ //! # CLI tool:
59
+ //!
60
+ //! A binary CLI tool is included within this package. Its input parameters and output data are similar to those of the Python version.
61
+ //!
62
+ //! ## Installation:
63
+ //!
64
+ //! ```shell
65
+ //! cargo install charset-normalizer-rs
66
+ //! ```
67
+ //!
68
+ //! ## Usage:
69
+ //!
70
+ //! ```shell
71
+ //! normalizer -h
72
+ //!
73
+ //! usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD] [--version] files [files ...]
74
+ //!
75
+ //! The Real First Universal Charset Detector. Discover originating encoding used on text file. Normalize text to unicode.
76
+ //!
77
+ //! positional arguments:
78
+ //! files File(s) to be analysed
79
+ //!
80
+ //! options:
81
+ //! -h, --help show this help message and exit
82
+ //! -v, --verbose Display complementary information about file if any. Stdout will contain logs about the detection process.
83
+ //! -a, --with-alternative
84
+ //! Output complementary possibilities if any. Top-level JSON WILL be a list.
85
+ //! -n, --normalize Permit to normalize input file. If not set, program does not write anything.
86
+ //! -m, --minimal Only output the charset detected to STDOUT. Disabling JSON output.
87
+ //! -r, --replace Replace file when trying to normalize it instead of creating a new one.
88
+ //! -f, --force Replace file without asking if you are sure, use this flag with caution.
89
+ //! -t THRESHOLD, --threshold THRESHOLD
90
+ //! Define a custom maximum amount of chaos allowed in decoded content. 0. <= chaos <= 1.
91
+ //! --version Show version information and exit.
92
+ //! ```
93
+ //!
94
+ //! ## Example:
95
+ //!
96
+ //! ```shell
97
+ //! normalizer src/tests/data/samples/sample-chinese.txt
98
+ //! ```
99
+ //!
100
+ //! This will produce JSON output like this:
101
+ //!
102
+ //! ```json
103
+ //! {
104
+ //! "path": ".../src/tests/data/samples/sample-chinese.txt",
105
+ //! "encoding": "big5",
106
+ //! "encoding_aliases": [
107
+ //! "big5_tw",
108
+ //! "csbig5",
109
+ //! "x_mac_trad_chinese"
110
+ //! ],
111
+ //! "alternative_encodings": [
112
+ //! "big5hkscs",
113
+ //! "cp950"
114
+ //! ],
115
+ //! "language": "Chinese",
116
+ //! "alphabets": [
117
+ //! "Basic Latin",
118
+ //! "CJK Compatibility Forms",
119
+ //! "CJK Symbols and Punctuation",
120
+ //! "CJK Unified Ideographs",
121
+ //! "Control character",
122
+ //! "Halfwidth and Fullwidth Forms"
123
+ //! ],
124
+ //! "has_sig_or_bom": false,
125
+ //! "chaos": 0.0,
126
+ //! "coherence": 12.21,
127
+ //! "unicode_path": null,
128
+ //! "is_preferred": true
129
+ //! }
130
+ //! ```
131
+ use crate::cd::{
132
+ coherence_ratio, encoding_languages, mb_encoding_languages, merge_coherence_ratios,
133
+ };
134
+ use crate::consts::{IANA_SUPPORTED, MAX_PROCESSED_BYTES, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE};
135
+ use crate::entity::{CharsetMatch, CharsetMatches, CoherenceMatches, NormalizerSettings};
136
+ use crate::md::mess_ratio;
137
+ use crate::utils::{
138
+ any_specified_encoding, decode, iana_name, identify_sig_or_bom, is_cp_similar,
139
+ is_invalid_chunk, is_multi_byte_encoding,
140
+ };
141
+ use encoding::DecoderTrap;
142
+ use log::{debug, trace};
143
+ use std::collections::VecDeque;
144
+ use std::fs::File;
145
+ use std::io::Read;
146
+ use std::path::Path;
147
+
148
+ pub mod assets;
149
+ // TODO: Revisit float conversions when we want to push for accuracy
150
+ #[allow(clippy::cast_lossless, clippy::cast_precision_loss)]
151
+ mod cd;
152
+ pub mod consts;
153
+ pub mod entity;
154
+ mod md;
155
+ mod tests;
156
+ pub mod utils;
157
+
158
+ // Given a raw byte sequence, return the best possible charsets usable to render str objects.
159
+ // If there are no results, it is a strong indicator that the source is binary/not text.
160
+ // By default, the process extracts 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence,
161
+ // and gives up on a particular code page after 20% of measured mess. Those criteria are customizable at will.
162
+ //
163
+ // The preemptive behaviour DOES NOT replace the traditional detection workflow; it prioritizes a particular code page
164
+ // but never takes it for granted. It can improve performance.
165
+ //
166
+ // You may want to focus your attention on some code pages and/or exclude others; use the include_encodings and
167
+ // exclude_encodings settings for that purpose.
168
+ //
169
+ // This function will strip the SIG from the payload/sequence every time except for UTF-16 and UTF-32.
170
+ // Logging goes through the `log` crate: attach a logger implementation (and raise the level to trace/debug)
171
+ // to see details about the detection process.
172
+ // Custom logging format and handler can be set manually.
173
+ pub fn from_bytes(bytes: &[u8], settings: Option<NormalizerSettings>) -> CharsetMatches {
174
+ // init settings with default values if it's None and recheck include_encodings and
175
+ // exclude_encodings settings
176
+ let mut settings = settings.unwrap_or_default();
177
+ if !settings.include_encodings.is_empty() {
178
+ settings.include_encodings = settings
179
+ .include_encodings
180
+ .iter()
181
+ .map(|e| iana_name(e).unwrap().to_string())
182
+ .collect();
183
+ trace!(
184
+ "include_encodings is set. Use this flag for debugging purpose. \
185
+ Limited list of encoding allowed : {}.",
186
+ settings.include_encodings.join(", ")
187
+ );
188
+ }
189
+ if !settings.exclude_encodings.is_empty() {
190
+ settings.exclude_encodings = settings
191
+ .exclude_encodings
192
+ .iter()
193
+ .map(|e| iana_name(e).unwrap().to_string())
194
+ .collect();
195
+ trace!(
196
+ "exclude_encodings is set. Use this flag for debugging purpose. \
197
+ Limited list of encoding allowed : {}.",
198
+ settings.exclude_encodings.join(", ")
199
+ );
200
+ }
201
+
202
+ // check for empty
203
+ let bytes_length = bytes.len();
204
+ if bytes_length == 0 {
205
+ debug!("Encoding detection on empty bytes, assuming utf_8 intention.");
206
+ return CharsetMatches::from_single(CharsetMatch::default());
207
+ }
208
+
209
+ // check min length
210
+ if bytes_length <= (settings.chunk_size * settings.steps) {
211
+ trace!(
212
+ "override steps ({}) and chunk_size ({}) as content does not \
213
+ fit ({} byte(s) given) parameters.",
214
+ settings.steps,
215
+ settings.chunk_size,
216
+ bytes_length,
217
+ );
218
+ settings.steps = 1;
219
+ settings.chunk_size = bytes_length;
220
+ }
221
+
222
+ if settings.steps > 1 && bytes_length / settings.steps < settings.chunk_size {
223
+ settings.chunk_size = bytes_length / settings.steps;
224
+ }
225
+
226
+ // too small length
227
+ if bytes_length < TOO_SMALL_SEQUENCE {
228
+ trace!(
229
+ "Trying to detect encoding from a tiny portion of ({}) byte(s).",
230
+ bytes_length
231
+ );
232
+ }
233
+
234
+ // too big length
235
+ let is_too_large_sequence = bytes_length > TOO_BIG_SEQUENCE;
236
+ if is_too_large_sequence {
237
+ trace!(
238
+ "Using lazy str decoding because the payload is quite large, ({}) byte(s).",
239
+ bytes_length
240
+ );
241
+ }
242
+
243
+ // start to build prioritized encodings array
244
+ let mut prioritized_encodings: Vec<&str> = vec![];
245
+
246
+ // search for encoding in the content
247
+ let mut specified_encoding: String = String::new();
248
+ if settings.preemptive_behaviour {
249
+ if let Some(enc) = any_specified_encoding(bytes, 4096) {
250
+ trace!(
251
+ "Detected declarative mark in sequence. Priority +1 given for {}.",
252
+ &enc
253
+ );
254
+ specified_encoding = enc.to_string();
255
+ prioritized_encodings.push(&specified_encoding);
256
+ }
257
+ }
258
+
259
+ // check bom & sig
260
+ let (sig_encoding, sig_payload) = identify_sig_or_bom(bytes);
261
+ if let (Some(sig_enc), Some(sig_pay)) = (&sig_encoding, sig_payload) {
262
+ trace!(
263
+ "Detected a SIG or BOM mark on first {} byte(s). Priority +1 given for {}.",
264
+ sig_pay.len(),
265
+ sig_enc,
266
+ );
267
+ prioritized_encodings.push(sig_enc);
268
+ }
269
+
270
+ // add ascii & utf-8
271
+ prioritized_encodings.extend(&["ascii", "utf-8"]);
272
+
273
+ // generate array of encodings for probing with prioritizing
274
+ let mut iana_encodings: VecDeque<&str> = VecDeque::from(IANA_SUPPORTED.clone());
275
+ for pe in prioritized_encodings.iter().rev() {
276
+ if let Some(index) = iana_encodings.iter().position(|x| x == pe) {
277
+ let value = iana_encodings.remove(index).unwrap();
278
+ iana_encodings.push_front(value);
279
+ }
280
+ }
281
+
282
+ // Main processing loop variables
283
+ let mut tested_but_hard_failure: Vec<&str> = vec![];
284
+ let mut tested_but_soft_failure: Vec<&str> = vec![];
285
+ let mut fallback_ascii: Option<CharsetMatch> = None;
286
+ let mut fallback_u8: Option<CharsetMatch> = None;
287
+ let mut fallback_specified: Option<CharsetMatch> = None;
288
+ let mut results: CharsetMatches = CharsetMatches::default();
289
+
290
+ // Iterate and probe our encodings
291
+ 'iana_encodings_loop: for encoding_iana in iana_encodings {
292
+ if (!settings.include_encodings.is_empty()
293
+ && !settings
294
+ .include_encodings
295
+ .contains(&encoding_iana.to_string()))
296
+ || settings
297
+ .exclude_encodings
298
+ .contains(&encoding_iana.to_string())
299
+ {
300
+ continue;
301
+ }
302
+ let bom_or_sig_available: bool = sig_encoding.as_deref() == Some(encoding_iana);
303
+ // let strip_sig_or_bom = true // unlike the Python version, this is always true in the Rust port
304
+ let is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana);
305
+
306
+ // utf-16le & utf-16be cannot be identified without BOM
307
+ if !bom_or_sig_available && ["utf-16le", "utf-16be"].contains(&encoding_iana) {
308
+ trace!(
309
+ "Encoding {} won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE",
310
+ encoding_iana,
311
+ );
312
+ continue;
313
+ }
314
+
315
+ // fast pre-check
316
+ let start_idx = match bom_or_sig_available {
317
+ true => sig_payload.unwrap().len(),
318
+ false => 0,
319
+ };
320
+ let end_idx = match is_too_large_sequence && !is_multi_byte_decoder {
321
+ true => MAX_PROCESSED_BYTES,
322
+ false => bytes_length,
323
+ };
324
+ let decoded_payload: Option<String> = if let Ok(payload) = decode(
325
+ &bytes[start_idx..end_idx],
326
+ encoding_iana,
327
+ DecoderTrap::Strict,
328
+ is_too_large_sequence && !is_multi_byte_decoder,
329
+ false,
330
+ ) {
331
+ (!is_too_large_sequence || is_multi_byte_decoder).then_some(payload)
332
+ } else {
333
+ trace!(
334
+ "Code page {} does not fit given bytes sequence at ALL.",
335
+ encoding_iana,
336
+ );
337
+ tested_but_hard_failure.push(encoding_iana);
338
+ continue 'iana_encodings_loop;
339
+ };
340
+
341
+ // soft-failure pre-check
342
+ // important: encodings too similar to ones that already soft-failed are skipped; this occasionally affects detection
343
+ for encoding_soft_failed in &tested_but_soft_failure {
344
+ if is_cp_similar(encoding_iana, encoding_soft_failed) {
345
+ trace!("{} is deemed too similar to code page {} and was consider unsuited already. Continuing!",
346
+ encoding_iana,
347
+ encoding_soft_failed,
348
+ );
349
+ continue 'iana_encodings_loop;
350
+ }
351
+ }
352
+
353
+ // let's split the input into chunks and try to parse them
354
+ let max_chunk_gave_up = 2.max(settings.steps / 4);
355
+ let mut early_stop_count: usize = 0;
356
+ let mut lazy_str_hard_failure = false;
357
+ let mut md_ratios: Vec<f32> = vec![];
358
+
359
+ // detect target languages
360
+ let target_languages = if is_multi_byte_decoder {
361
+ mb_encoding_languages(encoding_iana)
362
+ } else {
363
+ encoding_languages(encoding_iana.to_string())
364
+ };
365
+ trace!(
366
+ "{} should target any language(s) of {:?}",
367
+ encoding_iana,
368
+ target_languages,
369
+ );
370
+
371
+ // main loop over chunks in our input
372
+ // we iterate over bytes or chars, depending on whether the payload was already decoded above
373
+ let seq_len = match &decoded_payload {
374
+ Some(payload) => payload.chars().count(),
375
+ None => bytes_length,
376
+ };
377
+ let starting_offset = match (bom_or_sig_available, &decoded_payload) {
378
+ (true, None) => start_idx,
379
+ _ => 0,
380
+ };
381
+ let offsets = (starting_offset..seq_len).step_by((seq_len / settings.steps).max(1));
382
+
383
+ // Chunks Loop
384
+ // Iterate over chunks of bytes or chars
385
+ let mut md_chunks: Vec<String> = vec![];
386
+ 'chunks_loop: for offset in offsets {
387
+ let decoded_chunk_result = match &decoded_payload {
388
+ // Chars processing
389
+ Some(payload) => Ok(payload
390
+ .chars()
391
+ .skip(offset)
392
+ .take(settings.chunk_size)
393
+ .collect()),
394
+ // Bytes processing
395
+ None => decode(
396
+ &bytes[offset..(offset + settings.chunk_size).min(seq_len)],
397
+ encoding_iana,
398
+ DecoderTrap::Strict,
399
+ false,
400
+ false,
401
+ ),
402
+ };
403
+
404
+ if is_invalid_chunk(&decoded_chunk_result, encoding_iana) {
405
+ trace!(
406
+ "LazyStr Loading: After MD chunk decode, code page {} \
407
+ does not fit given bytes sequence at ALL. {}",
408
+ encoding_iana,
409
+ match decoded_chunk_result {
410
+ Ok(_) => String::from("non-ascii"),
411
+ Err(message) => message.to_string(),
412
+ },
413
+ );
414
+ early_stop_count = max_chunk_gave_up;
415
+ lazy_str_hard_failure = true;
416
+ break 'chunks_loop;
417
+ }
418
+ let decoded_chunk = decoded_chunk_result.unwrap();
419
+
420
+ // MD ratios calc
421
+ md_chunks.push(decoded_chunk.clone());
422
+ md_ratios.push(mess_ratio(decoded_chunk, Some(settings.threshold)));
423
+ if md_ratios.last().unwrap() >= &settings.threshold {
424
+ early_stop_count += 1;
425
+ }
426
+ if early_stop_count >= max_chunk_gave_up {
427
+ break 'chunks_loop;
428
+ }
429
+ }
430
+
431
+ // We might want to check the remainder of the sequence,
432
+ // but only if the initial MD tests pass
433
+ if !lazy_str_hard_failure && is_too_large_sequence && !is_multi_byte_decoder {
434
+ let decoded_chunk_result = decode(
435
+ &bytes[MAX_PROCESSED_BYTES..],
436
+ encoding_iana,
437
+ DecoderTrap::Strict,
438
+ false,
439
+ false,
440
+ );
441
+ if is_invalid_chunk(&decoded_chunk_result, encoding_iana) {
442
+ trace!(
443
+ "LazyStr Loading: After final lookup, code page {} does not fit \
444
+ given bytes sequence at ALL. {}",
445
+ encoding_iana,
446
+ decoded_chunk_result.unwrap_err().to_string(),
447
+ );
448
+ tested_but_hard_failure.push(encoding_iana);
449
+ continue 'iana_encodings_loop;
450
+ }
451
+ }
452
+
453
+ // process mean mess ratio
454
+ let mean_mess_ratio = match md_ratios.is_empty() {
455
+ true => 0.0,
456
+ false => md_ratios.iter().sum::<f32>() / (md_ratios.len() as f32),
457
+ };
458
+
459
+ if mean_mess_ratio >= *settings.threshold || early_stop_count >= max_chunk_gave_up {
460
+ tested_but_soft_failure.push(encoding_iana);
461
+ trace!(
462
+ "{} was excluded because of initial chaos probing. \
463
+ Gave up {} time(s). Computed mean chaos is {} %.",
464
+ encoding_iana,
465
+ early_stop_count,
466
+ mean_mess_ratio * 100.0,
467
+ );
468
+ // Preparing those fallbacks in case we got nothing.
469
+ if settings.enable_fallback
470
+ && !lazy_str_hard_failure
471
+ && prioritized_encodings.contains(&encoding_iana)
472
+ {
473
+ let fallback_entry = Some(CharsetMatch::new(
474
+ bytes,
475
+ encoding_iana,
476
+ f32::from(settings.threshold),
477
+ false,
478
+ &vec![],
479
+ decoded_payload.as_deref(),
480
+ ));
481
+
482
+ match encoding_iana {
483
+ e if e == specified_encoding => fallback_specified = fallback_entry,
484
+ "ascii" => fallback_ascii = fallback_entry,
485
+ _ => fallback_u8 = fallback_entry,
486
+ }
487
+ }
488
+ continue 'iana_encodings_loop;
489
+ }
490
+ trace!(
491
+ "{} passed initial chaos probing. Mean measured chaos is {} %",
492
+ encoding_iana,
493
+ mean_mess_ratio * 100.0,
494
+ );
495
+
496
+ // CD ratios calc
497
+ // We skip the CD step when it's about ASCII,
498
+ // as most of the time it's not relevant to run "language-detection" on it.
499
+ let mut cd_ratios: Vec<CoherenceMatches> = vec![];
500
+ if encoding_iana != "ascii" {
501
+ cd_ratios.extend(md_chunks.iter().filter_map(|chunk| {
502
+ coherence_ratio(
503
+ chunk.clone(),
504
+ Some(settings.language_threshold),
505
+ Some(target_languages.clone()),
506
+ )
507
+ .ok()
508
+ }));
509
+ }
510
+
511
+ // process cd ratios
512
+ let cd_ratios_merged = merge_coherence_ratios(&cd_ratios);
513
+ if !cd_ratios_merged.is_empty() {
514
+ trace!(
515
+ "We detected language {:?} using {}",
516
+ cd_ratios_merged,
517
+ encoding_iana
518
+ );
519
+ }
520
+
521
+ // process results
522
+ results.append(CharsetMatch::new(
523
+ bytes,
524
+ encoding_iana,
525
+ mean_mess_ratio,
526
+ bom_or_sig_available,
527
+ &cd_ratios_merged,
528
+ decoded_payload.as_deref(),
529
+ ));
530
+
531
+ if (mean_mess_ratio < 0.1 && prioritized_encodings.contains(&encoding_iana))
532
+ || encoding_iana == sig_encoding.clone().unwrap_or_default()
533
+ {
534
+ debug!(
535
+ "Encoding detection: {} is most likely the one.",
536
+ encoding_iana
537
+ );
538
+ return CharsetMatches::from_single(
539
+ results.get_by_encoding(encoding_iana).unwrap().clone(),
540
+ );
541
+ }
542
+ }
543
+
544
+ // fallbacks
545
+ if results.is_empty() {
546
+ let fb = match (&fallback_specified, &fallback_u8, &fallback_ascii) {
547
+ (Some(specified), _, _) => Some(specified),
548
+ (None, Some(u8_fallback), None) => Some(u8_fallback),
549
+ (None, Some(u8_fallback), Some(ascii))
550
+ if u8_fallback.decoded_payload() != ascii.decoded_payload() =>
551
+ {
552
+ Some(u8_fallback)
553
+ }
554
+ (None, _, Some(ascii)) => Some(ascii),
555
+ _ => None,
556
+ };
557
+ if let Some(fb_to_pass) = fb {
558
+ debug!(
559
+ "Encoding detection: will be used as a fallback match {}",
560
+ fb_to_pass.encoding()
561
+ );
562
+ results.append(fb_to_pass.clone());
563
+ };
564
+ }
565
+
566
+ // final logger information
567
+ if results.is_empty() {
568
+ debug!("Encoding detection: Unable to determine any suitable charset.");
569
+ } else {
570
+ debug!(
571
+ "Encoding detection: Found {} as plausible (best-candidate) for content. \
572
+ With {} alternatives.",
573
+ results.get_best().unwrap().encoding(),
574
+ results.len() - 1,
575
+ );
576
+ }
577
+ results
578
+ }
579
+
580
+ // Same as the from_bytes function but with one extra step:
581
+ // opening and reading the given file path in binary mode.
582
+ // Can return an error.
583
+ pub fn from_path(
584
+ path: &Path,
585
+ settings: Option<NormalizerSettings>,
586
+ ) -> Result<CharsetMatches, String> {
587
+ // read file
588
+ let mut file = File::open(path).map_err(|e| format!("Error opening file: {e}"))?;
589
+ let file_size = file.metadata().map(|m| m.len()).unwrap_or_default();
590
+
591
+ let mut buffer = Vec::with_capacity(file_size as usize);
592
+ file.read_to_end(&mut buffer)
593
+ .map_err(|e| format!("Error reading from file: {e}"))?;
594
+
595
+ // calculate
596
+ Ok(from_bytes(&buffer, settings))
597
+ }
Dataset_Construction/projects/charset-normalizer/rust/src/md.rs ADDED
@@ -0,0 +1,82 @@
1
+ use cached::proc_macro::cached;
2
+ use log::{log_enabled, trace};
3
+ use ordered_float::OrderedFloat;
4
+
5
+ pub(crate) mod plugins;
6
+ pub(crate) mod structs;
7
+
8
+ use plugins::{
9
+ ArchaicUpperLowerPlugin, CjkInvalidStopPlugin, MessDetectorPlugin, SuperWeirdWordPlugin,
10
+ SuspiciousDuplicateAccentPlugin, SuspiciousRangePlugin, TooManyAccentuatedPlugin,
11
+ TooManySymbolOrPunctuationPlugin, UnprintablePlugin,
12
+ };
13
+ use structs::MessDetectorChar;
14
+
15
+ //
16
+ // Mess detection module
17
+ //
18
+
19
+ // Compute a mess ratio given a decoded byte sequence. Reaching the maximum threshold stops the computation early.
20
+ #[cached(size = 2048)]
21
+ pub(crate) fn mess_ratio(
22
+ decoded_sequence: String,
23
+ maximum_threshold: Option<OrderedFloat<f32>>,
24
+ ) -> f32 {
25
+ let maximum_threshold = f32::from(maximum_threshold.unwrap_or(OrderedFloat(0.2)));
26
+ let mut detectors: Vec<Box<dyn MessDetectorPlugin>> = vec![
27
+ Box::<TooManySymbolOrPunctuationPlugin>::default(),
28
+ Box::<TooManyAccentuatedPlugin>::default(),
29
+ Box::<UnprintablePlugin>::default(),
30
+ Box::<SuspiciousRangePlugin>::default(),
31
+ Box::<SuspiciousDuplicateAccentPlugin>::default(),
32
+ Box::<SuperWeirdWordPlugin>::default(),
33
+ Box::<CjkInvalidStopPlugin>::default(),
34
+ Box::<ArchaicUpperLowerPlugin>::default(),
35
+ ];
36
+
37
+ let mut mean_mess_ratio: Option<f32> = None;
38
+ let early_calc_period: usize = match decoded_sequence.chars().count() {
39
+ ..=510 => 32,
40
+ 511..=1023 => 64,
41
+ _ => 128,
42
+ };
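+ // In other words: inputs of up to 510 chars are re-checked every 32 characters, up to 1023 every
+ // 64, and longer ones every 128; the loop below stops early as soon as an intermediate ratio
+ // already reaches the maximum threshold.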
43
+ // Traverse through chars and detectors
44
+ for (index, ch) in decoded_sequence
45
+ .chars()
46
+ .chain(std::iter::once('\n'))
47
+ .enumerate()
48
+ {
49
+ let mess_char = MessDetectorChar::new(ch);
50
+ detectors
51
+ .iter_mut()
52
+ .filter(|detector| detector.eligible(&mess_char))
53
+ .for_each(|detector| detector.feed(&mess_char));
54
+
55
+ if index % early_calc_period == early_calc_period - 1 {
56
+ let early_mess_ratio: f32 = detectors.iter().map(|x| x.ratio()).sum();
57
+ if early_mess_ratio >= maximum_threshold {
58
+ mean_mess_ratio = Some(early_mess_ratio);
59
+ break;
60
+ }
61
+ }
62
+ }
63
+ let return_ratio = mean_mess_ratio.unwrap_or(detectors.iter().map(|x| x.ratio()).sum());
64
+
65
+ if log_enabled!(log::Level::Trace) {
66
+ trace!(
67
+ "Mess-detector extended-analysis start: early_calc_period={}, mean_mess_ratio={}, maximum_threshold={} \
68
+ {}",
69
+ early_calc_period,
70
+ return_ratio,
71
+ maximum_threshold,
72
+ detectors
73
+ .iter()
74
+ .filter(|d| d.ratio() > 0.0)
75
+ .map(|d| format!("{} produces ratio: {}", d.name(), d.ratio()))
76
+ .collect::<Vec<String>>()
77
+ .join("===")
78
+ );
79
+ }
80
+
81
+ return_ratio
82
+ }
Dataset_Construction/projects/charset-normalizer/rust/src/md/plugins.rs ADDED
@@ -0,0 +1,448 @@
1
+ use crate::{
2
+ md::structs::{MessDetectorChar, MessDetectorCharFlags},
3
+ utils::{is_suspiciously_successive_range, remove_accent},
4
+ };
5
+
6
+ // Base abstract trait used for mess detection plugins.
7
+ // All detectors MUST implement the given methods.
8
+ pub(super) trait MessDetectorPlugin {
9
+ // Name of plugin
10
+ fn name(&self) -> &str {
11
+ std::any::type_name::<Self>().split("::").last().unwrap()
12
+ }
13
+
14
+ // Determine if given character should be fed in
15
+ fn eligible(&self, character: &MessDetectorChar) -> bool;
16
+
17
+ // The main routine to be executed for each character.
18
+ // Insert the logic in which the text would be considered chaotic.
19
+ fn feed(&mut self, character: &MessDetectorChar);
20
+
21
+ // Compute the chaos ratio based on what your feed() has seen.
22
+ // Must NOT be lower than 0.0; there is no upper restriction.
23
+ fn ratio(&self) -> f32;
24
+ }
25
+
26
+ //
27
+ // TooManySymbolOrPunctuationPlugin implementation
28
+ //
29
+ #[derive(Default)]
30
+ pub(super) struct TooManySymbolOrPunctuationPlugin {
31
+ punctuation_count: u64,
32
+ symbol_count: u64,
33
+ character_count: u64,
34
+ last_printable_char: Option<MessDetectorChar>,
35
+ }
36
+
37
+ impl MessDetectorPlugin for TooManySymbolOrPunctuationPlugin {
38
+ fn eligible(&self, character: &MessDetectorChar) -> bool {
39
+ !character.is(MessDetectorCharFlags::UNPRINTABLE)
40
+ }
41
+ fn feed(&mut self, character: &MessDetectorChar) {
42
+ self.character_count += 1;
43
+ if (self.last_printable_char.is_none() || *character != self.last_printable_char.unwrap())
44
+ && !character.is(MessDetectorCharFlags::COMMON_SAFE)
45
+ {
46
+ if character.is(MessDetectorCharFlags::PUNCTUATION) {
47
+ self.punctuation_count += 1;
48
+ } else if !character.is(MessDetectorCharFlags::NUMERIC)
49
+ && character.is(MessDetectorCharFlags::SYMBOL)
50
+ && !character.is(MessDetectorCharFlags::EMOTICON)
51
+ {
52
+ self.symbol_count += 2;
53
+ }
54
+ }
55
+ self.last_printable_char = Some(*character);
56
+ }
57
+ fn ratio(&self) -> f32 {
58
+ if self.character_count == 0 {
59
+ return 0.0;
60
+ }
61
+ let ratio_of_punctuation =
62
+ (self.punctuation_count + self.symbol_count) as f32 / (self.character_count as f32);
63
+ if ratio_of_punctuation >= 0.3 {
64
+ ratio_of_punctuation
65
+ } else {
66
+ 0.0
67
+ }
68
+ }
69
+ }
70
+
71
+ //
72
+ // TooManyAccentuatedPlugin implementation
73
+ //
74
+
75
+ #[derive(Default)]
76
+ pub(super) struct TooManyAccentuatedPlugin {
77
+ character_count: u64,
78
+ accentuated_count: u64,
79
+ }
80
+
81
+ impl MessDetectorPlugin for TooManyAccentuatedPlugin {
82
+ fn eligible(&self, character: &MessDetectorChar) -> bool {
83
+ character.is(MessDetectorCharFlags::ALPHABETIC)
84
+ }
85
+ fn feed(&mut self, character: &MessDetectorChar) {
86
+ self.character_count += 1;
87
+ if character.is(MessDetectorCharFlags::ACCENTUATED) {
88
+ self.accentuated_count += 1;
89
+ }
90
+ }
91
+ fn ratio(&self) -> f32 {
92
+ (self.character_count >= 8)
93
+ .then_some(self.accentuated_count as f32 / self.character_count as f32)
94
+ .filter(|&ratio| ratio >= 0.35)
95
+ .unwrap_or(0.0)
96
+ }
97
+ }
98
+
99
+ //
100
+ // UnprintablePlugin implementation
101
+ //
102
+
103
+ #[derive(Default)]
104
+ pub(super) struct UnprintablePlugin {
105
+ character_count: u64,
106
+ unprintable_count: u64,
107
+ }
108
+
109
+ impl MessDetectorPlugin for UnprintablePlugin {
110
+ fn eligible(&self, _character: &MessDetectorChar) -> bool {
111
+ true
112
+ }
113
+ fn feed(&mut self, character: &MessDetectorChar) {
114
+ if character.is(MessDetectorCharFlags::UNPRINTABLE) {
115
+ self.unprintable_count += 1;
116
+ }
117
+ self.character_count += 1;
118
+ }
119
+ fn ratio(&self) -> f32 {
120
+ if self.character_count == 0 {
121
+ return 0.0;
122
+ }
123
+ (self.unprintable_count as f32 * 8.0) / self.character_count as f32
124
+ }
125
+ }
126
+
127
+ //
128
+ // SuspiciousDuplicateAccentPlugin implementation
129
+ //
130
+ #[derive(Default)]
131
+ pub(super) struct SuspiciousDuplicateAccentPlugin {
132
+ character_count: u64,
133
+ successive_count: u64,
134
+ last_latin_character: Option<MessDetectorChar>,
135
+ }
136
+
137
+ impl MessDetectorPlugin for SuspiciousDuplicateAccentPlugin {
138
+ fn eligible(&self, character: &MessDetectorChar) -> bool {
139
+ character.is(MessDetectorCharFlags::ALPHABETIC)
140
+ && character.is(MessDetectorCharFlags::LATIN)
141
+ }
142
+ fn feed(&mut self, character: &MessDetectorChar) {
143
+ self.character_count += 1;
144
+ if self.last_latin_character.is_some()
145
+ && character.is(MessDetectorCharFlags::ACCENTUATED)
146
+ && self
147
+ .last_latin_character
148
+ .unwrap()
149
+ .is(MessDetectorCharFlags::ACCENTUATED)
150
+ {
151
+ if character.is(MessDetectorCharFlags::UPPERCASE)
152
+ && self
153
+ .last_latin_character
154
+ .unwrap()
155
+ .is(MessDetectorCharFlags::UPPERCASE)
156
+ {
157
+ self.successive_count += 1;
158
+ }
159
+
160
+ // Worse if it's the same char duplicated with a different accent.
161
+ if remove_accent(character.character)
162
+ == remove_accent(self.last_latin_character.unwrap().character)
163
+ {
164
+ self.successive_count += 1;
165
+ }
166
+ }
167
+ self.last_latin_character = Some(*character);
168
+ }
169
+ fn ratio(&self) -> f32 {
170
+ if self.character_count == 0 {
171
+ return 0.0;
172
+ }
173
+ (self.successive_count as f32 * 2.0) / self.character_count as f32
174
+ }
175
+ }
176
+
177
+ //
178
+ // SuspiciousRangePlugin implementation
179
+ //
180
+ #[derive(Default)]
181
+ pub(super) struct SuspiciousRangePlugin {
182
+ character_count: u64,
183
+ suspicious_successive_range_count: u64,
184
+ last_printable_char: Option<MessDetectorChar>,
185
+ }
186
+
187
+ impl MessDetectorPlugin for SuspiciousRangePlugin {
188
+ fn eligible(&self, character: &MessDetectorChar) -> bool {
189
+ !character.is(MessDetectorCharFlags::UNPRINTABLE)
190
+ }
191
+ fn feed(&mut self, character: &MessDetectorChar) {
192
+ self.character_count += 1;
193
+
194
+ if character.is(MessDetectorCharFlags::WHITESPACE)
195
+ || character.is(MessDetectorCharFlags::PUNCTUATION)
196
+ || character.is(MessDetectorCharFlags::COMMON_SAFE)
197
+ {
198
+ self.last_printable_char = None;
199
+ return;
200
+ }
201
+
202
+ if self.last_printable_char.is_none() {
203
+ self.last_printable_char = Some(*character);
204
+ return;
205
+ }
206
+
207
+ if is_suspiciously_successive_range(
208
+ self.last_printable_char.unwrap().unicode_range,
209
+ character.unicode_range,
210
+ ) {
211
+ self.suspicious_successive_range_count += 1;
212
+ }
213
+
214
+ self.last_printable_char = Some(*character);
215
+ }
216
+ fn ratio(&self) -> f32 {
217
+ (self.character_count > 0)
218
+ .then_some(
219
+ ((self.suspicious_successive_range_count as f32) * 2.0)
220
+ / self.character_count as f32,
221
+ )
222
+ .filter(|&ratio| ratio >= 0.1)
223
+ .unwrap_or(0.0)
224
+ }
225
+ }
226
+
227
+ //
228
+ // SuperWeirdWordPlugin implementation
229
+ //
230
+
231
+ #[derive(Default)]
232
+ pub(super) struct SuperWeirdWordPlugin {
233
+ character_count: u64,
234
+ word_count: u64,
235
+ bad_word_count: u64,
236
+ foreign_long_count: u64,
237
+ is_current_word_bad: bool,
238
+ foreign_long_watch: bool,
239
+ bad_character_count: u64,
240
+ buffer_accent_count: u64,
241
+ buffer: Vec<MessDetectorChar>,
242
+ }
243
+
244
+ impl MessDetectorPlugin for SuperWeirdWordPlugin {
245
+ fn eligible(&self, _character: &MessDetectorChar) -> bool {
246
+ true
247
+ }
248
+ fn feed(&mut self, character: &MessDetectorChar) {
249
+ if character.is(MessDetectorCharFlags::ASCII_ALPHABETIC) {
250
+ self.buffer.push(*character);
251
+ if character.is(MessDetectorCharFlags::ACCENTUATED) {
252
+ self.buffer_accent_count += 1;
253
+ }
254
+ self.foreign_long_watch |= (!character.is(MessDetectorCharFlags::LATIN)
255
+ || character.is(MessDetectorCharFlags::ACCENTUATED))
256
+ && !character.is(MessDetectorCharFlags::CJK)
257
+ && !character.is(MessDetectorCharFlags::HANGUL)
258
+ && !character.is(MessDetectorCharFlags::KATAKANA)
259
+ && !character.is(MessDetectorCharFlags::HIRAGANA)
260
+ && !character.is(MessDetectorCharFlags::THAI);
261
+ return;
262
+ }
263
+ if self.buffer.is_empty() {
264
+ return;
265
+ }
266
+
267
+ if character.is(MessDetectorCharFlags::WHITESPACE)
268
+ || character.is(MessDetectorCharFlags::PUNCTUATION)
269
+ || character.is(MessDetectorCharFlags::SEPARATOR)
270
+ {
271
+ self.word_count += 1;
272
+ let buffer_length = self.buffer.len();
273
+ self.character_count += buffer_length as u64;
274
+
275
+ if buffer_length >= 4 {
276
+ if (self.buffer_accent_count as f32 / buffer_length as f32) > 0.34 {
277
+ self.is_current_word_bad = true;
278
+ }
279
+
280
+ // Words/buffers ending with an upper-case accentuated letter are so rare
281
+ // that we consider them all suspicious. Same weight as the foreign_long suspicion.
282
+ let last_char = self.buffer.last().unwrap();
283
+ if last_char.is(MessDetectorCharFlags::ACCENTUATED)
284
+ && last_char.is(MessDetectorCharFlags::UPPERCASE)
285
+ {
286
+ self.foreign_long_count += 1;
287
+ self.is_current_word_bad = true;
288
+ }
289
+ }
290
+ if buffer_length >= 24 && self.foreign_long_watch {
291
+ let uppercase_count = self
292
+ .buffer
293
+ .iter()
294
+ .filter(|&c| c.is(MessDetectorCharFlags::UPPERCASE))
295
+ .count();
296
+ let mut probable_camel_cased: bool = false;
297
+
298
+ if uppercase_count > 0 && (uppercase_count as f32 / buffer_length as f32) <= 0.3 {
299
+ probable_camel_cased = true;
300
+ }
301
+
302
+ if !probable_camel_cased {
303
+ self.foreign_long_count += 1;
304
+ self.is_current_word_bad = true;
305
+ }
306
+ }
307
+
308
+ if self.is_current_word_bad {
309
+ self.bad_word_count += 1;
310
+ self.bad_character_count += self.buffer.len() as u64;
311
+ self.is_current_word_bad = false;
312
+ }
313
+
314
+ self.foreign_long_watch = false;
315
+ self.buffer.clear();
316
+ self.buffer_accent_count = 0;
317
+ } else if !character.is(MessDetectorCharFlags::WEIRD_SAFE)
318
+ && !character.is(MessDetectorCharFlags::ASCII_DIGIT)
319
+ && character.is(MessDetectorCharFlags::SYMBOL)
320
+ {
321
+ self.is_current_word_bad = true;
322
+ self.buffer.push(*character);
323
+ }
324
+ }
325
+ fn ratio(&self) -> f32 {
326
+ if self.word_count <= 10 && self.foreign_long_count == 0 {
327
+ return 0.0;
328
+ }
329
+ self.bad_character_count as f32 / self.character_count as f32
330
+ }
331
+ }
332
+
333
+ //
334
+ // CjkInvalidStopPlugin implementation
335
+ //
336
+ // GB (Chinese) based encodings often render the full stop incorrectly when the content does not fit, and
337
+ // this can be easily detected by searching for the overuse of '丅' and '丄'.
338
+ #[derive(Default)]
339
+ pub(super) struct CjkInvalidStopPlugin {
340
+ wrong_stop_count: u64,
341
+ cjk_character_count: u64,
342
+ }
343
+
344
+ impl MessDetectorPlugin for CjkInvalidStopPlugin {
345
+ fn eligible(&self, _: &MessDetectorChar) -> bool {
346
+ true
347
+ }
348
+ fn feed(&mut self, character: &MessDetectorChar) {
349
+ if "丅丄".contains(character.character) {
350
+ self.wrong_stop_count += 1;
351
+ return;
352
+ }
353
+ if character.is(MessDetectorCharFlags::CJK) {
354
+ self.cjk_character_count += 1;
355
+ }
356
+ }
357
+ fn ratio(&self) -> f32 {
358
+ if self.cjk_character_count < 16 {
359
+ return 0.0;
360
+ }
361
+ self.wrong_stop_count as f32 / self.cjk_character_count as f32
362
+ }
363
+ }
364
+
365
+ //
366
+ // ArchaicUpperLowerPlugin implementation
367
+ //
368
+
369
+ pub(super) struct ArchaicUpperLowerPlugin {
370
+ buf: bool,
371
+ current_ascii_only: bool,
372
+ character_count_since_last_sep: u64,
373
+ successive_upper_lower_count: u64,
374
+ successive_upper_lower_count_final: u64,
375
+ character_count: u64,
376
+ last_alpha_seen: Option<MessDetectorChar>,
377
+ }
378
+
379
+ impl Default for ArchaicUpperLowerPlugin {
380
+ fn default() -> Self {
381
+ ArchaicUpperLowerPlugin {
382
+ buf: false,
383
+ current_ascii_only: true,
384
+ character_count_since_last_sep: 0,
385
+ successive_upper_lower_count: 0,
386
+ successive_upper_lower_count_final: 0,
387
+ character_count: 0,
388
+ last_alpha_seen: None,
389
+ }
390
+ }
391
+ }
392
+
393
+ impl MessDetectorPlugin for ArchaicUpperLowerPlugin {
394
+ fn eligible(&self, _: &MessDetectorChar) -> bool {
395
+ true
396
+ }
397
+ fn feed(&mut self, character: &MessDetectorChar) {
398
+ if !(character.is(MessDetectorCharFlags::ALPHABETIC)
399
+ && character.is(MessDetectorCharFlags::CASE_VARIABLE))
400
+ && self.character_count_since_last_sep > 0
401
+ {
402
+ if self.character_count_since_last_sep <= 64
403
+ && !character.is(MessDetectorCharFlags::ASCII_DIGIT)
404
+ && !self.current_ascii_only
405
+ {
406
+ self.successive_upper_lower_count_final += self.successive_upper_lower_count;
407
+ }
408
+
409
+ self.successive_upper_lower_count = 0;
410
+ self.character_count_since_last_sep = 0;
411
+ self.last_alpha_seen = None;
412
+ self.buf = false;
413
+ self.character_count += 1;
414
+ self.current_ascii_only = true;
415
+
416
+ return;
417
+ }
418
+
419
+ self.current_ascii_only &= character.is(MessDetectorCharFlags::ASCII);
420
+
421
+ if let Some(tmp_last_alpha) = self.last_alpha_seen {
422
+ if (character.is(MessDetectorCharFlags::UPPERCASE)
423
+ && tmp_last_alpha.is(MessDetectorCharFlags::LOWERCASE))
424
+ || (character.is(MessDetectorCharFlags::LOWERCASE)
425
+ && tmp_last_alpha.is(MessDetectorCharFlags::UPPERCASE))
426
+ {
427
+ if self.buf {
428
+ self.successive_upper_lower_count += 2;
429
+ self.buf = false;
430
+ } else {
431
+ self.buf = true;
432
+ }
433
+ } else {
434
+ self.buf = false;
435
+ }
436
+ }
437
+
438
+ self.character_count += 1;
439
+ self.character_count_since_last_sep += 1;
440
+ self.last_alpha_seen = Some(*character);
441
+ }
442
+ fn ratio(&self) -> f32 {
443
+ if self.character_count == 0 {
444
+ return 0.0;
445
+ }
446
+ self.successive_upper_lower_count_final as f32 / self.character_count as f32
447
+ }
448
+ }
Dataset_Construction/projects/charset-normalizer/rust/src/md/structs.rs ADDED
@@ -0,0 +1,197 @@
1
+ use bitflags::bitflags;
2
+ use cached::proc_macro::cached;
3
+ use cached::UnboundCache;
4
+ use icu_properties::{maps, sets, GeneralCategory, GeneralCategoryGroup, Script};
5
+
6
+ use crate::consts::{COMMON_SAFE_ASCII_CHARACTERS, UTF8_MAXIMAL_ALLOCATION};
7
+ use crate::utils::{in_range, is_accentuated, unicode_range};
8
+
9
+ // Mess Plugin Char representation
10
+ // used to collect additional information about a char
11
+ // and eliminate repeated calculations
12
+ #[derive(Copy, Clone, PartialEq)]
13
+ pub struct MessDetectorCharFlags(u32);
14
+
15
+ bitflags! {
16
+ impl MessDetectorCharFlags: u32 {
17
+ const WHITESPACE = 0b0000_0000_0000_0000_0000_0000_0000_0001;
18
+ const UNPRINTABLE = 0b0000_0000_0000_0000_0000_0000_0000_0010;
19
+ const SYMBOL = 0b0000_0000_0000_0000_0000_0000_0000_0100;
20
+ const EMOTICON = 0b0000_0000_0000_0000_0000_0000_0000_1000;
21
+ const COMMON_SAFE = 0b0000_0000_0000_0000_0000_0000_0001_0000;
22
+ const WEIRD_SAFE = 0b0000_0000_0000_0000_0000_0000_0010_0000;
23
+ const PUNCTUATION = 0b0000_0000_0000_0000_0000_0000_0100_0000;
24
+ const SEPARATOR = 0b0000_0000_0000_0000_0000_0000_1000_0000;
25
+ const ASCII = 0b0000_0000_0000_0000_0000_0001_0000_0000;
26
+ const ASCII_ALPHABETIC = 0b0000_0000_0000_0000_0000_0010_0000_0000;
27
+ const ASCII_GRAPHIC = 0b0000_0000_0000_0000_0000_0100_0000_0000;
28
+ const ASCII_DIGIT = 0b0000_0000_0000_0000_0000_1000_0000_0000;
29
+ const LATIN = 0b0000_0000_0000_0000_0001_0000_0000_0000;
30
+ const ALPHABETIC = 0b0000_0000_0000_0000_0010_0000_0000_0000;
31
+ const ACCENTUATED = 0b0000_0000_0000_0000_0100_0000_0000_0000;
32
+ const CJK = 0b0000_0000_0000_0000_1000_0000_0000_0000;
33
+ const HANGUL = 0b0000_0000_0000_0001_0000_0000_0000_0000;
34
+ const KATAKANA = 0b0000_0000_0000_0010_0000_0000_0000_0000;
35
+ const HIRAGANA = 0b0000_0000_0000_0100_0000_0000_0000_0000;
36
+ const THAI = 0b0000_0000_0000_1000_0000_0000_0000_0000;
37
+ const CASE_VARIABLE = 0b0000_0000_0001_0000_0000_0000_0000_0000;
38
+ const LOWERCASE = 0b0000_0000_0010_0000_0000_0000_0000_0000;
39
+ const UPPERCASE = 0b0000_0000_0100_0000_0000_0000_0000_0000;
40
+ const NUMERIC = 0b0000_0000_1000_0000_0000_0000_0000_0000;
41
+ }
42
+ }
43
+
44
+ #[derive(Copy, Clone)]
45
+ pub(crate) struct MessDetectorChar {
46
+ pub character: char,
47
+ pub flags: MessDetectorCharFlags,
48
+ pub unicode_range: Option<&'static str>,
49
+ }
50
+
51
+ impl PartialEq for MessDetectorChar {
52
+ fn eq(&self, other: &Self) -> bool {
53
+ self.character == other.character
54
+ }
55
+ }
56
+
57
+ impl MessDetectorChar {
58
+ pub fn new(character: char) -> Self {
59
+ new_mess_detector_character(character)
60
+ }
61
+
62
+ pub fn is(&self, flag: MessDetectorCharFlags) -> bool {
63
+ self.flags.contains(flag)
64
+ }
65
+ }
66
+
67
+ #[cached(
68
+ type = "UnboundCache<char, MessDetectorChar>",
69
+ create = "{ UnboundCache::with_capacity(UTF8_MAXIMAL_ALLOCATION) }",
70
+ convert = r#"{ character }"#
71
+ )]
72
+ fn new_mess_detector_character(character: char) -> MessDetectorChar {
73
+ let mut flags = MessDetectorCharFlags::empty();
74
+ // unicode information
75
+ let gc = maps::general_category().get(character);
76
+
77
+ // PLEASE NOTE! When refactoring for idiomatic style,
78
+ // take performance into account. Sometimes a match could be used, but it
79
+ // would require evaluating all conditions and can decrease performance
80
+ // compared to a plain if/else chain
81
+
82
+ // ascii probing
83
+ if character.is_ascii() {
84
+ flags.insert(MessDetectorCharFlags::ASCII);
85
+ if character.is_ascii_graphic() {
86
+ flags.insert(MessDetectorCharFlags::ASCII_GRAPHIC);
87
+ if character.is_ascii_alphabetic() {
88
+ flags.insert(MessDetectorCharFlags::ASCII_ALPHABETIC);
89
+ } else if character.is_ascii_digit() {
90
+ flags.insert(MessDetectorCharFlags::ASCII_DIGIT);
91
+ }
92
+ }
93
+ }
94
+
95
+ let range = unicode_range(character);
96
+
97
+ // whitespace
98
+ if character.is_whitespace() {
99
+ flags.insert(MessDetectorCharFlags::WHITESPACE);
100
+ flags.insert(MessDetectorCharFlags::SEPARATOR);
101
+ } else {
102
+ // safe symbols (non-whitespace)
103
+ if COMMON_SAFE_ASCII_CHARACTERS.contains(character) {
104
+ flags.insert(MessDetectorCharFlags::COMMON_SAFE);
105
+ }
106
+ if "<>-=~|_".contains(character) {
107
+ flags.insert(MessDetectorCharFlags::WEIRD_SAFE);
108
+ }
109
+
110
+ // numeric
111
+ if flags.contains(MessDetectorCharFlags::ASCII_DIGIT) || character.is_numeric() {
112
+ flags.insert(MessDetectorCharFlags::NUMERIC);
113
+ } else if flags.contains(MessDetectorCharFlags::ASCII_ALPHABETIC)
114
+ || character.is_alphabetic()
115
+ {
116
+ // alphabetic
117
+ flags.insert(MessDetectorCharFlags::ALPHABETIC);
118
+ if character.is_lowercase() {
119
+ flags.insert(MessDetectorCharFlags::LOWERCASE);
120
+ flags.insert(MessDetectorCharFlags::CASE_VARIABLE);
121
+ } else if character.is_uppercase() {
122
+ flags.insert(MessDetectorCharFlags::UPPERCASE);
123
+ flags.insert(MessDetectorCharFlags::CASE_VARIABLE);
124
+ }
125
+ } else if !flags.contains(MessDetectorCharFlags::ASCII_GRAPHIC)
126
+ && !['\x1A', '\u{FEFF}'].contains(&character)
127
+ && GeneralCategoryGroup::Control.contains(gc)
128
+ {
129
+ flags.insert(MessDetectorCharFlags::UNPRINTABLE);
130
+ }
131
+
132
+ // emoticon
133
+ if sets::emoji_component().contains(character)
134
+ || sets::emoji_modifier().contains(character)
135
+ || sets::emoji_modifier_base().contains(character)
136
+ || sets::emoji_presentation().contains(character)
137
+ // || sets::emoji().contains(character) //tests::md::test_mess_ratio fails
138
+ {
139
+ flags.insert(MessDetectorCharFlags::EMOTICON);
140
+ }
141
+
142
+ // separator
143
+ if ['|', '+', '<', '>'].contains(&character)
144
+ || GeneralCategoryGroup::Separator.contains(gc)
145
+ || matches!(
146
+ gc,
147
+ GeneralCategory::OtherPunctuation
148
+ | GeneralCategory::DashPunctuation
149
+ | GeneralCategory::ConnectorPunctuation
150
+ )
151
+ {
152
+ flags.insert(MessDetectorCharFlags::SEPARATOR);
153
+ }
154
+ }
155
+
156
+ // punctuation
157
+ if GeneralCategoryGroup::Punctuation.contains(gc) {
158
+ flags.insert(MessDetectorCharFlags::PUNCTUATION);
159
+ }
160
+
161
+ // symbol
162
+ if GeneralCategoryGroup::Number.contains(gc)
163
+ || GeneralCategoryGroup::Symbol.contains(gc)
164
+ || in_range(range, &["Forms"])
165
+ {
166
+ flags.insert(MessDetectorCharFlags::SYMBOL);
167
+ }
168
+
169
+ match maps::script().get(character) {
170
+ Script::Latin => flags.insert(MessDetectorCharFlags::LATIN), // latin
171
+ Script::Han => flags.insert(MessDetectorCharFlags::CJK), // han implies cjk
172
+ Script::Hangul => flags.insert(MessDetectorCharFlags::HANGUL),
173
+ Script::Katakana => flags.insert(MessDetectorCharFlags::KATAKANA),
174
+ Script::Hiragana => flags.insert(MessDetectorCharFlags::HIRAGANA),
175
+ Script::Thai => flags.insert(MessDetectorCharFlags::THAI),
176
+ _ => {
177
+ // ideographic() includes some characters (e.g. Vietnamese) that might not be Han
178
+ // but are still part of the expanded CJK(V) ideographs
179
+ // if sets::ideographic().contains(character)
180
+ if sets::unified_ideograph().contains(character) {
181
+ flags.insert(MessDetectorCharFlags::CJK)
182
+ }
183
+ }
184
+ }
185
+
186
+ // accentuated
187
+ if is_accentuated(character) {
188
+ flags.insert(MessDetectorCharFlags::ACCENTUATED);
189
+ }
190
+
191
+ // create new object
192
+ MessDetectorChar {
193
+ character,
194
+ flags,
195
+ unicode_range: range,
196
+ }
197
+ }
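The `#[cached]` attribute above memoizes `new_mess_detector_character` per character, so `MessDetectorChar::new()` computes the flag set at most once for each distinct `char`. Below is a minimal illustrative sketch of how calling code might use this API; the helper and its name are hypothetical, and only `MessDetectorChar::new()`, `is()` and the `UNPRINTABLE` flag come from the diff above.

// Hypothetical helper: count unprintable characters in a decoded string.
// Each call to MessDetectorChar::new() goes through the cached constructor
// shown above, so the flag computation runs once per distinct character.
fn count_unprintable(text: &str) -> usize {
    text.chars()
        .map(MessDetectorChar::new)
        .filter(|c| c.is(MessDetectorCharFlags::UNPRINTABLE))
        .count()
}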
Dataset_Construction/projects/charset-normalizer/rust/src/normalizer.rs ADDED
@@ -0,0 +1,171 @@
1
+ use charset_normalizer_rs::entity::{CLINormalizerArgs, CLINormalizerResult, NormalizerSettings};
2
+ use charset_normalizer_rs::from_path;
3
+ use clap::Parser;
4
+ use dialoguer::Confirm;
5
+ use env_logger::Env;
6
+ use ordered_float::OrderedFloat;
7
+ use std::fs::File;
8
+ use std::io::Write;
9
+ use std::{fs, process};
10
+
11
+ fn normalizer(args: &CLINormalizerArgs) -> Result<i32, String> {
12
+ match (args.replace, args.normalize, args.force, args.threshold) {
13
+ (true, false, _, _) => return Err("Use --replace in addition to --normalize only.".into()),
14
+ (false, _, true, _) => return Err("Use --force in addition to --replace only.".into()),
15
+ (_, _, _, threshold) if !(0.0..=1.0).contains(&threshold) => {
16
+ return Err("--threshold VALUE should be between 0.0 and 1.0.".into())
17
+ }
18
+ _ => {}
19
+ }
20
+
21
+ let mut results: Vec<CLINormalizerResult> = vec![];
22
+ let settings = NormalizerSettings {
23
+ threshold: OrderedFloat(args.threshold),
24
+ ..Default::default()
25
+ };
26
+
27
+ // go through the files
28
+ for path in &args.files {
29
+ let full_path = &mut fs::canonicalize(path).map_err(|err| err.to_string())?;
30
+ let matches = from_path(full_path, Some(settings.clone()))?;
31
+ match matches.get_best() {
32
+ None => {
33
+ results.push(CLINormalizerResult {
34
+ path: full_path.clone(),
35
+ language: "Unknown".to_string(),
36
+ chaos: format!("{:.1}", 1.0),
37
+ coherence: format!("{:.1}", 0.0),
38
+ is_preferred: true,
39
+ ..Default::default()
40
+ });
41
+ eprintln!(
42
+ "Unable to identify originating encoding for {:?}. {}",
43
+ full_path,
44
+ if args.threshold < 1.0 {
45
+ "Maybe try increasing maximum amount of chaos."
46
+ } else {
47
+ ""
48
+ }
49
+ );
50
+ }
51
+ Some(best_guess) => {
52
+ // add main result & alternative results
53
+ for m in matches.iter() {
54
+ let normalize_result = CLINormalizerResult {
55
+ path: full_path.clone(),
56
+ encoding: Some(m.encoding().to_string()),
57
+ encoding_aliases: m
58
+ .encoding_aliases()
59
+ .iter()
60
+ .map(|s| (*s).to_string())
61
+ .collect(),
62
+ alternative_encodings: m
63
+ .suitable_encodings()
64
+ .iter()
65
+ .filter(|&e| e != m.encoding())
66
+ .cloned()
67
+ .collect(),
68
+ language: format!("{}", m.most_probably_language()),
69
+ alphabets: m.unicode_ranges(),
70
+ has_sig_or_bom: m.bom(),
71
+ chaos: format!("{:.1}", m.chaos_percents()),
72
+ coherence: format!("{:.1}", m.coherence_percents()),
73
+ unicode_path: None,
74
+ is_preferred: true,
75
+ };
76
+ if m == best_guess {
77
+ results.insert(0, normalize_result);
78
+ } else if args.alternatives {
79
+ results.push(normalize_result);
80
+ } else {
81
+ break;
82
+ }
83
+ }
84
+
85
+ // normalize if needed
86
+ if args.normalize {
87
+ if best_guess.encoding().starts_with("utf") {
88
+ eprintln!(
89
+ "{:?} file does not need to be normalized, as it already came from unicode.",
90
+ full_path,
91
+ );
92
+ continue;
93
+ }
94
+
95
+ // force the replacement, or ask the user to confirm it
96
+ if !args.replace {
97
+ let filename = full_path.file_name().unwrap().to_str().unwrap();
98
+ let filename = match filename.rsplit_once('.') {
99
+ None => format!("{}.{}", filename, best_guess.encoding()),
100
+ Some(split) => {
101
+ format!("{}.{}.{}", split.0, best_guess.encoding(), split.1)
102
+ }
103
+ };
104
+ full_path.set_file_name(filename);
105
+ } else if !args.force
106
+ && !Confirm::new()
107
+ .with_prompt(format!(
108
+ "Are you sure to normalize {:?} by replacing it?",
109
+ full_path,
110
+ ))
111
+ .interact()
112
+ .unwrap_or(false)
113
+ {
114
+ continue;
115
+ }
116
+
117
+ // save path to result
118
+ results[0].unicode_path = Some(full_path.clone());
119
+
120
+ // replace file contents
121
+ if let Err(err) = File::create(full_path).and_then(|mut file| {
122
+ file.write_all(best_guess.decoded_payload().unwrap().as_bytes())
123
+ }) {
124
+ return Err(err.to_string());
125
+ }
126
+ }
127
+ }
128
+ }
129
+ }
130
+
131
+ // print out results
132
+ if args.minimal {
133
+ for path in &args.files {
134
+ let full_path = fs::canonicalize(path).map_err(|err| err.to_string())?;
135
+ println!(
136
+ "{}",
137
+ results
138
+ .iter()
139
+ .filter(|r| r.path == full_path)
140
+ .map(|r| r.encoding.clone().unwrap_or("undefined".to_string()))
141
+ .collect::<Vec<_>>()
142
+ .join(", ")
143
+ );
144
+ }
145
+ } else {
146
+ println!(
147
+ "{}",
148
+ if results.len() > 1 {
149
+ serde_json::to_string_pretty(&results).unwrap()
150
+ } else {
151
+ serde_json::to_string_pretty(&results[0]).unwrap()
152
+ }
153
+ );
154
+ }
155
+ Ok(0)
156
+ }
157
+
158
+ pub fn main() {
159
+ let args = CLINormalizerArgs::parse();
160
+
161
+ // verbose mode
162
+ if args.verbose {
163
+ env_logger::Builder::from_env(Env::default().default_filter_or("trace")).init();
164
+ }
165
+
166
+ // run normalizer
167
+ match normalizer(&args) {
168
+ Err(e) => panic!("{e}"),
169
+ Ok(exit_code) => process::exit(exit_code),
170
+ }
171
+ }
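The binary above is a thin CLI wrapper around the library's detection API; the essential work happens in `from_path` plus `get_best`. The sketch below shows the same call used programmatically, assuming only the names that appear in the imports and body above (`from_path`, `get_best`, `encoding`, `NormalizerSettings`, `OrderedFloat`); the function name and the threshold value are illustrative, not part of the original code.

use charset_normalizer_rs::entity::NormalizerSettings;
use charset_normalizer_rs::from_path;
use ordered_float::OrderedFloat;
use std::path::PathBuf;

// Illustrative only: detect the most probable encoding of one file,
// mirroring what the CLI does for each path on its command line.
fn detect_encoding(path: &PathBuf) -> Result<Option<String>, String> {
    let settings = NormalizerSettings {
        threshold: OrderedFloat(0.2), // hypothetical chaos threshold
        ..Default::default()
    };
    let matches = from_path(path, Some(settings))?;
    Ok(matches.get_best().map(|best| best.encoding().to_string()))
}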
Dataset_Construction/projects/charset-normalizer/rust/src/performance.rs ADDED
@@ -0,0 +1,219 @@
1
+ use chardetng::EncodingDetector;
2
+ use charset_normalizer_rs::consts::CHARDET_CORRESPONDENCE;
3
+ use charset_normalizer_rs::entity::{PerformanceArgs, PerformanceResult};
4
+ use charset_normalizer_rs::from_bytes;
5
+ use charset_normalizer_rs::utils::get_large_test_datasets;
6
+ use clap::Parser;
7
+ use encoding::label::encoding_from_whatwg_label;
8
+ use encoding::DecoderTrap;
9
+ use log::trace;
10
+ use std::collections::{BTreeMap, HashMap};
11
+ use std::fs::File;
12
+ use std::io::Read;
13
+ use std::process;
14
+ use std::time::{Duration, Instant};
15
+
16
+ // Check result
17
+ fn check_result(
18
+ correct_encodings: &Vec<String>,
19
+ guessed_encoding: &String,
20
+ buffer: &Vec<u8>,
21
+ ) -> bool {
22
+ // check by encoding name
23
+ if correct_encodings.iter().any(|e| guessed_encoding == e) {
24
+ return true;
25
+ }
26
+
27
+ // if correct encoding wasn't found we will try to decode and compare results
28
+ let whatwg_correct_encoding = correct_encodings
29
+ .first()
30
+ .and_then(|enc| encoding_from_whatwg_label(enc));
31
+ let whatwg_guessed_encoding = encoding_from_whatwg_label(guessed_encoding);
32
+ match (whatwg_correct_encoding, whatwg_guessed_encoding) {
33
+ (Some(correct_encoding), Some(guessed_encoding)) => {
34
+ let correct_decoded = correct_encoding.decode(buffer.as_slice(), DecoderTrap::Strict);
35
+ let guessed_decoded = guessed_encoding.decode(buffer.as_slice(), DecoderTrap::Strict);
36
+ match (correct_decoded, guessed_decoded) {
37
+ (Ok(correct_result), Ok(guessed_result)) => correct_result == guessed_result,
38
+ _ => false,
39
+ }
40
+ }
41
+ _ => false,
42
+ }
43
+ }
44
+
45
+ // Calculate percentile
46
+ fn calc_percentile(results: &Vec<PerformanceResult>, percentile: f64) -> Duration {
47
+ let mut sorted_data: Vec<Duration> = results.iter().map(|r| r.duration).collect();
48
+ sorted_data.sort_unstable();
49
+ let index = ((percentile / 100.0) * sorted_data.len() as f64) as usize;
50
+ sorted_data[index]
51
+ }
52
+
53
+ // Calculate total duration, mean duration and accuracy
54
+ fn calc_stat(results: &Vec<PerformanceResult>) -> (Duration, Duration, f32) {
55
+ let durations: Vec<Duration> = results.iter().map(|r| r.duration).collect();
56
+ if durations.is_empty() {
57
+ // Handle the case where the input vector is empty (avoid division by zero)
58
+ (Duration::new(0, 0), Duration::new(0, 0), 0.0)
59
+ } else {
60
+ // Calculate the total duration by summing all the durations in the vector
61
+ let total_duration: Duration = durations.iter().sum();
62
+
63
+ // Divide the total duration by the number of durations to get the mean
64
+ let num_durations = durations.len() as u32;
65
+
66
+ // Accuracy
67
+ let accuracy =
68
+ 100.0 * results.iter().filter(|r| r.correct).count() as f32 / num_durations as f32;
69
+
70
+ (total_duration, total_duration / num_durations, accuracy)
71
+ }
72
+ }
73
+
74
+ // Performance comparison
75
+ fn performance_compare(args: &PerformanceArgs) -> i32 {
76
+ // read datasets from /src/tests/data/largesets
77
+ let datasets = get_large_test_datasets();
78
+ if datasets.is_err() {
79
+ println!("{}", datasets.unwrap_err());
80
+ process::exit(1);
81
+ }
82
+ let datasets = datasets.unwrap();
83
+ let nof_files = datasets.len();
84
+ println!("Found {} datasets for performance tests", nof_files);
85
+
86
+ // tested functions
87
+ let mut performance_results: HashMap<&str, Vec<PerformanceResult>> = HashMap::new();
88
+
89
+ // we need a BTreeMap to preserve key order
90
+ let mut tested_functions: BTreeMap<&str, Box<dyn Fn(&Vec<u8>) -> String>> = BTreeMap::new();
91
+
92
+ /////////////////////////////////////////////////////////////////
93
+ // Tested functions (libraries)
94
+ /////////////////////////////////////////////////////////////////
95
+
96
+ // charset-normalizer-rs
97
+ tested_functions.insert(
98
+ "A) charset-normalizer-rs",
99
+ Box::new(|bytes: &Vec<u8>| {
100
+ if let Some(gb) = from_bytes(bytes, None).get_best() {
101
+ gb.encoding().to_string()
102
+ } else {
103
+ String::from("None")
104
+ }
105
+ }),
106
+ );
107
+
108
+ // chardet
109
+ tested_functions.insert(
110
+ "B) chardet",
111
+ Box::new(|bytes: &Vec<u8>| {
112
+ let detected = &chardet::detect(bytes).0.to_ascii_lowercase();
113
+ let alternative = CHARDET_CORRESPONDENCE.get(&detected.as_str());
114
+ if let Some(r) = encoding_from_whatwg_label(&detected) {
115
+ r.whatwg_name()
116
+ .unwrap_or(alternative.unwrap_or(&r.name()))
117
+ .to_string()
118
+ } else {
119
+ String::from("None")
120
+ }
121
+ }),
122
+ );
123
+
124
+ // chardetng
125
+ tested_functions.insert(
126
+ "C) chardetng",
127
+ Box::new(|bytes: &Vec<u8>| {
128
+ let mut ed = EncodingDetector::new();
129
+ ed.feed(bytes, true);
130
+ let found = ed.guess(None, true).name();
131
+ found.to_ascii_lowercase().to_string()
132
+ }),
133
+ );
134
+
135
+ // start tests
136
+ for (filename, correct_encodings) in &datasets {
137
+ println!("{}", filename);
138
+
139
+ // read file contents to buffer
140
+ let mut file = File::open(filename).expect(&format!("Error opening file {}", filename));
141
+ let mut buffer = Vec::new();
142
+ file.read_to_end(&mut buffer)
143
+ .expect(&format!("Error reading from file {}", filename));
144
+
145
+ // multiply buffer
146
+ buffer = buffer.repeat(args.size_increase as usize);
147
+
148
+ // traverse tested functions
149
+ for (&name, &ref foo) in &tested_functions {
150
+ if !performance_results.contains_key(name) {
151
+ performance_results.insert(name, vec![]);
152
+ }
153
+ let duration = Instant::now();
154
+ let guessed_encoding = foo(&buffer);
155
+ let duration = duration.elapsed();
156
+
157
+ // save result
158
+ performance_results
159
+ .get_mut(name)
160
+ .unwrap()
161
+ .push(PerformanceResult {
162
+ duration,
163
+ correct: check_result(correct_encodings, &guessed_encoding, &buffer),
164
+ });
165
+ println!(" --> {}: {:?}", name, duration,);
166
+
167
+ if !correct_encodings.contains(&guessed_encoding.to_string()) {
168
+ trace!(
169
+ "{} WRONG DETECTION: {} not in {:?}\nSee {}",
170
+ name,
171
+ guessed_encoding,
172
+ correct_encodings,
173
+ filename,
174
+ );
175
+ }
176
+ }
177
+ }
178
+
179
+ // Statistics
180
+ let mut our_accuracy = 0.0;
181
+ let mut our_total_time: Duration = Duration::new(0, 0);
182
+ for (&name, _) in &tested_functions {
183
+ if let Some(results) = performance_results.get(name) {
184
+ let (total_duration, mean_duration, accuracy) = calc_stat(results);
185
+ println!("\n------------------------------");
186
+ println!("--> {} Conclusions", name);
187
+ if name == "A) charset-normalizer-rs" {
188
+ our_accuracy = accuracy;
189
+ our_total_time = total_duration.clone();
190
+ } else {
191
+ // compare speed (ratio of total times)
192
+ println!(
193
+ " --> Faster than charset-normalizer-rs by {:.1} times",
194
+ our_total_time.as_secs_f32() / total_duration.as_secs_f32(),
195
+ );
196
+ }
197
+ println!(" --> Accuracy: {:.1}%", accuracy);
198
+ println!(" --> Total time: {:?}", total_duration);
199
+ println!(" --> Avg time: {:?}", mean_duration);
200
+ for p in [50.0, 95.0, 99.0] {
201
+ println!(" --> {}th: {:?}", p, calc_percentile(results, p));
202
+ }
203
+ }
204
+ }
205
+
206
+ // Non-zero exit code if charset-normalizer-rs accuracy is lower than 97%
207
+ if our_accuracy < 97.0 {
208
+ println!("LOW ACCURACY!!!");
209
+ 1
210
+ } else {
211
+ 0
212
+ }
213
+ }
214
+
215
+ // Main function
216
+ pub fn main() {
217
+ let args = PerformanceArgs::parse();
218
+ process::exit(performance_compare(&args));
219
+ }
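The `calc_percentile` helper above sorts the per-file durations and indexes them at `(percentile / 100) * len`, so the 50th/95th/99th figures printed in the report are nearest-rank percentiles rather than interpolated ones. The small self-contained check below illustrates that arithmetic; the sample numbers are made up, and the final clamp is an addition that the original helper does not have.

use std::time::Duration;

// Nearest-rank percentile over a list of durations: with samples of
// 10/20/30/40 ms, index = (50/100)*4 = 2 -> 30 ms, (95/100)*4 = 3 -> 40 ms.
fn nearest_rank(mut samples: Vec<Duration>, percentile: f64) -> Duration {
    samples.sort_unstable();
    let index = ((percentile / 100.0) * samples.len() as f64) as usize;
    samples[index.min(samples.len() - 1)] // clamp so percentile = 100.0 stays in bounds
}

fn main() {
    let samples: Vec<Duration> = [10u64, 20, 30, 40]
        .iter()
        .map(|ms| Duration::from_millis(*ms))
        .collect();
    assert_eq!(nearest_rank(samples.clone(), 50.0), Duration::from_millis(30));
    assert_eq!(nearest_rank(samples, 95.0), Duration::from_millis(40));
}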
Dataset_Construction/projects/charset-normalizer/rust/src/tests/cd.rs ADDED
@@ -0,0 +1,195 @@
1
+ use crate::cd::*;
2
+ use crate::entity::{CoherenceMatch, CoherenceMatches, Language};
3
+
4
+ #[test]
5
+ fn test_encoding_unicode_range() {
6
+ let err_tests = [
7
+ "utf-8",
8
+ "big5",
9
+ "utf-16le", // multibyte encodings
10
+ "non-existing-charset", // non-existing
11
+ ];
12
+ for test in &err_tests {
13
+ assert!(encoding_unicode_range(test).is_err());
14
+ }
15
+
16
+ let ok_tests = [
17
+ ("windows-1251", Ok(vec!["Basic Latin", "Cyrillic"])),
18
+ ("windows-1255", Ok(vec!["Basic Latin", "Hebrew"])),
19
+ ];
20
+ for test in &ok_tests {
21
+ assert_eq!(encoding_unicode_range(test.0), test.1);
22
+ }
23
+ }
24
+
25
+ #[test]
26
+ fn test_unicode_range_languages() {
27
+ let tests = [
28
+ ("Cyrillic", Language::Russian),
29
+ ("Latin Extended Additional", Language::Vietnamese),
30
+ ("Greek and Coptic", Language::Greek),
31
+ ];
32
+ for (input, lang) in tests {
33
+ let languages = unicode_range_languages(input);
34
+ assert!(languages.contains(&&lang));
35
+ }
36
+ }
37
+
38
+ #[test]
39
+ fn test_encoding_languages() {
40
+ let tests = [
41
+ ("utf-8", Language::Unknown),
42
+ ("windows-1251", Language::Russian),
43
+ ("windows-1255", Language::Hebrew),
44
+ ];
45
+ for (input, lang) in tests {
46
+ let languages = encoding_languages(input.to_string());
47
+ assert!(languages.contains(&&lang));
48
+ }
49
+ }
50
+
51
+ #[test]
52
+ fn test_alphabet_languages() {
53
+ let tests = [
54
+ ("В низинах на восточной стороне полуострова Люнген в основном встречаются слюдяные сланцы, филлиты и доломиты. Низменности на западной стороне в основном состоят из слюдяных сланцев и небольшого количества кварцитов. За исключением ледяных шапок на вершинах Йеккеварри и Балггесварри, на полуострове Люнген преобладают каровые ледники", true, None),
55
+ ("В низинах на восточной стороне полуострова Люнген в основном встречаются слюдяные сланцы, филлиты и доломиты. Низменности на западной стороне в основном состоят из слюдяных сланцев и небольшого количества кварцитов. За исключением ледяных шапок на вершинах Йеккеварри и Балггесварри, на полуострове Люнген преобладают каровые ледники", false, Some(Language::Russian)),
56
+ ("Ailem ve Ben Adım Ece ve on iki yaşındayım. Her sabah 7'de uyanırım, kahvaltımı yaparım ve okula giderim. Boş zamanlarımda bahçede kitap okumayı severim. Küçük bir erkek kardeşim var. Kardeşim üç yaşında ve resim yapmayı sever. Evde her gün top oynar ve şarkı söyler. Kardeşim ve ben makarna yemeyi severiz. Bazen mutfakta yemekleri biz hazırlarız.", false, Some(Language::Turkish)),
57
+ ];
58
+ for (input, ignore_non_latin, expected) in tests {
59
+ let characters: Vec<char> = input.chars().collect();
60
+ let languages = alphabet_languages(&characters, ignore_non_latin);
61
+ if expected.is_none() {
62
+ assert_eq!(languages.len(), 0);
63
+ } else {
64
+ assert!(languages.contains(&&expected.unwrap()));
65
+ }
66
+ }
67
+ }
68
+
69
+ #[test]
70
+ fn test_alpha_unicode_split() {
71
+ let tests = [
72
+ (
73
+ "Люнгенские Альпы (норв. Lyngsalpene, сев.‑саам. Ittuvárit, квенск. Yykeänvaarat) — горный \
74
+ массив на северо-востоке фюльке Тромс-ог-Финнмарк в Норвегии, к востоку от города Тромсё",
75
+ vec![
76
+ "люнгенскиеальпынорвсевсаамквенскгорныймассивнасеверовостокефюлькетромсогфиннмарквнорвегииквостокуотгородатромсё",
77
+ "lyngsalpeneittuvárityykeänvaarat",
78
+ ]
79
+ ),
80
+ ];
81
+ for input in tests {
82
+ let mut layers = alpha_unicode_split(input.0);
83
+ let mut expected = input.1.clone();
84
+ layers.sort_unstable();
85
+ expected.sort_unstable();
86
+ assert_eq!(layers, expected);
87
+ }
88
+ }
89
+
90
+ #[test]
91
+ fn test_characters_popularity_compare() {
92
+ let tests = [
93
+ ("оаніирвтесклудмпзяьбгйчхцї", Language::Russian, 0.8, 0.9),
94
+ ("оаеинстрвлкмдпугяызбйьчхжц", Language::Russian, 1.0, 1.0),
95
+ ];
96
+ for (seq, lang, mmin, mmax) in &tests {
97
+ let res = characters_popularity_compare(lang, seq).unwrap();
98
+ assert!(res >= (*mmin as f32) && res <= (*mmax as f32));
99
+ }
100
+ }
101
+
102
+ #[test]
103
+ fn test_filter_alt_coherence_matches() {
104
+ let input: CoherenceMatches = vec![
105
+ CoherenceMatch {
106
+ language: &Language::English,
107
+ score: 7.77,
108
+ },
109
+ CoherenceMatch {
110
+ language: &Language::English,
111
+ score: 4.44,
112
+ },
113
+ ];
114
+ let expected_output: CoherenceMatches = vec![CoherenceMatch {
115
+ language: &Language::English,
116
+ score: 7.77,
117
+ }];
118
+ assert_eq!(filter_alt_coherence_matches(&input), expected_output);
119
+ }
120
+
121
+ #[test]
122
+ fn test_merge_coherence_ratios() {
123
+ let input: Vec<CoherenceMatches> = vec![
124
+ vec![
125
+ CoherenceMatch {
126
+ language: &Language::English,
127
+ score: 7.77,
128
+ },
129
+ CoherenceMatch {
130
+ language: &Language::English,
131
+ score: 4.44,
132
+ },
133
+ ],
134
+ vec![
135
+ CoherenceMatch {
136
+ language: &Language::Ukrainian,
137
+ score: 5.0,
138
+ },
139
+ CoherenceMatch {
140
+ language: &Language::Ukrainian,
141
+ score: 10.0,
142
+ },
143
+ ],
144
+ vec![CoherenceMatch {
145
+ language: &Language::Bulgarian,
146
+ score: 12.0,
147
+ }],
148
+ ];
149
+ let mut expected_output: CoherenceMatches = vec![
150
+ CoherenceMatch {
151
+ language: &Language::English,
152
+ score: 6.105,
153
+ },
154
+ CoherenceMatch {
155
+ language: &Language::Ukrainian,
156
+ score: 7.5,
157
+ },
158
+ CoherenceMatch {
159
+ language: &Language::Bulgarian,
160
+ score: 12.0,
161
+ },
162
+ ];
163
+ let mut output = merge_coherence_ratios(&input);
164
+ output.sort_unstable_by(|a, b| a.score.partial_cmp(&b.score).unwrap());
165
+ expected_output.sort_unstable_by(|a, b| a.score.partial_cmp(&b.score).unwrap());
166
+ assert_eq!(output, expected_output);
167
+ }
168
+
169
+ #[test]
170
+ fn test_coherence_ratio() {
171
+ let tests = [
172
+ (
173
+ "Bсеки човек има право на образование. Oбразованието трябва да бъде безплатно, поне що се отнася до началното и основното образование.",
174
+ vec![&Language::Bulgarian],
175
+ ),
176
+ (
177
+ "Lietuviø kalba – ið baltø prokalbës kilusi lietuviø tautos kalba, kuri Lietuvoje yra valstybinë, o Europos Sàjungoje – viena ið oficialiøjø kalbø. Lietuviðkai kalba apie tris milijonus þmoniø (dauguma jø gyvena Lietuvoje). Drauge su latviø, mirusiomis prûsø, jotvingiø ir kitomis baltø kalbomis, priklauso indoeuropieèiø kalbø ðeimos baltø kalbø grupei.
178
+ Pirmieji lietuviø kalbos raðytiniai paminklai atsirado vëlokai, apie XVI a., taèiau net dabartinë lietuviø kalba pasiþymi dideliu archajiðkumu (ypaè vardaþodþiø linksniavimo srityje).[3] Fonetiðkai ir morfologiðkai konservatyvi lietuviø kalba þymiai artimesnë baltø prokalbei negu naujoviðkesnë latviø kalba.[4] Lietuviø kalba – archajiðkiausia ið gyvøjø indoeuropieèiø kalbø, iðsaugojusi daugybæ indoeuropieèiø prokalbës ypatybiø.[5]
179
+ Lietuviø kalba skirstoma á dvi pagrindines tarmes: aukðtaièiø ir þemaièiø. Dabartinë bendrinë lietuviø kalba grindþiama vakarø aukðtaièiø kauniðkiø patarme.",
180
+ vec![&Language::Estonian],
181
+ ),
182
+ (
183
+ "In a statement by players' union Futpro, which is representing 33-year-old Hermoso, she is quoted as saying in no case did I seek to raise (lift) the president while they embraced on the podium.
184
+ The Spanish Football Federation (RFEF) said: The RFEF and Mr President will demonstrate each of the lies that are spread either by someone on behalf of the player or, if applicable, by the player hersel. Bсеки човек има право на образование. Oбразованието трябва да бъде безплатно, поне що се отнася до началното и основното образование.",
185
+ vec![&Language::Bulgarian, &Language::English],
186
+ ),
187
+ ];
188
+
189
+ for (text, expected_languages) in tests {
190
+ let result = coherence_ratio(text.to_string(), None, None).unwrap();
191
+ for lang in expected_languages {
192
+ assert!(result.iter().any(|cm| cm.language == lang));
193
+ }
194
+ }
195
+ }
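The expected values in `test_merge_coherence_ratios` above imply that merging averages each language's scores across the chunk-level matches: (7.77 + 4.44) / 2 = 6.105 for English and (5.0 + 10.0) / 2 = 7.5 for Ukrainian. A tiny illustrative check of that arithmetic, independent of the crate's types:

// Illustrative only: the merged score appears to be the per-language mean.
fn mean(scores: &[f32]) -> f32 {
    scores.iter().sum::<f32>() / scores.len() as f32
}

fn main() {
    assert!((mean(&[7.77, 4.44]) - 6.105).abs() < 1e-4);
    assert!((mean(&[5.0, 10.0]) - 7.5).abs() < 1e-4);
}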
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.gif ADDED

Git LFS Details

  • SHA256: cf4724b2f736ed1a0ae6bc28f1ead963d9cd2c1fd87b6ef32e7799fc1c5c8bda
  • Pointer size: 127 Bytes
  • Size of remote file: 43 Bytes
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.jpg ADDED

Git LFS Details

  • SHA256: f3651a46cbba2ff2db5d272b03134cfb6b9c0ae3e887eb674e1306af6731b5a7
  • Pointer size: 130 Bytes
  • Size of remote file: 32.4 kB
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71944d7430c461f0cd6e7fd10cee7eb72786352a3678fc7bc0ae3d410f72aece
3
+ size 1570024
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.png ADDED

Git LFS Details

  • SHA256: f0d6bb0ca95cf8f025e3ba36cfd888e518bcd1ffbb822c58e0f84bdb9b39a42c
  • Pointer size: 129 Bytes
  • Size of remote file: 7.98 kB
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.webp ADDED

Git LFS Details

  • SHA256: b22535f44e5683233a14afa3325edd66ae35037d665a3b4ad1b4cd607066bb5b
  • Pointer size: 129 Bytes
  • Size of remote file: 2.94 kB
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-1.xlsx ADDED
Binary file (42.7 kB).
 
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-2.png ADDED

Git LFS Details

  • SHA256: 1a8352b9372452ab024b5dfd3c74cd8fac2c84e7ff152879f83949c4707fd87e
  • Pointer size: 129 Bytes
  • Size of remote file: 6.15 kB
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/None/sample-3.png ADDED

Git LFS Details

  • SHA256: fd9758e7a7efd038246df6c3ca2e0d9b1637c1b90f98fa56748ab0d0dbfbd36f
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/ascii/CHANGELOG.md ADDED
@@ -0,0 +1,200 @@
1
+ # Changelog
2
+ All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
3
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
4
+
5
+ ## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
6
+
7
+ ### Added
8
+ - Explicit support for Python 3.11 (PR #164)
9
+
10
+ ### Changed
11
+ - The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
12
+
13
+ ## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
14
+
15
+ ### Fixed
16
+ - Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154)
17
+
18
+ ### Changed
19
+ - Skipping the language-detection (CD) on ASCII (PR #155)
20
+
21
+ ## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
22
+
23
+ ### Changed
24
+ - Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
25
+
26
+ ### Fixed
27
+ - Wrong logging level applied when setting kwarg `explain` to True (PR #146)
28
+
29
+ ## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
30
+ ### Changed
31
+ - Improvement over Vietnamese detection (PR #126)
32
+ - MD improvement on trailing data and long foreign (non-pure latin) data (PR #124)
33
+ - Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122)
34
+ - call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129)
35
+ - Code style as refactored by Sourcery-AI (PR #131)
36
+ - Minor adjustment on the MD around european words (PR #133)
37
+ - Remove and replace SRTs from assets / tests (PR #139)
38
+ - Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135)
39
+ - Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135)
40
+
41
+ ### Fixed
42
+ - Fix large (misleading) sequence giving UnicodeDecodeError (PR #137)
43
+ - Avoid using too insignificant chunk (PR #137)
44
+
45
+ ### Added
46
+ - Add and expose function `set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135)
47
+ - Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
48
+
49
+ ## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
50
+ ### Added
51
+ - Add support for Kazakh (Cyrillic) language detection (PR #109)
52
+
53
+ ### Changed
54
+ - Further, improve inferring the language from a given single-byte code page (PR #112)
55
+ - Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116)
56
+ - Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113)
57
+ - Various detection improvement (MD+CD) (PR #117)
58
+
59
+ ### Removed
60
+ - Remove redundant logging entry about detected language(s) (PR #115)
61
+
62
+ ### Fixed
63
+ - Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
64
+
65
+ ## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
66
+ ### Fixed
67
+ - Unforeseen regression with the loss of the backward-compatibility with some older minor of Python 3.5.x (PR #100)
68
+ - Fix CLI crash when using --minimal output in certain cases (PR #103)
69
+
70
+ ### Changed
71
+ - Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
72
+
73
+ ## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
74
+ ### Changed
75
+ - The project now comply with: flake8, mypy, isort and black to ensure a better overall quality (PR #81)
76
+ - The BC-support with v1.x was improved, the old staticmethods are restored (PR #82)
77
+ - The Unicode detection is slightly improved (PR #93)
78
+ - Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91)
79
+
80
+ ### Removed
81
+ - The project no longer raise warning on tiny content given for detection, will be simply logged as warning instead (PR #92)
82
+
83
+ ### Fixed
84
+ - In some rare case, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95)
85
+ - Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
86
+ - The MANIFEST.in was not exhaustive (PR #78)
87
+
88
+ ## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
89
+ ### Fixed
90
+ - The CLI no longer raise an unexpected exception when no encoding has been found (PR #70)
91
+ - Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
92
+ - The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
93
+ - Submatch factoring could be wrong in rare edge cases (PR #72)
94
+ - Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72)
95
+ - Fix line endings from CRLF to LF for certain project files (PR #67)
96
+
97
+ ### Changed
98
+ - Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
99
+ - Allow fallback on specified encoding if any (PR #71)
100
+
101
+ ## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
102
+ ### Changed
103
+ - Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63)
104
+ - According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
105
+
106
+ ## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
107
+ ### Fixed
108
+ - Empty/Too small JSON payload miss-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
109
+
110
+ ### Changed
111
+ - Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57)
112
+
113
+ ## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
114
+ ### Fixed
115
+ - Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
116
+ - Using explain=False permanently disable the verbose output in the current runtime (PR #47)
117
+ - One log entry (language target preemptive) was not show in logs when using explain=True (PR #47)
118
+ - Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
119
+
120
+ ### Changed
121
+ - Public function normalize default args values were not aligned with from_bytes (PR #53)
122
+
123
+ ### Added
124
+ - You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47)
125
+
126
+ ## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
127
+ ### Changed
128
+ - 4x to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet.
129
+ - Accent has been made on UTF-8 detection, should perform rather instantaneous.
130
+ - The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
131
+ - The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time)
132
+ - The program has been rewritten to ease the readability and maintainability. (+Using static typing)+
133
+ - utf_7 detection has been reinstated.
134
+
135
+ ### Removed
136
+ - This package no longer require anything when used with Python 3.5 (Dropped cached_property)
137
+ - Removed support for these languages: Catalan, Esperanto, Kazakh, Baque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbocroatian.
138
+ - The exception hook on UnicodeDecodeError has been removed.
139
+
140
+ ### Deprecated
141
+ - Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
142
+
143
+ ### Fixed
144
+ - The CLI output used the relative path of the file(s). Should be absolute.
145
+
146
+ ## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
147
+ ### Fixed
148
+ - Logger configuration/usage no longer conflict with others (PR #44)
149
+
150
+ ## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
151
+ ### Removed
152
+ - Using standard logging instead of using the package loguru.
153
+ - Dropping nose test framework in favor of the maintained pytest.
154
+ - Choose to not use dragonmapper package to help with gibberish Chinese/CJK text.
155
+ - Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version.
156
+ - Stop support for UTF-7 that does not contain a SIG.
157
+ - Dropping PrettyTable, replaced with pure JSON output in CLI.
158
+
159
+ ### Fixed
160
+ - BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present. Due to the sub-match factoring process.
161
+ - Not searching properly for the BOM when trying utf32/16 parent codec.
162
+
163
+ ### Changed
164
+ - Improving the package final size by compressing frequencies.json.
165
+ - Huge improvement over the larges payload.
166
+
167
+ ### Added
168
+ - CLI now produces JSON consumable output.
169
+ - Return ASCII if given sequences fit. Given reasonable confidence.
170
+
171
+ ## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
172
+
173
+ ### Fixed
174
+ - In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
175
+
176
+ ## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
177
+
178
+ ### Fixed
179
+ - Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39)
180
+
181
+ ## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
182
+
183
+ ### Fixed
184
+ - The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38)
185
+
186
+ ## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
187
+
188
+ ### Changed
189
+ - Amend the previous release to allow prettytable 2.0 (PR #35)
190
+
191
+ ## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
192
+
193
+ ### Fixed
194
+ - Fix error while using the package with a python pre-release interpreter (PR #33)
195
+
196
+ ### Changed
197
+ - Dependencies refactoring, constraints revised.
198
+
199
+ ### Added
200
+ - Add python 3.9 and 3.10 to the supported interpreters
Dataset_Construction/projects/charset-normalizer/rust/src/tests/data/largesets/ascii/_chromium_iso-8859-1_with_no_encoding_specified.html ADDED
@@ -0,0 +1,10 @@
1
+ <!DOCTYPE html PUBLIC "-//W3C//DTD html 4.01 Transitional//EN" "http://www.w3c.o rg/TR/1999/REC-html401-19991224/loose.dtd">
2
+ <html lang=en-US xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml">
3
+ <head profile=http://www.w3.org/2000/08/w3c-synd/#>
4
+ <title>iso-8859-1</title>
5
+ </head>
6
+ <body>
7
+ <h1 id=logo><img height=48 alt="The World Wide Web Consortium (W3C)" src="/Icons/w3c_main" width=315></h1>
8
+ <h2 id=slogan>Leading the Web to Its Full Potential...</h2>
9
+ </body>
10
+ </html>