koichi12 committed
Commit dfe9fa3 · verified · 1 Parent(s): ea95298

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +4 -0
  2. .venv/lib/python3.11/site-packages/numpy/core/_internal.py +935 -0
  3. .venv/lib/python3.11/site-packages/numpy/core/_type_aliases.pyi +13 -0
  4. .venv/lib/python3.11/site-packages/numpy/core/_umath_tests.cpython-311-x86_64-linux-gnu.so +0 -0
  5. .venv/lib/python3.11/site-packages/numpy/core/cversions.py +13 -0
  6. .venv/lib/python3.11/site-packages/numpy/core/fromnumeric.py +0 -0
  7. .venv/lib/python3.11/site-packages/numpy/core/numerictypes.py +689 -0
  8. .venv/lib/python3.11/site-packages/numpy/core/overrides.py +181 -0
  9. .venv/lib/python3.11/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/numpy/fft/tests/__init__.py +0 -0
  11. .venv/lib/python3.11/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/numpy/fft/tests/test_helper.py +167 -0
  15. .venv/lib/python3.11/site-packages/numpy/random/LICENSE.md +71 -0
  16. .venv/lib/python3.11/site-packages/numpy/random/__init__.pxd +14 -0
  17. .venv/lib/python3.11/site-packages/numpy/random/__init__.py +215 -0
  18. .venv/lib/python3.11/site-packages/numpy/random/__init__.pyi +72 -0
  19. .venv/lib/python3.11/site-packages/numpy/random/__pycache__/__init__.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/numpy/random/__pycache__/_pickle.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/numpy/random/_bounded_integers.pxd +29 -0
  22. .venv/lib/python3.11/site-packages/numpy/random/_common.pxd +106 -0
  23. .venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/extending.py +40 -0
  26. .venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/parse.py +54 -0
  27. .venv/lib/python3.11/site-packages/numpy/random/_examples/cython/extending.pyx +78 -0
  28. .venv/lib/python3.11/site-packages/numpy/random/_examples/cython/extending_distributions.pyx +117 -0
  29. .venv/lib/python3.11/site-packages/numpy/random/_examples/cython/meson.build +45 -0
  30. .venv/lib/python3.11/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/numpy/random/_examples/numba/extending.py +84 -0
  33. .venv/lib/python3.11/site-packages/numpy/random/_examples/numba/extending_distributions.py +67 -0
  34. .venv/lib/python3.11/site-packages/numpy/random/_generator.cpython-311-x86_64-linux-gnu.so +3 -0
  35. .venv/lib/python3.11/site-packages/numpy/random/_generator.pyi +681 -0
  36. .venv/lib/python3.11/site-packages/numpy/random/_mt19937.cpython-311-x86_64-linux-gnu.so +3 -0
  37. .venv/lib/python3.11/site-packages/numpy/random/_mt19937.pyi +22 -0
  38. .venv/lib/python3.11/site-packages/numpy/random/_pcg64.pyi +42 -0
  39. .venv/lib/python3.11/site-packages/numpy/random/_philox.pyi +36 -0
  40. .venv/lib/python3.11/site-packages/numpy/random/_pickle.py +80 -0
  41. .venv/lib/python3.11/site-packages/numpy/random/_sfc64.cpython-311-x86_64-linux-gnu.so +0 -0
  42. .venv/lib/python3.11/site-packages/numpy/random/_sfc64.pyi +28 -0
  43. .venv/lib/python3.11/site-packages/numpy/random/bit_generator.cpython-311-x86_64-linux-gnu.so +3 -0
  44. .venv/lib/python3.11/site-packages/numpy/random/bit_generator.pxd +35 -0
  45. .venv/lib/python3.11/site-packages/numpy/random/bit_generator.pyi +112 -0
  46. .venv/lib/python3.11/site-packages/numpy/random/c_distributions.pxd +120 -0
  47. .venv/lib/python3.11/site-packages/numpy/random/lib/libnpyrandom.a +0 -0
  48. .venv/lib/python3.11/site-packages/numpy/random/mtrand.pyi +571 -0
  49. .venv/lib/python3.11/site-packages/numpy/random/tests/__init__.py +0 -0
  50. .venv/lib/python3.11/site-packages/numpy/random/tests/__pycache__/__init__.cpython-311.pyc +0 -0
.gitattributes CHANGED
@@ -357,3 +357,7 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
 .venv/lib/python3.11/site-packages/numpy/linalg/_umath_linalg.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/numpy/linalg/__pycache__/linalg.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/numpy/linalg/tests/__pycache__/test_linalg.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/typing/tests/data/pass/__pycache__/random.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/random/_mt19937.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/random/_generator.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/random/bit_generator.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/numpy/core/_internal.py ADDED
@@ -0,0 +1,935 @@
1
+ """
2
+ A place for internal code
3
+
4
+ Some things are more easily handled in Python.
5
+
6
+ """
7
+ import ast
8
+ import re
9
+ import sys
10
+ import warnings
11
+
12
+ from ..exceptions import DTypePromotionError
13
+ from .multiarray import dtype, array, ndarray, promote_types
14
+ try:
15
+ import ctypes
16
+ except ImportError:
17
+ ctypes = None
18
+
19
+ IS_PYPY = sys.implementation.name == 'pypy'
20
+
21
+ if sys.byteorder == 'little':
22
+ _nbo = '<'
23
+ else:
24
+ _nbo = '>'
25
+
26
+ def _makenames_list(adict, align):
27
+ allfields = []
28
+
29
+ for fname, obj in adict.items():
30
+ n = len(obj)
31
+ if not isinstance(obj, tuple) or n not in (2, 3):
32
+ raise ValueError("entry not a 2- or 3- tuple")
33
+ if n > 2 and obj[2] == fname:
34
+ continue
35
+ num = int(obj[1])
36
+ if num < 0:
37
+ raise ValueError("invalid offset.")
38
+ format = dtype(obj[0], align=align)
39
+ if n > 2:
40
+ title = obj[2]
41
+ else:
42
+ title = None
43
+ allfields.append((fname, format, num, title))
44
+ # sort by offsets
45
+ allfields.sort(key=lambda x: x[2])
46
+ names = [x[0] for x in allfields]
47
+ formats = [x[1] for x in allfields]
48
+ offsets = [x[2] for x in allfields]
49
+ titles = [x[3] for x in allfields]
50
+
51
+ return names, formats, offsets, titles
52
+
53
+ # Called in PyArray_DescrConverter function when
54
+ # a dictionary without "names" and "formats"
55
+ # fields is used as a data-type descriptor.
56
+ def _usefields(adict, align):
57
+ try:
58
+ names = adict[-1]
59
+ except KeyError:
60
+ names = None
61
+ if names is None:
62
+ names, formats, offsets, titles = _makenames_list(adict, align)
63
+ else:
64
+ formats = []
65
+ offsets = []
66
+ titles = []
67
+ for name in names:
68
+ res = adict[name]
69
+ formats.append(res[0])
70
+ offsets.append(res[1])
71
+ if len(res) > 2:
72
+ titles.append(res[2])
73
+ else:
74
+ titles.append(None)
75
+
76
+ return dtype({"names": names,
77
+ "formats": formats,
78
+ "offsets": offsets,
79
+ "titles": titles}, align)
80
+
81
+
82
+ # construct an array_protocol descriptor list
83
+ # from the fields attribute of a descriptor
84
+ # This calls itself recursively but should eventually hit
85
+ # a descriptor that has no fields and then return
86
+ # a simple typestring
87
+
88
+ def _array_descr(descriptor):
89
+ fields = descriptor.fields
90
+ if fields is None:
91
+ subdtype = descriptor.subdtype
92
+ if subdtype is None:
93
+ if descriptor.metadata is None:
94
+ return descriptor.str
95
+ else:
96
+ new = descriptor.metadata.copy()
97
+ if new:
98
+ return (descriptor.str, new)
99
+ else:
100
+ return descriptor.str
101
+ else:
102
+ return (_array_descr(subdtype[0]), subdtype[1])
103
+
104
+ names = descriptor.names
105
+ ordered_fields = [fields[x] + (x,) for x in names]
106
+ result = []
107
+ offset = 0
108
+ for field in ordered_fields:
109
+ if field[1] > offset:
110
+ num = field[1] - offset
111
+ result.append(('', f'|V{num}'))
112
+ offset += num
113
+ elif field[1] < offset:
114
+ raise ValueError(
115
+ "dtype.descr is not defined for types with overlapping or "
116
+ "out-of-order fields")
117
+ if len(field) > 3:
118
+ name = (field[2], field[3])
119
+ else:
120
+ name = field[2]
121
+ if field[0].subdtype:
122
+ tup = (name, _array_descr(field[0].subdtype[0]),
123
+ field[0].subdtype[1])
124
+ else:
125
+ tup = (name, _array_descr(field[0]))
126
+ offset += field[0].itemsize
127
+ result.append(tup)
128
+
129
+ if descriptor.itemsize > offset:
130
+ num = descriptor.itemsize - offset
131
+ result.append(('', f'|V{num}'))
132
+
133
+ return result
134
+
135
+ # Build a new array from the information in a pickle.
136
+ # Note that the name numpy.core._internal._reconstruct is embedded in
137
+ # pickles of ndarrays made with NumPy before release 1.0
138
+ # so don't remove the name here, or you'll
139
+ # break backward compatibility.
140
+ def _reconstruct(subtype, shape, dtype):
141
+ return ndarray.__new__(subtype, shape, dtype)
142
+
143
+
144
+ # format_re was originally from numarray by J. Todd Miller
145
+
146
+ format_re = re.compile(r'(?P<order1>[<>|=]?)'
147
+ r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
148
+ r'(?P<order2>[<>|=]?)'
149
+ r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
150
+ sep_re = re.compile(r'\s*,\s*')
151
+ space_re = re.compile(r'\s+$')
152
+
153
+ # astr is a string (perhaps comma separated)
154
+
155
+ _convorder = {'=': _nbo}
156
+
157
+ def _commastring(astr):
158
+ startindex = 0
159
+ result = []
160
+ while startindex < len(astr):
161
+ mo = format_re.match(astr, pos=startindex)
162
+ try:
163
+ (order1, repeats, order2, dtype) = mo.groups()
164
+ except (TypeError, AttributeError):
165
+ raise ValueError(
166
+ f'format number {len(result)+1} of "{astr}" is not recognized'
167
+ ) from None
168
+ startindex = mo.end()
169
+ # Separator or ending padding
170
+ if startindex < len(astr):
171
+ if space_re.match(astr, pos=startindex):
172
+ startindex = len(astr)
173
+ else:
174
+ mo = sep_re.match(astr, pos=startindex)
175
+ if not mo:
176
+ raise ValueError(
177
+ 'format number %d of "%s" is not recognized' %
178
+ (len(result)+1, astr))
179
+ startindex = mo.end()
180
+
181
+ if order2 == '':
182
+ order = order1
183
+ elif order1 == '':
184
+ order = order2
185
+ else:
186
+ order1 = _convorder.get(order1, order1)
187
+ order2 = _convorder.get(order2, order2)
188
+ if (order1 != order2):
189
+ raise ValueError(
190
+ 'inconsistent byte-order specification %s and %s' %
191
+ (order1, order2))
192
+ order = order1
193
+
194
+ if order in ('|', '=', _nbo):
195
+ order = ''
196
+ dtype = order + dtype
197
+ if (repeats == ''):
198
+ newitem = dtype
199
+ else:
200
+ newitem = (dtype, ast.literal_eval(repeats))
201
+ result.append(newitem)
202
+
203
+ return result
204
+
205
+ class dummy_ctype:
206
+ def __init__(self, cls):
207
+ self._cls = cls
208
+ def __mul__(self, other):
209
+ return self
210
+ def __call__(self, *other):
211
+ return self._cls(other)
212
+ def __eq__(self, other):
213
+ return self._cls == other._cls
214
+ def __ne__(self, other):
215
+ return self._cls != other._cls
216
+
217
+ def _getintp_ctype():
218
+ val = _getintp_ctype.cache
219
+ if val is not None:
220
+ return val
221
+ if ctypes is None:
222
+ import numpy as np
223
+ val = dummy_ctype(np.intp)
224
+ else:
225
+ char = dtype('p').char
226
+ if char == 'i':
227
+ val = ctypes.c_int
228
+ elif char == 'l':
229
+ val = ctypes.c_long
230
+ elif char == 'q':
231
+ val = ctypes.c_longlong
232
+ else:
233
+ val = ctypes.c_long
234
+ _getintp_ctype.cache = val
235
+ return val
236
+ _getintp_ctype.cache = None
237
+
238
+ # Used for .ctypes attribute of ndarray
239
+
240
+ class _missing_ctypes:
241
+ def cast(self, num, obj):
242
+ return num.value
243
+
244
+ class c_void_p:
245
+ def __init__(self, ptr):
246
+ self.value = ptr
247
+
248
+
249
+ class _ctypes:
250
+ def __init__(self, array, ptr=None):
251
+ self._arr = array
252
+
253
+ if ctypes:
254
+ self._ctypes = ctypes
255
+ self._data = self._ctypes.c_void_p(ptr)
256
+ else:
257
+ # fake a pointer-like object that holds onto the reference
258
+ self._ctypes = _missing_ctypes()
259
+ self._data = self._ctypes.c_void_p(ptr)
260
+ self._data._objects = array
261
+
262
+ if self._arr.ndim == 0:
263
+ self._zerod = True
264
+ else:
265
+ self._zerod = False
266
+
267
+ def data_as(self, obj):
268
+ """
269
+ Return the data pointer cast to a particular c-types object.
270
+ For example, calling ``self._as_parameter_`` is equivalent to
271
+ ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
272
+ pointer to a ctypes array of floating-point data:
273
+ ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
274
+
275
+ The returned pointer will keep a reference to the array.
276
+ """
277
+ # _ctypes.cast function causes a circular reference of self._data in
278
+ # self._data._objects. Attributes of self._data cannot be released
279
+ # until gc.collect is called. Make a copy of the pointer first then let
280
+ # it hold the array reference. This is a workaround to circumvent the
281
+ # CPython bug https://bugs.python.org/issue12836
282
+ ptr = self._ctypes.cast(self._data, obj)
283
+ ptr._arr = self._arr
284
+ return ptr
285
+
286
+ def shape_as(self, obj):
287
+ """
288
+ Return the shape tuple as an array of some other c-types
289
+ type. For example: ``self.shape_as(ctypes.c_short)``.
290
+ """
291
+ if self._zerod:
292
+ return None
293
+ return (obj*self._arr.ndim)(*self._arr.shape)
294
+
295
+ def strides_as(self, obj):
296
+ """
297
+ Return the strides tuple as an array of some other
298
+ c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
299
+ """
300
+ if self._zerod:
301
+ return None
302
+ return (obj*self._arr.ndim)(*self._arr.strides)
303
+
304
+ @property
305
+ def data(self):
306
+ """
307
+ A pointer to the memory area of the array as a Python integer.
308
+ This memory area may contain data that is not aligned, or not in correct
309
+ byte-order. The memory area may not even be writeable. The array
310
+ flags and data-type of this array should be respected when passing this
311
+ attribute to arbitrary C-code to avoid trouble that can include Python
312
+ crashing. User Beware! The value of this attribute is exactly the same
313
+ as ``self._array_interface_['data'][0]``.
314
+
315
+ Note that unlike ``data_as``, a reference will not be kept to the array:
316
+ code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
317
+ pointer to a deallocated array, and should be spelt
318
+ ``(a + b).ctypes.data_as(ctypes.c_void_p)``
319
+ """
320
+ return self._data.value
321
+
322
+ @property
323
+ def shape(self):
324
+ """
325
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
326
+ the basetype is the C-integer corresponding to ``dtype('p')`` on this
327
+ platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
328
+ `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
329
+ the platform. The ctypes array contains the shape of
330
+ the underlying array.
331
+ """
332
+ return self.shape_as(_getintp_ctype())
333
+
334
+ @property
335
+ def strides(self):
336
+ """
337
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
338
+ the basetype is the same as for the shape attribute. This ctypes array
339
+ contains the strides information from the underlying array. This strides
340
+ information is important for showing how many bytes must be jumped to
341
+ get to the next element in the array.
342
+ """
343
+ return self.strides_as(_getintp_ctype())
344
+
345
+ @property
346
+ def _as_parameter_(self):
347
+ """
348
+ Overrides the ctypes semi-magic method
349
+
350
+ Enables `c_func(some_array.ctypes)`
351
+ """
352
+ return self.data_as(ctypes.c_void_p)
353
+
354
+ # Numpy 1.21.0, 2021-05-18
355
+
356
+ def get_data(self):
357
+ """Deprecated getter for the `_ctypes.data` property.
358
+
359
+ .. deprecated:: 1.21
360
+ """
361
+ warnings.warn('"get_data" is deprecated. Use "data" instead',
362
+ DeprecationWarning, stacklevel=2)
363
+ return self.data
364
+
365
+ def get_shape(self):
366
+ """Deprecated getter for the `_ctypes.shape` property.
367
+
368
+ .. deprecated:: 1.21
369
+ """
370
+ warnings.warn('"get_shape" is deprecated. Use "shape" instead',
371
+ DeprecationWarning, stacklevel=2)
372
+ return self.shape
373
+
374
+ def get_strides(self):
375
+ """Deprecated getter for the `_ctypes.strides` property.
376
+
377
+ .. deprecated:: 1.21
378
+ """
379
+ warnings.warn('"get_strides" is deprecated. Use "strides" instead',
380
+ DeprecationWarning, stacklevel=2)
381
+ return self.strides
382
+
383
+ def get_as_parameter(self):
384
+ """Deprecated getter for the `_ctypes._as_parameter_` property.
385
+
386
+ .. deprecated:: 1.21
387
+ """
388
+ warnings.warn(
389
+ '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
390
+ DeprecationWarning, stacklevel=2,
391
+ )
392
+ return self._as_parameter_
393
+
394
+
395
+ def _newnames(datatype, order):
396
+ """
397
+ Given a datatype and an order object, return a new names tuple, with the
398
+ order indicated
399
+ """
400
+ oldnames = datatype.names
401
+ nameslist = list(oldnames)
402
+ if isinstance(order, str):
403
+ order = [order]
404
+ seen = set()
405
+ if isinstance(order, (list, tuple)):
406
+ for name in order:
407
+ try:
408
+ nameslist.remove(name)
409
+ except ValueError:
410
+ if name in seen:
411
+ raise ValueError(f"duplicate field name: {name}") from None
412
+ else:
413
+ raise ValueError(f"unknown field name: {name}") from None
414
+ seen.add(name)
415
+ return tuple(list(order) + nameslist)
416
+ raise ValueError(f"unsupported order value: {order}")
417
+
418
+ def _copy_fields(ary):
419
+ """Return copy of structured array with padding between fields removed.
420
+
421
+ Parameters
422
+ ----------
423
+ ary : ndarray
424
+ Structured array from which to remove padding bytes
425
+
426
+ Returns
427
+ -------
428
+ ary_copy : ndarray
429
+ Copy of ary with padding bytes removed
430
+ """
431
+ dt = ary.dtype
432
+ copy_dtype = {'names': dt.names,
433
+ 'formats': [dt.fields[name][0] for name in dt.names]}
434
+ return array(ary, dtype=copy_dtype, copy=True)
435
+
436
+ def _promote_fields(dt1, dt2):
437
+ """ Perform type promotion for two structured dtypes.
438
+
439
+ Parameters
440
+ ----------
441
+ dt1 : structured dtype
442
+ First dtype.
443
+ dt2 : structured dtype
444
+ Second dtype.
445
+
446
+ Returns
447
+ -------
448
+ out : dtype
449
+ The promoted dtype
450
+
451
+ Notes
452
+ -----
453
+ If one of the inputs is aligned, the result will be. The titles of
454
+ both descriptors must match (point to the same field).
455
+ """
456
+ # Both must be structured and have the same names in the same order
457
+ if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
458
+ raise DTypePromotionError(
459
+ f"field names `{dt1.names}` and `{dt2.names}` mismatch.")
460
+
461
+ # if both are identical, we can (maybe!) just return the same dtype.
462
+ identical = dt1 is dt2
463
+ new_fields = []
464
+ for name in dt1.names:
465
+ field1 = dt1.fields[name]
466
+ field2 = dt2.fields[name]
467
+ new_descr = promote_types(field1[0], field2[0])
468
+ identical = identical and new_descr is field1[0]
469
+
470
+ # Check that the titles match (if given):
471
+ if field1[2:] != field2[2:]:
472
+ raise DTypePromotionError(
473
+ f"field titles of field '{name}' mismatch")
474
+ if len(field1) == 2:
475
+ new_fields.append((name, new_descr))
476
+ else:
477
+ new_fields.append(((field1[2], name), new_descr))
478
+
479
+ res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
480
+
481
+ # Might as well preserve identity (and metadata) if the dtype is identical
482
+ # and the itemsize, offsets are also unmodified. This could probably be
483
+ # sped up, but also probably just be removed entirely.
484
+ if identical and res.itemsize == dt1.itemsize:
485
+ for name in dt1.names:
486
+ if dt1.fields[name][1] != res.fields[name][1]:
487
+ return res # the dtype changed.
488
+ return dt1
489
+
490
+ return res
491
+
492
+
493
+ def _getfield_is_safe(oldtype, newtype, offset):
494
+ """ Checks safety of getfield for object arrays.
495
+
496
+ As in _view_is_safe, we need to check that memory containing objects is not
497
+ reinterpreted as a non-object datatype and vice versa.
498
+
499
+ Parameters
500
+ ----------
501
+ oldtype : data-type
502
+ Data type of the original ndarray.
503
+ newtype : data-type
504
+ Data type of the field being accessed by ndarray.getfield
505
+ offset : int
506
+ Offset of the field being accessed by ndarray.getfield
507
+
508
+ Raises
509
+ ------
510
+ TypeError
511
+ If the field access is invalid
512
+
513
+ """
514
+ if newtype.hasobject or oldtype.hasobject:
515
+ if offset == 0 and newtype == oldtype:
516
+ return
517
+ if oldtype.names is not None:
518
+ for name in oldtype.names:
519
+ if (oldtype.fields[name][1] == offset and
520
+ oldtype.fields[name][0] == newtype):
521
+ return
522
+ raise TypeError("Cannot get/set field of an object array")
523
+ return
524
+
525
+ def _view_is_safe(oldtype, newtype):
526
+ """ Checks safety of a view involving object arrays, for example when
527
+ doing::
528
+
529
+ np.zeros(10, dtype=oldtype).view(newtype)
530
+
531
+ Parameters
532
+ ----------
533
+ oldtype : data-type
534
+ Data type of original ndarray
535
+ newtype : data-type
536
+ Data type of the view
537
+
538
+ Raises
539
+ ------
540
+ TypeError
541
+ If the new type is incompatible with the old type.
542
+
543
+ """
544
+
545
+ # if the types are equivalent, there is no problem.
546
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
547
+ if oldtype == newtype:
548
+ return
549
+
550
+ if newtype.hasobject or oldtype.hasobject:
551
+ raise TypeError("Cannot change data-type for object array.")
552
+ return
553
+
554
+ # Given a string containing a PEP 3118 format specifier,
555
+ # construct a NumPy dtype
556
+
557
+ _pep3118_native_map = {
558
+ '?': '?',
559
+ 'c': 'S1',
560
+ 'b': 'b',
561
+ 'B': 'B',
562
+ 'h': 'h',
563
+ 'H': 'H',
564
+ 'i': 'i',
565
+ 'I': 'I',
566
+ 'l': 'l',
567
+ 'L': 'L',
568
+ 'q': 'q',
569
+ 'Q': 'Q',
570
+ 'e': 'e',
571
+ 'f': 'f',
572
+ 'd': 'd',
573
+ 'g': 'g',
574
+ 'Zf': 'F',
575
+ 'Zd': 'D',
576
+ 'Zg': 'G',
577
+ 's': 'S',
578
+ 'w': 'U',
579
+ 'O': 'O',
580
+ 'x': 'V', # padding
581
+ }
582
+ _pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
583
+
584
+ _pep3118_standard_map = {
585
+ '?': '?',
586
+ 'c': 'S1',
587
+ 'b': 'b',
588
+ 'B': 'B',
589
+ 'h': 'i2',
590
+ 'H': 'u2',
591
+ 'i': 'i4',
592
+ 'I': 'u4',
593
+ 'l': 'i4',
594
+ 'L': 'u4',
595
+ 'q': 'i8',
596
+ 'Q': 'u8',
597
+ 'e': 'f2',
598
+ 'f': 'f',
599
+ 'd': 'd',
600
+ 'Zf': 'F',
601
+ 'Zd': 'D',
602
+ 's': 'S',
603
+ 'w': 'U',
604
+ 'O': 'O',
605
+ 'x': 'V', # padding
606
+ }
607
+ _pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
608
+
609
+ _pep3118_unsupported_map = {
610
+ 'u': 'UCS-2 strings',
611
+ '&': 'pointers',
612
+ 't': 'bitfields',
613
+ 'X': 'function pointers',
614
+ }
615
+
616
+ class _Stream:
617
+ def __init__(self, s):
618
+ self.s = s
619
+ self.byteorder = '@'
620
+
621
+ def advance(self, n):
622
+ res = self.s[:n]
623
+ self.s = self.s[n:]
624
+ return res
625
+
626
+ def consume(self, c):
627
+ if self.s[:len(c)] == c:
628
+ self.advance(len(c))
629
+ return True
630
+ return False
631
+
632
+ def consume_until(self, c):
633
+ if callable(c):
634
+ i = 0
635
+ while i < len(self.s) and not c(self.s[i]):
636
+ i = i + 1
637
+ return self.advance(i)
638
+ else:
639
+ i = self.s.index(c)
640
+ res = self.advance(i)
641
+ self.advance(len(c))
642
+ return res
643
+
644
+ @property
645
+ def next(self):
646
+ return self.s[0]
647
+
648
+ def __bool__(self):
649
+ return bool(self.s)
650
+
651
+
652
+ def _dtype_from_pep3118(spec):
653
+ stream = _Stream(spec)
654
+ dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
655
+ return dtype
656
+
657
+ def __dtype_from_pep3118(stream, is_subdtype):
658
+ field_spec = dict(
659
+ names=[],
660
+ formats=[],
661
+ offsets=[],
662
+ itemsize=0
663
+ )
664
+ offset = 0
665
+ common_alignment = 1
666
+ is_padding = False
667
+
668
+ # Parse spec
669
+ while stream:
670
+ value = None
671
+
672
+ # End of structure, bail out to upper level
673
+ if stream.consume('}'):
674
+ break
675
+
676
+ # Sub-arrays (1)
677
+ shape = None
678
+ if stream.consume('('):
679
+ shape = stream.consume_until(')')
680
+ shape = tuple(map(int, shape.split(',')))
681
+
682
+ # Byte order
683
+ if stream.next in ('@', '=', '<', '>', '^', '!'):
684
+ byteorder = stream.advance(1)
685
+ if byteorder == '!':
686
+ byteorder = '>'
687
+ stream.byteorder = byteorder
688
+
689
+ # Byte order characters also control native vs. standard type sizes
690
+ if stream.byteorder in ('@', '^'):
691
+ type_map = _pep3118_native_map
692
+ type_map_chars = _pep3118_native_typechars
693
+ else:
694
+ type_map = _pep3118_standard_map
695
+ type_map_chars = _pep3118_standard_typechars
696
+
697
+ # Item sizes
698
+ itemsize_str = stream.consume_until(lambda c: not c.isdigit())
699
+ if itemsize_str:
700
+ itemsize = int(itemsize_str)
701
+ else:
702
+ itemsize = 1
703
+
704
+ # Data types
705
+ is_padding = False
706
+
707
+ if stream.consume('T{'):
708
+ value, align = __dtype_from_pep3118(
709
+ stream, is_subdtype=True)
710
+ elif stream.next in type_map_chars:
711
+ if stream.next == 'Z':
712
+ typechar = stream.advance(2)
713
+ else:
714
+ typechar = stream.advance(1)
715
+
716
+ is_padding = (typechar == 'x')
717
+ dtypechar = type_map[typechar]
718
+ if dtypechar in 'USV':
719
+ dtypechar += '%d' % itemsize
720
+ itemsize = 1
721
+ numpy_byteorder = {'@': '=', '^': '='}.get(
722
+ stream.byteorder, stream.byteorder)
723
+ value = dtype(numpy_byteorder + dtypechar)
724
+ align = value.alignment
725
+ elif stream.next in _pep3118_unsupported_map:
726
+ desc = _pep3118_unsupported_map[stream.next]
727
+ raise NotImplementedError(
728
+ "Unrepresentable PEP 3118 data type {!r} ({})"
729
+ .format(stream.next, desc))
730
+ else:
731
+ raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
732
+
733
+ #
734
+ # Native alignment may require padding
735
+ #
736
+ # Here we assume that the presence of a '@' character implicitly implies
737
+ # that the start of the array is *already* aligned.
738
+ #
739
+ extra_offset = 0
740
+ if stream.byteorder == '@':
741
+ start_padding = (-offset) % align
742
+ intra_padding = (-value.itemsize) % align
743
+
744
+ offset += start_padding
745
+
746
+ if intra_padding != 0:
747
+ if itemsize > 1 or (shape is not None and _prod(shape) > 1):
748
+ # Inject internal padding to the end of the sub-item
749
+ value = _add_trailing_padding(value, intra_padding)
750
+ else:
751
+ # We can postpone the injection of internal padding,
752
+ # as the item appears at most once
753
+ extra_offset += intra_padding
754
+
755
+ # Update common alignment
756
+ common_alignment = _lcm(align, common_alignment)
757
+
758
+ # Convert itemsize to sub-array
759
+ if itemsize != 1:
760
+ value = dtype((value, (itemsize,)))
761
+
762
+ # Sub-arrays (2)
763
+ if shape is not None:
764
+ value = dtype((value, shape))
765
+
766
+ # Field name
767
+ if stream.consume(':'):
768
+ name = stream.consume_until(':')
769
+ else:
770
+ name = None
771
+
772
+ if not (is_padding and name is None):
773
+ if name is not None and name in field_spec['names']:
774
+ raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
775
+ field_spec['names'].append(name)
776
+ field_spec['formats'].append(value)
777
+ field_spec['offsets'].append(offset)
778
+
779
+ offset += value.itemsize
780
+ offset += extra_offset
781
+
782
+ field_spec['itemsize'] = offset
783
+
784
+ # extra final padding for aligned types
785
+ if stream.byteorder == '@':
786
+ field_spec['itemsize'] += (-offset) % common_alignment
787
+
788
+ # Check if this was a simple 1-item type, and unwrap it
789
+ if (field_spec['names'] == [None]
790
+ and field_spec['offsets'][0] == 0
791
+ and field_spec['itemsize'] == field_spec['formats'][0].itemsize
792
+ and not is_subdtype):
793
+ ret = field_spec['formats'][0]
794
+ else:
795
+ _fix_names(field_spec)
796
+ ret = dtype(field_spec)
797
+
798
+ # Finished
799
+ return ret, common_alignment
800
+
801
+ def _fix_names(field_spec):
802
+ """ Replace names which are None with the next unused f%d name """
803
+ names = field_spec['names']
804
+ for i, name in enumerate(names):
805
+ if name is not None:
806
+ continue
807
+
808
+ j = 0
809
+ while True:
810
+ name = f'f{j}'
811
+ if name not in names:
812
+ break
813
+ j = j + 1
814
+ names[i] = name
815
+
816
+ def _add_trailing_padding(value, padding):
817
+ """Inject the specified number of padding bytes at the end of a dtype"""
818
+ if value.fields is None:
819
+ field_spec = dict(
820
+ names=['f0'],
821
+ formats=[value],
822
+ offsets=[0],
823
+ itemsize=value.itemsize
824
+ )
825
+ else:
826
+ fields = value.fields
827
+ names = value.names
828
+ field_spec = dict(
829
+ names=names,
830
+ formats=[fields[name][0] for name in names],
831
+ offsets=[fields[name][1] for name in names],
832
+ itemsize=value.itemsize
833
+ )
834
+
835
+ field_spec['itemsize'] += padding
836
+ return dtype(field_spec)
837
+
838
+ def _prod(a):
839
+ p = 1
840
+ for x in a:
841
+ p *= x
842
+ return p
843
+
844
+ def _gcd(a, b):
845
+ """Calculate the greatest common divisor of a and b"""
846
+ while b:
847
+ a, b = b, a % b
848
+ return a
849
+
850
+ def _lcm(a, b):
851
+ return a // _gcd(a, b) * b
852
+
853
+ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
854
+ """ Format the error message for when __array_ufunc__ gives up. """
855
+ args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
856
+ ['{}={!r}'.format(k, v)
857
+ for k, v in kwargs.items()])
858
+ args = inputs + kwargs.get('out', ())
859
+ types_string = ', '.join(repr(type(arg).__name__) for arg in args)
860
+ return ('operand type(s) all returned NotImplemented from '
861
+ '__array_ufunc__({!r}, {!r}, {}): {}'
862
+ .format(ufunc, method, args_string, types_string))
863
+
864
+
865
+ def array_function_errmsg_formatter(public_api, types):
866
+ """ Format the error message for when __array_function__ gives up. """
867
+ func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
868
+ return ("no implementation found for '{}' on types that implement "
869
+ '__array_function__: {}'.format(func_name, list(types)))
870
+
871
+
872
+ def _ufunc_doc_signature_formatter(ufunc):
873
+ """
874
+ Builds a signature string which resembles PEP 457
875
+
876
+ This is used to construct the first line of the docstring
877
+ """
878
+
879
+ # input arguments are simple
880
+ if ufunc.nin == 1:
881
+ in_args = 'x'
882
+ else:
883
+ in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
884
+
885
+ # output arguments are both keyword or positional
886
+ if ufunc.nout == 0:
887
+ out_args = ', /, out=()'
888
+ elif ufunc.nout == 1:
889
+ out_args = ', /, out=None'
890
+ else:
891
+ out_args = '[, {positional}], / [, out={default}]'.format(
892
+ positional=', '.join(
893
+ 'out{}'.format(i+1) for i in range(ufunc.nout)),
894
+ default=repr((None,)*ufunc.nout)
895
+ )
896
+
897
+ # keyword only args depend on whether this is a gufunc
898
+ kwargs = (
899
+ ", casting='same_kind'"
900
+ ", order='K'"
901
+ ", dtype=None"
902
+ ", subok=True"
903
+ )
904
+
905
+ # NOTE: gufuncs may or may not support the `axis` parameter
906
+ if ufunc.signature is None:
907
+ kwargs = f", where=True{kwargs}[, signature, extobj]"
908
+ else:
909
+ kwargs += "[, signature, extobj, axes, axis]"
910
+
911
+ # join all the parts together
912
+ return '{name}({in_args}{out_args}, *{kwargs})'.format(
913
+ name=ufunc.__name__,
914
+ in_args=in_args,
915
+ out_args=out_args,
916
+ kwargs=kwargs
917
+ )
918
+
919
+
920
+ def npy_ctypes_check(cls):
921
+ # determine if a class comes from ctypes, in order to work around
922
+ # a bug in the buffer protocol for those objects, bpo-10746
923
+ try:
924
+ # ctypes class are new-style, so have an __mro__. This probably fails
925
+ # for ctypes classes with multiple inheritance.
926
+ if IS_PYPY:
927
+ # (..., _ctypes.basics._CData, Bufferable, object)
928
+ ctype_base = cls.__mro__[-3]
929
+ else:
930
+ # # (..., _ctypes._CData, object)
931
+ ctype_base = cls.__mro__[-2]
932
+ # right now, they're part of the _ctypes module
933
+ return '_ctypes' in ctype_base.__module__
934
+ except Exception:
935
+ return False
.venv/lib/python3.11/site-packages/numpy/core/_type_aliases.pyi ADDED
@@ -0,0 +1,13 @@
+from typing import Any, TypedDict
+
+from numpy import generic, signedinteger, unsignedinteger, floating, complexfloating
+
+class _SCTypes(TypedDict):
+    int: list[type[signedinteger[Any]]]
+    uint: list[type[unsignedinteger[Any]]]
+    float: list[type[floating[Any]]]
+    complex: list[type[complexfloating[Any, Any]]]
+    others: list[type]
+
+sctypeDict: dict[int | str, type[generic]]
+sctypes: _SCTypes
.venv/lib/python3.11/site-packages/numpy/core/_umath_tests.cpython-311-x86_64-linux-gnu.so ADDED
Binary file (42.3 kB).
 
.venv/lib/python3.11/site-packages/numpy/core/cversions.py ADDED
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+    curdir = dirname(__file__)
+    print(fullapi_hash(full_api))
.venv/lib/python3.11/site-packages/numpy/core/fromnumeric.py ADDED
The diff for this file is too large to render.
 
.venv/lib/python3.11/site-packages/numpy/core/numerictypes.py ADDED
@@ -0,0 +1,689 @@
1
+ """
2
+ numerictypes: Define the numeric type objects
3
+
4
+ This module is designed so "from numerictypes import \\*" is safe.
5
+ Exported symbols include:
6
+
7
+ Dictionary with all registered number types (including aliases):
8
+ sctypeDict
9
+
10
+ Type objects (not all will be available, depends on platform):
11
+ see variable sctypes for which ones you have
12
+
13
+ Bit-width names
14
+
15
+ int8 int16 int32 int64 int128
16
+ uint8 uint16 uint32 uint64 uint128
17
+ float16 float32 float64 float96 float128 float256
18
+ complex32 complex64 complex128 complex192 complex256 complex512
19
+ datetime64 timedelta64
20
+
21
+ c-based names
22
+
23
+ bool_
24
+
25
+ object_
26
+
27
+ void, str_, unicode_
28
+
29
+ byte, ubyte,
30
+ short, ushort
31
+ intc, uintc,
32
+ intp, uintp,
33
+ int_, uint,
34
+ longlong, ulonglong,
35
+
36
+ single, csingle,
37
+ float_, complex_,
38
+ longfloat, clongfloat,
39
+
40
+ As part of the type-hierarchy: xx -- is bit-width
41
+
42
+ generic
43
+ +-> bool_ (kind=b)
44
+ +-> number
45
+ | +-> integer
46
+ | | +-> signedinteger (intxx) (kind=i)
47
+ | | | byte
48
+ | | | short
49
+ | | | intc
50
+ | | | intp
51
+ | | | int_
52
+ | | | longlong
53
+ | | \\-> unsignedinteger (uintxx) (kind=u)
54
+ | | ubyte
55
+ | | ushort
56
+ | | uintc
57
+ | | uintp
58
+ | | uint_
59
+ | | ulonglong
60
+ | +-> inexact
61
+ | +-> floating (floatxx) (kind=f)
62
+ | | half
63
+ | | single
64
+ | | float_ (double)
65
+ | | longfloat
66
+ | \\-> complexfloating (complexxx) (kind=c)
67
+ | csingle (singlecomplex)
68
+ | complex_ (cfloat, cdouble)
69
+ | clongfloat (longcomplex)
70
+ +-> flexible
71
+ | +-> character
72
+ | | str_ (string_, bytes_) (kind=S) [Python 2]
73
+ | | unicode_ (kind=U) [Python 2]
74
+ | |
75
+ | | bytes_ (string_) (kind=S) [Python 3]
76
+ | | str_ (unicode_) (kind=U) [Python 3]
77
+ | |
78
+ | \\-> void (kind=V)
79
+ \\-> object_ (not used much) (kind=O)
80
+
81
+ """
82
+ import numbers
83
+ import warnings
84
+
85
+ from .multiarray import (
86
+ ndarray, array, dtype, datetime_data, datetime_as_string,
87
+ busday_offset, busday_count, is_busday, busdaycalendar
88
+ )
89
+ from .._utils import set_module
90
+
91
+ # we add more at the bottom
92
+ __all__ = ['sctypeDict', 'sctypes',
93
+ 'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
94
+ 'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
95
+ 'issubdtype', 'datetime_data', 'datetime_as_string',
96
+ 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
97
+ ]
98
+
99
+ # we don't need all these imports, but we need to keep them for compatibility
100
+ # for users using np.core.numerictypes.UPPER_TABLE
101
+ from ._string_helpers import (
102
+ english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
103
+ )
104
+
105
+ from ._type_aliases import (
106
+ sctypeDict,
107
+ allTypes,
108
+ bitname,
109
+ sctypes,
110
+ _concrete_types,
111
+ _concrete_typeinfo,
112
+ _bits_of,
113
+ )
114
+ from ._dtype import _kind_name
115
+
116
+ # we don't export these for import *, but we do want them accessible
117
+ # as numerictypes.bool, etc.
118
+ from builtins import bool, int, float, complex, object, str, bytes
119
+ from numpy.compat import long, unicode
120
+
121
+
122
+ # We use this later
123
+ generic = allTypes['generic']
124
+
125
+ genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
126
+ 'int32', 'uint32', 'int64', 'uint64', 'int128',
127
+ 'uint128', 'float16',
128
+ 'float32', 'float64', 'float80', 'float96', 'float128',
129
+ 'float256',
130
+ 'complex32', 'complex64', 'complex128', 'complex160',
131
+ 'complex192', 'complex256', 'complex512', 'object']
132
+
133
+ @set_module('numpy')
134
+ def maximum_sctype(t):
135
+ """
136
+ Return the scalar type of highest precision of the same kind as the input.
137
+
138
+ Parameters
139
+ ----------
140
+ t : dtype or dtype specifier
141
+ The input data type. This can be a `dtype` object or an object that
142
+ is convertible to a `dtype`.
143
+
144
+ Returns
145
+ -------
146
+ out : dtype
147
+ The highest precision data type of the same kind (`dtype.kind`) as `t`.
148
+
149
+ See Also
150
+ --------
151
+ obj2sctype, mintypecode, sctype2char
152
+ dtype
153
+
154
+ Examples
155
+ --------
156
+ >>> np.maximum_sctype(int)
157
+ <class 'numpy.int64'>
158
+ >>> np.maximum_sctype(np.uint8)
159
+ <class 'numpy.uint64'>
160
+ >>> np.maximum_sctype(complex)
161
+ <class 'numpy.complex256'> # may vary
162
+
163
+ >>> np.maximum_sctype(str)
164
+ <class 'numpy.str_'>
165
+
166
+ >>> np.maximum_sctype('i2')
167
+ <class 'numpy.int64'>
168
+ >>> np.maximum_sctype('f4')
169
+ <class 'numpy.float128'> # may vary
170
+
171
+ """
172
+ g = obj2sctype(t)
173
+ if g is None:
174
+ return t
175
+ t = g
176
+ base = _kind_name(dtype(t))
177
+ if base in sctypes:
178
+ return sctypes[base][-1]
179
+ else:
180
+ return t
181
+
182
+
183
+ @set_module('numpy')
184
+ def issctype(rep):
185
+ """
186
+ Determines whether the given object represents a scalar data-type.
187
+
188
+ Parameters
189
+ ----------
190
+ rep : any
191
+ If `rep` is an instance of a scalar dtype, True is returned. If not,
192
+ False is returned.
193
+
194
+ Returns
195
+ -------
196
+ out : bool
197
+ Boolean result of check whether `rep` is a scalar dtype.
198
+
199
+ See Also
200
+ --------
201
+ issubsctype, issubdtype, obj2sctype, sctype2char
202
+
203
+ Examples
204
+ --------
205
+ >>> np.issctype(np.int32)
206
+ True
207
+ >>> np.issctype(list)
208
+ False
209
+ >>> np.issctype(1.1)
210
+ False
211
+
212
+ Strings are also a scalar type:
213
+
214
+ >>> np.issctype(np.dtype('str'))
215
+ True
216
+
217
+ """
218
+ if not isinstance(rep, (type, dtype)):
219
+ return False
220
+ try:
221
+ res = obj2sctype(rep)
222
+ if res and res != object_:
223
+ return True
224
+ return False
225
+ except Exception:
226
+ return False
227
+
228
+
229
+ @set_module('numpy')
230
+ def obj2sctype(rep, default=None):
231
+ """
232
+ Return the scalar dtype or NumPy equivalent of Python type of an object.
233
+
234
+ Parameters
235
+ ----------
236
+ rep : any
237
+ The object of which the type is returned.
238
+ default : any, optional
239
+ If given, this is returned for objects whose types can not be
240
+ determined. If not given, None is returned for those objects.
241
+
242
+ Returns
243
+ -------
244
+ dtype : dtype or Python type
245
+ The data type of `rep`.
246
+
247
+ See Also
248
+ --------
249
+ sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
250
+
251
+ Examples
252
+ --------
253
+ >>> np.obj2sctype(np.int32)
254
+ <class 'numpy.int32'>
255
+ >>> np.obj2sctype(np.array([1., 2.]))
256
+ <class 'numpy.float64'>
257
+ >>> np.obj2sctype(np.array([1.j]))
258
+ <class 'numpy.complex128'>
259
+
260
+ >>> np.obj2sctype(dict)
261
+ <class 'numpy.object_'>
262
+ >>> np.obj2sctype('string')
263
+
264
+ >>> np.obj2sctype(1, default=list)
265
+ <class 'list'>
266
+
267
+ """
268
+ # prevent abstract classes being upcast
269
+ if isinstance(rep, type) and issubclass(rep, generic):
270
+ return rep
271
+ # extract dtype from arrays
272
+ if isinstance(rep, ndarray):
273
+ return rep.dtype.type
274
+ # fall back on dtype to convert
275
+ try:
276
+ res = dtype(rep)
277
+ except Exception:
278
+ return default
279
+ else:
280
+ return res.type
281
+
282
+
283
+ @set_module('numpy')
284
+ def issubclass_(arg1, arg2):
285
+ """
286
+ Determine if a class is a subclass of a second class.
287
+
288
+ `issubclass_` is equivalent to the Python built-in ``issubclass``,
289
+ except that it returns False instead of raising a TypeError if one
290
+ of the arguments is not a class.
291
+
292
+ Parameters
293
+ ----------
294
+ arg1 : class
295
+ Input class. True is returned if `arg1` is a subclass of `arg2`.
296
+ arg2 : class or tuple of classes.
297
+ Input class. If a tuple of classes, True is returned if `arg1` is a
298
+ subclass of any of the tuple elements.
299
+
300
+ Returns
301
+ -------
302
+ out : bool
303
+ Whether `arg1` is a subclass of `arg2` or not.
304
+
305
+ See Also
306
+ --------
307
+ issubsctype, issubdtype, issctype
308
+
309
+ Examples
310
+ --------
311
+ >>> np.issubclass_(np.int32, int)
312
+ False
313
+ >>> np.issubclass_(np.int32, float)
314
+ False
315
+ >>> np.issubclass_(np.float64, float)
316
+ True
317
+
318
+ """
319
+ try:
320
+ return issubclass(arg1, arg2)
321
+ except TypeError:
322
+ return False
323
+
324
+
325
+ @set_module('numpy')
326
+ def issubsctype(arg1, arg2):
327
+ """
328
+ Determine if the first argument is a subclass of the second argument.
329
+
330
+ Parameters
331
+ ----------
332
+ arg1, arg2 : dtype or dtype specifier
333
+ Data-types.
334
+
335
+ Returns
336
+ -------
337
+ out : bool
338
+ The result.
339
+
340
+ See Also
341
+ --------
342
+ issctype, issubdtype, obj2sctype
343
+
344
+ Examples
345
+ --------
346
+ >>> np.issubsctype('S8', str)
347
+ False
348
+ >>> np.issubsctype(np.array([1]), int)
349
+ True
350
+ >>> np.issubsctype(np.array([1]), float)
351
+ False
352
+
353
+ """
354
+ return issubclass(obj2sctype(arg1), obj2sctype(arg2))
355
+
356
+
357
+ @set_module('numpy')
358
+ def issubdtype(arg1, arg2):
359
+ r"""
360
+ Returns True if first argument is a typecode lower/equal in type hierarchy.
361
+
362
+ This is like the builtin :func:`issubclass`, but for `dtype`\ s.
363
+
364
+ Parameters
365
+ ----------
366
+ arg1, arg2 : dtype_like
367
+ `dtype` or object coercible to one
368
+
369
+ Returns
370
+ -------
371
+ out : bool
372
+
373
+ See Also
374
+ --------
375
+ :ref:`arrays.scalars` : Overview of the numpy type hierarchy.
376
+ issubsctype, issubclass_
377
+
378
+ Examples
379
+ --------
380
+ `issubdtype` can be used to check the type of arrays:
381
+
382
+ >>> ints = np.array([1, 2, 3], dtype=np.int32)
383
+ >>> np.issubdtype(ints.dtype, np.integer)
384
+ True
385
+ >>> np.issubdtype(ints.dtype, np.floating)
386
+ False
387
+
388
+ >>> floats = np.array([1, 2, 3], dtype=np.float32)
389
+ >>> np.issubdtype(floats.dtype, np.integer)
390
+ False
391
+ >>> np.issubdtype(floats.dtype, np.floating)
392
+ True
393
+
394
+ Similar types of different sizes are not subdtypes of each other:
395
+
396
+ >>> np.issubdtype(np.float64, np.float32)
397
+ False
398
+ >>> np.issubdtype(np.float32, np.float64)
399
+ False
400
+
401
+ but both are subtypes of `floating`:
402
+
403
+ >>> np.issubdtype(np.float64, np.floating)
404
+ True
405
+ >>> np.issubdtype(np.float32, np.floating)
406
+ True
407
+
408
+ For convenience, dtype-like objects are allowed too:
409
+
410
+ >>> np.issubdtype('S1', np.string_)
411
+ True
412
+ >>> np.issubdtype('i4', np.signedinteger)
413
+ True
414
+
415
+ """
416
+ if not issubclass_(arg1, generic):
417
+ arg1 = dtype(arg1).type
418
+ if not issubclass_(arg2, generic):
419
+ arg2 = dtype(arg2).type
420
+
421
+ return issubclass(arg1, arg2)
422
+
423
+
424
+ # This dictionary allows look up based on any alias for an array data-type
425
+ class _typedict(dict):
426
+ """
427
+ Base object for a dictionary for look-up with any alias for an array dtype.
428
+
429
+ Instances of `_typedict` can not be used as dictionaries directly,
430
+ first they have to be populated.
431
+
432
+ """
433
+
434
+ def __getitem__(self, obj):
435
+ return dict.__getitem__(self, obj2sctype(obj))
436
+
437
+ nbytes = _typedict()
438
+ _alignment = _typedict()
439
+ _maxvals = _typedict()
440
+ _minvals = _typedict()
441
+ def _construct_lookups():
442
+ for name, info in _concrete_typeinfo.items():
443
+ obj = info.type
444
+ nbytes[obj] = info.bits // 8
445
+ _alignment[obj] = info.alignment
446
+ if len(info) > 5:
447
+ _maxvals[obj] = info.max
448
+ _minvals[obj] = info.min
449
+ else:
450
+ _maxvals[obj] = None
451
+ _minvals[obj] = None
452
+
453
+ _construct_lookups()
454
+
455
+
456
+ @set_module('numpy')
457
+ def sctype2char(sctype):
458
+ """
459
+ Return the string representation of a scalar dtype.
460
+
461
+ Parameters
462
+ ----------
463
+ sctype : scalar dtype or object
464
+ If a scalar dtype, the corresponding string character is
465
+ returned. If an object, `sctype2char` tries to infer its scalar type
466
+ and then return the corresponding string character.
467
+
468
+ Returns
469
+ -------
470
+ typechar : str
471
+ The string character corresponding to the scalar type.
472
+
473
+ Raises
474
+ ------
475
+ ValueError
476
+ If `sctype` is an object for which the type can not be inferred.
477
+
478
+ See Also
479
+ --------
480
+ obj2sctype, issctype, issubsctype, mintypecode
481
+
482
+ Examples
483
+ --------
484
+ >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
485
+ ... print(np.sctype2char(sctype))
486
+ l # may vary
487
+ d
488
+ D
489
+ S
490
+ O
491
+
492
+ >>> x = np.array([1., 2-1.j])
493
+ >>> np.sctype2char(x)
494
+ 'D'
495
+ >>> np.sctype2char(list)
496
+ 'O'
497
+
498
+ """
499
+ sctype = obj2sctype(sctype)
500
+ if sctype is None:
501
+ raise ValueError("unrecognized type")
502
+ if sctype not in _concrete_types:
503
+ # for compatibility
504
+ raise KeyError(sctype)
505
+ return dtype(sctype).char
506
+
507
+ # Create dictionary of casting functions that wrap sequences
508
+ # indexed by type or type character
509
+ cast = _typedict()
510
+ for key in _concrete_types:
511
+ cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
512
+
513
+
514
+ def _scalar_type_key(typ):
515
+ """A ``key`` function for `sorted`."""
516
+ dt = dtype(typ)
517
+ return (dt.kind.lower(), dt.itemsize)
518
+
519
+
520
+ ScalarType = [int, float, complex, bool, bytes, str, memoryview]
521
+ ScalarType += sorted(_concrete_types, key=_scalar_type_key)
522
+ ScalarType = tuple(ScalarType)
523
+
524
+
525
+ # Now add the types we've determined to this module
526
+ for key in allTypes:
527
+ globals()[key] = allTypes[key]
528
+ __all__.append(key)
529
+
530
+ del key
531
+
532
+ typecodes = {'Character':'c',
533
+ 'Integer':'bhilqp',
534
+ 'UnsignedInteger':'BHILQP',
535
+ 'Float':'efdg',
536
+ 'Complex':'FDG',
537
+ 'AllInteger':'bBhHiIlLqQpP',
538
+ 'AllFloat':'efdgFDG',
539
+ 'Datetime': 'Mm',
540
+ 'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
541
+
542
+ # backwards compatibility --- deprecated name
543
+ # Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
544
+ typeDict = sctypeDict
545
+
546
+ # b -> boolean
547
+ # u -> unsigned integer
548
+ # i -> signed integer
549
+ # f -> floating point
550
+ # c -> complex
551
+ # M -> datetime
552
+ # m -> timedelta
553
+ # S -> string
554
+ # U -> Unicode string
555
+ # V -> record
556
+ # O -> Python object
557
+ _kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
558
+
559
+ __test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
560
+ __len_test_types = len(__test_types)
561
+
562
+ # Keep incrementing until a common type both can be coerced to
563
+ # is found. Otherwise, return None
564
+ def _find_common_coerce(a, b):
565
+ if a > b:
566
+ return a
567
+ try:
568
+ thisind = __test_types.index(a.char)
569
+ except ValueError:
570
+ return None
571
+ return _can_coerce_all([a, b], start=thisind)
572
+
573
+ # Find a data-type that all data-types in a list can be coerced to
574
+ def _can_coerce_all(dtypelist, start=0):
575
+ N = len(dtypelist)
576
+ if N == 0:
577
+ return None
578
+ if N == 1:
579
+ return dtypelist[0]
580
+ thisind = start
581
+ while thisind < __len_test_types:
582
+ newdtype = dtype(__test_types[thisind])
583
+ numcoerce = len([x for x in dtypelist if newdtype >= x])
584
+ if numcoerce == N:
585
+ return newdtype
586
+ thisind += 1
587
+ return None
588
+
589
+ def _register_types():
590
+ numbers.Integral.register(integer)
591
+ numbers.Complex.register(inexact)
592
+ numbers.Real.register(floating)
593
+ numbers.Number.register(number)
594
+
595
+ _register_types()
596
+
597
+
598
+ @set_module('numpy')
599
+ def find_common_type(array_types, scalar_types):
600
+ """
601
+ Determine common type following standard coercion rules.
602
+
603
+ .. deprecated:: NumPy 1.25
604
+
605
+ This function is deprecated, use `numpy.promote_types` or
606
+ `numpy.result_type` instead. To achieve semantics for the
607
+ `scalar_types` argument, use `numpy.result_type` and pass the Python
608
+ values `0`, `0.0`, or `0j`.
609
+ This will give the same results in almost all cases.
610
+ More information and rare exception can be found in the
611
+ `NumPy 1.25 release notes <https://numpy.org/devdocs/release/1.25.0-notes.html>`_.
612
+
613
+ Parameters
614
+ ----------
615
+ array_types : sequence
616
+ A list of dtypes or dtype convertible objects representing arrays.
617
+ scalar_types : sequence
618
+ A list of dtypes or dtype convertible objects representing scalars.
619
+
620
+ Returns
621
+ -------
622
+ datatype : dtype
623
+ The common data type, which is the maximum of `array_types` ignoring
624
+ `scalar_types`, unless the maximum of `scalar_types` is of a
625
+ different kind (`dtype.kind`). If the kind is not understood, then
626
+ None is returned.
627
+
628
+ See Also
629
+ --------
630
+ dtype, common_type, can_cast, mintypecode
631
+
632
+ Examples
633
+ --------
634
+ >>> np.find_common_type([], [np.int64, np.float32, complex])
635
+ dtype('complex128')
636
+ >>> np.find_common_type([np.int64, np.float32], [])
637
+ dtype('float64')
638
+
639
+ The standard casting rules ensure that a scalar cannot up-cast an
640
+ array unless the scalar is of a fundamentally different kind of data
641
+ (i.e. under a different hierarchy in the data type hierarchy) than
642
+ the array:
643
+
644
+ >>> np.find_common_type([np.float32], [np.int64, np.float64])
645
+ dtype('float32')
646
+
647
+ Complex is of a different type, so it up-casts the float in the
648
+ `array_types` argument:
649
+
650
+ >>> np.find_common_type([np.float32], [complex])
651
+ dtype('complex128')
652
+
653
+ Type specifier strings are convertible to dtypes and can therefore
654
+ be used instead of dtypes:
655
+
656
+ >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
657
+ dtype('complex128')
658
+
659
+ """
660
+ # Deprecated 2022-11-07, NumPy 1.25
661
+ warnings.warn(
662
+ "np.find_common_type is deprecated. Please use `np.result_type` "
663
+ "or `np.promote_types`.\n"
664
+ "See https://numpy.org/devdocs/release/1.25.0-notes.html and the "
665
+ "docs for more information. (Deprecated NumPy 1.25)",
666
+ DeprecationWarning, stacklevel=2)
667
+
668
+ array_types = [dtype(x) for x in array_types]
669
+ scalar_types = [dtype(x) for x in scalar_types]
670
+
671
+ maxa = _can_coerce_all(array_types)
672
+ maxsc = _can_coerce_all(scalar_types)
673
+
674
+ if maxa is None:
675
+ return maxsc
676
+
677
+ if maxsc is None:
678
+ return maxa
679
+
680
+ try:
681
+ index_a = _kind_list.index(maxa.kind)
682
+ index_sc = _kind_list.index(maxsc.kind)
683
+ except ValueError:
684
+ return None
685
+
686
+ if index_sc > index_a:
687
+ return _find_common_coerce(maxsc, maxa)
688
+ else:
689
+ return maxa
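A minimal sketch of the replacement recommended in the deprecation notice above, using `np.result_type` with Python scalar values in place of the `scalar_types` argument. Output dtypes noted in the comments follow default NumPy promotion; the complex case is one of the rare differences mentioned in the 1.25 release notes.

    import numpy as np

    # Array dtypes promote as before:
    print(np.result_type(np.int64, np.float32))      # float64

    # Passing Python scalars (0, 0.0, 0j) reproduces the scalar_types
    # semantics in almost all cases; plain int/float scalars do not upcast:
    print(np.result_type(np.float32, 0, 0.0))        # float32

    # A complex Python scalar changes the kind and therefore upcasts,
    # though the resulting precision may differ from find_common_type
    # (a rare exception noted in the release notes):
    print(np.result_type(np.float32, 0j))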
.venv/lib/python3.11/site-packages/numpy/core/overrides.py ADDED
@@ -0,0 +1,181 @@
1
+ """Implementation of __array_function__ overrides from NEP-18."""
2
+ import collections
3
+ import functools
4
+ import os
5
+
6
+ from .._utils import set_module
7
+ from .._utils._inspect import getargspec
8
+ from numpy.core._multiarray_umath import (
9
+ add_docstring, _get_implementing_args, _ArrayFunctionDispatcher)
10
+
11
+
12
+ ARRAY_FUNCTIONS = set()
13
+
14
+ array_function_like_doc = (
15
+ """like : array_like, optional
16
+ Reference object to allow the creation of arrays which are not
17
+ NumPy arrays. If an array-like passed in as ``like`` supports
18
+ the ``__array_function__`` protocol, the result will be defined
19
+ by it. In this case, it ensures the creation of an array object
20
+ compatible with that passed in via this argument."""
21
+ )
22
+
23
+ def set_array_function_like_doc(public_api):
24
+ if public_api.__doc__ is not None:
25
+ public_api.__doc__ = public_api.__doc__.replace(
26
+ "${ARRAY_FUNCTION_LIKE}",
27
+ array_function_like_doc,
28
+ )
29
+ return public_api
30
+
31
+
32
+ add_docstring(
33
+ _ArrayFunctionDispatcher,
34
+ """
35
+ Class to wrap functions with checks for __array_function__ overrides.
36
+
37
+ All arguments are required, and can only be passed by position.
38
+
39
+ Parameters
40
+ ----------
41
+ dispatcher : function or None
42
+ The dispatcher function that returns a single sequence-like object
43
+ of all arguments relevant. It must have the same signature (except
44
+ the default values) as the actual implementation.
45
+ If ``None``, this is a ``like=`` dispatcher and the
46
+ ``_ArrayFunctionDispatcher`` must be called with ``like`` as the
47
+ first (additional and positional) argument.
48
+ implementation : function
49
+ Function that implements the operation on NumPy arrays without
50
+ overrides. Arguments passed when calling the ``_ArrayFunctionDispatcher``
51
+ will be forwarded to this (and the ``dispatcher``) as if using
52
+ ``*args, **kwargs``.
53
+
54
+ Attributes
55
+ ----------
56
+ _implementation : function
57
+ The original implementation passed in.
58
+ """)
59
+
60
+
61
+ # exposed for testing purposes; used internally by _ArrayFunctionDispatcher
62
+ add_docstring(
63
+ _get_implementing_args,
64
+ """
65
+ Collect arguments on which to call __array_function__.
66
+
67
+ Parameters
68
+ ----------
69
+ relevant_args : iterable of array-like
70
+ Iterable of possibly array-like arguments to check for
71
+ __array_function__ methods.
72
+
73
+ Returns
74
+ -------
75
+ Sequence of arguments with __array_function__ methods, in the order in
76
+ which they should be called.
77
+ """)
78
+
79
+
80
+ ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
81
+
82
+
83
+ def verify_matching_signatures(implementation, dispatcher):
84
+ """Verify that a dispatcher function has the right signature."""
85
+ implementation_spec = ArgSpec(*getargspec(implementation))
86
+ dispatcher_spec = ArgSpec(*getargspec(dispatcher))
87
+
88
+ if (implementation_spec.args != dispatcher_spec.args or
89
+ implementation_spec.varargs != dispatcher_spec.varargs or
90
+ implementation_spec.keywords != dispatcher_spec.keywords or
91
+ (bool(implementation_spec.defaults) !=
92
+ bool(dispatcher_spec.defaults)) or
93
+ (implementation_spec.defaults is not None and
94
+ len(implementation_spec.defaults) !=
95
+ len(dispatcher_spec.defaults))):
96
+ raise RuntimeError('implementation and dispatcher for %s have '
97
+ 'different function signatures' % implementation)
98
+
99
+ if implementation_spec.defaults is not None:
100
+ if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
101
+ raise RuntimeError('dispatcher functions can only use None for '
102
+ 'default argument values')
103
+
104
+
105
+ def array_function_dispatch(dispatcher=None, module=None, verify=True,
106
+ docs_from_dispatcher=False):
107
+ """Decorator for adding dispatch with the __array_function__ protocol.
108
+
109
+ See NEP-18 for example usage.
110
+
111
+ Parameters
112
+ ----------
113
+ dispatcher : callable or None
114
+ Function that when called like ``dispatcher(*args, **kwargs)`` with
115
+ arguments from the NumPy function call returns an iterable of
116
+ array-like arguments to check for ``__array_function__``.
117
+
118
+ If `None`, the first argument is used as the single `like=` argument
119
+ and not passed on. A function implementing `like=` must call its
120
+ dispatcher with `like` as the first non-keyword argument.
121
+ module : str, optional
122
+ __module__ attribute to set on new function, e.g., ``module='numpy'``.
123
+ By default, module is copied from the decorated function.
124
+ verify : bool, optional
125
+ If True, verify that the dispatcher and decorated
126
+ function signatures match exactly: all required and optional arguments
127
+ should appear in order with the same names, but the default values for
128
+ all optional arguments should be ``None``. Only disable verification
129
+ if the dispatcher's signature needs to deviate for some particular
130
+ reason, e.g., because the function has a signature like
131
+ ``func(*args, **kwargs)``.
132
+ docs_from_dispatcher : bool, optional
133
+ If True, copy docs from the dispatcher function onto the dispatched
134
+ function, rather than from the implementation. This is useful for
135
+ functions defined in C, which otherwise don't have docstrings.
136
+
137
+ Returns
138
+ -------
139
+ Function suitable for decorating the implementation of a NumPy function.
140
+
141
+ """
142
+ def decorator(implementation):
143
+ if verify:
144
+ if dispatcher is not None:
145
+ verify_matching_signatures(implementation, dispatcher)
146
+ else:
147
+ # Using __code__ directly similar to verify_matching_signature
148
+ co = implementation.__code__
149
+ last_arg = co.co_argcount + co.co_kwonlyargcount - 1
150
+ last_arg = co.co_varnames[last_arg]
151
+ if last_arg != "like" or co.co_kwonlyargcount == 0:
152
+ raise RuntimeError(
153
+ "__array_function__ expects `like=` to be the last "
154
+ "argument and a keyword-only argument. "
155
+ f"{implementation} does not seem to comply.")
156
+
157
+ if docs_from_dispatcher:
158
+ add_docstring(implementation, dispatcher.__doc__)
159
+
160
+ public_api = _ArrayFunctionDispatcher(dispatcher, implementation)
161
+ public_api = functools.wraps(implementation)(public_api)
162
+
163
+ if module is not None:
164
+ public_api.__module__ = module
165
+
166
+ ARRAY_FUNCTIONS.add(public_api)
167
+
168
+ return public_api
169
+
170
+ return decorator
171
+
172
+
173
+ def array_function_from_dispatcher(
174
+ implementation, module=None, verify=True, docs_from_dispatcher=True):
175
+ """Like array_function_dispatcher, but with function arguments flipped."""
176
+
177
+ def decorator(dispatcher):
178
+ return array_function_dispatch(
179
+ dispatcher, module, verify=verify,
180
+ docs_from_dispatcher=docs_from_dispatcher)(implementation)
181
+ return decorator
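For context, a short NEP-18 sketch of the consumer side of this machinery: a hypothetical duck-array class (`TaggedArray`, illustration only, simplified to functions whose first argument is a sequence of arrays) that intercepts a dispatched function such as `np.concatenate` through `__array_function__`.

    import numpy as np

    class TaggedArray:
        """Hypothetical duck array recording which NumPy functions were dispatched."""
        def __init__(self, data):
            self.data = np.asarray(data)
            self.calls = []

        def __array_function__(self, func, types, args, kwargs):
            # func is the public NumPy function (e.g. np.concatenate); unwrap our
            # objects, call the plain ndarray implementation, and re-wrap the result.
            self.calls.append(func.__name__)
            unwrapped = [a.data if isinstance(a, TaggedArray) else a
                         for a in args[0]]
            return TaggedArray(func(unwrapped, **kwargs))

    a, b = TaggedArray([1, 2]), TaggedArray([3, 4])
    out = np.concatenate([a, b])   # dispatched to TaggedArray.__array_function__
    print(type(out).__name__, out.data, a.calls)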
.venv/lib/python3.11/site-packages/numpy/fft/__pycache__/_pocketfft.cpython-311.pyc ADDED
Binary file (57.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/fft/tests/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/numpy/fft/tests/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (188 Bytes). View file
 
.venv/lib/python3.11/site-packages/numpy/fft/tests/__pycache__/test_helper.cpython-311.pyc ADDED
Binary file (10.4 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/fft/tests/__pycache__/test_pocketfft.cpython-311.pyc ADDED
Binary file (30.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/fft/tests/test_helper.py ADDED
@@ -0,0 +1,167 @@
1
+ """Test functions for fftpack.helper module
2
+
3
+ Copied from fftpack.helper by Pearu Peterson, October 2005
4
+
5
+ """
6
+ import numpy as np
7
+ from numpy.testing import assert_array_almost_equal
8
+ from numpy import fft, pi
9
+
10
+
11
+ class TestFFTShift:
12
+
13
+ def test_definition(self):
14
+ x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
15
+ y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
16
+ assert_array_almost_equal(fft.fftshift(x), y)
17
+ assert_array_almost_equal(fft.ifftshift(y), x)
18
+ x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
19
+ y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
20
+ assert_array_almost_equal(fft.fftshift(x), y)
21
+ assert_array_almost_equal(fft.ifftshift(y), x)
22
+
23
+ def test_inverse(self):
24
+ for n in [1, 4, 9, 100, 211]:
25
+ x = np.random.random((n,))
26
+ assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
27
+
28
+ def test_axes_keyword(self):
29
+ freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
30
+ shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
31
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
32
+ assert_array_almost_equal(fft.fftshift(freqs, axes=0),
33
+ fft.fftshift(freqs, axes=(0,)))
34
+ assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
35
+ assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
36
+ fft.ifftshift(shifted, axes=(0,)))
37
+
38
+ assert_array_almost_equal(fft.fftshift(freqs), shifted)
39
+ assert_array_almost_equal(fft.ifftshift(shifted), freqs)
40
+
41
+ def test_uneven_dims(self):
42
+ """ Test 2D input, which has uneven dimension sizes """
43
+ freqs = [
44
+ [0, 1],
45
+ [2, 3],
46
+ [4, 5]
47
+ ]
48
+
49
+ # shift in dimension 0
50
+ shift_dim0 = [
51
+ [4, 5],
52
+ [0, 1],
53
+ [2, 3]
54
+ ]
55
+ assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
56
+ assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
57
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
58
+ assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
59
+
60
+ # shift in dimension 1
61
+ shift_dim1 = [
62
+ [1, 0],
63
+ [3, 2],
64
+ [5, 4]
65
+ ]
66
+ assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
67
+ assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
68
+
69
+ # shift in both dimensions
70
+ shift_dim_both = [
71
+ [5, 4],
72
+ [1, 0],
73
+ [3, 2]
74
+ ]
75
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
76
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
77
+ assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
78
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
79
+
80
+ # axes=None (default) shift in all dimensions
81
+ assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
82
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
83
+ assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
84
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
85
+
86
+ def test_equal_to_original(self):
87
+ """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
88
+ from numpy.core import asarray, concatenate, arange, take
89
+
90
+ def original_fftshift(x, axes=None):
91
+ """ How fftshift was implemented in v1.14"""
92
+ tmp = asarray(x)
93
+ ndim = tmp.ndim
94
+ if axes is None:
95
+ axes = list(range(ndim))
96
+ elif isinstance(axes, int):
97
+ axes = (axes,)
98
+ y = tmp
99
+ for k in axes:
100
+ n = tmp.shape[k]
101
+ p2 = (n + 1) // 2
102
+ mylist = concatenate((arange(p2, n), arange(p2)))
103
+ y = take(y, mylist, k)
104
+ return y
105
+
106
+ def original_ifftshift(x, axes=None):
107
+ """ How ifftshift was implemented in v1.14 """
108
+ tmp = asarray(x)
109
+ ndim = tmp.ndim
110
+ if axes is None:
111
+ axes = list(range(ndim))
112
+ elif isinstance(axes, int):
113
+ axes = (axes,)
114
+ y = tmp
115
+ for k in axes:
116
+ n = tmp.shape[k]
117
+ p2 = n - (n + 1) // 2
118
+ mylist = concatenate((arange(p2, n), arange(p2)))
119
+ y = take(y, mylist, k)
120
+ return y
121
+
122
+ # create possible 2d array combinations and try all possible keywords
123
+ # compare output to original functions
124
+ for i in range(16):
125
+ for j in range(16):
126
+ for axes_keyword in [0, 1, None, (0,), (0, 1)]:
127
+ inp = np.random.rand(i, j)
128
+
129
+ assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
130
+ original_fftshift(inp, axes_keyword))
131
+
132
+ assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
133
+ original_ifftshift(inp, axes_keyword))
134
+
135
+
136
+ class TestFFTFreq:
137
+
138
+ def test_definition(self):
139
+ x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
140
+ assert_array_almost_equal(9*fft.fftfreq(9), x)
141
+ assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
142
+ x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
143
+ assert_array_almost_equal(10*fft.fftfreq(10), x)
144
+ assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
145
+
146
+
147
+ class TestRFFTFreq:
148
+
149
+ def test_definition(self):
150
+ x = [0, 1, 2, 3, 4]
151
+ assert_array_almost_equal(9*fft.rfftfreq(9), x)
152
+ assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
153
+ x = [0, 1, 2, 3, 4, 5]
154
+ assert_array_almost_equal(10*fft.rfftfreq(10), x)
155
+ assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
156
+
157
+
158
+ class TestIRFFTN:
159
+
160
+ def test_not_last_axis_success(self):
161
+ ar, ai = np.random.random((2, 16, 8, 32))
162
+ a = ar + 1j*ai
163
+
164
+ axes = (-2,)
165
+
166
+ # Should not raise error
167
+ fft.irfftn(a, axes=axes)
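A short usage sketch of the helpers these tests exercise, showing how `fftfreq` and `fftshift`/`ifftshift` fit together:

    import numpy as np

    freqs = np.fft.fftfreq(8, d=0.5)   # 8 samples spaced 0.5 apart
    print(freqs)                       # [ 0.    0.25  0.5   0.75 -1.   -0.75 -0.5  -0.25]
    print(np.fft.fftshift(freqs))      # zero frequency moved to the centre
    assert np.allclose(np.fft.ifftshift(np.fft.fftshift(freqs)), freqs)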
.venv/lib/python3.11/site-packages/numpy/random/LICENSE.md ADDED
@@ -0,0 +1,71 @@
1
+ **This software is dual-licensed under the The University of Illinois/NCSA
2
+ Open Source License (NCSA) and The 3-Clause BSD License**
3
+
4
+ # NCSA Open Source License
5
+ **Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
6
+
7
+ Developed by: Kevin Sheppard (<kevin.sheppard@economics.ox.ac.uk>,
8
+ <kevin.k.sheppard@gmail.com>)
9
+ [http://www.kevinsheppard.com](http://www.kevinsheppard.com)
10
+
11
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
12
+ this software and associated documentation files (the "Software"), to deal with
13
+ the Software without restriction, including without limitation the rights to
14
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
15
+ of the Software, and to permit persons to whom the Software is furnished to do
16
+ so, subject to the following conditions:
17
+
18
+ Redistributions of source code must retain the above copyright notice, this
19
+ list of conditions and the following disclaimers.
20
+
21
+ Redistributions in binary form must reproduce the above copyright notice, this
22
+ list of conditions and the following disclaimers in the documentation and/or
23
+ other materials provided with the distribution.
24
+
25
+ Neither the names of Kevin Sheppard, nor the names of any contributors may be
26
+ used to endorse or promote products derived from this Software without specific
27
+ prior written permission.
28
+
29
+ **THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32
+ CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
34
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
35
+ THE SOFTWARE.**
36
+
37
+
38
+ # 3-Clause BSD License
39
+ **Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
40
+
41
+ Redistribution and use in source and binary forms, with or without
42
+ modification, are permitted provided that the following conditions are met:
43
+
44
+ 1. Redistributions of source code must retain the above copyright notice,
45
+ this list of conditions and the following disclaimer.
46
+
47
+ 2. Redistributions in binary form must reproduce the above copyright notice,
48
+ this list of conditions and the following disclaimer in the documentation
49
+ and/or other materials provided with the distribution.
50
+
51
+ 3. Neither the name of the copyright holder nor the names of its contributors
52
+ may be used to endorse or promote products derived from this software
53
+ without specific prior written permission.
54
+
55
+ **THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
56
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
59
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
60
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
61
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
62
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
63
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
64
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
65
+ THE POSSIBILITY OF SUCH DAMAGE.**
66
+
67
+ # Components
68
+
69
+ Many parts of this module have been derived from original sources,
70
+ often the algorithm's designer. Component licenses are located with
71
+ the component code.
.venv/lib/python3.11/site-packages/numpy/random/__init__.pxd ADDED
@@ -0,0 +1,14 @@
1
+ cimport numpy as np
2
+ from libc.stdint cimport uint32_t, uint64_t
3
+
4
+ cdef extern from "numpy/random/bitgen.h":
5
+ struct bitgen:
6
+ void *state
7
+ uint64_t (*next_uint64)(void *st) nogil
8
+ uint32_t (*next_uint32)(void *st) nogil
9
+ double (*next_double)(void *st) nogil
10
+ uint64_t (*next_raw)(void *st) nogil
11
+
12
+ ctypedef bitgen bitgen_t
13
+
14
+ from numpy.random.bit_generator cimport BitGenerator, SeedSequence
.venv/lib/python3.11/site-packages/numpy/random/__init__.py ADDED
@@ -0,0 +1,215 @@
1
+ """
2
+ ========================
3
+ Random Number Generation
4
+ ========================
5
+
6
+ Use ``default_rng()`` to create a `Generator` and call its methods.
7
+
8
+ =============== =========================================================
9
+ Generator
10
+ --------------- ---------------------------------------------------------
11
+ Generator Class implementing all of the random number distributions
12
+ default_rng Default constructor for ``Generator``
13
+ =============== =========================================================
14
+
15
+ ============================================= ===
16
+ BitGenerator Streams that work with Generator
17
+ --------------------------------------------- ---
18
+ MT19937
19
+ PCG64
20
+ PCG64DXSM
21
+ Philox
22
+ SFC64
23
+ ============================================= ===
24
+
25
+ ============================================= ===
26
+ Getting entropy to initialize a BitGenerator
27
+ --------------------------------------------- ---
28
+ SeedSequence
29
+ ============================================= ===
30
+
31
+
32
+ Legacy
33
+ ------
34
+
35
+ For backwards compatibility with previous versions of numpy before 1.17, the
36
+ various aliases to the global `RandomState` methods are left alone and do not
37
+ use the new `Generator` API.
38
+
39
+ ==================== =========================================================
40
+ Utility functions
41
+ -------------------- ---------------------------------------------------------
42
+ random Uniformly distributed floats over ``[0, 1)``
43
+ bytes Uniformly distributed random bytes.
44
+ permutation Randomly permute a sequence / generate a random sequence.
45
+ shuffle Randomly permute a sequence in place.
46
+ choice Random sample from 1-D array.
47
+ ==================== =========================================================
48
+
49
+ ==================== =========================================================
50
+ Compatibility
51
+ functions - removed
52
+ in the new API
53
+ -------------------- ---------------------------------------------------------
54
+ rand Uniformly distributed values.
55
+ randn Normally distributed values.
56
+ ranf Uniformly distributed floating point numbers.
57
+ random_integers Uniformly distributed integers in a given range.
58
+ (deprecated, use ``integers(..., endpoint=True)`` instead)
59
+ random_sample Uniformly distributed floats over ``[0, 1)``.
60
+ randint Uniformly distributed integers in a given range
61
+ seed Seed the legacy random number generator.
62
+ ==================== =========================================================
63
+
64
+ ==================== =========================================================
65
+ Univariate
66
+ distributions
67
+ -------------------- ---------------------------------------------------------
68
+ beta Beta distribution over ``[0, 1]``.
69
+ binomial Binomial distribution.
70
+ chisquare :math:`\\chi^2` distribution.
71
+ exponential Exponential distribution.
72
+ f F (Fisher-Snedecor) distribution.
73
+ gamma Gamma distribution.
74
+ geometric Geometric distribution.
75
+ gumbel Gumbel distribution.
76
+ hypergeometric Hypergeometric distribution.
77
+ laplace Laplace distribution.
78
+ logistic Logistic distribution.
79
+ lognormal Log-normal distribution.
80
+ logseries Logarithmic series distribution.
81
+ negative_binomial Negative binomial distribution.
82
+ noncentral_chisquare Non-central chi-square distribution.
83
+ noncentral_f Non-central F distribution.
84
+ normal Normal / Gaussian distribution.
85
+ pareto Pareto distribution.
86
+ poisson Poisson distribution.
87
+ power Power distribution.
88
+ rayleigh Rayleigh distribution.
89
+ triangular Triangular distribution.
90
+ uniform Uniform distribution.
91
+ vonmises Von Mises circular distribution.
92
+ wald Wald (inverse Gaussian) distribution.
93
+ weibull Weibull distribution.
94
+ zipf Zipf's distribution over ranked data.
95
+ ==================== =========================================================
96
+
97
+ ==================== ==========================================================
98
+ Multivariate
99
+ distributions
100
+ -------------------- ----------------------------------------------------------
101
+ dirichlet Multivariate generalization of Beta distribution.
102
+ multinomial Multivariate generalization of the binomial distribution.
103
+ multivariate_normal Multivariate generalization of the normal distribution.
104
+ ==================== ==========================================================
105
+
106
+ ==================== =========================================================
107
+ Standard
108
+ distributions
109
+ -------------------- ---------------------------------------------------------
110
+ standard_cauchy Standard Cauchy-Lorentz distribution.
111
+ standard_exponential Standard exponential distribution.
112
+ standard_gamma Standard Gamma distribution.
113
+ standard_normal Standard normal distribution.
114
+ standard_t Standard Student's t-distribution.
115
+ ==================== =========================================================
116
+
117
+ ==================== =========================================================
118
+ Internal functions
119
+ -------------------- ---------------------------------------------------------
120
+ get_state Get tuple representing internal state of generator.
121
+ set_state Set state of generator.
122
+ ==================== =========================================================
123
+
124
+
125
+ """
126
+ __all__ = [
127
+ 'beta',
128
+ 'binomial',
129
+ 'bytes',
130
+ 'chisquare',
131
+ 'choice',
132
+ 'dirichlet',
133
+ 'exponential',
134
+ 'f',
135
+ 'gamma',
136
+ 'geometric',
137
+ 'get_state',
138
+ 'gumbel',
139
+ 'hypergeometric',
140
+ 'laplace',
141
+ 'logistic',
142
+ 'lognormal',
143
+ 'logseries',
144
+ 'multinomial',
145
+ 'multivariate_normal',
146
+ 'negative_binomial',
147
+ 'noncentral_chisquare',
148
+ 'noncentral_f',
149
+ 'normal',
150
+ 'pareto',
151
+ 'permutation',
152
+ 'poisson',
153
+ 'power',
154
+ 'rand',
155
+ 'randint',
156
+ 'randn',
157
+ 'random',
158
+ 'random_integers',
159
+ 'random_sample',
160
+ 'ranf',
161
+ 'rayleigh',
162
+ 'sample',
163
+ 'seed',
164
+ 'set_state',
165
+ 'shuffle',
166
+ 'standard_cauchy',
167
+ 'standard_exponential',
168
+ 'standard_gamma',
169
+ 'standard_normal',
170
+ 'standard_t',
171
+ 'triangular',
172
+ 'uniform',
173
+ 'vonmises',
174
+ 'wald',
175
+ 'weibull',
176
+ 'zipf',
177
+ ]
178
+
179
+ # add these for module-freeze analysis (like PyInstaller)
180
+ from . import _pickle
181
+ from . import _common
182
+ from . import _bounded_integers
183
+
184
+ from ._generator import Generator, default_rng
185
+ from .bit_generator import SeedSequence, BitGenerator
186
+ from ._mt19937 import MT19937
187
+ from ._pcg64 import PCG64, PCG64DXSM
188
+ from ._philox import Philox
189
+ from ._sfc64 import SFC64
190
+ from .mtrand import *
191
+
192
+ __all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
193
+ 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
194
+ 'BitGenerator']
195
+
196
+
197
+ def __RandomState_ctor():
198
+ """Return a RandomState instance.
199
+
200
+ This function exists solely to assist (un)pickling.
201
+
202
+ Note that the state of the RandomState returned here is irrelevant, as this
203
+ function's entire purpose is to return a newly allocated RandomState whose
204
+ state pickle can set. Consequently the RandomState returned by this function
205
+ is a freshly allocated copy with a seed=0.
206
+
207
+ See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
208
+
209
+ """
210
+ return RandomState(seed=0)
211
+
212
+
213
+ from numpy._pytesttester import PytestTester
214
+ test = PytestTester(__name__)
215
+ del PytestTester
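A minimal sketch contrasting the recommended `Generator` API described in the docstring above with the legacy `RandomState` interface that is kept for backwards compatibility:

    import numpy as np

    rng = np.random.default_rng(12345)                  # Generator backed by PCG64
    print(rng.standard_normal(3))
    print(rng.integers(0, 10, size=5, endpoint=True))   # inclusive upper bound, replaces random_integers

    legacy = np.random.RandomState(12345)               # legacy API, kept for compatibility
    print(legacy.randint(0, 10, size=5))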
.venv/lib/python3.11/site-packages/numpy/random/__init__.pyi ADDED
@@ -0,0 +1,72 @@
1
+ from numpy._pytesttester import PytestTester
2
+
3
+ from numpy.random._generator import Generator as Generator
4
+ from numpy.random._generator import default_rng as default_rng
5
+ from numpy.random._mt19937 import MT19937 as MT19937
6
+ from numpy.random._pcg64 import (
7
+ PCG64 as PCG64,
8
+ PCG64DXSM as PCG64DXSM,
9
+ )
10
+ from numpy.random._philox import Philox as Philox
11
+ from numpy.random._sfc64 import SFC64 as SFC64
12
+ from numpy.random.bit_generator import BitGenerator as BitGenerator
13
+ from numpy.random.bit_generator import SeedSequence as SeedSequence
14
+ from numpy.random.mtrand import (
15
+ RandomState as RandomState,
16
+ beta as beta,
17
+ binomial as binomial,
18
+ bytes as bytes,
19
+ chisquare as chisquare,
20
+ choice as choice,
21
+ dirichlet as dirichlet,
22
+ exponential as exponential,
23
+ f as f,
24
+ gamma as gamma,
25
+ geometric as geometric,
26
+ get_bit_generator as get_bit_generator,
27
+ get_state as get_state,
28
+ gumbel as gumbel,
29
+ hypergeometric as hypergeometric,
30
+ laplace as laplace,
31
+ logistic as logistic,
32
+ lognormal as lognormal,
33
+ logseries as logseries,
34
+ multinomial as multinomial,
35
+ multivariate_normal as multivariate_normal,
36
+ negative_binomial as negative_binomial,
37
+ noncentral_chisquare as noncentral_chisquare,
38
+ noncentral_f as noncentral_f,
39
+ normal as normal,
40
+ pareto as pareto,
41
+ permutation as permutation,
42
+ poisson as poisson,
43
+ power as power,
44
+ rand as rand,
45
+ randint as randint,
46
+ randn as randn,
47
+ random as random,
48
+ random_integers as random_integers,
49
+ random_sample as random_sample,
50
+ ranf as ranf,
51
+ rayleigh as rayleigh,
52
+ sample as sample,
53
+ seed as seed,
54
+ set_bit_generator as set_bit_generator,
55
+ set_state as set_state,
56
+ shuffle as shuffle,
57
+ standard_cauchy as standard_cauchy,
58
+ standard_exponential as standard_exponential,
59
+ standard_gamma as standard_gamma,
60
+ standard_normal as standard_normal,
61
+ standard_t as standard_t,
62
+ triangular as triangular,
63
+ uniform as uniform,
64
+ vonmises as vonmises,
65
+ wald as wald,
66
+ weibull as weibull,
67
+ zipf as zipf,
68
+ )
69
+
70
+ __all__: list[str]
71
+ __path__: list[str]
72
+ test: PytestTester
.venv/lib/python3.11/site-packages/numpy/random/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (7.76 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/__pycache__/_pickle.cpython-311.pyc ADDED
Binary file (2.72 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/_bounded_integers.pxd ADDED
@@ -0,0 +1,29 @@
1
+ from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
2
+ int8_t, int16_t, int32_t, int64_t, intptr_t)
3
+ import numpy as np
4
+ cimport numpy as np
5
+ ctypedef np.npy_bool bool_t
6
+
7
+ from numpy.random cimport bitgen_t
8
+
9
+ cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
10
+ """Mask generator for use in bounded random numbers"""
11
+ # Smallest bit mask >= max
12
+ cdef uint64_t mask = max_val
13
+ mask |= mask >> 1
14
+ mask |= mask >> 2
15
+ mask |= mask >> 4
16
+ mask |= mask >> 8
17
+ mask |= mask >> 16
18
+ mask |= mask >> 32
19
+ return mask
20
+
21
+ cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
22
+ cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
23
+ cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
24
+ cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
25
+ cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
26
+ cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
27
+ cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
28
+ cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
29
+ cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
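A pure-Python sketch (illustration only, not the Cython implementation) of the masked rejection sampling that `_gen_mask` and the `_rand_*` helpers above support:

    import numpy as np

    def gen_mask(max_val):
        """Smallest all-ones bit mask >= max_val, mirroring _gen_mask above."""
        mask = int(max_val)
        for shift in (1, 2, 4, 8, 16, 32):
            mask |= mask >> shift
        return mask

    def bounded_int(low, high, rng):
        """Draw an integer in [low, high] by masking raw bits and rejecting overshoots."""
        delta = high - low
        mask = gen_mask(delta)
        while True:
            val = int(rng.integers(0, 2**32)) & mask   # keep only the low bits
            if val <= delta:
                return low + val

    rng = np.random.default_rng(0)
    print([bounded_int(10, 17, rng) for _ in range(5)])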
.venv/lib/python3.11/site-packages/numpy/random/_common.pxd ADDED
@@ -0,0 +1,106 @@
1
+ #cython: language_level=3
2
+
3
+ from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
4
+
5
+ import numpy as np
6
+ cimport numpy as np
7
+
8
+ from numpy.random cimport bitgen_t
9
+
10
+ cdef double POISSON_LAM_MAX
11
+ cdef double LEGACY_POISSON_LAM_MAX
12
+ cdef uint64_t MAXSIZE
13
+
14
+ cdef enum ConstraintType:
15
+ CONS_NONE
16
+ CONS_NON_NEGATIVE
17
+ CONS_POSITIVE
18
+ CONS_POSITIVE_NOT_NAN
19
+ CONS_BOUNDED_0_1
20
+ CONS_BOUNDED_GT_0_1
21
+ CONS_BOUNDED_LT_0_1
22
+ CONS_GT_1
23
+ CONS_GTE_1
24
+ CONS_POISSON
25
+ LEGACY_CONS_POISSON
26
+
27
+ ctypedef ConstraintType constraint_type
28
+
29
+ cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
30
+ cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
31
+ cdef object prepare_cffi(bitgen_t *bitgen)
32
+ cdef object prepare_ctypes(bitgen_t *bitgen)
33
+ cdef int check_constraint(double val, object name, constraint_type cons) except -1
34
+ cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
35
+
36
+ cdef extern from "include/aligned_malloc.h":
37
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
38
+ cdef void *PyArray_malloc_aligned(size_t n)
39
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
40
+ cdef void PyArray_free_aligned(void *p)
41
+
42
+ ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) noexcept nogil
43
+ ctypedef double (*random_double_0)(void *state) noexcept nogil
44
+ ctypedef double (*random_double_1)(void *state, double a) noexcept nogil
45
+ ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil
46
+ ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil
47
+
48
+ ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil
49
+ ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil
50
+ ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil
51
+
52
+ ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil
53
+ ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil
54
+ ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil
55
+ ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil
56
+ ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil
57
+ ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil
58
+
59
+ ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil
60
+ ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil
61
+
62
+ ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil
63
+ ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil
64
+
65
+ cdef double kahan_sum(double *darr, np.npy_intp n) noexcept
66
+
67
+ cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil:
68
+ return (rnd >> 11) * (1.0 / 9007199254740992.0)
69
+
70
+ cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
71
+
72
+ cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
73
+
74
+ cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
75
+
76
+ cdef object wrap_int(object val, object bits)
77
+
78
+ cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
79
+
80
+ cdef validate_output_shape(iter_shape, np.ndarray output)
81
+
82
+ cdef object cont(void *func, void *state, object size, object lock, int narg,
83
+ object a, object a_name, constraint_type a_constraint,
84
+ object b, object b_name, constraint_type b_constraint,
85
+ object c, object c_name, constraint_type c_constraint,
86
+ object out)
87
+
88
+ cdef object disc(void *func, void *state, object size, object lock,
89
+ int narg_double, int narg_int64,
90
+ object a, object a_name, constraint_type a_constraint,
91
+ object b, object b_name, constraint_type b_constraint,
92
+ object c, object c_name, constraint_type c_constraint)
93
+
94
+ cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
95
+ object a, object a_name, constraint_type a_constraint,
96
+ object out)
97
+
98
+ cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
99
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
100
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
101
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
102
+
103
+ cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
104
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
105
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
106
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
.venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/__pycache__/extending.cpython-311.pyc ADDED
Binary file (1.71 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/__pycache__/parse.cpython-311.pyc ADDED
Binary file (2.75 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/extending.py ADDED
@@ -0,0 +1,40 @@
1
+ """
2
+ Use cffi to access any of the underlying C functions from distributions.h
3
+ """
4
+ import os
5
+ import numpy as np
6
+ import cffi
7
+ from .parse import parse_distributions_h
8
+ ffi = cffi.FFI()
9
+
10
+ inc_dir = os.path.join(np.get_include(), 'numpy')
11
+
12
+ # Basic numpy types
13
+ ffi.cdef('''
14
+ typedef intptr_t npy_intp;
15
+ typedef unsigned char npy_bool;
16
+
17
+ ''')
18
+
19
+ parse_distributions_h(ffi, inc_dir)
20
+
21
+ lib = ffi.dlopen(np.random._generator.__file__)
22
+
23
+ # Compare the distributions.h random_standard_normal_fill to
24
+ # Generator.standard_random
25
+ bit_gen = np.random.PCG64()
26
+ rng = np.random.Generator(bit_gen)
27
+ state = bit_gen.state
28
+
29
+ interface = rng.bit_generator.cffi
30
+ n = 100
31
+ vals_cffi = ffi.new('double[%d]' % n)
32
+ lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
33
+
34
+ # reset the state
35
+ bit_gen.state = state
36
+
37
+ vals = rng.standard_normal(n)
38
+
39
+ for i in range(n):
40
+ assert vals[i] == vals_cffi[i]
.venv/lib/python3.11/site-packages/numpy/random/_examples/cffi/parse.py ADDED
@@ -0,0 +1,54 @@
1
+ import os
2
+
3
+
4
+ def parse_distributions_h(ffi, inc_dir):
5
+ """
6
+ Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef
7
+
8
+ Read the function declarations without the "#define ..." macros that will
9
+ be filled in when loading the library.
10
+ """
11
+
12
+ with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
13
+ s = []
14
+ for line in fid:
15
+ # massage the include file
16
+ if line.strip().startswith('#'):
17
+ continue
18
+ s.append(line)
19
+ ffi.cdef('\n'.join(s))
20
+
21
+ with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
22
+ s = []
23
+ in_skip = 0
24
+ ignoring = False
25
+ for line in fid:
26
+ # check for and remove extern "C" guards
27
+ if ignoring:
28
+ if line.strip().startswith('#endif'):
29
+ ignoring = False
30
+ continue
31
+ if line.strip().startswith('#ifdef __cplusplus'):
32
+ ignoring = True
33
+
34
+ # massage the include file
35
+ if line.strip().startswith('#'):
36
+ continue
37
+
38
+ # skip any inlined function definition
39
+ # which starts with 'static inline xxx(...) {'
40
+ # and ends with a closing '}'
41
+ if line.strip().startswith('static inline'):
42
+ in_skip += line.count('{')
43
+ continue
44
+ elif in_skip > 0:
45
+ in_skip += line.count('{')
46
+ in_skip -= line.count('}')
47
+ continue
48
+
49
+ # replace defines with their value or remove them
50
+ line = line.replace('DECLDIR', '')
51
+ line = line.replace('RAND_INT_TYPE', 'int64_t')
52
+ s.append(line)
53
+ ffi.cdef('\n'.join(s))
54
+
.venv/lib/python3.11/site-packages/numpy/random/_examples/cython/extending.pyx ADDED
@@ -0,0 +1,78 @@
1
+ #!/usr/bin/env python3
2
+ #cython: language_level=3
3
+
4
+ from libc.stdint cimport uint32_t
5
+ from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
6
+
7
+ import numpy as np
8
+ cimport numpy as np
9
+ cimport cython
10
+
11
+ from numpy.random cimport bitgen_t
12
+ from numpy.random import PCG64
13
+
14
+ np.import_array()
15
+
16
+
17
+ @cython.boundscheck(False)
18
+ @cython.wraparound(False)
19
+ def uniform_mean(Py_ssize_t n):
20
+ cdef Py_ssize_t i
21
+ cdef bitgen_t *rng
22
+ cdef const char *capsule_name = "BitGenerator"
23
+ cdef double[::1] random_values
24
+ cdef np.ndarray randoms
25
+
26
+ x = PCG64()
27
+ capsule = x.capsule
28
+ if not PyCapsule_IsValid(capsule, capsule_name):
29
+ raise ValueError("Invalid pointer to anon_func_state")
30
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
31
+ random_values = np.empty(n)
32
+ # Best practice is to acquire the lock whenever generating random values.
33
+ # This prevents other threads from modifying the state. Acquiring the lock
34
+ # is only necessary if the GIL is also released, as in this example.
35
+ with x.lock, nogil:
36
+ for i in range(n):
37
+ random_values[i] = rng.next_double(rng.state)
38
+ randoms = np.asarray(random_values)
39
+ return randoms.mean()
40
+
41
+
42
+ # This function is declared nogil so it can be used without the GIL below
43
+ cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
44
+ cdef uint32_t mask, delta, val
45
+ mask = delta = ub - lb
46
+ mask |= mask >> 1
47
+ mask |= mask >> 2
48
+ mask |= mask >> 4
49
+ mask |= mask >> 8
50
+ mask |= mask >> 16
51
+
52
+ val = rng.next_uint32(rng.state) & mask
53
+ while val > delta:
54
+ val = rng.next_uint32(rng.state) & mask
55
+
56
+ return lb + val
57
+
58
+
59
+ @cython.boundscheck(False)
60
+ @cython.wraparound(False)
61
+ def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
62
+ cdef Py_ssize_t i
63
+ cdef bitgen_t *rng
64
+ cdef uint32_t[::1] out
65
+ cdef const char *capsule_name = "BitGenerator"
66
+
67
+ x = PCG64()
68
+ out = np.empty(n, dtype=np.uint32)
69
+ capsule = x.capsule
70
+
71
+ if not PyCapsule_IsValid(capsule, capsule_name):
72
+ raise ValueError("Invalid pointer to anon_func_state")
73
+ rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
74
+
75
+ with x.lock, nogil:
76
+ for i in range(n):
77
+ out[i] = bounded_uint(lb, ub, rng)
78
+ return np.asarray(out)
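Assuming the Cython module above has been compiled and is importable as `extending` (for example via the meson.build further down in this diff), usage might look like:

    import extending

    print(extending.uniform_mean(10_000))      # mean of 10_000 uniform doubles, ~0.5
    print(extending.bounded_uints(10, 20, 5))  # five uint32 values in [10, 20]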
.venv/lib/python3.11/site-packages/numpy/random/_examples/cython/extending_distributions.pyx ADDED
@@ -0,0 +1,117 @@
1
+ #!/usr/bin/env python3
2
+ #cython: language_level=3
3
+ """
4
+ This file shows how to use a BitGenerator to create a distribution.
5
+ """
6
+ import numpy as np
7
+ cimport numpy as np
8
+ cimport cython
9
+ from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
10
+ from libc.stdint cimport uint16_t, uint64_t
11
+ from numpy.random cimport bitgen_t
12
+ from numpy.random import PCG64
13
+ from numpy.random.c_distributions cimport (
14
+ random_standard_uniform_fill, random_standard_uniform_fill_f)
15
+
16
+
17
+ @cython.boundscheck(False)
18
+ @cython.wraparound(False)
19
+ def uniforms(Py_ssize_t n):
20
+ """
21
+ Create an array of `n` uniformly distributed doubles.
22
+ A 'real' distribution would want to process the values into
23
+ some non-uniform distribution
24
+ """
25
+ cdef Py_ssize_t i
26
+ cdef bitgen_t *rng
27
+ cdef const char *capsule_name = "BitGenerator"
28
+ cdef double[::1] random_values
29
+
30
+ x = PCG64()
31
+ capsule = x.capsule
32
+ # Optional check that the capsule is from a BitGenerator
33
+ if not PyCapsule_IsValid(capsule, capsule_name):
34
+ raise ValueError("Invalid pointer to anon_func_state")
35
+ # Cast the pointer
36
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
37
+ random_values = np.empty(n, dtype='float64')
38
+ with x.lock, nogil:
39
+ for i in range(n):
40
+ # Call the function
41
+ random_values[i] = rng.next_double(rng.state)
42
+ randoms = np.asarray(random_values)
43
+
44
+ return randoms
45
+
46
+ # cython example 2
47
+ @cython.boundscheck(False)
48
+ @cython.wraparound(False)
49
+ def uint10_uniforms(Py_ssize_t n):
50
+ """Uniform 10 bit integers stored as 16-bit unsigned integers"""
51
+ cdef Py_ssize_t i
52
+ cdef bitgen_t *rng
53
+ cdef const char *capsule_name = "BitGenerator"
54
+ cdef uint16_t[::1] random_values
55
+ cdef int bits_remaining
56
+ cdef int width = 10
57
+ cdef uint64_t buff, mask = 0x3FF
58
+
59
+ x = PCG64()
60
+ capsule = x.capsule
61
+ if not PyCapsule_IsValid(capsule, capsule_name):
62
+ raise ValueError("Invalid pointer to anon_func_state")
63
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
64
+ random_values = np.empty(n, dtype='uint16')
65
+ # Best practice is to release GIL and acquire the lock
66
+ bits_remaining = 0
67
+ with x.lock, nogil:
68
+ for i in range(n):
69
+ if bits_remaining < width:
70
+ buff = rng.next_uint64(rng.state)
71
+ random_values[i] = buff & mask
72
+ buff >>= width
73
+
74
+ randoms = np.asarray(random_values)
75
+ return randoms
76
+
77
+ # cython example 3
78
+ def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64):
79
+ """
80
+ Create an array of `n` uniformly distributed doubles via a "fill" function.
81
+
82
+ A 'real' distribution would want to process the values into
83
+ some non-uniform distribution
84
+
85
+ Parameters
86
+ ----------
87
+ bit_generator: BitGenerator instance
88
+ n: int
89
+ Output vector length
90
+ dtype: {str, dtype}, optional
91
+ Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). The
92
+ default dtype value is 'd'
93
+ """
94
+ cdef Py_ssize_t i
95
+ cdef bitgen_t *rng
96
+ cdef const char *capsule_name = "BitGenerator"
97
+ cdef np.ndarray randoms
98
+
99
+ capsule = bit_generator.capsule
100
+ # Optional check that the capsule is from a BitGenerator
101
+ if not PyCapsule_IsValid(capsule, capsule_name):
102
+ raise ValueError("Invalid pointer to anon_func_state")
103
+ # Cast the pointer
104
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
105
+
106
+ _dtype = np.dtype(dtype)
107
+ randoms = np.empty(n, dtype=_dtype)
108
+ if _dtype == np.float32:
109
+ with bit_generator.lock:
110
+ random_standard_uniform_fill_f(rng, n, <float*>np.PyArray_DATA(randoms))
111
+ elif _dtype == np.float64:
112
+ with bit_generator.lock:
113
+ random_standard_uniform_fill(rng, n, <double*>np.PyArray_DATA(randoms))
114
+ else:
115
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
116
+ return randoms
117
+
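Assuming the module above is compiled and importable as `extending_distributions`, the fill-based `uniforms_ex` can be driven by any BitGenerator:

    import numpy as np
    import extending_distributions

    bit_gen = np.random.PCG64(1234)
    vals = extending_distributions.uniforms_ex(bit_gen, 1000, dtype=np.float32)
    print(vals.dtype, vals.mean())             # float32, roughly 0.5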
.venv/lib/python3.11/site-packages/numpy/random/_examples/cython/meson.build ADDED
@@ -0,0 +1,45 @@
1
+ project('random-build-examples', 'c', 'cpp', 'cython')
2
+
3
+ py_mod = import('python')
4
+ py3 = py_mod.find_installation(pure: false)
5
+
6
+ cc = meson.get_compiler('c')
7
+ cy = meson.get_compiler('cython')
8
+
9
+ if not cy.version().version_compare('>=0.29.35')
10
+ error('tests requires Cython >= 0.29.35')
11
+ endif
12
+
13
+ _numpy_abs = run_command(py3, ['-c',
14
+ 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'],
15
+ check: true).stdout().strip()
16
+
17
+ npymath_path = _numpy_abs / 'core' / 'lib'
18
+ npy_include_path = _numpy_abs / 'core' / 'include'
19
+ npyrandom_path = _numpy_abs / 'random' / 'lib'
20
+ npymath_lib = cc.find_library('npymath', dirs: npymath_path)
21
+ npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path)
22
+
23
+ py3.extension_module(
24
+ 'extending_distributions',
25
+ 'extending_distributions.pyx',
26
+ install: false,
27
+ include_directories: [npy_include_path],
28
+ dependencies: [npyrandom_lib, npymath_lib],
29
+ )
30
+ py3.extension_module(
31
+ 'extending',
32
+ 'extending.pyx',
33
+ install: false,
34
+ include_directories: [npy_include_path],
35
+ dependencies: [npyrandom_lib, npymath_lib],
36
+ )
37
+ py3.extension_module(
38
+ 'extending_cpp',
39
+ 'extending_distributions.pyx',
40
+ install: false,
41
+ override_options : ['cython_language=cpp'],
42
+ cython_args: ['--module-name', 'extending_cpp'],
43
+ include_directories: [npy_include_path],
44
+ dependencies: [npyrandom_lib, npymath_lib],
45
+ )
.venv/lib/python3.11/site-packages/numpy/random/_examples/numba/__pycache__/extending.cpython-311.pyc ADDED
Binary file (4.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-311.pyc ADDED
Binary file (2.91 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/_examples/numba/extending.py ADDED
@@ -0,0 +1,84 @@
1
+ import numpy as np
2
+ import numba as nb
3
+
4
+ from numpy.random import PCG64
5
+ from timeit import timeit
6
+
7
+ bit_gen = PCG64()
8
+ next_d = bit_gen.cffi.next_double
9
+ state_addr = bit_gen.cffi.state_address
10
+
11
+ def normals(n, state):
12
+ out = np.empty(n)
13
+ for i in range((n + 1) // 2):
14
+ x1 = 2.0 * next_d(state) - 1.0
15
+ x2 = 2.0 * next_d(state) - 1.0
16
+ r2 = x1 * x1 + x2 * x2
17
+ while r2 >= 1.0 or r2 == 0.0:
18
+ x1 = 2.0 * next_d(state) - 1.0
19
+ x2 = 2.0 * next_d(state) - 1.0
20
+ r2 = x1 * x1 + x2 * x2
21
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
22
+ out[2 * i] = f * x1
23
+ if 2 * i + 1 < n:
24
+ out[2 * i + 1] = f * x2
25
+ return out
26
+
27
+ # Compile using Numba
28
+ normalsj = nb.jit(normals, nopython=True)
29
+ # Must use state address not state with numba
30
+ n = 10000
31
+
32
+ def numbacall():
33
+ return normalsj(n, state_addr)
34
+
35
+ rg = np.random.Generator(PCG64())
36
+
37
+ def numpycall():
38
+ return rg.normal(size=n)
39
+
40
+ # Check that the functions work
41
+ r1 = numbacall()
42
+ r2 = numpycall()
43
+ assert r1.shape == (n,)
44
+ assert r1.shape == r2.shape
45
+
46
+ t1 = timeit(numbacall, number=1000)
47
+ print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms')
48
+ t2 = timeit(numpycall, number=1000)
49
+ print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms')
50
+
51
+ # example 2
52
+
53
+ next_u32 = bit_gen.ctypes.next_uint32
54
+ ctypes_state = bit_gen.ctypes.state
55
+
56
+ @nb.jit(nopython=True)
57
+ def bounded_uint(lb, ub, state):
58
+ mask = delta = ub - lb
59
+ mask |= mask >> 1
60
+ mask |= mask >> 2
61
+ mask |= mask >> 4
62
+ mask |= mask >> 8
63
+ mask |= mask >> 16
64
+
65
+ val = next_u32(state) & mask
66
+ while val > delta:
67
+ val = next_u32(state) & mask
68
+
69
+ return lb + val
70
+
71
+
72
+ print(bounded_uint(323, 2394691, ctypes_state.value))
73
+
74
+
75
+ @nb.jit(nopython=True)
76
+ def bounded_uints(lb, ub, n, state):
77
+ out = np.empty(n, dtype=np.uint32)
78
+ for i in range(n):
79
+ out[i] = bounded_uint(lb, ub, state)
80
+
81
+
82
+ bounded_uints(323, 2394691, 10000000, ctypes_state.value)
83
+
84
+
.venv/lib/python3.11/site-packages/numpy/random/_examples/numba/extending_distributions.py ADDED
@@ -0,0 +1,67 @@
1
+ r"""
2
+ Building the required library in this example requires a source distribution
3
+ of NumPy or clone of the NumPy git repository since distributions.c is not
4
+ included in binary distributions.
5
+
6
+ On *nix, execute in numpy/random/src/distributions
7
+
8
+ export ${PYTHON_VERSION}=3.8 # Python version
9
+ export PYTHON_INCLUDE=#path to Python's include folder, usually \
10
+ ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
11
+ export NUMPY_INCLUDE=#path to numpy's include folder, usually \
12
+ ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
13
+ gcc -shared -o libdistributions.so -fPIC distributions.c \
14
+ -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
15
+ mv libdistributions.so ../../_examples/numba/
16
+
17
+ On Windows
18
+
19
+ rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
20
+ set PYTHON_HOME=c:\Anaconda
21
+ set PYTHON_VERSION=38
22
+ cl.exe /LD .\distributions.c -DDLL_EXPORT \
23
+ -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
24
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
25
+ move distributions.dll ../../_examples/numba/
26
+ """
27
+ import os
28
+
29
+ import numba as nb
30
+ import numpy as np
31
+ from cffi import FFI
32
+
33
+ from numpy.random import PCG64
34
+
35
+ ffi = FFI()
36
+ if os.path.exists('./distributions.dll'):
37
+ lib = ffi.dlopen('./distributions.dll')
38
+ elif os.path.exists('./libdistributions.so'):
39
+ lib = ffi.dlopen('./libdistributions.so')
40
+ else:
41
+ raise RuntimeError('Required DLL/so file was not found.')
42
+
43
+ ffi.cdef("""
44
+ double random_standard_normal(void *bitgen_state);
45
+ """)
46
+ x = PCG64()
47
+ xffi = x.cffi
48
+ bit_generator = xffi.bit_generator
49
+
50
+ random_standard_normal = lib.random_standard_normal
51
+
52
+
53
+ def normals(n, bit_generator):
54
+ out = np.empty(n)
55
+ for i in range(n):
56
+ out[i] = random_standard_normal(bit_generator)
57
+ return out
58
+
59
+
60
+ normalsj = nb.jit(normals, nopython=True)
61
+
62
+ # Numba requires a memory address for void *
63
+ # Can also get address from x.ctypes.bit_generator.value
64
+ bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
65
+
66
+ norm = normalsj(1000, bit_generator_address)
67
+ print(norm[:12])
.venv/lib/python3.11/site-packages/numpy/random/_generator.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c23f8e487f474185f11f28fc7b8eb2ebbc60712f68c3eb5f3cf014f2c7ec46c
3
+ size 980520
.venv/lib/python3.11/site-packages/numpy/random/_generator.pyi ADDED
@@ -0,0 +1,681 @@
1
+ from collections.abc import Callable
2
+ from typing import Any, Union, overload, TypeVar, Literal
3
+
4
+ from numpy import (
5
+ bool_,
6
+ dtype,
7
+ float32,
8
+ float64,
9
+ int8,
10
+ int16,
11
+ int32,
12
+ int64,
13
+ int_,
14
+ ndarray,
15
+ uint,
16
+ uint8,
17
+ uint16,
18
+ uint32,
19
+ uint64,
20
+ )
21
+ from numpy.random import BitGenerator, SeedSequence
22
+ from numpy._typing import (
23
+ ArrayLike,
24
+ _ArrayLikeFloat_co,
25
+ _ArrayLikeInt_co,
26
+ _DoubleCodes,
27
+ _DTypeLikeBool,
28
+ _DTypeLikeInt,
29
+ _DTypeLikeUInt,
30
+ _Float32Codes,
31
+ _Float64Codes,
32
+ _FloatLike_co,
33
+ _Int8Codes,
34
+ _Int16Codes,
35
+ _Int32Codes,
36
+ _Int64Codes,
37
+ _IntCodes,
38
+ _ShapeLike,
39
+ _SingleCodes,
40
+ _SupportsDType,
41
+ _UInt8Codes,
42
+ _UInt16Codes,
43
+ _UInt32Codes,
44
+ _UInt64Codes,
45
+ _UIntCodes,
46
+ )
47
+
48
+ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
49
+
50
+ _DTypeLikeFloat32 = Union[
51
+ dtype[float32],
52
+ _SupportsDType[dtype[float32]],
53
+ type[float32],
54
+ _Float32Codes,
55
+ _SingleCodes,
56
+ ]
57
+
58
+ _DTypeLikeFloat64 = Union[
59
+ dtype[float64],
60
+ _SupportsDType[dtype[float64]],
61
+ type[float],
62
+ type[float64],
63
+ _Float64Codes,
64
+ _DoubleCodes,
65
+ ]
66
+
67
+ class Generator:
68
+ def __init__(self, bit_generator: BitGenerator) -> None: ...
69
+ def __repr__(self) -> str: ...
70
+ def __str__(self) -> str: ...
71
+ def __getstate__(self) -> dict[str, Any]: ...
72
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
73
+ def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ...
74
+ @property
75
+ def bit_generator(self) -> BitGenerator: ...
76
+ def spawn(self, n_children: int) -> list[Generator]: ...
77
+ def bytes(self, length: int) -> bytes: ...
78
+ @overload
79
+ def standard_normal( # type: ignore[misc]
80
+ self,
81
+ size: None = ...,
82
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
83
+ out: None = ...,
84
+ ) -> float: ...
85
+ @overload
86
+ def standard_normal( # type: ignore[misc]
87
+ self,
88
+ size: _ShapeLike = ...,
89
+ ) -> ndarray[Any, dtype[float64]]: ...
90
+ @overload
91
+ def standard_normal( # type: ignore[misc]
92
+ self,
93
+ *,
94
+ out: ndarray[Any, dtype[float64]] = ...,
95
+ ) -> ndarray[Any, dtype[float64]]: ...
96
+ @overload
97
+ def standard_normal( # type: ignore[misc]
98
+ self,
99
+ size: _ShapeLike = ...,
100
+ dtype: _DTypeLikeFloat32 = ...,
101
+ out: None | ndarray[Any, dtype[float32]] = ...,
102
+ ) -> ndarray[Any, dtype[float32]]: ...
103
+ @overload
104
+ def standard_normal( # type: ignore[misc]
105
+ self,
106
+ size: _ShapeLike = ...,
107
+ dtype: _DTypeLikeFloat64 = ...,
108
+ out: None | ndarray[Any, dtype[float64]] = ...,
109
+ ) -> ndarray[Any, dtype[float64]]: ...
110
+ @overload
111
+ def permutation(self, x: int, axis: int = ...) -> ndarray[Any, dtype[int64]]: ...
112
+ @overload
113
+ def permutation(self, x: ArrayLike, axis: int = ...) -> ndarray[Any, Any]: ...
114
+ @overload
115
+ def standard_exponential( # type: ignore[misc]
116
+ self,
117
+ size: None = ...,
118
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
119
+ method: Literal["zig", "inv"] = ...,
120
+ out: None = ...,
121
+ ) -> float: ...
122
+ @overload
123
+ def standard_exponential(
124
+ self,
125
+ size: _ShapeLike = ...,
126
+ ) -> ndarray[Any, dtype[float64]]: ...
127
+ @overload
128
+ def standard_exponential(
129
+ self,
130
+ *,
131
+ out: ndarray[Any, dtype[float64]] = ...,
132
+ ) -> ndarray[Any, dtype[float64]]: ...
133
+ @overload
134
+ def standard_exponential(
135
+ self,
136
+ size: _ShapeLike = ...,
137
+ *,
138
+ method: Literal["zig", "inv"] = ...,
139
+ out: None | ndarray[Any, dtype[float64]] = ...,
140
+ ) -> ndarray[Any, dtype[float64]]: ...
141
+ @overload
142
+ def standard_exponential(
143
+ self,
144
+ size: _ShapeLike = ...,
145
+ dtype: _DTypeLikeFloat32 = ...,
146
+ method: Literal["zig", "inv"] = ...,
147
+ out: None | ndarray[Any, dtype[float32]] = ...,
148
+ ) -> ndarray[Any, dtype[float32]]: ...
149
+ @overload
150
+ def standard_exponential(
151
+ self,
152
+ size: _ShapeLike = ...,
153
+ dtype: _DTypeLikeFloat64 = ...,
154
+ method: Literal["zig", "inv"] = ...,
155
+ out: None | ndarray[Any, dtype[float64]] = ...,
156
+ ) -> ndarray[Any, dtype[float64]]: ...
157
+ @overload
158
+ def random( # type: ignore[misc]
159
+ self,
160
+ size: None = ...,
161
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
162
+ out: None = ...,
163
+ ) -> float: ...
164
+ @overload
165
+ def random(
166
+ self,
167
+ *,
168
+ out: ndarray[Any, dtype[float64]] = ...,
169
+ ) -> ndarray[Any, dtype[float64]]: ...
170
+ @overload
171
+ def random(
172
+ self,
173
+ size: _ShapeLike = ...,
174
+ *,
175
+ out: None | ndarray[Any, dtype[float64]] = ...,
176
+ ) -> ndarray[Any, dtype[float64]]: ...
177
+ @overload
178
+ def random(
179
+ self,
180
+ size: _ShapeLike = ...,
181
+ dtype: _DTypeLikeFloat32 = ...,
182
+ out: None | ndarray[Any, dtype[float32]] = ...,
183
+ ) -> ndarray[Any, dtype[float32]]: ...
184
+ @overload
185
+ def random(
186
+ self,
187
+ size: _ShapeLike = ...,
188
+ dtype: _DTypeLikeFloat64 = ...,
189
+ out: None | ndarray[Any, dtype[float64]] = ...,
190
+ ) -> ndarray[Any, dtype[float64]]: ...
191
+ @overload
192
+ def beta(
193
+ self,
194
+ a: _FloatLike_co,
195
+ b: _FloatLike_co,
196
+ size: None = ...,
197
+ ) -> float: ... # type: ignore[misc]
198
+ @overload
199
+ def beta(
200
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
201
+ ) -> ndarray[Any, dtype[float64]]: ...
202
+ @overload
203
+ def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
204
+ @overload
205
+ def exponential(
206
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
207
+ ) -> ndarray[Any, dtype[float64]]: ...
208
+ @overload
209
+ def integers( # type: ignore[misc]
210
+ self,
211
+ low: int,
212
+ high: None | int = ...,
213
+ ) -> int: ...
214
+ @overload
215
+ def integers( # type: ignore[misc]
216
+ self,
217
+ low: int,
218
+ high: None | int = ...,
219
+ size: None = ...,
220
+ dtype: _DTypeLikeBool = ...,
221
+ endpoint: bool = ...,
222
+ ) -> bool: ...
223
+ @overload
224
+ def integers( # type: ignore[misc]
225
+ self,
226
+ low: int,
227
+ high: None | int = ...,
228
+ size: None = ...,
229
+ dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
230
+ endpoint: bool = ...,
231
+ ) -> int: ...
232
+ @overload
233
+ def integers( # type: ignore[misc]
234
+ self,
235
+ low: _ArrayLikeInt_co,
236
+ high: None | _ArrayLikeInt_co = ...,
237
+ size: None | _ShapeLike = ...,
238
+ ) -> ndarray[Any, dtype[int64]]: ...
239
+ @overload
240
+ def integers( # type: ignore[misc]
241
+ self,
242
+ low: _ArrayLikeInt_co,
243
+ high: None | _ArrayLikeInt_co = ...,
244
+ size: None | _ShapeLike = ...,
245
+ dtype: _DTypeLikeBool = ...,
246
+ endpoint: bool = ...,
247
+ ) -> ndarray[Any, dtype[bool_]]: ...
248
+ @overload
249
+ def integers( # type: ignore[misc]
250
+ self,
251
+ low: _ArrayLikeInt_co,
252
+ high: None | _ArrayLikeInt_co = ...,
253
+ size: None | _ShapeLike = ...,
254
+ dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
255
+ endpoint: bool = ...,
256
+ ) -> ndarray[Any, dtype[int8]]: ...
257
+ @overload
258
+ def integers( # type: ignore[misc]
259
+ self,
260
+ low: _ArrayLikeInt_co,
261
+ high: None | _ArrayLikeInt_co = ...,
262
+ size: None | _ShapeLike = ...,
263
+ dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
264
+ endpoint: bool = ...,
265
+ ) -> ndarray[Any, dtype[int16]]: ...
266
+ @overload
267
+ def integers( # type: ignore[misc]
268
+ self,
269
+ low: _ArrayLikeInt_co,
270
+ high: None | _ArrayLikeInt_co = ...,
271
+ size: None | _ShapeLike = ...,
272
+ dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
273
+ endpoint: bool = ...,
274
+ ) -> ndarray[Any, dtype[int32]]: ...
275
+ @overload
276
+ def integers( # type: ignore[misc]
277
+ self,
278
+ low: _ArrayLikeInt_co,
279
+ high: None | _ArrayLikeInt_co = ...,
280
+ size: None | _ShapeLike = ...,
281
+ dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
282
+ endpoint: bool = ...,
283
+ ) -> ndarray[Any, dtype[int64]]: ...
284
+ @overload
285
+ def integers( # type: ignore[misc]
286
+ self,
287
+ low: _ArrayLikeInt_co,
288
+ high: None | _ArrayLikeInt_co = ...,
289
+ size: None | _ShapeLike = ...,
290
+ dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
291
+ endpoint: bool = ...,
292
+ ) -> ndarray[Any, dtype[uint8]]: ...
293
+ @overload
294
+ def integers( # type: ignore[misc]
295
+ self,
296
+ low: _ArrayLikeInt_co,
297
+ high: None | _ArrayLikeInt_co = ...,
298
+ size: None | _ShapeLike = ...,
299
+ dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
300
+ endpoint: bool = ...,
301
+ ) -> ndarray[Any, dtype[uint16]]: ...
302
+ @overload
303
+ def integers( # type: ignore[misc]
304
+ self,
305
+ low: _ArrayLikeInt_co,
306
+ high: None | _ArrayLikeInt_co = ...,
307
+ size: None | _ShapeLike = ...,
308
+ dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
309
+ endpoint: bool = ...,
310
+ ) -> ndarray[Any, dtype[uint32]]: ...
311
+ @overload
312
+ def integers( # type: ignore[misc]
313
+ self,
314
+ low: _ArrayLikeInt_co,
315
+ high: None | _ArrayLikeInt_co = ...,
316
+ size: None | _ShapeLike = ...,
317
+ dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
318
+ endpoint: bool = ...,
319
+ ) -> ndarray[Any, dtype[uint64]]: ...
320
+ @overload
321
+ def integers( # type: ignore[misc]
322
+ self,
323
+ low: _ArrayLikeInt_co,
324
+ high: None | _ArrayLikeInt_co = ...,
325
+ size: None | _ShapeLike = ...,
326
+ dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
327
+ endpoint: bool = ...,
328
+ ) -> ndarray[Any, dtype[int_]]: ...
329
+ @overload
330
+ def integers( # type: ignore[misc]
331
+ self,
332
+ low: _ArrayLikeInt_co,
333
+ high: None | _ArrayLikeInt_co = ...,
334
+ size: None | _ShapeLike = ...,
335
+ dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
336
+ endpoint: bool = ...,
337
+ ) -> ndarray[Any, dtype[uint]]: ...
338
+ # TODO: Use a TypeVar _T here to get away from Any output? Should be int->ndarray[Any,dtype[int64]], ArrayLike[_T] -> _T | ndarray[Any,Any]
339
+ @overload
340
+ def choice(
341
+ self,
342
+ a: int,
343
+ size: None = ...,
344
+ replace: bool = ...,
345
+ p: None | _ArrayLikeFloat_co = ...,
346
+ axis: int = ...,
347
+ shuffle: bool = ...,
348
+ ) -> int: ...
349
+ @overload
350
+ def choice(
351
+ self,
352
+ a: int,
353
+ size: _ShapeLike = ...,
354
+ replace: bool = ...,
355
+ p: None | _ArrayLikeFloat_co = ...,
356
+ axis: int = ...,
357
+ shuffle: bool = ...,
358
+ ) -> ndarray[Any, dtype[int64]]: ...
359
+ @overload
360
+ def choice(
361
+ self,
362
+ a: ArrayLike,
363
+ size: None = ...,
364
+ replace: bool = ...,
365
+ p: None | _ArrayLikeFloat_co = ...,
366
+ axis: int = ...,
367
+ shuffle: bool = ...,
368
+ ) -> Any: ...
369
+ @overload
370
+ def choice(
371
+ self,
372
+ a: ArrayLike,
373
+ size: _ShapeLike = ...,
374
+ replace: bool = ...,
375
+ p: None | _ArrayLikeFloat_co = ...,
376
+ axis: int = ...,
377
+ shuffle: bool = ...,
378
+ ) -> ndarray[Any, Any]: ...
379
+ @overload
380
+ def uniform(
381
+ self,
382
+ low: _FloatLike_co = ...,
383
+ high: _FloatLike_co = ...,
384
+ size: None = ...,
385
+ ) -> float: ... # type: ignore[misc]
386
+ @overload
387
+ def uniform(
388
+ self,
389
+ low: _ArrayLikeFloat_co = ...,
390
+ high: _ArrayLikeFloat_co = ...,
391
+ size: None | _ShapeLike = ...,
392
+ ) -> ndarray[Any, dtype[float64]]: ...
393
+ @overload
394
+ def normal(
395
+ self,
396
+ loc: _FloatLike_co = ...,
397
+ scale: _FloatLike_co = ...,
398
+ size: None = ...,
399
+ ) -> float: ... # type: ignore[misc]
400
+ @overload
401
+ def normal(
402
+ self,
403
+ loc: _ArrayLikeFloat_co = ...,
404
+ scale: _ArrayLikeFloat_co = ...,
405
+ size: None | _ShapeLike = ...,
406
+ ) -> ndarray[Any, dtype[float64]]: ...
407
+ @overload
408
+ def standard_gamma( # type: ignore[misc]
409
+ self,
410
+ shape: _FloatLike_co,
411
+ size: None = ...,
412
+ dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ...,
413
+ out: None = ...,
414
+ ) -> float: ...
415
+ @overload
416
+ def standard_gamma(
417
+ self,
418
+ shape: _ArrayLikeFloat_co,
419
+ size: None | _ShapeLike = ...,
420
+ ) -> ndarray[Any, dtype[float64]]: ...
421
+ @overload
422
+ def standard_gamma(
423
+ self,
424
+ shape: _ArrayLikeFloat_co,
425
+ *,
426
+ out: ndarray[Any, dtype[float64]] = ...,
427
+ ) -> ndarray[Any, dtype[float64]]: ...
428
+ @overload
429
+ def standard_gamma(
430
+ self,
431
+ shape: _ArrayLikeFloat_co,
432
+ size: None | _ShapeLike = ...,
433
+ dtype: _DTypeLikeFloat32 = ...,
434
+ out: None | ndarray[Any, dtype[float32]] = ...,
435
+ ) -> ndarray[Any, dtype[float32]]: ...
436
+ @overload
437
+ def standard_gamma(
438
+ self,
439
+ shape: _ArrayLikeFloat_co,
440
+ size: None | _ShapeLike = ...,
441
+ dtype: _DTypeLikeFloat64 = ...,
442
+ out: None | ndarray[Any, dtype[float64]] = ...,
443
+ ) -> ndarray[Any, dtype[float64]]: ...
444
+ @overload
445
+ def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
446
+ @overload
447
+ def gamma(
448
+ self,
449
+ shape: _ArrayLikeFloat_co,
450
+ scale: _ArrayLikeFloat_co = ...,
451
+ size: None | _ShapeLike = ...,
452
+ ) -> ndarray[Any, dtype[float64]]: ...
453
+ @overload
454
+ def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
455
+ @overload
456
+ def f(
457
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
458
+ ) -> ndarray[Any, dtype[float64]]: ...
459
+ @overload
460
+ def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
461
+ @overload
462
+ def noncentral_f(
463
+ self,
464
+ dfnum: _ArrayLikeFloat_co,
465
+ dfden: _ArrayLikeFloat_co,
466
+ nonc: _ArrayLikeFloat_co,
467
+ size: None | _ShapeLike = ...,
468
+ ) -> ndarray[Any, dtype[float64]]: ...
469
+ @overload
470
+ def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
471
+ @overload
472
+ def chisquare(
473
+ self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
474
+ ) -> ndarray[Any, dtype[float64]]: ...
475
+ @overload
476
+ def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
477
+ @overload
478
+ def noncentral_chisquare(
479
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
480
+ ) -> ndarray[Any, dtype[float64]]: ...
481
+ @overload
482
+ def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
483
+ @overload
484
+ def standard_t(
485
+ self, df: _ArrayLikeFloat_co, size: None = ...
486
+ ) -> ndarray[Any, dtype[float64]]: ...
487
+ @overload
488
+ def standard_t(
489
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
490
+ ) -> ndarray[Any, dtype[float64]]: ...
491
+ @overload
492
+ def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
493
+ @overload
494
+ def vonmises(
495
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
496
+ ) -> ndarray[Any, dtype[float64]]: ...
497
+ @overload
498
+ def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
499
+ @overload
500
+ def pareto(
501
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
502
+ ) -> ndarray[Any, dtype[float64]]: ...
503
+ @overload
504
+ def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
505
+ @overload
506
+ def weibull(
507
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
508
+ ) -> ndarray[Any, dtype[float64]]: ...
509
+ @overload
510
+ def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
511
+ @overload
512
+ def power(
513
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
514
+ ) -> ndarray[Any, dtype[float64]]: ...
515
+ @overload
516
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
517
+ @overload
518
+ def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
519
+ @overload
520
+ def laplace(
521
+ self,
522
+ loc: _FloatLike_co = ...,
523
+ scale: _FloatLike_co = ...,
524
+ size: None = ...,
525
+ ) -> float: ... # type: ignore[misc]
526
+ @overload
527
+ def laplace(
528
+ self,
529
+ loc: _ArrayLikeFloat_co = ...,
530
+ scale: _ArrayLikeFloat_co = ...,
531
+ size: None | _ShapeLike = ...,
532
+ ) -> ndarray[Any, dtype[float64]]: ...
533
+ @overload
534
+ def gumbel(
535
+ self,
536
+ loc: _FloatLike_co = ...,
537
+ scale: _FloatLike_co = ...,
538
+ size: None = ...,
539
+ ) -> float: ... # type: ignore[misc]
540
+ @overload
541
+ def gumbel(
542
+ self,
543
+ loc: _ArrayLikeFloat_co = ...,
544
+ scale: _ArrayLikeFloat_co = ...,
545
+ size: None | _ShapeLike = ...,
546
+ ) -> ndarray[Any, dtype[float64]]: ...
547
+ @overload
548
+ def logistic(
549
+ self,
550
+ loc: _FloatLike_co = ...,
551
+ scale: _FloatLike_co = ...,
552
+ size: None = ...,
553
+ ) -> float: ... # type: ignore[misc]
554
+ @overload
555
+ def logistic(
556
+ self,
557
+ loc: _ArrayLikeFloat_co = ...,
558
+ scale: _ArrayLikeFloat_co = ...,
559
+ size: None | _ShapeLike = ...,
560
+ ) -> ndarray[Any, dtype[float64]]: ...
561
+ @overload
562
+ def lognormal(
563
+ self,
564
+ mean: _FloatLike_co = ...,
565
+ sigma: _FloatLike_co = ...,
566
+ size: None = ...,
567
+ ) -> float: ... # type: ignore[misc]
568
+ @overload
569
+ def lognormal(
570
+ self,
571
+ mean: _ArrayLikeFloat_co = ...,
572
+ sigma: _ArrayLikeFloat_co = ...,
573
+ size: None | _ShapeLike = ...,
574
+ ) -> ndarray[Any, dtype[float64]]: ...
575
+ @overload
576
+ def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc]
577
+ @overload
578
+ def rayleigh(
579
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
580
+ ) -> ndarray[Any, dtype[float64]]: ...
581
+ @overload
582
+ def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc]
583
+ @overload
584
+ def wald(
585
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
586
+ ) -> ndarray[Any, dtype[float64]]: ...
587
+ @overload
588
+ def triangular(
589
+ self,
590
+ left: _FloatLike_co,
591
+ mode: _FloatLike_co,
592
+ right: _FloatLike_co,
593
+ size: None = ...,
594
+ ) -> float: ... # type: ignore[misc]
595
+ @overload
596
+ def triangular(
597
+ self,
598
+ left: _ArrayLikeFloat_co,
599
+ mode: _ArrayLikeFloat_co,
600
+ right: _ArrayLikeFloat_co,
601
+ size: None | _ShapeLike = ...,
602
+ ) -> ndarray[Any, dtype[float64]]: ...
603
+ @overload
604
+ def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
605
+ @overload
606
+ def binomial(
607
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
608
+ ) -> ndarray[Any, dtype[int64]]: ...
609
+ @overload
610
+ def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
611
+ @overload
612
+ def negative_binomial(
613
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
614
+ ) -> ndarray[Any, dtype[int64]]: ...
615
+ @overload
616
+ def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc]
617
+ @overload
618
+ def poisson(
619
+ self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
620
+ ) -> ndarray[Any, dtype[int64]]: ...
621
+ @overload
622
+ def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
623
+ @overload
624
+ def zipf(
625
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
626
+ ) -> ndarray[Any, dtype[int64]]: ...
627
+ @overload
628
+ def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
629
+ @overload
630
+ def geometric(
631
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
632
+ ) -> ndarray[Any, dtype[int64]]: ...
633
+ @overload
634
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
635
+ @overload
636
+ def hypergeometric(
637
+ self,
638
+ ngood: _ArrayLikeInt_co,
639
+ nbad: _ArrayLikeInt_co,
640
+ nsample: _ArrayLikeInt_co,
641
+ size: None | _ShapeLike = ...,
642
+ ) -> ndarray[Any, dtype[int64]]: ...
643
+ @overload
644
+ def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc]
645
+ @overload
646
+ def logseries(
647
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
648
+ ) -> ndarray[Any, dtype[int64]]: ...
649
+ def multivariate_normal(
650
+ self,
651
+ mean: _ArrayLikeFloat_co,
652
+ cov: _ArrayLikeFloat_co,
653
+ size: None | _ShapeLike = ...,
654
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
655
+ tol: float = ...,
656
+ *,
657
+ method: Literal["svd", "eigh", "cholesky"] = ...,
658
+ ) -> ndarray[Any, dtype[float64]]: ...
659
+ def multinomial(
660
+ self, n: _ArrayLikeInt_co,
661
+ pvals: _ArrayLikeFloat_co,
662
+ size: None | _ShapeLike = ...
663
+ ) -> ndarray[Any, dtype[int64]]: ...
664
+ def multivariate_hypergeometric(
665
+ self,
666
+ colors: _ArrayLikeInt_co,
667
+ nsample: int,
668
+ size: None | _ShapeLike = ...,
669
+ method: Literal["marginals", "count"] = ...,
670
+ ) -> ndarray[Any, dtype[int64]]: ...
671
+ def dirichlet(
672
+ self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
673
+ ) -> ndarray[Any, dtype[float64]]: ...
674
+ def permuted(
675
+ self, x: ArrayLike, *, axis: None | int = ..., out: None | ndarray[Any, Any] = ...
676
+ ) -> ndarray[Any, Any]: ...
677
+ def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ...
678
+
679
+ def default_rng(
680
+ seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ...
681
+ ) -> Generator: ...
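Note: the stub above mirrors the runtime Generator API; a short, illustrative use of the public default_rng entry point (the seed and sizes below are placeholders, not from the source):

    import numpy as np

    rng = np.random.default_rng(12345)            # Generator backed by PCG64
    draws = rng.standard_normal(size=(2, 3))      # float64 array, shape (2, 3)
    ints = rng.integers(low=0, high=10, size=5)   # int64 array, values in [0, 10)
    children = rng.spawn(2)                       # independent child Generators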
.venv/lib/python3.11/site-packages/numpy/random/_mt19937.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d39b0e5a9e90613deede597857f8abec29cfb2ccccaff6ed6f11cbc10f684a82
+ size 120312
.venv/lib/python3.11/site-packages/numpy/random/_mt19937.pyi ADDED
@@ -0,0 +1,22 @@
+ from typing import Any, TypedDict
+
+ from numpy import dtype, ndarray, uint32
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
+ from numpy._typing import _ArrayLikeInt_co
+
+ class _MT19937Internal(TypedDict):
+     key: ndarray[Any, dtype[uint32]]
+     pos: int
+
+ class _MT19937State(TypedDict):
+     bit_generator: str
+     state: _MT19937Internal
+
+ class MT19937(BitGenerator):
+     def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+     def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ...
+     def jumped(self, jumps: int = ...) -> MT19937: ...
+     @property
+     def state(self) -> _MT19937State: ...
+     @state.setter
+     def state(self, value: _MT19937State) -> None: ...
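Note: an illustrative use of the MT19937 bit generator typed above (a sketch, not part of the package):

    from numpy.random import Generator, MT19937

    bg = MT19937(seed=1234)
    rng = Generator(bg)
    print(rng.random(3))

    # jumped() returns a copy advanced as if 2**128 draws had been made,
    # which is one way to build independent parallel streams.
    rng_far = Generator(bg.jumped())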
.venv/lib/python3.11/site-packages/numpy/random/_pcg64.pyi ADDED
@@ -0,0 +1,42 @@
+ from typing import TypedDict
+
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
+ from numpy._typing import _ArrayLikeInt_co
+
+ class _PCG64Internal(TypedDict):
+     state: int
+     inc: int
+
+ class _PCG64State(TypedDict):
+     bit_generator: str
+     state: _PCG64Internal
+     has_uint32: int
+     uinteger: int
+
+ class PCG64(BitGenerator):
+     def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+     def jumped(self, jumps: int = ...) -> PCG64: ...
+     @property
+     def state(
+         self,
+     ) -> _PCG64State: ...
+     @state.setter
+     def state(
+         self,
+         value: _PCG64State,
+     ) -> None: ...
+     def advance(self, delta: int) -> PCG64: ...
+
+ class PCG64DXSM(BitGenerator):
+     def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+     def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
+     @property
+     def state(
+         self,
+     ) -> _PCG64State: ...
+     @state.setter
+     def state(
+         self,
+         value: _PCG64State,
+     ) -> None: ...
+     def advance(self, delta: int) -> PCG64DXSM: ...
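Note: a sketch of how the PCG64 methods typed above are typically used (illustrative only; the seed and step counts are placeholders):

    from numpy.random import Generator, PCG64

    bg = PCG64(seed=42)
    rng = Generator(bg)
    _ = rng.standard_normal(10)

    bg.advance(1_000_000)               # skip ahead in the stream without drawing
    rng_other = Generator(bg.jumped())  # far-separated copy for a second stream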
.venv/lib/python3.11/site-packages/numpy/random/_philox.pyi ADDED
@@ -0,0 +1,36 @@
+ from typing import Any, TypedDict
+
+ from numpy import dtype, ndarray, uint64
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
+ from numpy._typing import _ArrayLikeInt_co
+
+ class _PhiloxInternal(TypedDict):
+     counter: ndarray[Any, dtype[uint64]]
+     key: ndarray[Any, dtype[uint64]]
+
+ class _PhiloxState(TypedDict):
+     bit_generator: str
+     state: _PhiloxInternal
+     buffer: ndarray[Any, dtype[uint64]]
+     buffer_pos: int
+     has_uint32: int
+     uinteger: int
+
+ class Philox(BitGenerator):
+     def __init__(
+         self,
+         seed: None | _ArrayLikeInt_co | SeedSequence = ...,
+         counter: None | _ArrayLikeInt_co = ...,
+         key: None | _ArrayLikeInt_co = ...,
+     ) -> None: ...
+     @property
+     def state(
+         self,
+     ) -> _PhiloxState: ...
+     @state.setter
+     def state(
+         self,
+         value: _PhiloxState,
+     ) -> None: ...
+     def jumped(self, jumps: int = ...) -> Philox: ...
+     def advance(self, delta: int) -> Philox: ...
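Note: Philox is counter-based, so distinct keys give independent streams; an illustrative sketch using the constructor signature typed above:

    from numpy.random import Generator, Philox

    # One independent stream per worker, keyed explicitly.
    rngs = [Generator(Philox(key=k)) for k in range(4)]
    draws = [r.random(2) for r in rngs]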
.venv/lib/python3.11/site-packages/numpy/random/_pickle.py ADDED
@@ -0,0 +1,80 @@
+ from .mtrand import RandomState
+ from ._philox import Philox
+ from ._pcg64 import PCG64, PCG64DXSM
+ from ._sfc64 import SFC64
+
+ from ._generator import Generator
+ from ._mt19937 import MT19937
+
+ BitGenerators = {'MT19937': MT19937,
+                  'PCG64': PCG64,
+                  'PCG64DXSM': PCG64DXSM,
+                  'Philox': Philox,
+                  'SFC64': SFC64,
+                  }
+
+
+ def __bit_generator_ctor(bit_generator_name='MT19937'):
+     """
+     Pickling helper function that returns a bit generator object
+
+     Parameters
+     ----------
+     bit_generator_name : str
+         String containing the name of the BitGenerator
+
+     Returns
+     -------
+     bit_generator : BitGenerator
+         BitGenerator instance
+     """
+     if bit_generator_name in BitGenerators:
+         bit_generator = BitGenerators[bit_generator_name]
+     else:
+         raise ValueError(str(bit_generator_name) + ' is not a known '
+                          'BitGenerator module.')
+
+     return bit_generator()
+
+
+ def __generator_ctor(bit_generator_name="MT19937",
+                      bit_generator_ctor=__bit_generator_ctor):
+     """
+     Pickling helper function that returns a Generator object
+
+     Parameters
+     ----------
+     bit_generator_name : str
+         String containing the core BitGenerator's name
+     bit_generator_ctor : callable, optional
+         Callable function that takes bit_generator_name as its only argument
+         and returns an instantized bit generator.
+
+     Returns
+     -------
+     rg : Generator
+         Generator using the named core BitGenerator
+     """
+     return Generator(bit_generator_ctor(bit_generator_name))
+
+
+ def __randomstate_ctor(bit_generator_name="MT19937",
+                        bit_generator_ctor=__bit_generator_ctor):
+     """
+     Pickling helper function that returns a legacy RandomState-like object
+
+     Parameters
+     ----------
+     bit_generator_name : str
+         String containing the core BitGenerator's name
+     bit_generator_ctor : callable, optional
+         Callable function that takes bit_generator_name as its only argument
+         and returns an instantized bit generator.
+
+     Returns
+     -------
+     rs : RandomState
+         Legacy RandomState using the named core BitGenerator
+     """
+
+     return RandomState(bit_generator_ctor(bit_generator_name))
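Note: these private constructors exist so that Generator, RandomState and the bit generators survive pickling; a small round-trip sketch (illustrative, not part of the module):

    import pickle
    from numpy.random import Generator, PCG64

    rng = Generator(PCG64(2021))
    clone = pickle.loads(pickle.dumps(rng))

    # The clone restores the same bit-generator state, so the streams agree.
    assert rng.standard_normal() == clone.standard_normal()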
.venv/lib/python3.11/site-packages/numpy/random/_sfc64.cpython-311-x86_64-linux-gnu.so ADDED
Binary file (76.8 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/_sfc64.pyi ADDED
@@ -0,0 +1,28 @@
+ from typing import Any, TypedDict
+
+ from numpy import dtype as dtype
+ from numpy import ndarray as ndarray
+ from numpy import uint64
+ from numpy.random.bit_generator import BitGenerator, SeedSequence
+ from numpy._typing import _ArrayLikeInt_co
+
+ class _SFC64Internal(TypedDict):
+     state: ndarray[Any, dtype[uint64]]
+
+ class _SFC64State(TypedDict):
+     bit_generator: str
+     state: _SFC64Internal
+     has_uint32: int
+     uinteger: int
+
+ class SFC64(BitGenerator):
+     def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
+     @property
+     def state(
+         self,
+     ) -> _SFC64State: ...
+     @state.setter
+     def state(
+         self,
+         value: _SFC64State,
+     ) -> None: ...
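Note: SFC64 plugs into Generator like the other bit generators; a one-line sketch for completeness (illustrative only):

    from numpy.random import Generator, SFC64

    rng = Generator(SFC64(seed=7))
    print(rng.integers(0, 100, size=4))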
.venv/lib/python3.11/site-packages/numpy/random/bit_generator.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4498c107393d71027b88b81505ba7b00591b1085dab86f08262ca675407a990
+ size 242584
.venv/lib/python3.11/site-packages/numpy/random/bit_generator.pxd ADDED
@@ -0,0 +1,35 @@
+ cimport numpy as np
+ from libc.stdint cimport uint32_t, uint64_t
+
+ cdef extern from "numpy/random/bitgen.h":
+     struct bitgen:
+         void *state
+         uint64_t (*next_uint64)(void *st) nogil
+         uint32_t (*next_uint32)(void *st) nogil
+         double (*next_double)(void *st) nogil
+         uint64_t (*next_raw)(void *st) nogil
+
+     ctypedef bitgen bitgen_t
+
+ cdef class BitGenerator():
+     cdef readonly object _seed_seq
+     cdef readonly object lock
+     cdef bitgen_t _bitgen
+     cdef readonly object _ctypes
+     cdef readonly object _cffi
+     cdef readonly object capsule
+
+
+ cdef class SeedSequence():
+     cdef readonly object entropy
+     cdef readonly tuple spawn_key
+     cdef readonly Py_ssize_t pool_size
+     cdef readonly object pool
+     cdef readonly uint32_t n_children_spawned
+
+     cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
+                      np.ndarray[np.npy_uint32, ndim=1] entropy_array)
+     cdef get_assembled_entropy(self)
+
+ cdef class SeedlessSequence():
+     pass
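Note: the bitgen_t struct declared here is also reachable from pure Python through the ctypes/cffi interfaces that every BitGenerator exposes; a rough sketch (illustrative, field names taken from the stubs that follow):

    from numpy.random import PCG64

    bg = PCG64()
    iface = bg.ctypes               # interface onto the underlying bitgen_t
    next_u64 = iface.next_uint64    # ctypes wrapper for uint64_t (*next_uint64)(void *)
    # Calling it with the state pointer yields one raw 64-bit draw and
    # advances the generator in place.
    print(next_u64(iface.state))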
.venv/lib/python3.11/site-packages/numpy/random/bit_generator.pyi ADDED
@@ -0,0 +1,112 @@
1
+ import abc
2
+ from threading import Lock
3
+ from collections.abc import Callable, Mapping, Sequence
4
+ from typing import (
5
+ Any,
6
+ NamedTuple,
7
+ TypedDict,
8
+ TypeVar,
9
+ Union,
10
+ overload,
11
+ Literal,
12
+ )
13
+
14
+ from numpy import dtype, ndarray, uint32, uint64
15
+ from numpy._typing import _ArrayLikeInt_co, _ShapeLike, _SupportsDType, _UInt32Codes, _UInt64Codes
16
+
17
+ _T = TypeVar("_T")
18
+
19
+ _DTypeLikeUint32 = Union[
20
+ dtype[uint32],
21
+ _SupportsDType[dtype[uint32]],
22
+ type[uint32],
23
+ _UInt32Codes,
24
+ ]
25
+ _DTypeLikeUint64 = Union[
26
+ dtype[uint64],
27
+ _SupportsDType[dtype[uint64]],
28
+ type[uint64],
29
+ _UInt64Codes,
30
+ ]
31
+
32
+ class _SeedSeqState(TypedDict):
33
+ entropy: None | int | Sequence[int]
34
+ spawn_key: tuple[int, ...]
35
+ pool_size: int
36
+ n_children_spawned: int
37
+
38
+ class _Interface(NamedTuple):
39
+ state_address: Any
40
+ state: Any
41
+ next_uint64: Any
42
+ next_uint32: Any
43
+ next_double: Any
44
+ bit_generator: Any
45
+
46
+ class ISeedSequence(abc.ABC):
47
+ @abc.abstractmethod
48
+ def generate_state(
49
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
50
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
51
+
52
+ class ISpawnableSeedSequence(ISeedSequence):
53
+ @abc.abstractmethod
54
+ def spawn(self: _T, n_children: int) -> list[_T]: ...
55
+
56
+ class SeedlessSeedSequence(ISpawnableSeedSequence):
57
+ def generate_state(
58
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
59
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
60
+ def spawn(self: _T, n_children: int) -> list[_T]: ...
61
+
62
+ class SeedSequence(ISpawnableSeedSequence):
63
+ entropy: None | int | Sequence[int]
64
+ spawn_key: tuple[int, ...]
65
+ pool_size: int
66
+ n_children_spawned: int
67
+ pool: ndarray[Any, dtype[uint32]]
68
+ def __init__(
69
+ self,
70
+ entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ...,
71
+ *,
72
+ spawn_key: Sequence[int] = ...,
73
+ pool_size: int = ...,
74
+ n_children_spawned: int = ...,
75
+ ) -> None: ...
76
+ def __repr__(self) -> str: ...
77
+ @property
78
+ def state(
79
+ self,
80
+ ) -> _SeedSeqState: ...
81
+ def generate_state(
82
+ self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ...
83
+ ) -> ndarray[Any, dtype[uint32 | uint64]]: ...
84
+ def spawn(self, n_children: int) -> list[SeedSequence]: ...
85
+
86
+ class BitGenerator(abc.ABC):
87
+ lock: Lock
88
+ def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
89
+ def __getstate__(self) -> dict[str, Any]: ...
90
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
91
+ def __reduce__(
92
+ self,
93
+ ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ...
94
+ @abc.abstractmethod
95
+ @property
96
+ def state(self) -> Mapping[str, Any]: ...
97
+ @state.setter
98
+ def state(self, value: Mapping[str, Any]) -> None: ...
99
+ @property
100
+ def seed_seq(self) -> ISeedSequence: ...
101
+ def spawn(self, n_children: int) -> list[BitGenerator]: ...
102
+ @overload
103
+ def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc]
104
+ @overload
105
+ def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> ndarray[Any, dtype[uint64]]: ... # type: ignore[misc]
106
+ @overload
107
+ def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... # type: ignore[misc]
108
+ def _benchmark(self, cnt: int, method: str = ...) -> None: ...
109
+ @property
110
+ def ctypes(self) -> _Interface: ...
111
+ @property
112
+ def cffi(self) -> _Interface: ...
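Note: a sketch of the SeedSequence workflow typed above, spawning independent child sequences (illustrative; the entropy value is a placeholder):

    from numpy.random import Generator, PCG64, SeedSequence

    ss = SeedSequence(entropy=8675309)
    print(ss.generate_state(4))       # four uint32 words derived from the entropy

    # Child sequences seed statistically independent streams.
    rngs = [Generator(PCG64(child)) for child in ss.spawn(3)]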
.venv/lib/python3.11/site-packages/numpy/random/c_distributions.pxd ADDED
@@ -0,0 +1,120 @@
1
+ #!python
2
+ #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
3
+ from numpy cimport npy_intp
4
+
5
+ from libc.stdint cimport (uint64_t, int32_t, int64_t)
6
+ from numpy.random cimport bitgen_t
7
+
8
+ cdef extern from "numpy/random/distributions.h":
9
+
10
+ struct s_binomial_t:
11
+ int has_binomial
12
+ double psave
13
+ int64_t nsave
14
+ double r
15
+ double q
16
+ double fm
17
+ int64_t m
18
+ double p1
19
+ double xm
20
+ double xl
21
+ double xr
22
+ double c
23
+ double laml
24
+ double lamr
25
+ double p2
26
+ double p3
27
+ double p4
28
+
29
+ ctypedef s_binomial_t binomial_t
30
+
31
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
32
+ double random_standard_uniform(bitgen_t *bitgen_state) nogil
33
+ void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil
34
+ void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
35
+
36
+ double random_standard_exponential(bitgen_t *bitgen_state) nogil
37
+ float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
38
+ void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
39
+ void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
40
+ void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil
41
+ void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil
42
+
43
+ double random_standard_normal(bitgen_t* bitgen_state) nogil
44
+ float random_standard_normal_f(bitgen_t *bitgen_state) nogil
45
+ void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil
46
+ void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil
47
+ double random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil
48
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
49
+
50
+ float random_standard_uniform_f(bitgen_t *bitgen_state) nogil
51
+ void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil
52
+ float random_standard_normal_f(bitgen_t* bitgen_state) nogil
53
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
54
+
55
+ int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
56
+ int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
57
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
58
+ uint64_t random_uint(bitgen_t *bitgen_state) nogil
59
+
60
+ double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil
61
+
62
+ double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
63
+ float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil
64
+
65
+ double random_exponential(bitgen_t *bitgen_state, double scale) nogil
66
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
67
+ double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
68
+ double random_chisquare(bitgen_t *bitgen_state, double df) nogil
69
+ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
70
+ double random_standard_cauchy(bitgen_t *bitgen_state) nogil
71
+ double random_pareto(bitgen_t *bitgen_state, double a) nogil
72
+ double random_weibull(bitgen_t *bitgen_state, double a) nogil
73
+ double random_power(bitgen_t *bitgen_state, double a) nogil
74
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
75
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
76
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
77
+ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
78
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
79
+ double random_standard_t(bitgen_t *bitgen_state, double df) nogil
80
+ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
81
+ double nonc) nogil
82
+ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
83
+ double dfden, double nonc) nogil
84
+ double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
85
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
86
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
87
+ double right) nogil
88
+
89
+ int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
90
+ int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
91
+ int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
92
+ int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
93
+ int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
94
+ int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
95
+ int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
96
+ int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
97
+ int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
98
+ int64_t sample) nogil
99
+
100
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
101
+
102
+ # Generate random uint64 numbers in closed interval [off, off + rng].
103
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
104
+ uint64_t off, uint64_t rng,
105
+ uint64_t mask, bint use_masked) nogil
106
+
107
+ void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
108
+ double *pix, npy_intp d, binomial_t *binomial) nogil
109
+
110
+ int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
111
+ int64_t total,
112
+ size_t num_colors, int64_t *colors,
113
+ int64_t nsample,
114
+ size_t num_variates, int64_t *variates) nogil
115
+ void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
116
+ int64_t total,
117
+ size_t num_colors, int64_t *colors,
118
+ int64_t nsample,
119
+ size_t num_variates, int64_t *variates) nogil
120
+
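Note: these C declarations back the corresponding Generator methods at the Python level; a rough mapping, shown in Python for orientation (the shipped _examples/cython sources demonstrate the Cython side):

    from numpy.random import Generator, PCG64

    rng = Generator(PCG64())
    rng.standard_normal()          # random_standard_normal
    rng.gamma(3.0, scale=2.0)      # random_gamma
    rng.binomial(10, 0.3)          # random_binomial
    rng.hypergeometric(10, 5, 6)   # random_hypergeometric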
.venv/lib/python3.11/site-packages/numpy/random/lib/libnpyrandom.a ADDED
Binary file (71.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/random/mtrand.pyi ADDED
@@ -0,0 +1,571 @@
1
+ import builtins
2
+ from collections.abc import Callable
3
+ from typing import Any, Union, overload, Literal
4
+
5
+ from numpy import (
6
+ bool_,
7
+ dtype,
8
+ float32,
9
+ float64,
10
+ int8,
11
+ int16,
12
+ int32,
13
+ int64,
14
+ int_,
15
+ ndarray,
16
+ uint,
17
+ uint8,
18
+ uint16,
19
+ uint32,
20
+ uint64,
21
+ )
22
+ from numpy.random.bit_generator import BitGenerator
23
+ from numpy._typing import (
24
+ ArrayLike,
25
+ _ArrayLikeFloat_co,
26
+ _ArrayLikeInt_co,
27
+ _DoubleCodes,
28
+ _DTypeLikeBool,
29
+ _DTypeLikeInt,
30
+ _DTypeLikeUInt,
31
+ _Float32Codes,
32
+ _Float64Codes,
33
+ _Int8Codes,
34
+ _Int16Codes,
35
+ _Int32Codes,
36
+ _Int64Codes,
37
+ _IntCodes,
38
+ _ShapeLike,
39
+ _SingleCodes,
40
+ _SupportsDType,
41
+ _UInt8Codes,
42
+ _UInt16Codes,
43
+ _UInt32Codes,
44
+ _UInt64Codes,
45
+ _UIntCodes,
46
+ )
47
+
48
+ _DTypeLikeFloat32 = Union[
49
+ dtype[float32],
50
+ _SupportsDType[dtype[float32]],
51
+ type[float32],
52
+ _Float32Codes,
53
+ _SingleCodes,
54
+ ]
55
+
56
+ _DTypeLikeFloat64 = Union[
57
+ dtype[float64],
58
+ _SupportsDType[dtype[float64]],
59
+ type[float],
60
+ type[float64],
61
+ _Float64Codes,
62
+ _DoubleCodes,
63
+ ]
64
+
65
+ class RandomState:
66
+ _bit_generator: BitGenerator
67
+ def __init__(self, seed: None | _ArrayLikeInt_co | BitGenerator = ...) -> None: ...
68
+ def __repr__(self) -> str: ...
69
+ def __str__(self) -> str: ...
70
+ def __getstate__(self) -> dict[str, Any]: ...
71
+ def __setstate__(self, state: dict[str, Any]) -> None: ...
72
+ def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ...
73
+ def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ...
74
+ @overload
75
+ def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ...
76
+ @overload
77
+ def get_state(
78
+ self, legacy: Literal[True] = ...
79
+ ) -> dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]: ...
80
+ def set_state(
81
+ self, state: dict[str, Any] | tuple[str, ndarray[Any, dtype[uint32]], int, int, float]
82
+ ) -> None: ...
83
+ @overload
84
+ def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc]
85
+ @overload
86
+ def random_sample(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
87
+ @overload
88
+ def random(self, size: None = ...) -> float: ... # type: ignore[misc]
89
+ @overload
90
+ def random(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
91
+ @overload
92
+ def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc]
93
+ @overload
94
+ def beta(
95
+ self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
96
+ ) -> ndarray[Any, dtype[float64]]: ...
97
+ @overload
98
+ def exponential(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
99
+ @overload
100
+ def exponential(
101
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
102
+ ) -> ndarray[Any, dtype[float64]]: ...
103
+ @overload
104
+ def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc]
105
+ @overload
106
+ def standard_exponential(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
107
+ @overload
108
+ def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc]
109
+ @overload
110
+ def tomaxint(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[int_]]: ...
111
+ @overload
112
+ def randint( # type: ignore[misc]
113
+ self,
114
+ low: int,
115
+ high: None | int = ...,
116
+ ) -> int: ...
117
+ @overload
118
+ def randint( # type: ignore[misc]
119
+ self,
120
+ low: int,
121
+ high: None | int = ...,
122
+ size: None = ...,
123
+ dtype: _DTypeLikeBool = ...,
124
+ ) -> bool: ...
125
+ @overload
126
+ def randint( # type: ignore[misc]
127
+ self,
128
+ low: int,
129
+ high: None | int = ...,
130
+ size: None = ...,
131
+ dtype: _DTypeLikeInt | _DTypeLikeUInt = ...,
132
+ ) -> int: ...
133
+ @overload
134
+ def randint( # type: ignore[misc]
135
+ self,
136
+ low: _ArrayLikeInt_co,
137
+ high: None | _ArrayLikeInt_co = ...,
138
+ size: None | _ShapeLike = ...,
139
+ ) -> ndarray[Any, dtype[int_]]: ...
140
+ @overload
141
+ def randint( # type: ignore[misc]
142
+ self,
143
+ low: _ArrayLikeInt_co,
144
+ high: None | _ArrayLikeInt_co = ...,
145
+ size: None | _ShapeLike = ...,
146
+ dtype: _DTypeLikeBool = ...,
147
+ ) -> ndarray[Any, dtype[bool_]]: ...
148
+ @overload
149
+ def randint( # type: ignore[misc]
150
+ self,
151
+ low: _ArrayLikeInt_co,
152
+ high: None | _ArrayLikeInt_co = ...,
153
+ size: None | _ShapeLike = ...,
154
+ dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ...,
155
+ ) -> ndarray[Any, dtype[int8]]: ...
156
+ @overload
157
+ def randint( # type: ignore[misc]
158
+ self,
159
+ low: _ArrayLikeInt_co,
160
+ high: None | _ArrayLikeInt_co = ...,
161
+ size: None | _ShapeLike = ...,
162
+ dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ...,
163
+ ) -> ndarray[Any, dtype[int16]]: ...
164
+ @overload
165
+ def randint( # type: ignore[misc]
166
+ self,
167
+ low: _ArrayLikeInt_co,
168
+ high: None | _ArrayLikeInt_co = ...,
169
+ size: None | _ShapeLike = ...,
170
+ dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ...,
171
+ ) -> ndarray[Any, dtype[int32]]: ...
172
+ @overload
173
+ def randint( # type: ignore[misc]
174
+ self,
175
+ low: _ArrayLikeInt_co,
176
+ high: None | _ArrayLikeInt_co = ...,
177
+ size: None | _ShapeLike = ...,
178
+ dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ...,
179
+ ) -> ndarray[Any, dtype[int64]]: ...
180
+ @overload
181
+ def randint( # type: ignore[misc]
182
+ self,
183
+ low: _ArrayLikeInt_co,
184
+ high: None | _ArrayLikeInt_co = ...,
185
+ size: None | _ShapeLike = ...,
186
+ dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ...,
187
+ ) -> ndarray[Any, dtype[uint8]]: ...
188
+ @overload
189
+ def randint( # type: ignore[misc]
190
+ self,
191
+ low: _ArrayLikeInt_co,
192
+ high: None | _ArrayLikeInt_co = ...,
193
+ size: None | _ShapeLike = ...,
194
+ dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ...,
195
+ ) -> ndarray[Any, dtype[uint16]]: ...
196
+ @overload
197
+ def randint( # type: ignore[misc]
198
+ self,
199
+ low: _ArrayLikeInt_co,
200
+ high: None | _ArrayLikeInt_co = ...,
201
+ size: None | _ShapeLike = ...,
202
+ dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ...,
203
+ ) -> ndarray[Any, dtype[uint32]]: ...
204
+ @overload
205
+ def randint( # type: ignore[misc]
206
+ self,
207
+ low: _ArrayLikeInt_co,
208
+ high: None | _ArrayLikeInt_co = ...,
209
+ size: None | _ShapeLike = ...,
210
+ dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ...,
211
+ ) -> ndarray[Any, dtype[uint64]]: ...
212
+ @overload
213
+ def randint( # type: ignore[misc]
214
+ self,
215
+ low: _ArrayLikeInt_co,
216
+ high: None | _ArrayLikeInt_co = ...,
217
+ size: None | _ShapeLike = ...,
218
+ dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ...,
219
+ ) -> ndarray[Any, dtype[int_]]: ...
220
+ @overload
221
+ def randint( # type: ignore[misc]
222
+ self,
223
+ low: _ArrayLikeInt_co,
224
+ high: None | _ArrayLikeInt_co = ...,
225
+ size: None | _ShapeLike = ...,
226
+ dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ...,
227
+ ) -> ndarray[Any, dtype[uint]]: ...
228
+ def bytes(self, length: int) -> builtins.bytes: ...
229
+ @overload
230
+ def choice(
231
+ self,
232
+ a: int,
233
+ size: None = ...,
234
+ replace: bool = ...,
235
+ p: None | _ArrayLikeFloat_co = ...,
236
+ ) -> int: ...
237
+ @overload
238
+ def choice(
239
+ self,
240
+ a: int,
241
+ size: _ShapeLike = ...,
242
+ replace: bool = ...,
243
+ p: None | _ArrayLikeFloat_co = ...,
244
+ ) -> ndarray[Any, dtype[int_]]: ...
245
+ @overload
246
+ def choice(
247
+ self,
248
+ a: ArrayLike,
249
+ size: None = ...,
250
+ replace: bool = ...,
251
+ p: None | _ArrayLikeFloat_co = ...,
252
+ ) -> Any: ...
253
+ @overload
254
+ def choice(
255
+ self,
256
+ a: ArrayLike,
257
+ size: _ShapeLike = ...,
258
+ replace: bool = ...,
259
+ p: None | _ArrayLikeFloat_co = ...,
260
+ ) -> ndarray[Any, Any]: ...
261
+ @overload
262
+ def uniform(self, low: float = ..., high: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
263
+ @overload
264
+ def uniform(
265
+ self,
266
+ low: _ArrayLikeFloat_co = ...,
267
+ high: _ArrayLikeFloat_co = ...,
268
+ size: None | _ShapeLike = ...,
269
+ ) -> ndarray[Any, dtype[float64]]: ...
270
+ @overload
271
+ def rand(self) -> float: ...
272
+ @overload
273
+ def rand(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
274
+ @overload
275
+ def randn(self) -> float: ...
276
+ @overload
277
+ def randn(self, *args: int) -> ndarray[Any, dtype[float64]]: ...
278
+ @overload
279
+ def random_integers(self, low: int, high: None | int = ..., size: None = ...) -> int: ... # type: ignore[misc]
280
+ @overload
281
+ def random_integers(
282
+ self,
283
+ low: _ArrayLikeInt_co,
284
+ high: None | _ArrayLikeInt_co = ...,
285
+ size: None | _ShapeLike = ...,
286
+ ) -> ndarray[Any, dtype[int_]]: ...
287
+ @overload
288
+ def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc]
289
+ @overload
290
+ def standard_normal( # type: ignore[misc]
291
+ self, size: _ShapeLike = ...
292
+ ) -> ndarray[Any, dtype[float64]]: ...
293
+ @overload
294
+ def normal(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
295
+ @overload
296
+ def normal(
297
+ self,
298
+ loc: _ArrayLikeFloat_co = ...,
299
+ scale: _ArrayLikeFloat_co = ...,
300
+ size: None | _ShapeLike = ...,
301
+ ) -> ndarray[Any, dtype[float64]]: ...
302
+ @overload
303
+ def standard_gamma( # type: ignore[misc]
304
+ self,
305
+ shape: float,
306
+ size: None = ...,
307
+ ) -> float: ...
308
+ @overload
309
+ def standard_gamma(
310
+ self,
311
+ shape: _ArrayLikeFloat_co,
312
+ size: None | _ShapeLike = ...,
313
+ ) -> ndarray[Any, dtype[float64]]: ...
314
+ @overload
315
+ def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
316
+ @overload
317
+ def gamma(
318
+ self,
319
+ shape: _ArrayLikeFloat_co,
320
+ scale: _ArrayLikeFloat_co = ...,
321
+ size: None | _ShapeLike = ...,
322
+ ) -> ndarray[Any, dtype[float64]]: ...
323
+ @overload
324
+ def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc]
325
+ @overload
326
+ def f(
327
+ self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
328
+ ) -> ndarray[Any, dtype[float64]]: ...
329
+ @overload
330
+ def noncentral_f(self, dfnum: float, dfden: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
331
+ @overload
332
+ def noncentral_f(
333
+ self,
334
+ dfnum: _ArrayLikeFloat_co,
335
+ dfden: _ArrayLikeFloat_co,
336
+ nonc: _ArrayLikeFloat_co,
337
+ size: None | _ShapeLike = ...,
338
+ ) -> ndarray[Any, dtype[float64]]: ...
339
+ @overload
340
+ def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
341
+ @overload
342
+ def chisquare(
343
+ self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
344
+ ) -> ndarray[Any, dtype[float64]]: ...
345
+ @overload
346
+ def noncentral_chisquare(self, df: float, nonc: float, size: None = ...) -> float: ... # type: ignore[misc]
347
+ @overload
348
+ def noncentral_chisquare(
349
+ self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
350
+ ) -> ndarray[Any, dtype[float64]]: ...
351
+ @overload
352
+ def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc]
353
+ @overload
354
+ def standard_t(
355
+ self, df: _ArrayLikeFloat_co, size: None = ...
356
+ ) -> ndarray[Any, dtype[float64]]: ...
357
+ @overload
358
+ def standard_t(
359
+ self, df: _ArrayLikeFloat_co, size: _ShapeLike = ...
360
+ ) -> ndarray[Any, dtype[float64]]: ...
361
+ @overload
362
+ def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc]
363
+ @overload
364
+ def vonmises(
365
+ self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
366
+ ) -> ndarray[Any, dtype[float64]]: ...
367
+ @overload
368
+ def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
369
+ @overload
370
+ def pareto(
371
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
372
+ ) -> ndarray[Any, dtype[float64]]: ...
373
+ @overload
374
+ def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
375
+ @overload
376
+ def weibull(
377
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
378
+ ) -> ndarray[Any, dtype[float64]]: ...
379
+ @overload
380
+ def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc]
381
+ @overload
382
+ def power(
383
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
384
+ ) -> ndarray[Any, dtype[float64]]: ...
385
+ @overload
386
+ def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc]
387
+ @overload
388
+ def standard_cauchy(self, size: _ShapeLike = ...) -> ndarray[Any, dtype[float64]]: ...
389
+ @overload
390
+ def laplace(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
391
+ @overload
392
+ def laplace(
393
+ self,
394
+ loc: _ArrayLikeFloat_co = ...,
395
+ scale: _ArrayLikeFloat_co = ...,
396
+ size: None | _ShapeLike = ...,
397
+ ) -> ndarray[Any, dtype[float64]]: ...
398
+ @overload
399
+ def gumbel(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
400
+ @overload
401
+ def gumbel(
402
+ self,
403
+ loc: _ArrayLikeFloat_co = ...,
404
+ scale: _ArrayLikeFloat_co = ...,
405
+ size: None | _ShapeLike = ...,
406
+ ) -> ndarray[Any, dtype[float64]]: ...
407
+ @overload
+ def logistic(self, loc: float = ..., scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def logistic(
+ self,
+ loc: _ArrayLikeFloat_co = ...,
+ scale: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def lognormal(self, mean: float = ..., sigma: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def lognormal(
+ self,
+ mean: _ArrayLikeFloat_co = ...,
+ sigma: _ArrayLikeFloat_co = ...,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def rayleigh(
+ self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def wald(
+ self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def triangular(self, left: float, mode: float, right: float, size: None = ...) -> float: ... # type: ignore[misc]
+ @overload
+ def triangular(
+ self,
+ left: _ArrayLikeFloat_co,
+ mode: _ArrayLikeFloat_co,
+ right: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ @overload
+ def binomial(self, n: int, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def binomial(
+ self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def negative_binomial(self, n: float, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def negative_binomial(
+ self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def poisson(self, lam: float = ..., size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def poisson(
+ self, lam: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def zipf(
+ self, a: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def geometric(
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def hypergeometric(
+ self,
+ ngood: _ArrayLikeInt_co,
+ nbad: _ArrayLikeInt_co,
+ nsample: _ArrayLikeInt_co,
+ size: None | _ShapeLike = ...,
+ ) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc]
+ @overload
+ def logseries(
+ self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ def multivariate_normal(
+ self,
+ mean: _ArrayLikeFloat_co,
+ cov: _ArrayLikeFloat_co,
+ size: None | _ShapeLike = ...,
+ check_valid: Literal["warn", "raise", "ignore"] = ...,
+ tol: float = ...,
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def multinomial(
+ self, n: _ArrayLikeInt_co, pvals: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[int_]]: ...
+ def dirichlet(
+ self, alpha: _ArrayLikeFloat_co, size: None | _ShapeLike = ...
+ ) -> ndarray[Any, dtype[float64]]: ...
+ def shuffle(self, x: ArrayLike) -> None: ...
+ @overload
+ def permutation(self, x: int) -> ndarray[Any, dtype[int_]]: ...
+ @overload
+ def permutation(self, x: ArrayLike) -> ndarray[Any, Any]: ...
+
+ _rand: RandomState
+
+ beta = _rand.beta
+ binomial = _rand.binomial
+ bytes = _rand.bytes
+ chisquare = _rand.chisquare
+ choice = _rand.choice
+ dirichlet = _rand.dirichlet
+ exponential = _rand.exponential
+ f = _rand.f
+ gamma = _rand.gamma
+ get_state = _rand.get_state
+ geometric = _rand.geometric
+ gumbel = _rand.gumbel
+ hypergeometric = _rand.hypergeometric
+ laplace = _rand.laplace
+ logistic = _rand.logistic
+ lognormal = _rand.lognormal
+ logseries = _rand.logseries
+ multinomial = _rand.multinomial
+ multivariate_normal = _rand.multivariate_normal
+ negative_binomial = _rand.negative_binomial
+ noncentral_chisquare = _rand.noncentral_chisquare
+ noncentral_f = _rand.noncentral_f
+ normal = _rand.normal
+ pareto = _rand.pareto
+ permutation = _rand.permutation
+ poisson = _rand.poisson
+ power = _rand.power
+ rand = _rand.rand
+ randint = _rand.randint
+ randn = _rand.randn
+ random = _rand.random
+ random_integers = _rand.random_integers
+ random_sample = _rand.random_sample
+ rayleigh = _rand.rayleigh
+ seed = _rand.seed
+ set_state = _rand.set_state
+ shuffle = _rand.shuffle
+ standard_cauchy = _rand.standard_cauchy
+ standard_exponential = _rand.standard_exponential
+ standard_gamma = _rand.standard_gamma
+ standard_normal = _rand.standard_normal
+ standard_t = _rand.standard_t
+ triangular = _rand.triangular
+ uniform = _rand.uniform
+ vonmises = _rand.vonmises
+ wald = _rand.wald
+ weibull = _rand.weibull
+ zipf = _rand.zipf
+ # Two legacy aliases that are trivial wrappers around random_sample
+ sample = _rand.random_sample
+ ranf = _rand.random_sample
+
+ def set_bit_generator(bitgen: BitGenerator) -> None:
+ ...
+
+ def get_bit_generator() -> BitGenerator:
+ ...
+ ...
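Note: the stub above pairs a scalar overload (size: None) with an array overload (size accepting a shape) for each distribution, so a static type checker such as mypy infers a plain float or int when size is omitted and an ndarray otherwise; the # type: ignore[misc] on each scalar overload suppresses the overlapping-overload error that fires because a plain float also matches the array-like parameter types of the array overload. A minimal sketch of how these overloads resolve, assuming NumPy is installed; the comments restate the stub's annotated return types rather than the concrete runtime scalar types:

    import numpy as np

    # size omitted -> the scalar overloads apply (annotated float / int in the stub)
    t_scalar = np.random.standard_t(3.0)
    k_scalar = np.random.poisson(5.0)

    # size given as a shape -> the array overloads apply
    t_array = np.random.standard_t(3.0, size=(2, 4))   # ndarray[Any, dtype[float64]]
    k_array = np.random.binomial(10, 0.3, size=8)      # ndarray[Any, dtype[int_]]

    # Module-level names such as np.random.standard_t are bound methods of the
    # hidden _rand: RandomState singleton, so they share its global state
    # (e.g. the seed set via np.random.seed).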
.venv/lib/python3.11/site-packages/numpy/random/tests/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/numpy/random/tests/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (191 Bytes).