| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_wx.py
|
{
"start": 13161,
"end": 31504
}
|
class ____(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wx.Sizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
required_interactive_framework = "wx"
_timer_cls = TimerWx
manager_class = _api.classproperty(lambda cls: FigureManagerWx)
keyvald = {
wx.WXK_CONTROL: 'control',
wx.WXK_SHIFT: 'shift',
wx.WXK_ALT: 'alt',
wx.WXK_CAPITAL: 'caps_lock',
wx.WXK_LEFT: 'left',
wx.WXK_UP: 'up',
wx.WXK_RIGHT: 'right',
wx.WXK_DOWN: 'down',
wx.WXK_ESCAPE: 'escape',
wx.WXK_F1: 'f1',
wx.WXK_F2: 'f2',
wx.WXK_F3: 'f3',
wx.WXK_F4: 'f4',
wx.WXK_F5: 'f5',
wx.WXK_F6: 'f6',
wx.WXK_F7: 'f7',
wx.WXK_F8: 'f8',
wx.WXK_F9: 'f9',
wx.WXK_F10: 'f10',
wx.WXK_F11: 'f11',
wx.WXK_F12: 'f12',
wx.WXK_SCROLL: 'scroll_lock',
wx.WXK_PAUSE: 'break',
wx.WXK_BACK: 'backspace',
wx.WXK_RETURN: 'enter',
wx.WXK_INSERT: 'insert',
wx.WXK_DELETE: 'delete',
wx.WXK_HOME: 'home',
wx.WXK_END: 'end',
wx.WXK_PAGEUP: 'pageup',
wx.WXK_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_NUMPAD_ADD: '+',
wx.WXK_NUMPAD_SUBTRACT: '-',
wx.WXK_NUMPAD_MULTIPLY: '*',
wx.WXK_NUMPAD_DIVIDE: '/',
wx.WXK_NUMPAD_DECIMAL: 'dec',
wx.WXK_NUMPAD_ENTER: 'enter',
wx.WXK_NUMPAD_UP: 'up',
wx.WXK_NUMPAD_RIGHT: 'right',
wx.WXK_NUMPAD_DOWN: 'down',
wx.WXK_NUMPAD_LEFT: 'left',
wx.WXK_NUMPAD_PAGEUP: 'pageup',
wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD_HOME: 'home',
wx.WXK_NUMPAD_END: 'end',
wx.WXK_NUMPAD_INSERT: 'insert',
wx.WXK_NUMPAD_DELETE: 'delete',
}
def __init__(self, parent, id, figure=None):
"""
Initialize a FigureWx instance.
- Initialize the FigureCanvasBase and wxPanel parents.
- Set event handlers for resize, paint, and keyboard and mouse
interaction.
"""
FigureCanvasBase.__init__(self, figure)
size = wx.Size(*map(math.ceil, self.figure.bbox.size))
if wx.Platform != '__WXMSW__':
size = parent.FromDIP(size)
# Set preferred window size hint - helps the sizer, if one is connected
wx.Panel.__init__(self, parent, id, size=size)
self.bitmap = None
self._isDrawn = False
self._rubberband_rect = None
self._rubberband_pen_black = wx.Pen('BLACK', 1, wx.PENSTYLE_SHORT_DASH)
self._rubberband_pen_white = wx.Pen('WHITE', 1, wx.PENSTYLE_SOLID)
self.Bind(wx.EVT_SIZE, self._on_size)
self.Bind(wx.EVT_PAINT, self._on_paint)
self.Bind(wx.EVT_CHAR_HOOK, self._on_key_down)
self.Bind(wx.EVT_KEY_UP, self._on_key_up)
self.Bind(wx.EVT_LEFT_DOWN, self._on_mouse_button)
self.Bind(wx.EVT_LEFT_DCLICK, self._on_mouse_button)
self.Bind(wx.EVT_LEFT_UP, self._on_mouse_button)
self.Bind(wx.EVT_MIDDLE_DOWN, self._on_mouse_button)
self.Bind(wx.EVT_MIDDLE_DCLICK, self._on_mouse_button)
self.Bind(wx.EVT_MIDDLE_UP, self._on_mouse_button)
self.Bind(wx.EVT_RIGHT_DOWN, self._on_mouse_button)
self.Bind(wx.EVT_RIGHT_DCLICK, self._on_mouse_button)
self.Bind(wx.EVT_RIGHT_UP, self._on_mouse_button)
self.Bind(wx.EVT_MOUSE_AUX1_DOWN, self._on_mouse_button)
self.Bind(wx.EVT_MOUSE_AUX1_UP, self._on_mouse_button)
self.Bind(wx.EVT_MOUSE_AUX2_DOWN, self._on_mouse_button)
self.Bind(wx.EVT_MOUSE_AUX2_UP, self._on_mouse_button)
self.Bind(wx.EVT_MOUSE_AUX1_DCLICK, self._on_mouse_button)
self.Bind(wx.EVT_MOUSE_AUX2_DCLICK, self._on_mouse_button)
self.Bind(wx.EVT_MOUSEWHEEL, self._on_mouse_wheel)
self.Bind(wx.EVT_MOTION, self._on_motion)
self.Bind(wx.EVT_ENTER_WINDOW, self._on_enter)
self.Bind(wx.EVT_LEAVE_WINDOW, self._on_leave)
self.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self._on_capture_lost)
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self._on_capture_lost)
self.SetBackgroundStyle(wx.BG_STYLE_PAINT) # Reduce flicker.
self.SetBackgroundColour(wx.WHITE)
if wx.Platform == '__WXMAC__':
# Initial scaling. Other platforms handle this automatically
dpiScale = self.GetDPIScaleFactor()
self.SetInitialSize(self.GetSize()*(1/dpiScale))
self._set_device_pixel_ratio(dpiScale)
def Copy_to_Clipboard(self, event=None):
"""Copy bitmap of canvas to system clipboard."""
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
if not wx.TheClipboard.IsOpened():
open_success = wx.TheClipboard.Open()
if open_success:
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Flush()
wx.TheClipboard.Close()
def _update_device_pixel_ratio(self, *args, **kwargs):
# We need to be careful in cases with mixed resolution displays if
# device_pixel_ratio changes.
if self._set_device_pixel_ratio(self.GetDPIScaleFactor()):
self.draw()
def draw_idle(self):
# docstring inherited
_log.debug("%s - draw_idle()", type(self))
self._isDrawn = False # Force redraw
# Triggering a paint event is all that is needed to defer drawing
# until later. The platform will send the event when it thinks it is
# a good time (usually as soon as there are no other events pending).
self.Refresh(eraseBackground=False)
def flush_events(self):
# docstring inherited
wx.Yield()
def start_event_loop(self, timeout=0):
# docstring inherited
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
timer = wx.Timer(self, id=wx.ID_ANY)
if timeout > 0:
timer.Start(int(timeout * 1000), oneShot=True)
self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=timer.GetId())
# Event loop handler for start/stop event loop
self._event_loop = wx.GUIEventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
# docstring inherited
if hasattr(self, '_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
"""Return the wildcard string for the filesave dialog."""
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = sorted(filetypes.items())
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = f'{name} ({ext_list})|{ext_list}'
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
def gui_repaint(self, drawDC=None):
"""
Update the displayed image on the GUI canvas, using the supplied
wx.PaintDC device context.
"""
_log.debug("%s - gui_repaint()", type(self))
# The "if self" check avoids a "wrapped C/C++ object has been deleted"
# RuntimeError if doing things after window is closed.
if not (self and self.IsShownOnScreen()):
return
if not drawDC: # not called from OnPaint use a ClientDC
drawDC = wx.ClientDC(self)
# For 'WX' backend on Windows, the bitmap cannot be in use by another
# DC (see GraphicsContextWx._cache).
bmp = (self.bitmap.ConvertToImage().ConvertToBitmap()
if wx.Platform == '__WXMSW__'
and isinstance(self.figure.canvas.get_renderer(), RendererWx)
else self.bitmap)
drawDC.DrawBitmap(bmp, 0, 0)
if self._rubberband_rect is not None:
# Some versions of wx+python don't support numpy.float64 here.
x0, y0, x1, y1 = map(round, self._rubberband_rect)
rect = [(x0, y0, x1, y0), (x1, y0, x1, y1),
(x0, y0, x0, y1), (x0, y1, x1, y1)]
drawDC.DrawLineList(rect, self._rubberband_pen_white)
drawDC.DrawLineList(rect, self._rubberband_pen_black)
filetypes = {
**FigureCanvasBase.filetypes,
'bmp': 'Windows bitmap',
'jpeg': 'JPEG',
'jpg': 'JPEG',
'pcx': 'PCX',
'png': 'Portable Network Graphics',
'tif': 'Tagged Image Format File',
'tiff': 'Tagged Image Format File',
'xpm': 'X pixmap',
}
def _on_paint(self, event):
"""Called when wxPaintEvt is generated."""
_log.debug("%s - _on_paint()", type(self))
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
drawDC.Destroy()
def _on_size(self, event):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
self._update_device_pixel_ratio()
_log.debug("%s - _on_size()", type(self))
sz = self.GetParent().GetSizer()
if sz:
si = sz.GetItem(self)
if sz and si and not si.Proportion and not si.Flag & wx.EXPAND:
# managed by a sizer, but with a fixed size
size = self.GetMinSize()
else:
# variable size
size = self.GetClientSize()
# Do not allow size to become smaller than MinSize
size.IncTo(self.GetMinSize())
if getattr(self, "_width", None):
if size == (self._width, self._height):
# no change in size
return
self._width, self._height = size
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return # Empty figure
# Create a new, correctly sized bitmap
dpival = self.figure.dpi
if not wx.Platform == '__WXMSW__':
scale = self.GetDPIScaleFactor()
dpival /= scale
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
ResizeEvent("resize_event", self)._process()
self.draw_idle()
@staticmethod
def _mpl_buttons():
state = wx.GetMouseState()
# NOTE: Alternatively, we could use event.LeftIsDown() / etc. but this
# fails to report multiclick drags on macOS (other OSes have not been
# verified).
mod_table = [
(MouseButton.LEFT, state.LeftIsDown()),
(MouseButton.RIGHT, state.RightIsDown()),
(MouseButton.MIDDLE, state.MiddleIsDown()),
(MouseButton.BACK, state.Aux1IsDown()),
(MouseButton.FORWARD, state.Aux2IsDown()),
]
# State *after* press/release.
return {button for button, flag in mod_table if flag}
@staticmethod
def _mpl_modifiers(event=None, *, exclude=None):
mod_table = [
("ctrl", wx.MOD_CONTROL, wx.WXK_CONTROL),
("alt", wx.MOD_ALT, wx.WXK_ALT),
("shift", wx.MOD_SHIFT, wx.WXK_SHIFT),
]
if event is not None:
modifiers = event.GetModifiers()
return [name for name, mod, key in mod_table
if modifiers & mod and exclude != key]
else:
return [name for name, mod, key in mod_table
if wx.GetKeyState(key)]
def _get_key(self, event):
keyval = event.KeyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval < 256:
key = chr(keyval)
# wx always returns an uppercase, so make it lowercase if the shift
# key is not depressed (NOTE: this will not handle Caps Lock)
if not event.ShiftDown():
key = key.lower()
else:
return None
mods = self._mpl_modifiers(event, exclude=keyval)
if "shift" in mods and key.isupper():
mods.remove("shift")
return "+".join([*mods, key])
def _mpl_coords(self, pos=None):
"""
Convert a wx position, defaulting to the current cursor position, to
Matplotlib coordinates.
"""
if pos is None:
pos = wx.GetMouseState()
x, y = self.ScreenToClient(pos.X, pos.Y)
else:
x, y = pos.X, pos.Y
# flip y so y=0 is bottom of canvas
if not wx.Platform == '__WXMSW__':
scale = self.GetDPIScaleFactor()
return x*scale, self.figure.bbox.height - y*scale
else:
return x, self.figure.bbox.height - y
def _on_key_down(self, event):
"""Capture key press."""
KeyEvent("key_press_event", self,
self._get_key(event), *self._mpl_coords(),
guiEvent=event)._process()
if self:
event.Skip()
def _on_key_up(self, event):
"""Release key."""
KeyEvent("key_release_event", self,
self._get_key(event), *self._mpl_coords(),
guiEvent=event)._process()
if self:
event.Skip()
def set_cursor(self, cursor):
# docstring inherited
cursor = wx.Cursor(_api.check_getitem({
cursors.MOVE: wx.CURSOR_HAND,
cursors.HAND: wx.CURSOR_HAND,
cursors.POINTER: wx.CURSOR_ARROW,
cursors.SELECT_REGION: wx.CURSOR_CROSS,
cursors.WAIT: wx.CURSOR_WAIT,
cursors.RESIZE_HORIZONTAL: wx.CURSOR_SIZEWE,
cursors.RESIZE_VERTICAL: wx.CURSOR_SIZENS,
}, cursor=cursor))
self.SetCursor(cursor)
self.Refresh()
def _set_capture(self, capture=True):
"""Control wx mouse capture."""
if self.HasCapture():
self.ReleaseMouse()
if capture:
self.CaptureMouse()
def _on_capture_lost(self, event):
"""Capture changed or lost"""
self._set_capture(False)
def _on_mouse_button(self, event):
"""Start measuring on an axis."""
event.Skip()
self._set_capture(event.ButtonDown() or event.ButtonDClick())
x, y = self._mpl_coords(event)
button_map = {
wx.MOUSE_BTN_LEFT: MouseButton.LEFT,
wx.MOUSE_BTN_MIDDLE: MouseButton.MIDDLE,
wx.MOUSE_BTN_RIGHT: MouseButton.RIGHT,
wx.MOUSE_BTN_AUX1: MouseButton.BACK,
wx.MOUSE_BTN_AUX2: MouseButton.FORWARD,
}
button = event.GetButton()
button = button_map.get(button, button)
modifiers = self._mpl_modifiers(event)
if event.ButtonDown():
MouseEvent("button_press_event", self, x, y, button,
modifiers=modifiers, guiEvent=event)._process()
elif event.ButtonDClick():
MouseEvent("button_press_event", self, x, y, button, dblclick=True,
modifiers=modifiers, guiEvent=event)._process()
elif event.ButtonUp():
MouseEvent("button_release_event", self, x, y, button,
modifiers=modifiers, guiEvent=event)._process()
def _on_mouse_wheel(self, event):
"""Translate mouse wheel events into matplotlib events"""
x, y = self._mpl_coords(event)
# Convert delta/rotation/rate into a floating point step size
step = event.LinesPerAction * event.WheelRotation / event.WheelDelta
# Done handling event
event.Skip()
# Mac gives two events for every wheel event; skip every second one.
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
MouseEvent("scroll_event", self, x, y, step=step,
modifiers=self._mpl_modifiers(event),
guiEvent=event)._process()
def _on_motion(self, event):
"""Start measuring on an axis."""
event.Skip()
MouseEvent("motion_notify_event", self,
*self._mpl_coords(event),
buttons=self._mpl_buttons(),
modifiers=self._mpl_modifiers(event),
guiEvent=event)._process()
def _on_enter(self, event):
"""Mouse has entered the window."""
event.Skip()
LocationEvent("figure_enter_event", self,
*self._mpl_coords(event),
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
def _on_leave(self, event):
"""Mouse has left the window."""
event.Skip()
LocationEvent("figure_leave_event", self,
*self._mpl_coords(event),
modifiers=self._mpl_modifiers(),
guiEvent=event)._process()
|
_FigureCanvasWxBase
|
python
|
keon__algorithms
|
algorithms/tree/avl/avl.py
|
{
"start": 58,
"end": 3476
}
|
class ____(object):
"""
An avl tree.
"""
def __init__(self):
# Root node of the tree.
self.node = None
self.height = -1
self.balance = 0
def insert(self, key):
"""
Insert new key into node
"""
# Create new node
node = TreeNode(key)
if not self.node:
self.node = node
self.node.left = AvlTree()
self.node.right = AvlTree()
elif key < self.node.val:
self.node.left.insert(key)
elif key > self.node.val:
self.node.right.insert(key)
self.re_balance()
def re_balance(self):
"""
Rebalance the tree after inserting or deleting a node.
"""
self.update_heights(recursive=False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.rotate_left()
self.update_heights()
self.update_balances()
self.rotate_right()
self.update_heights()
self.update_balances()
if self.balance < -1:
if self.node.right.balance > 0:
self.node.right.rotate_right()
self.update_heights()
self.update_balances()
self.rotate_left()
self.update_heights()
self.update_balances()
def update_heights(self, recursive=True):
"""
Update tree height
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_heights()
if self.node.right:
self.node.right.update_heights()
self.height = 1 + max(self.node.left.height,
self.node.right.height)
else:
self.height = -1
def update_balances(self, recursive=True):
"""
Calculate tree balance factor
"""
if self.node:
if recursive:
if self.node.left:
self.node.left.update_balances()
if self.node.right:
self.node.right.update_balances()
self.balance = self.node.left.height - self.node.right.height
else:
self.balance = 0
def rotate_right(self):
"""
Right rotation
"""
new_root = self.node.left.node
new_left_sub = new_root.right.node
old_root = self.node
self.node = new_root
old_root.left.node = new_left_sub
new_root.right.node = old_root
def rotate_left(self):
"""
Left rotation
"""
new_root = self.node.right.node
new_left_sub = new_root.left.node
old_root = self.node
self.node = new_root
old_root.right.node = new_left_sub
new_root.left.node = old_root
def in_order_traverse(self):
"""
In-order traversal of the tree
"""
result = []
if not self.node:
return result
result.extend(self.node.left.in_order_traverse())
result.append(self.node.key)
result.extend(self.node.right.in_order_traverse())
return result
|
AvlTree
|
python
|
walkccc__LeetCode
|
solutions/2538. Difference Between Maximum and Minimum Price Sum/2538.py
|
{
"start": 0,
"end": 1448
}
|
class ____:
def maxOutput(self, n: int, edges: list[list[int]], price: list[int]) -> int:
ans = 0
tree = [[] for _ in range(n)]
maxSums = [0] * n  # maxSums[i] := the maximum sum of the path rooted at i
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
def maxSum(u: int, prev: int) -> int:
maxChildSum = 0
for v in tree[u]:
if v != prev:
maxChildSum = max(maxChildSum, maxSum(v, u))
maxSums[u] = price[u] + maxChildSum
return maxSums[u]
# Precalculate `maxSums`.
maxSum(0, -1)
def reroot(u: int, prev: int, parentSum: int) -> None:
nonlocal ans
# Get the top two subtree sums and the top one node index.
maxSubtreeSum1 = 0
maxSubtreeSum2 = 0
maxNode = -1
for v in tree[u]:
if v == prev:
continue
if maxSums[v] > maxSubtreeSum1:
maxSubtreeSum2 = maxSubtreeSum1
maxSubtreeSum1 = maxSums[v]
maxNode = v
elif maxSums[v] > maxSubtreeSum2:
maxSubtreeSum2 = maxSums[v]
if len(tree[u]) == 1:
ans = max(ans, parentSum, maxSubtreeSum1)
for v in tree[u]:
if v == prev:
continue
nextParentSum = (
price[u] + max(parentSum, maxSubtreeSum2) if v == maxNode else
price[u] + max(parentSum, maxSubtreeSum1))
reroot(v, u, nextParentSum)
reroot(0, -1, 0)
return ans
|
Solution
|
python
|
django__django
|
tests/postgres_tests/test_array.py
|
{
"start": 37442,
"end": 38585
}
|
class ____(PostgreSQLSimpleTestCase):
field_values = [["Django", "Python", None], ["Джанго", "פייתון", None, "król"]]
@staticmethod
def create_json_data(array_field_value):
fields = {"field": json.dumps(array_field_value, ensure_ascii=False)}
return json.dumps(
[{"model": "postgres_tests.chararraymodel", "pk": None, "fields": fields}]
)
def test_encode(self):
for field_value in self.field_values:
with self.subTest(field_value=field_value):
instance = CharArrayModel(field=field_value)
data = serializers.serialize("json", [instance])
json_data = self.create_json_data(field_value)
self.assertEqual(json.loads(data), json.loads(json_data))
def test_decode(self):
for field_value in self.field_values:
with self.subTest(field_value=field_value):
json_data = self.create_json_data(field_value)
instance = list(serializers.deserialize("json", json_data))[0].object
self.assertEqual(instance.field, field_value)
|
TestStringSerialization
|
python
|
FactoryBoy__factory_boy
|
examples/django_demo/generic_foreignkey/factories.py
|
{
"start": 285,
"end": 400
}
|
class ____(factory.django.DjangoModelFactory):
name = 'group'
class Meta:
model = Group
|
GroupFactory
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 19550,
"end": 20497
}
|
class ____(CondaError):
def __init__(
self,
url: str,
target_full_path: PathType,
checksum_type: str,
expected_checksum: str,
actual_checksum: str,
partial_download: bool = False,
):
message = dals(
"""
Conda detected a mismatch between the expected content and downloaded content
for url '%(url)s'.
download saved to: %(target_full_path)s
expected %(checksum_type)s: %(expected_checksum)s
actual %(checksum_type)s: %(actual_checksum)s
"""
)
url = maybe_unquote(url)
super().__init__(
message,
url=url,
target_full_path=target_full_path,
checksum_type=checksum_type,
expected_checksum=expected_checksum,
actual_checksum=actual_checksum,
partial_download=partial_download,
)
|
ChecksumMismatchError
|
python
|
spyder-ide__spyder
|
spyder/widgets/config.py
|
{
"start": 1542,
"end": 2483
}
|
class ____:
"""Mixin to access config options in SpyderConfigPages."""
CONF_SECTION = None
def set_option(
self,
option,
value,
section=None,
recursive_notification=False,
secure=False,
):
section = self.CONF_SECTION if section is None else section
CONF.set(
section,
option,
value,
recursive_notification=recursive_notification,
secure=secure,
)
def get_option(
self, option, default=NoDefault, section=None, secure=False
):
section = self.CONF_SECTION if section is None else section
return CONF.get(section, option, default=default, secure=secure)
def remove_option(self, option, section=None, secure=False):
section = self.CONF_SECTION if section is None else section
CONF.remove_option(section, option, secure=secure)
|
ConfigAccessMixin
|
python
|
kamyu104__LeetCode-Solutions
|
Python/lonely-pixel-ii.py
|
{
"start": 58,
"end": 839
}
|
class ____(object):
def findBlackPixel(self, picture, N):
"""
:type picture: List[List[str]]
:type N: int
:rtype: int
"""
rows, cols = [0] * len(picture), [0] * len(picture[0])
lookup = collections.defaultdict(int)
for i in xrange(len(picture)):
for j in xrange(len(picture[0])):
if picture[i][j] == 'B':
rows[i] += 1
cols[j] += 1
lookup[tuple(picture[i])] += 1
result = 0
for i in xrange(len(picture)):
if rows[i] == N and lookup[tuple(picture[i])] == N:
for j in xrange(len(picture[0])):
result += picture[i][j] == 'B' and cols[j] == N
return result
|
Solution
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0131_increase_env_var_size.py
|
{
"start": 150,
"end": 545
}
|
class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0130_addons_remove_old_fields"),
]
operations = [
migrations.AlterField(
model_name="environmentvariable",
name="value",
field=models.CharField(help_text="Value of the environment variable", max_length=48000),
),
]
|
Migration
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/backfill_policy.py
|
{
"start": 405,
"end": 3987
}
|
class ____(
NamedTuple(
"_BackfillPolicy",
[
("max_partitions_per_run", Optional[int]),
],
)
):
"""A BackfillPolicy specifies how Dagster should attempt to backfill a partitioned asset.
There are two main kinds of backfill policies: single-run and multi-run.
An asset with a single-run backfill policy will take a single run to backfill all of its
partitions at once.
An asset with a multi-run backfill policy will take multiple runs to backfill all of its
partitions. Each run will backfill a subset of the partitions. The number of partitions to
backfill in each run is controlled by the `max_partitions_per_run` parameter.
For example:
- If an asset has 100 partitions, and the `max_partitions_per_run` is set to 10, then it will
be backfilled in 10 runs; each run will backfill 10 partitions.
- If an asset has 100 partitions, and the `max_partitions_per_run` is set to 11, then it will
be backfilled in 10 runs; the first 9 runs will backfill 11 partitions each, and the last run
will backfill the remaining 1 partition.
**Warning:**
Constructing a BackfillPolicy directly is not recommended as the API is subject to change.
BackfillPolicy.single_run() and BackfillPolicy.multi_run(max_partitions_per_run=x) are the
recommended APIs.
"""
def __new__(cls, max_partitions_per_run: Optional[int] = 1):
return super().__new__(
cls,
max_partitions_per_run=max_partitions_per_run,
)
@public
@staticmethod
def single_run() -> "BackfillPolicy":
"""Creates a BackfillPolicy that executes the entire backfill in a single run."""
return BackfillPolicy(max_partitions_per_run=None)
@public
@staticmethod
def multi_run(max_partitions_per_run: int = 1) -> "BackfillPolicy":
"""Creates a BackfillPolicy that executes the entire backfill in multiple runs.
Each run will backfill [max_partitions_per_run] number of partitions.
Args:
max_partitions_per_run (Optional[int]): The maximum number of partitions in each run of
the multiple runs. Defaults to 1.
"""
return BackfillPolicy(
max_partitions_per_run=check.int_param(max_partitions_per_run, "max_partitions_per_run")
)
@property
def policy_type(self) -> BackfillPolicyType:
if self.max_partitions_per_run:
return BackfillPolicyType.MULTI_RUN
else:
return BackfillPolicyType.SINGLE_RUN
def __str__(self):
return (
"BackfillPolicy.single_run()"
if self.policy_type == BackfillPolicyType.SINGLE_RUN
else (f"BackfillPolicy.multi_run(max_partitions_per_run={self.max_partitions_per_run})")
)
# In situations where multiple backfill policies are specified, call this to resolve a canonical
# policy, which is the policy with the minimum max_partitions_per_run.
def resolve_backfill_policy(
backfill_policies: Iterable[Optional[BackfillPolicy]],
) -> BackfillPolicy:
policy = next(iter(sorted(backfill_policies, key=_backfill_policy_sort_key)), None)
with disable_dagster_warnings():
return policy or BackfillPolicy.multi_run(1)
def _backfill_policy_sort_key(bp: Optional[BackfillPolicy]) -> float:
if bp is None: # equivalent to max_partitions_per_run=1
return 1
elif bp.max_partitions_per_run is None:
return float("inf")
else:
return bp.max_partitions_per_run
|
BackfillPolicy
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/qbatchnorm_test.py
|
{
"start": 1611,
"end": 2513
}
|
class ____(QBatchNormBenchmark):
def _init(self, M, N, K, device):
self.set_module_name("QBatchNorm2d")
# Note: quantized implementation requires rank 4, which is why we
# add a 1 as the last dimension
self.input_one = torch.rand(
M, N, K, 1, device=device, requires_grad=self.auto_set()
)
def forward(
self,
q_input_one,
weight,
bias,
mean,
var,
eps: float,
Y_scale: float,
Y_zero_point: int,
):
return torch.ops.quantized.batch_norm2d(
q_input_one, weight, bias, mean, var, eps, Y_scale, Y_zero_point
)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm1dBenchmark)
op_bench.generate_pt_test(batchnorm_configs_short, QBatchNorm2dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
QBatchNorm2dBenchmark
|
python
|
pytorch__pytorch
|
test/distributed/test_c10d_pypg.py
|
{
"start": 1084,
"end": 2418
}
|
class ____(dist.ProcessGroup):
"""
This PG only supports world_size of 1
"""
def __init__(self, rank, world, use_wrapper):
super().__init__(rank, world)
assert rank == 0
assert world == 1
self._rank = rank
self._world = world
self.wait_count = 0
self.get_future_count = 0
self.use_wrapper = use_wrapper
self._work = []
def broadcast(self, tensor_list, opts):
if self.use_wrapper:
return create_work(tensor_list)
res = MyWork(tensor_list, self)
self._work.append(res)
return res
def allgather(self, output_tensors, input_tensor, opts):
for o, i in zip(output_tensors[0], input_tensor):
o.copy_(i)
if self.use_wrapper:
return create_work(output_tensors)
res = MyWork(output_tensors, self)
self._work.append(res)
return res
def allreduce(self, tensors, opts):
if self.use_wrapper:
return create_work(tensors)
res = MyWork(tensors, self)
self._work.append(res)
return res
def getSize(self):
return self._world
def getBackendName(self):
return "lonely-pg"
def __repr__(self):
return f"PLG w:{self._world} r:{self._rank}"
|
LonelyRankProcessGroup
|
python
|
pandas-dev__pandas
|
pandas/tests/reshape/concat/test_datetimes.py
|
{
"start": 17348,
"end": 21118
}
|
class ____:
def test_concat_period_series(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
def test_concat_period_multiple_freq_series(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
def test_concat_period_other_series(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M"))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
def test_concat_period_other_series2(self):
# non-period
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"]))
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
def test_concat_period_other_series3(self):
x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
y = Series(["A", "B"])
expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == "object"
def test_concat_timedelta64_block():
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
tm.assert_frame_equal(result.iloc[:10], df, check_index_type=False)
tm.assert_frame_equal(result.iloc[10:], df, check_index_type=False)
def test_concat_multiindex_datetime_nat():
# GH#44900
left = DataFrame({"a": 1}, index=MultiIndex.from_tuples([(1, pd.NaT)]))
right = DataFrame(
{"b": 2}, index=MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
)
result = concat([left, right], axis="columns")
expected = DataFrame(
{"a": [1.0, np.nan], "b": 2}, MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
)
tm.assert_frame_equal(result, expected)
def test_concat_float_datetime64():
# GH#32934
df_time = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")})
df_float = DataFrame({"A": pd.array([1.0], dtype="float64")})
expected = DataFrame(
{
"A": [
pd.array(["2000"], dtype="datetime64[ns]")[0],
pd.array([1.0], dtype="float64")[0],
]
},
index=[0, 0],
)
result = concat([df_time, df_float])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"A": pd.array([], dtype="object")})
result = concat([df_time.iloc[:0], df_float.iloc[:0]])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"A": pd.array([1.0], dtype="object")})
result = concat([df_time.iloc[:0], df_float])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}).astype(
object
)
result = concat([df_time, df_float.iloc[:0]])
tm.assert_frame_equal(result, expected)
|
TestPeriodConcat
|
python
|
getsentry__sentry
|
tests/sentry/users/api/endpoints/test_user_identity_details.py
|
{
"start": 224,
"end": 923
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-user-identity-details"
method = "delete"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def test_simple(self) -> None:
auth_provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
auth_identity = AuthIdentity.objects.create(
auth_provider=auth_provider,
ident=self.user.email,
user=self.user,
)
self.get_success_response(self.user.id, auth_identity.id, status_code=204)
assert not AuthIdentity.objects.filter(id=auth_identity.id).exists()
|
DeleteUserIdentityTest
|
python
|
django-mptt__django-mptt
|
tests/myapp/models.py
|
{
"start": 3923,
"end": 4272
}
|
class ____(MPTTModel):
name = models.CharField(max_length=50)
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
# just testing it's actually possible to override the tree manager
objects = CustomTreeManager()
def __str__(self):
return self.name
|
Person
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py
|
{
"start": 16916,
"end": 19496
}
|
class ____(Metafield):
"""
{
products(query: "updated_at:>='2023-01-08T00:00:00+00:00' AND updated_at:<='2024-08-02T15:12:41.689153+00:00'", sortKey: UPDATED_AT) {
edges {
node {
__typename
id
product_updated_at: updatedAt
media {
edges {
node {
__typename
id
... on MediaImage {
metafields {
edges {
node {
__typename
id
namespace
value
key
description
createdAt
updatedAt
type
}
}
}
}
}
}
}
}
}
}
}
"""
type = MetafieldType.PRODUCT_IMAGES
record_composition = {
"new_record": "Product",
"record_components": ["Metafield"],
}
@property
def query_nodes(self) -> List[Field]:
"""
This is the override for the default `query_nodes` method,
because the usual way of retrieving the metafields for `product images` was suddenly deprecated
for `2024-10`, but the changes are reflected in `2024-04` as well, starting `2024-08-01T00:06:44`.
More info here:
https://shopify.dev/docs/api/release-notes/2024-04#productimage-value-removed
"""
# define metafield node
metafield_node = self.get_edge_node("metafields", self.metafield_fields)
media_fields: List[Field] = ["__typename", "id", InlineFragment(type="MediaImage", fields=[metafield_node])]
media_node = self.get_edge_node("media", media_fields)
fields: List[Field] = ["__typename", "id", media_node]
fields = self.inject_parent_cursor_field(fields)
return fields
|
MetafieldProductImage
|
python
|
davidhalter__parso
|
parso/python/diff.py
|
{
"start": 22628,
"end": 34206
}
|
class ____:
def __init__(self, module):
self._base_node = _NodesTreeNode(module)
self._working_stack = [self._base_node]
self._module = module
self._prefix_remainder = ''
self.prefix = ''
self.indents = [0]
@property
def parsed_until_line(self):
return self._working_stack[-1].get_last_line(self.prefix)
def _update_insertion_node(self, indentation):
for node in reversed(list(self._working_stack)):
if node.indentation < indentation or node is self._working_stack[0]:
return node
self._working_stack.pop()
def add_parsed_nodes(self, tree_nodes, keyword_token_indents):
old_prefix = self.prefix
tree_nodes = self._remove_endmarker(tree_nodes)
if not tree_nodes:
self.prefix = old_prefix + self.prefix
return
assert tree_nodes[0].type != 'newline'
node = self._update_insertion_node(tree_nodes[0].start_pos[1])
assert node.tree_node.type in ('suite', 'file_input')
node.add_tree_nodes(old_prefix, tree_nodes)
# tos = Top of stack
self._update_parsed_node_tos(tree_nodes[-1], keyword_token_indents)
def _update_parsed_node_tos(self, tree_node, keyword_token_indents):
if tree_node.type == 'suite':
def_leaf = tree_node.parent.children[0]
new_tos = _NodesTreeNode(
tree_node,
indentation=keyword_token_indents[def_leaf.start_pos][-1],
)
new_tos.add_tree_nodes('', list(tree_node.children))
self._working_stack[-1].add_child_node(new_tos)
self._working_stack.append(new_tos)
self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)
elif _func_or_class_has_suite(tree_node):
self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)
def _remove_endmarker(self, tree_nodes):
"""
Helps clean up the tree nodes that get inserted.
"""
last_leaf = tree_nodes[-1].get_last_leaf()
is_endmarker = last_leaf.type == 'endmarker'
self._prefix_remainder = ''
if is_endmarker:
prefix = last_leaf.prefix
separation = max(prefix.rfind('\n'), prefix.rfind('\r'))
if separation > -1:
# Remove the whitespace part of the prefix after a newline.
# That is not relevant if parentheses were opened. Always parse
# until the end of a line.
last_leaf.prefix, self._prefix_remainder = \
last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:]
self.prefix = ''
if is_endmarker:
self.prefix = last_leaf.prefix
tree_nodes = tree_nodes[:-1]
return tree_nodes
def _get_matching_indent_nodes(self, tree_nodes, is_new_suite):
# There might be a random dedent where we have to stop copying.
# Invalid indents are ok, because the parser handled that
# properly before. An invalid dedent can happen, because a few
# lines above there was an invalid indent.
node_iterator = iter(tree_nodes)
if is_new_suite:
yield next(node_iterator)
first_node = next(node_iterator)
indent = _get_indentation(first_node)
if not is_new_suite and indent not in self.indents:
return
yield first_node
for n in node_iterator:
if _get_indentation(n) != indent:
return
yield n
def copy_nodes(self, tree_nodes, until_line, line_offset):
"""
Copies tree nodes from the old parser tree.
Returns the list of tree nodes that were copied.
"""
if tree_nodes[0].type in ('error_leaf', 'error_node'):
# Avoid copying errors in the beginning. Can lead to a lot of
# issues.
return []
indentation = _get_indentation(tree_nodes[0])
old_working_stack = list(self._working_stack)
old_prefix = self.prefix
old_indents = self.indents
self.indents = [i for i in self.indents if i <= indentation]
self._update_insertion_node(indentation)
new_nodes, self._working_stack, self.prefix, added_indents = self._copy_nodes(
list(self._working_stack),
tree_nodes,
until_line,
line_offset,
self.prefix,
)
if new_nodes:
self.indents += added_indents
else:
self._working_stack = old_working_stack
self.prefix = old_prefix
self.indents = old_indents
return new_nodes
def _copy_nodes(self, working_stack, nodes, until_line, line_offset,
prefix='', is_nested=False):
new_nodes = []
added_indents = []
nodes = list(self._get_matching_indent_nodes(
nodes,
is_new_suite=is_nested,
))
new_prefix = ''
for node in nodes:
if node.start_pos[0] > until_line:
break
if node.type == 'endmarker':
break
if node.type == 'error_leaf' and node.token_type in ('DEDENT', 'ERROR_DEDENT'):
break
# TODO this check might take a bit of time for large files. We
# might want to change this to do more intelligent guessing or
# binary search.
if _get_last_line(node) > until_line:
# We can split up functions and classes later.
if _func_or_class_has_suite(node):
new_nodes.append(node)
break
try:
c = node.children
except AttributeError:
pass
else:
# This case basically appears with error recovery of one line
# suites like `def foo(): bar.-`. In this case we might not
# include a newline in the statement and we need to take care
# of that.
n = node
if n.type == 'decorated':
n = n.children[-1]
if n.type in ('async_funcdef', 'async_stmt'):
n = n.children[-1]
if n.type in ('classdef', 'funcdef'):
suite_node = n.children[-1]
else:
suite_node = c[-1]
if suite_node.type in ('error_leaf', 'error_node'):
break
new_nodes.append(node)
# Pop error nodes at the end from the list
if new_nodes:
while new_nodes:
last_node = new_nodes[-1]
if (last_node.type in ('error_leaf', 'error_node')
or _is_flow_node(new_nodes[-1])):
# Error leafs/nodes don't have a defined start/end. Error
# nodes might not end with a newline (e.g. if there's an
# open `(`). Therefore ignore all of them unless they are
# succeeded with valid parser state.
# If we copy flows at the end, they might be continued
# after the copy limit (in the new parser).
# In this while loop we try to remove until we find a newline.
new_prefix = ''
new_nodes.pop()
while new_nodes:
last_node = new_nodes[-1]
if last_node.get_last_leaf().type == 'newline':
break
new_nodes.pop()
continue
if len(new_nodes) > 1 and new_nodes[-2].type == 'error_node':
# The problem here is that Parso error recovery sometimes
# influences nodes before this node.
# Since the new last node is an error node this will get
# cleaned up in the next while iteration.
new_nodes.pop()
continue
break
if not new_nodes:
return [], working_stack, prefix, added_indents
tos = working_stack[-1]
last_node = new_nodes[-1]
had_valid_suite_last = False
# Pop incomplete suites from the list
if _func_or_class_has_suite(last_node):
suite = last_node
while suite.type != 'suite':
suite = suite.children[-1]
indent = _get_suite_indentation(suite)
added_indents.append(indent)
suite_tos = _NodesTreeNode(suite, indentation=_get_indentation(last_node))
# Don't need to pass line_offset here, it's already done by the
# parent.
suite_nodes, new_working_stack, new_prefix, ai = self._copy_nodes(
working_stack + [suite_tos], suite.children, until_line, line_offset,
is_nested=True,
)
added_indents += ai
if len(suite_nodes) < 2:
# A suite only with newline is not valid.
new_nodes.pop()
new_prefix = ''
else:
assert new_nodes
tos.add_child_node(suite_tos)
working_stack = new_working_stack
had_valid_suite_last = True
if new_nodes:
if not _ends_with_newline(new_nodes[-1].get_last_leaf()) and not had_valid_suite_last:
p = new_nodes[-1].get_next_leaf().prefix
# We are not allowed to remove the newline at the end of the
# line, otherwise it's going to be missing. This happens e.g.
# if a bracket is around before that moves newlines to
# prefixes.
new_prefix = split_lines(p, keepends=True)[0]
if had_valid_suite_last:
last = new_nodes[-1]
if last.type == 'decorated':
last = last.children[-1]
if last.type in ('async_funcdef', 'async_stmt'):
last = last.children[-1]
last_line_offset_leaf = last.children[-2].get_last_leaf()
assert last_line_offset_leaf == ':'
else:
last_line_offset_leaf = new_nodes[-1].get_last_leaf()
tos.add_tree_nodes(
prefix, new_nodes, line_offset, last_line_offset_leaf,
)
prefix = new_prefix
self._prefix_remainder = ''
return new_nodes, working_stack, prefix, added_indents
def close(self):
self._base_node.finish()
# Add an endmarker.
try:
last_leaf = self._module.get_last_leaf()
except IndexError:
end_pos = [1, 0]
else:
last_leaf = _skip_dedent_error_leaves(last_leaf)
end_pos = list(last_leaf.end_pos)
lines = split_lines(self.prefix)
assert len(lines) > 0
if len(lines) == 1:
if lines[0].startswith(BOM_UTF8_STRING) and end_pos == [1, 0]:
end_pos[1] -= 1
end_pos[1] += len(lines[0])
else:
end_pos[0] += len(lines) - 1
end_pos[1] = len(lines[-1])
endmarker = EndMarker('', tuple(end_pos), self.prefix + self._prefix_remainder)
endmarker.parent = self._module
self._module.children.append(endmarker)
|
_NodesTree
|
python
|
ApeWorX__ape
|
src/ape/exceptions.py
|
{
"start": 14835,
"end": 14932
}
|
class ____(ApeException):
"""
Raised when problems occur in a project.
"""
|
ProjectError
|
python
|
crytic__slither
|
slither/core/expressions/assignment_operation.py
|
{
"start": 2899,
"end": 4176
}
|
class ____(Expression):
def __init__(
self,
left_expression: Expression,
right_expression: Expression,
expression_type: AssignmentOperationType,
expression_return_type: Optional["Type"],
) -> None:
assert isinstance(left_expression, Expression)
assert isinstance(right_expression, Expression)
super().__init__()
left_expression.set_lvalue()
self._expressions = [left_expression, right_expression]
self._type: AssignmentOperationType = expression_type
self._expression_return_type: Optional["Type"] = expression_return_type
@property
def expressions(self) -> List[Expression]:
return self._expressions
@property
def expression_return_type(self) -> Optional["Type"]:
return self._expression_return_type
@property
def expression_left(self) -> Expression:
return self._expressions[0]
@property
def expression_right(self) -> Expression:
return self._expressions[1]
@property
def type(self) -> Optional["AssignmentOperationType"]:
return self._type
def __str__(self) -> str:
return str(self.expression_left) + " " + str(self.type) + " " + str(self.expression_right)
|
AssignmentOperation
|
python
|
scrapy__scrapy
|
tests/test_spidermiddleware.py
|
{
"start": 2270,
"end": 2789
}
|
class ____(TestSpiderMiddleware):
"""Invalid return value for process_spider_output method"""
@deferred_f_from_coro_f
async def test_invalid_process_spider_output(self):
class InvalidProcessSpiderOutputMiddleware:
def process_spider_output(self, response, result):
return 1
self.mwman._add_middleware(InvalidProcessSpiderOutputMiddleware())
with pytest.raises(_InvalidOutput):
await self._scrape_response()
|
TestProcessSpiderOutputInvalidOutput
|
python
|
neetcode-gh__leetcode
|
python/0048-rotate-image.py
|
{
"start": 0,
"end": 832
}
|
class ____:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
l, r = 0, len(matrix) - 1
while l < r:
for i in range(r - l):
top, bottom = l, r
# save the topleft
topLeft = matrix[top][l + i]
# move bottom left into top left
matrix[top][l + i] = matrix[bottom - i][l]
# move bottom right into bottom left
matrix[bottom - i][l] = matrix[bottom][r - i]
# move top right into bottom right
matrix[bottom][r - i] = matrix[top + i][r]
# move top left into top right
matrix[top + i][r] = topLeft
r -= 1
l += 1
|
Solution
|
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/providers/weibo/tests.py
|
{
"start": 238,
"end": 1358
}
|
class ____(OAuth2TestsMixin, TestCase):
provider_id = WeiboProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{"bi_followers_count": 0,
"domain": "", "avatar_large": "http://tp3.sinaimg.cn/3195025850/180/0/0",
"block_word": 0, "star": 0, "id": 3195025850, "city": "1", "verified": false,
"follow_me": false, "verified_reason": "", "followers_count": 6,
"location": "\u5317\u4eac \u4e1c\u57ce\u533a", "mbtype": 0,
"profile_url": "u/3195025850", "province": "11", "statuses_count": 0,
"description": "", "friends_count": 0, "online_status": 0, "mbrank": 0,
"idstr": "3195025850",
"profile_image_url": "http://tp3.sinaimg.cn/3195025850/50/0/0",
"allow_all_act_msg": false, "allow_all_comment": true, "geo_enabled": true,
"name": "pennersr", "lang": "zh-cn", "weihao": "", "remark": "",
"favourites_count": 0, "screen_name": "pennersr", "url": "", "gender": "f",
"created_at": "Tue Feb 19 19:43:39 +0800 2013", "verified_type": -1,
"following": false}
""",
)
def get_expected_to_str(self):
return "pennersr"
|
WeiboTests
|
python
|
google__jax
|
jaxlib/weakref_lru_cache_test.py
|
{
"start": 793,
"end": 6777
}
|
class ____(absltest.TestCase):
def testMultiThreaded(self):
insert_evs = [threading.Event() for _ in range(2)]
insert_evs_i = 0
class WRKey:
pass
class ClashingKey:
def __eq__(self, other):
return False
def __hash__(self):
return 333 # induce maximal caching problems.
class GilReleasingCacheKey:
def __eq__(self, other):
nonlocal insert_evs_i
if isinstance(other, GilReleasingCacheKey) and insert_evs_i < len(
insert_evs
):
insert_evs[insert_evs_i].set()
insert_evs_i += 1
time.sleep(0.01)
return False
def __hash__(self):
return 333 # induce maximal caching problems.
def CacheFn(obj, gil_releasing_cache_key):
del obj
del gil_releasing_cache_key
return None
cache = weakref_lru_cache.weakref_lru_cache(lambda: None, CacheFn, 2048)
wrkey = WRKey()
def Body():
for insert_ev in insert_evs:
insert_ev.wait()
for _ in range(20):
cache(wrkey, ClashingKey())
t = threading.Thread(target=Body)
t.start()
for _ in range(3):
cache(wrkey, GilReleasingCacheKey())
t.join()
def testAnotherMultiThreaded(self):
num_workers = 5
barrier = threading.Barrier(num_workers)
cache = weakref_lru_cache.weakref_lru_cache(
lambda: None, lambda x, y: y, 2048
)
class WRKey:
pass
def WorkerAddToCache():
barrier.wait()
wrkey = WRKey()
for i in range(10):
cache(wrkey, i)
def WorkerCleanCache():
barrier.wait()
for _ in range(10):
cache.cache_clear()
workers = [
threading.Thread(target=WorkerAddToCache)
for _ in range(num_workers - 1)
] + [threading.Thread(target=WorkerCleanCache)]
for t in workers:
t.start()
for t in workers:
t.join()
def testKwargsDictOrder(self):
miss_id = 0
class WRKey:
pass
def CacheFn(obj, kwkey1, kwkey2):
del obj, kwkey1, kwkey2
nonlocal miss_id
miss_id += 1
return miss_id
cache = weakref_lru_cache.weakref_lru_cache(lambda: None, CacheFn, 4)
wrkey = WRKey()
self.assertEqual(cache(wrkey, kwkey1="a", kwkey2="b"), 1)
self.assertEqual(cache(wrkey, kwkey1="b", kwkey2="a"), 2)
self.assertEqual(cache(wrkey, kwkey2="b", kwkey1="a"), 1)
def testGetKeys(self):
def CacheFn(obj, arg):
del obj
return arg + "extra"
cache = weakref_lru_cache.weakref_lru_cache(lambda: None, CacheFn, 4)
class WRKey:
pass
wrkey = WRKey()
self.assertEmpty(cache.cache_keys())
cache(wrkey, "arg1")
cache(wrkey, "arg2")
self.assertLen(cache.cache_keys(), 2)
def testNonWeakreferenceableKey(self):
class NonWRKey:
__slots__ = ()
non_wr_key = NonWRKey()
with self.assertRaises(TypeError):
weakref.ref(non_wr_key)
cache = weakref_lru_cache.weakref_lru_cache(lambda: None, lambda x: 2048)
for _ in range(100):
with self.assertRaises(TypeError):
cache(non_wr_key)
def testCrashingKey(self):
class WRKey:
pass
class CrashingKey:
# A key that raises exceptions if eq or hash is called.
def __eq__(self, other):
raise ValueError("eq")
def __hash__(self):
raise ValueError("hash")
cache = weakref_lru_cache.weakref_lru_cache(
lambda: None, lambda x, y: y, 2048
)
wrkey = WRKey()
with self.assertRaises(ValueError):
for _ in range(100):
cache(wrkey, CrashingKey())
def testPrintingStats(self):
class WRKey:
pass
cache = weakref_lru_cache.weakref_lru_cache(
lambda: None, lambda x, y: y, 2048
)
wrkey = WRKey()
for i in range(10):
cache(wrkey, i)
for i in range(5):
cache(wrkey, i)
self.assertEqual(
repr(cache.cache_info()),
"WeakrefLRUCache(hits=5, misses=10, maxsize=2048, currsize=10)",
)
def testGCKeys(self):
class WRKey:
def __init__(self, x):
self.x = x
def __eq__(self, other):
return self.x == other.x
def __hash__(self):
return hash(self.x)
cache = weakref_lru_cache.weakref_lru_cache(
lambda: None, lambda x, y: y, 2048
)
keys = [WRKey(i) for i in range(10)]
for i in range(10):
cache(keys[i], i)
# Delete some keys, to exercise the weakref callback behavior.
del keys[::2]
for key in keys:
cache(key, 7)
def testTpTraverse(self):
class WRKey:
pass
def CacheContextFn():
return None
def CallFn(x, y, *args, **kwargs):
del x, args, kwargs
return y
cache = weakref_lru_cache.weakref_lru_cache(CacheContextFn, CallFn, 2048)
keys = [WRKey() for _ in range(10)]
values = [str(i) for i in range(10)]
args = [str(i) for i in range(10)]
kwargs = {"a": "b"}
for key, value in zip(keys, values):
cache(key, value, *args, **kwargs)
expected_refs = (
[
CacheContextFn,
CallFn,
weakref_lru_cache.WeakrefLRUCache,
kwargs,
]
+ [weakref.getweakrefs(key)[0] for key in keys]
+ values
+ args
)
# Can't use assertContainsSubset because it doesn't support kwargs since
# dicts aren't hashable.
for ref in expected_refs:
self.assertIn(ref, gc.get_referents(cache))
def testReentrantKey(self):
cache = weakref_lru_cache.weakref_lru_cache(
lambda: None, lambda x, y: y, 2048
)
class WRKey:
pass
class ReentrantKey:
def __eq__(self, other):
cache(WRKey(), None)
return False
def __hash__(self):
return 42
wrkey = WRKey()
with self.assertRaisesRegex(RecursionError, "Reentrant call"):
for _ in range(100):
cache(wrkey, ReentrantKey())
if __name__ == "__main__":
absltest.main()
|
WeakrefLRUCacheTest
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/textfmts.py
|
{
"start": 3716,
"end": 7000
}
|
class ____(RegexLexer):
"""
Lexer for HTTP sessions.
.. versionadded:: 1.5
"""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def get_tokens_unprocessed(self, text, stack=('root',)):
"""Reset the content-type state."""
self.content_type = None
return RegexLexer.get_tokens_unprocessed(self, text, stack)
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
possible_lexer_mimetypes = [content_type]
if '+' in content_type:
# application/calendar+xml can be treated as application/xml
# if there's not a better match.
general_type = re.sub(r'^(.*)/.*\+(.*)$', r'\1/\2',
content_type)
possible_lexer_mimetypes.append(general_type)
for i in possible_lexer_mimetypes:
try:
lexer = get_lexer_for_mimetype(i)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
tokens = {
'root': [
(r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)'
r'(HTTP)(/)(1\.[01])(\r?\n|\Z)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|\Z)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|\Z)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
def analyse_text(text):
return text.startswith(('GET /', 'POST /', 'PUT /', 'DELETE /', 'HEAD /',
'OPTIONS /', 'TRACE /', 'PATCH /'))
|
HttpLexer
|
python
|
pypa__warehouse
|
warehouse/oidc/forms/gitlab.py
|
{
"start": 4455,
"end": 4909
}
|
class ____(GitLabPublisherBase, PendingPublisherMixin):
__params__ = GitLabPublisherBase.__params__ + ["project_name"]
def __init__(self, *args, route_url, check_project_name, user, **kwargs):
super().__init__(*args, **kwargs)
self._route_url = route_url
self._check_project_name = check_project_name
self._user = user
@property
def provider(self) -> str:
return "gitlab"
|
PendingGitLabPublisherForm
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/cli_logger.py
|
{
"start": 1919,
"end": 6746
}
|
class ____:
_proxy_allowlist = [
"disable",
"reset",
"bold",
"italic",
"underlined",
# used instead of `gray` as `dimmed` adapts to
# both light and dark themes
"dimmed",
"dodgerBlue", # group
"limeGreen", # success
"red", # error
"orange", # warning
"skyBlue", # label
"magenta", # syntax highlighting key words and symbols
"yellow", # syntax highlighting strings
]
def __getattr__(self, name):
res = getattr(_cf, name)
if callable(res) and name not in _ColorfulProxy._proxy_allowlist:
raise ValueError(
"Usage of the colorful method '" + name + "' is forbidden "
"by the proxy to keep a consistent color scheme. "
"Check `cli_logger.py` for allowed methods"
)
return res
cf = _ColorfulProxy()
colorama.init(strip=False)
def _external_caller_info():
"""Get the info from the caller frame.
Used to override the logging function and line number with the correct
ones. See the comment on _patched_makeRecord for more info.
"""
frame = inspect.currentframe()
caller = frame
levels = 0
while caller.f_code.co_filename == __file__:
caller = caller.f_back
levels += 1
return {
"lineno": caller.f_lineno,
"filename": os.path.basename(caller.f_code.co_filename),
}
def _format_msg(
msg: str,
*args: Any,
no_format: bool = None,
_tags: Dict[str, Any] = None,
_numbered: Tuple[str, int, int] = None,
**kwargs: Any,
):
"""Formats a message for printing.
Renders `msg` using the built-in `str.format` and the passed-in
`*args` and `**kwargs`.
Args:
*args (Any): `.format` arguments for `msg`.
no_format (bool):
If `no_format` is `True`,
`.format` will not be called on the message.
Useful if the output is user-provided or may otherwise
contain an unexpected formatting string (e.g. "{}").
_tags (Dict[str, Any]):
key-value pairs to display at the end of
the message in square brackets.
If a tag is set to `True`, it is printed without the value,
the presence of the tag treated as a "flag".
E.g. `_format_msg("hello", _tags=dict(from=mom, signed=True))`
`hello [from=Mom, signed]`
_numbered (Tuple[str, int, int]):
`(brackets, i, n)`
The `brackets` string is composed of two "bracket" characters,
`i` is the index, `n` is the total.
The string `{i}/{n}` surrounded by the "brackets" is
prepended to the message.
This is used to number steps in a procedure, with different
brackets specifying different major tasks.
E.g. `_format_msg("hello", _numbered=("[]", 0, 5))`
`[0/5] hello`
Returns:
The formatted message.
"""
if isinstance(msg, str) or isinstance(msg, ColorfulString):
tags_str = ""
if _tags is not None:
tags_list = []
for k, v in _tags.items():
if v is True:
tags_list += [k]
continue
if v is False:
continue
tags_list += [k + "=" + v]
if tags_list:
tags_str = cf.reset(cf.dimmed(" [{}]".format(", ".join(tags_list))))
numbering_str = ""
if _numbered is not None:
chars, i, n = _numbered
numbering_str = cf.dimmed(chars[0] + str(i) + "/" + str(n) + chars[1]) + " "
if no_format:
# todo: throw if given args/kwargs?
return numbering_str + msg + tags_str
return numbering_str + msg.format(*args, **kwargs) + tags_str
if kwargs:
raise ValueError("We do not support printing kwargs yet.")
res = [msg, *args]
res = [str(x) for x in res]
return ", ".join(res)
# TODO: come up with a plan to unify logging.
# formatter = logging.Formatter(
# # TODO(maximsmol): figure out the required log level padding
# # width automatically
# fmt="[{asctime}] {levelname:6} {message}",
# datefmt="%x %X",
# # We want alignment on our level names
# style="{")
def _isatty():
"""More robust check for interactive terminal/tty."""
try:
# https://stackoverflow.com/questions/6108330/
# checking-for-interactive-shell-in-a-python-script
return sys.__stdin__.isatty()
except Exception:
# sometimes this can fail due to closed output
# either way, no-tty is generally safe fallback.
return False
|
_ColorfulProxy
|
python
|
apache__airflow
|
providers/sftp/tests/unit/sftp/hooks/test_sftp.py
|
{
"start": 27840,
"end": 28510
}
|
class ____:
def __init__(self):
pass
async def listdir(self, path: str):
if path == "/path/does_not/exist/":
raise SFTPNoSuchFile("File does not exist")
return ["..", ".", "file"]
async def readdir(self, path: str):
if path == "/path/does_not/exist/":
raise SFTPNoSuchFile("File does not exist")
return [SFTPName(".."), SFTPName("."), SFTPName("file")]
async def stat(self, path: str):
if path == "/path/does_not/exist/":
raise SFTPNoSuchFile("No files matching")
sftp_obj = SFTPAttrs()
sftp_obj.mtime = 1667302566
return sftp_obj
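    # Hedged usage sketch (illustrative, not part of the original tests): the
    # fake mirrors asyncssh's SFTPClient surface, so hook tests can await it:
    #   attrs = await MockSFTPClient().stat("/tmp/some_file")   # attrs.mtime == 1667302566
    #   await MockSFTPClient().stat("/path/does_not/exist/")    # raises SFTPNoSuchFile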
|
MockSFTPClient
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
|
{
"start": 37233,
"end": 51435
}
|
class ____(PForTestCase):
def test_create_outside_and_write(self):
handle1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
def loop_fn(i):
h1 = list_ops.tensor_list_set_item(handle1, 0, i)
h1 = list_ops.tensor_list_set_item(h1, 1, 1)
h2 = list_ops.tensor_list_set_item(handle2, 0, 1)
return (list_ops.tensor_list_stack(h1, dtypes.int32),
list_ops.tensor_list_stack(h2, dtypes.int32))
self._test_loop_fn(loop_fn, 3)
def _make_graph_def(self, text):
ret = graph_pb2.GraphDef()
text_format.Parse(text, ret)
return ret
def test_no_fallback_with_internal_stacking(self):
# Create an op (really a function) that pfor definitely does not have a
# converter for. Assumes pfor does not start looking up function definitions
# for op-type-is-function-name calls.
@def_function.function
def opaque_list_fetch(x):
array_ops.identity(x)
return list_ops.tensor_list_get_item(x, 0, dtypes.int32)
external_handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
opaque_list_fetch_concrete = opaque_list_fetch.get_concrete_function(
external_handle)
opaque_list_fetch_name = opaque_list_fetch_concrete.name
def loop_fn(i):
h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
h1 = list_ops.tensor_list_set_item(h1, 0, i)
opaque_list_fetch_concrete.add_to_graph()
graph_def = self._make_graph_def("""
node { name: 'x' op: 'Placeholder'
attr { key: 'dtype' value { type: DT_FLOAT } }}
node { name: 'fn' op: '""" + opaque_list_fetch_name.decode()
+ """' input: 'x:0' }""")
return importer.import_graph_def(
graph_def,
input_map={"x:0": h1},
return_elements=["fn"],
name="import")[0].outputs[0]
with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)
def test_create_inside_and_write(self):
def loop_fn(i):
h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
h1 = list_ops.tensor_list_set_item(h1, 0, i)
h1 = list_ops.tensor_list_set_item(h1, 1, 1)
h2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
h2 = list_ops.tensor_list_set_item(h2, 0, 1)
return (list_ops.tensor_list_stack(h1, dtypes.int32),
list_ops.tensor_list_stack(h2, dtypes.int32))
self._test_loop_fn(loop_fn, 3)
def test_create_outside_and_read(self):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle = list_ops.tensor_list_set_item(handle, 0, 0)
handle = list_ops.tensor_list_set_item(handle, 1, 1)
def loop_fn(i):
return (list_ops.tensor_list_get_item(handle, i, dtypes.int32),
list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
list_ops.tensor_list_length(handle),
list_ops.tensor_list_element_shape(handle, dtypes.int32),
list_ops.tensor_list_element_shape(handle, dtypes.int64))
self._test_loop_fn(loop_fn, 2)
def test_create_outside_and_read_zero_loop_iters(self):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle = list_ops.tensor_list_set_item(handle, 0, 0)
handle = list_ops.tensor_list_set_item(handle, 1, 1)
def loop_fn(i):
return (
list_ops.tensor_list_get_item(handle, i, dtypes.int32),
list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
list_ops.tensor_list_length(handle),
list_ops.tensor_list_element_shape(handle, dtypes.int32),
list_ops.tensor_list_element_shape(handle, dtypes.int64),
)
self._test_loop_fn(loop_fn, 0)
def test_create_inside_and_read(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle = list_ops.tensor_list_set_item(handle, 0, i)
handle = list_ops.tensor_list_set_item(handle, 1, 1)
return (list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
list_ops.tensor_list_get_item(handle, i, dtypes.int32),
list_ops.tensor_list_length(handle),
list_ops.tensor_list_element_shape(handle, dtypes.int32),
list_ops.tensor_list_element_shape(handle, dtypes.int64))
self._test_loop_fn(loop_fn, 2)
def test_create_outside_and_push_back(self):
h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
def loop_fn(i):
handle = list_ops.tensor_list_push_back(h, [i, 2])
handle = list_ops.tensor_list_push_back(handle, [1, 2])
handle = list_ops.tensor_list_push_back(handle, [1, 2])
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_create_inside_and_push_back(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_push_back(handle, [i, 2])
handle = list_ops.tensor_list_push_back(handle, [1, 2])
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_pop_back_no_shape(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_push_back(handle, [1, 2])
handle = list_ops.tensor_list_push_back(handle, [i, 2])
handle, tensor = list_ops.tensor_list_pop_back(handle, dtypes.int32)
return tensor, list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_pop_back_no_shape_capture(self):
h = list_ops.tensor_list_reserve([2], 1, dtypes.int32)
h = list_ops.tensor_list_push_back(h, [1, 2])
def loop_fn(i):
handle, tensor = list_ops.tensor_list_pop_back(h, dtypes.int32)
handle = list_ops.tensor_list_push_back(handle, [1, i])
return tensor, list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_pop_back_with_shape(self):
@def_function.function
def loop_fn(i):
with backprop.GradientTape() as tape:
handle = list_ops.tensor_list_reserve(None, 1, dtypes.float32)
x = math_ops.cast(i, dtypes.float32)[None]
tape.watch(x)
handle = list_ops.tensor_list_push_back(handle, x)
stacked = list_ops.tensor_list_stack(handle, dtypes.float32)
list_grad = tape.gradient(stacked, x, x)
self.assertEqual("TensorListPopBack", list_grad.op.type)
return list_grad, stacked, list_grad.op.inputs[1]
self._test_loop_fn(loop_fn, 3)
def test_create_outside_and_scatter(self):
h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
def loop_fn(i):
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_create_inside_and_scatter(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_loop_variant_scatter_indices(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 10, dtypes.int32)
handle = list_ops.tensor_list_scatter(
[[1, i], [i + 1, 2]],
[i, i + 5], input_handle=handle)
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 5)
def test_loop_variant_scatter_duplicate_indices(self):
if test_util.is_gpu_available():
self.skipTest(
"Flaky in some GPU configurations due to TensorScatterNdUpdate "
"nondeterminism.")
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 10, dtypes.int32)
handle = list_ops.tensor_list_scatter(
[[1, i], [1, i + 1], [i + 2, 3]],
[i, i, i + 2], input_handle=handle)
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 5)
def test_create_outside_and_gather(self):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[2, 3]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
def loop_fn(i):
return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
list_ops.tensor_list_gather(handle, [i], dtypes.int32))
self._test_loop_fn(loop_fn, 2)
def test_create_inside_and_gather(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
list_ops.tensor_list_gather(handle, [i], dtypes.int32))
self._test_loop_fn(loop_fn, 2)
def test_create_inside_and_concat(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return gen_list_ops.tensor_list_concat_v2(
handle,
element_dtype=dtypes.int32,
element_shape=[2],
leading_dims=[])
output = pfor_control_flow_ops.pfor(loop_fn, 2)
self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
self.assertAllClose([[2, 2], [2, 2]], output[1])
def test_create_outside_and_concat(self):
h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
def loop_fn(i):
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return gen_list_ops.tensor_list_concat_v2(
handle,
element_dtype=dtypes.int32,
element_shape=[2],
leading_dims=[])
output = pfor_control_flow_ops.pfor(loop_fn, 2)
self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
self.assertAllClose([[2, 2], [2, 2]], output[1])
def test_tensor_list_from_tensor(self):
t = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
handle = list_ops.tensor_list_from_tensor(array_ops.gather(t, i), [4])
return list_ops.tensor_list_stack(handle, t.dtype)
self._test_loop_fn(loop_fn, 2)
@test_util.enable_control_flow_v2
def test_tensor_list_reserve_while_loop(self):
# Here a loop invariant TensorList is captured by a while_loop, which then
# performs loop dependent operations on it, resulting in a loop variant
# output. This forces stacking of the variant handle captured by the
# while_loop.
# We handle this particular case by forcing vectorization of
# TensorListReserve operation.
def loop_fn(i):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
_, out_handle = while_loop.while_loop(
lambda j, _: j < 2, lambda j, h:
(j + 1, list_ops.tensor_list_set_item(h, j, i)), (0, handle))
return list_ops.tensor_list_stack(out_handle, dtypes.int32)
self._test_loop_fn(loop_fn, 2)
@test_util.enable_control_flow_v2
def test_tensor_list_while_loop_stacked_cond_stacked_list(self):
def loop_fn(i):
handle = list_ops.tensor_list_from_tensor([20, 21, 22, 23, i], [])
_, out_handle = while_loop.while_loop(
lambda j, _: j < i,
lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
(0, handle))
return list_ops.tensor_list_stack(out_handle, dtypes.int32)
self._test_loop_fn(loop_fn, 5)
@test_util.enable_control_flow_v2
def test_tensor_list_while_loop_stacked_cond_stacked_list_unknown_shape(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve(None, 5, dtypes.int32)
_, handle = while_loop.while_loop(
lambda j, _: j < 5,
lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, 0)),
(0, handle))
_, out_handle = while_loop.while_loop(
lambda j, _: j < i,
lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
(0, handle))
return list_ops.tensor_list_stack(out_handle, dtypes.int32)
self._test_loop_fn(loop_fn, 5)
@test_util.enable_control_flow_v2
def test_tensor_list_while_loop_stacked_cond_unstacked_list(self):
def loop_fn(i):
handle = list_ops.tensor_list_from_tensor([20, 21, 22, 23, 24], [])
_, out_handle = while_loop.while_loop(
lambda j, _: j < i, lambda j, h:
(j + 1, list_ops.tensor_list_set_item(h, j, i)), (0, handle))
return list_ops.tensor_list_stack(out_handle, dtypes.int32)
self._test_loop_fn(loop_fn, 5)
def test_tensor_list_addn_already_stacked(self):
def loop_fn(i):
l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l1 = list_ops.tensor_list_set_item(l1, 0, i)
l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l2 = list_ops.tensor_list_set_item(l2, 1, i)
return list_ops.tensor_list_stack(math_ops.add_n([l1, l2]), dtypes.int32)
self._test_loop_fn(loop_fn, 2)
def test_tensor_list_addn_stacking_required(self):
l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l1 = list_ops.tensor_list_set_item(l1, 1, 1)
def loop_fn(i):
l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l2 = list_ops.tensor_list_set_item(l2, 1, i)
return list_ops.tensor_list_stack(
math_ops.add_n([l1, l2]), dtypes.int32)
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
|
TensorListTest
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 502616,
"end": 514983
}
|
class ____(LatLongDef):
r"""
LatLongFieldDef schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : Literal['quantitative']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/LatLongFieldDef"}
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[Literal["quantitative"]] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
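# Hedged usage sketch (illustrative; assumes the public altair API and a
# dataframe with "latitude"/"longitude" columns): LatLongFieldDef backs the
# latitude/longitude encoding channels, e.g.
#   import altair as alt
#   alt.Chart(airports).mark_circle().encode(
#       latitude="latitude:Q",
#       longitude="longitude:Q",
#   )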
|
LatLongFieldDef
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
|
{
"start": 2431,
"end": 6246
}
|
class ____(test_util.TensorFlowTestCase):
def testNonBatchMatrix(self):
matrix = [[1, 2, 3], [4, 5, 6]] # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
transposed = array_ops.matrix_transpose(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
def testConjugate(self):
m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
matrix = ops.convert_to_tensor(m)
transposed = array_ops.matrix_transpose(matrix, conjugate=True)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
def testBatchMatrix(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
batch_matrix = [matrix_0, matrix_1] # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
transposed = array_ops.matrix_transpose(batch_matrix)
self.assertEqual((2, 3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
def testNonBatchMatrixDynamicallyDefined(self):
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
matrix = constant_op.constant([[1, 2, 3], [4, 5, 6]]) # Shape (2, 3)
expected_transposed = [[1, 4], [2, 5], [3, 6]] # Shape (3, 2)
@def_function.function(input_signature=[
tensor_lib.TensorSpec(shape=None, dtype=dtypes.int32)
])
def transpose(matrix):
self.assertIs(matrix.shape.ndims, None)
return array_ops.matrix_transpose(matrix)
self.assertAllEqual(expected_transposed, transpose(matrix))
def testBatchMatrixDynamicallyDefined(self):
matrix_0 = [[1, 2, 3], [4, 5, 6]]
matrix_0_t = [[1, 4], [2, 5], [3, 6]]
matrix_1 = [[11, 22, 33], [44, 55, 66]]
matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
batch_matrix = constant_op.constant([matrix_0, matrix_1]) # Shape (2, 2, 3)
expected_transposed = [matrix_0_t, matrix_1_t] # Shape (2, 3, 2)
@def_function.function(input_signature=[
tensor_lib.TensorSpec(shape=None, dtype=dtypes.int32)
])
def transpose(matrix):
self.assertIs(matrix.shape.ndims, None)
return array_ops.matrix_transpose(matrix)
self.assertAllEqual(expected_transposed, transpose(batch_matrix))
def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
vector = [1, 2, 3]
with self.assertRaisesRegex(ValueError, "should be a "):
array_ops.matrix_transpose(vector)
def testNarrowMatrixConjugateTranspose(self):
for dtype in (dtypes.float32, dtypes.float64):
for conjugate in (True, False):
with self.subTest(complex_type=dtype, conjugate=conjugate):
vector = math_ops.complex(
constant_op.constant(0, dtype=dtype),
math_ops.range(96, dtype=dtype))
column_vector = array_ops.expand_dims(vector, axis=-1)
row_vector = array_ops.expand_dims(vector, axis=0)
narrow_matrix = array_ops.tile(column_vector, [1, 2]) # [96, 2]
expected_transposed = array_ops.tile(row_vector, [2, 1]) # [2, 96]
if conjugate:
expected_transposed = -expected_transposed
transposed = array_ops.matrix_transpose(
narrow_matrix, conjugate=conjugate)
self.assertEqual((2, 96), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed)
|
BatchMatrixTransposeTest
|
python
|
tox-dev__tox
|
src/tox/config/source/toml_tox.py
|
{
"start": 127,
"end": 180
}
|
class ____(TomlSection):
PREFIX = ()
|
TomlToxSection
|
python
|
walkccc__LeetCode
|
solutions/1062. Longest Repeating Substring/1062.py
|
{
"start": 0,
"end": 413
}
|
class ____:
def longestRepeatingSubstring(self, s: str) -> int:
n = len(s)
ans = 0
    # dp[i][j] := the length of the longest common suffix of s[0..i) and s[0..j),
    # considered only for i < j so a substring is never matched with itself
dp = [[0] * (n + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
for j in range(i + 1, n + 1):
if s[i - 1] == s[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
ans = max(ans, dp[i][j])
return ans
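# Hedged usage example (not part of the original solution): "abbaba" contains
# the repeated substrings "ab" and "ba", so the expected answer is 2.
if __name__ == "__main__":
    assert Solution().longestRepeatingSubstring("abbaba") == 2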
|
Solution
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_merge.py
|
{
"start": 28618,
"end": 31059
}
|
class ____(Merge, Blockwise):
"""Merge two dataframes with aligned partitions
This operation will directly merge partition i of the
left dataframe with partition i of the right dataframe.
The two dataframes must be shuffled or partitioned
by the merge key(s) before this operation is performed.
Single-partition dataframes will always be broadcasted.
See Also
--------
Merge
"""
is_broadcast_join = False
@functools.cached_property
def unique_partition_mapping_columns_from_shuffle(self):
result = self.left.unique_partition_mapping_columns_from_shuffle.copy()
result.update(self.right.unique_partition_mapping_columns_from_shuffle)
return result
def _divisions(self):
use_left = self.right_index or _contains_index_name(
self.right._meta, self.right_on
)
use_right = self.left_index or _contains_index_name(
self.left._meta, self.left_on
)
if use_right and self.left.npartitions == 1 and self.how in ("right", "inner"):
return self.right.divisions
elif (
use_left
and self.right.npartitions == 1
and self.how in ("inner", "left", "leftsemi")
):
return self.left.divisions
elif (
self.left.npartitions == self.right.npartitions
and self.merge_indexed_left
and self.merge_indexed_right
):
divisions = list(
unique(merge_sorted(self.left.divisions, self.right.divisions))
)
if len(divisions) == 1:
return (divisions[0], divisions[0])
if self.left.npartitions == 1 and self.right.npartitions == 1:
return (min(divisions), max(divisions))
return divisions
else:
_npartitions = max(self.left.npartitions, self.right.npartitions)
return (None,) * (_npartitions + 1)
def _lower(self):
return None
def _broadcast_dep(self, dep: Expr):
return dep.npartitions == 1
def _task(self, name: Key, index: int) -> Task:
kwargs = self.kwargs.copy()
kwargs["result_meta"] = self._meta
return Task(
name,
merge_chunk,
self._blockwise_arg(self.left, index),
self._blockwise_arg(self.right, index),
**kwargs,
)
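    # Hedged usage sketch (names are illustrative): a blockwise merge can arise
    # when both sides are already co-partitioned on the merge key, e.g. after
    # setting a sorted index on each side:
    #   left = dd.from_pandas(pdf_left, npartitions=4).set_index("key")
    #   right = dd.from_pandas(pdf_right, npartitions=4).set_index("key")
    #   joined = left.merge(right, left_index=True, right_index=True)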
|
BlockwiseMerge
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/selectable.py
|
{
"start": 56473,
"end": 56910
}
|
class ____(NamedFromClause):
"""mark a FROM clause as being able to render directly as LATERAL"""
# FromClause ->
# AliasedReturnsRows
# -> Alias only for FromClause
# -> Subquery only for SelectBase
# -> CTE only for HasCTE -> SelectBase, DML
# -> Lateral -> FromClause, but we accept SelectBase
# w/ non-deprecated coercion
# -> TableSample -> only for FromClause
|
LateralFromClause
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/tools/flatbuffer_utils_test.py
|
{
"start": 5407,
"end": 9587
}
|
class ____(test_util.TensorFlowTestCase):
def testRandomizeWeights(self):
# 1. SETUP
# Define the initial model
initial_model = test_utils.build_mock_model()
final_model = copy.deepcopy(initial_model)
# 2. INVOKE
# Invoke the randomize_weights function
flatbuffer_utils.randomize_weights(final_model)
# 3. VALIDATE
# Validate that the initial and final models are the same, except that
    # the weights in the model buffer have been modified (i.e., randomized)
# Validate the description
self.assertEqual(initial_model.description, final_model.description)
# Validate the main subgraph's name, inputs, outputs, operators and tensors
initial_subgraph = initial_model.subgraphs[0]
final_subgraph = final_model.subgraphs[0]
self.assertEqual(initial_subgraph.name, final_subgraph.name)
for i in range(len(initial_subgraph.inputs)):
self.assertEqual(initial_subgraph.inputs[i], final_subgraph.inputs[i])
for i in range(len(initial_subgraph.outputs)):
self.assertEqual(initial_subgraph.outputs[i], final_subgraph.outputs[i])
for i in range(len(initial_subgraph.operators)):
self.assertEqual(initial_subgraph.operators[i].opcodeIndex,
final_subgraph.operators[i].opcodeIndex)
initial_tensors = initial_subgraph.tensors
final_tensors = final_subgraph.tensors
for i in range(len(initial_tensors)):
self.assertEqual(initial_tensors[i].name, final_tensors[i].name)
self.assertEqual(initial_tensors[i].type, final_tensors[i].type)
self.assertEqual(initial_tensors[i].buffer, final_tensors[i].buffer)
for j in range(len(initial_tensors[i].shape)):
self.assertEqual(initial_tensors[i].shape[j], final_tensors[i].shape[j])
# Validate the first valid buffer (index 0 is always None)
initial_buffer = initial_model.buffers[1].data
final_buffer = final_model.buffers[1].data
for j in range(initial_buffer.size):
self.assertNotEqual(initial_buffer.data[j], final_buffer.data[j])
def testRandomizeSomeWeights(self):
# 1. SETUP
# Define the initial model
initial_model = test_utils.build_mock_model()
final_model = copy.deepcopy(initial_model)
# 2. INVOKE
# Invoke the randomize_weights function, but skip the first buffer
flatbuffer_utils.randomize_weights(
final_model, buffers_to_skip=[_SKIPPED_BUFFER_INDEX])
# 3. VALIDATE
# Validate that the initial and final models are the same, except that
    # the weights in the model buffer have been modified (i.e., randomized)
# Validate the description
self.assertEqual(initial_model.description, final_model.description)
# Validate the main subgraph's name, inputs, outputs, operators and tensors
initial_subgraph = initial_model.subgraphs[0]
final_subgraph = final_model.subgraphs[0]
self.assertEqual(initial_subgraph.name, final_subgraph.name)
for i, _ in enumerate(initial_subgraph.inputs):
self.assertEqual(initial_subgraph.inputs[i], final_subgraph.inputs[i])
for i, _ in enumerate(initial_subgraph.outputs):
self.assertEqual(initial_subgraph.outputs[i], final_subgraph.outputs[i])
for i, _ in enumerate(initial_subgraph.operators):
self.assertEqual(initial_subgraph.operators[i].opcodeIndex,
final_subgraph.operators[i].opcodeIndex)
initial_tensors = initial_subgraph.tensors
final_tensors = final_subgraph.tensors
for i, _ in enumerate(initial_tensors):
self.assertEqual(initial_tensors[i].name, final_tensors[i].name)
self.assertEqual(initial_tensors[i].type, final_tensors[i].type)
self.assertEqual(initial_tensors[i].buffer, final_tensors[i].buffer)
for j in range(len(initial_tensors[i].shape)):
self.assertEqual(initial_tensors[i].shape[j], final_tensors[i].shape[j])
# Validate that the skipped buffer is unchanged.
initial_buffer = initial_model.buffers[_SKIPPED_BUFFER_INDEX].data
final_buffer = final_model.buffers[_SKIPPED_BUFFER_INDEX].data
for j in range(initial_buffer.size):
self.assertEqual(initial_buffer.data[j], final_buffer.data[j])
|
RandomizeWeightsTest
|
python
|
bokeh__bokeh
|
tests/cross/cases/regressions/issue_13637.py
|
{
"start": 403,
"end": 569
}
|
class ____(TypedDict):
top_left: NotRequired[int]
top_right: NotRequired[int]
bottom_right: NotRequired[int]
bottom_left: NotRequired[int]
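# Hedged usage sketch: every key is NotRequired, so a partial literal is valid:
#   radius: BorderRadiusTD = {"top_left": 4, "bottom_right": 4}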
|
BorderRadiusTD
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 13329,
"end": 13414
}
|
class ____(PydanticTypeError):
msg_template = 'value is not a valid uuid'
|
UUIDError
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/instigation.py
|
{
"start": 30343,
"end": 30607
}
|
class ____(graphene.Union):
class Meta:
name = "InstigationStateOrError"
types = (
GrapheneInstigationState,
GrapheneInstigationStateNotFoundError,
GraphenePythonError,
)
|
GrapheneInstigationStateOrError
|
python
|
huggingface__transformers
|
tests/models/donut/test_image_processing_donut.py
|
{
"start": 1129,
"end": 3129
}
|
class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_thumbnail=True,
do_align_axis=False,
do_pad=True,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size if size is not None else {"height": 18, "width": 20}
self.do_thumbnail = do_thumbnail
self.do_align_axis = do_align_axis
self.do_pad = do_pad
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
|
DonutImageProcessingTester
|
python
|
keon__algorithms
|
algorithms/linkedlist/linkedlist.py
|
{
"start": 690,
"end": 809
}
|
class ____(object):
def __init__(self, value):
self.value = value
self.next = None
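    # Hedged usage sketch: build a tiny list 1 -> 2 -> None.
    #   head = SinglyLinkedListNode(1)
    #   head.next = SinglyLinkedListNode(2)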
|
SinglyLinkedListNode
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_pattern04.py
|
{
"start": 315,
"end": 3559
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pattern04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [110902272, 110756608]
data = [
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
worksheet.write_column("E1", data[4])
worksheet.write_column("F1", data[5])
worksheet.write_column("G1", data[6])
worksheet.write_column("H1", data[7])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$3",
"pattern": {
"pattern": "percent_20",
"fg_color": "#C00000",
"bg_color": "#FFFFFF",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$B$1:$B$3",
"pattern": {
"pattern": "percent_70",
"fg_color": "#FF0000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$C$1:$C$3",
"pattern": {
"pattern": "dark_downward_diagonal",
"fg_color": "#FFC000",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$D$1:$D$3",
"pattern": {
"pattern": "narrow_vertical",
"fg_color": "#FFFF00",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$E$1:$E$3",
"pattern": {
"pattern": "dashed_horizontal",
"fg_color": "#92D050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$F$1:$F$3",
"pattern": {
"pattern": "diagonal_brick",
"fg_color": "#00B050",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$G$1:$G$3",
"pattern": {
"pattern": "dotted_diamond",
"fg_color": "#00B0F0",
},
}
)
chart.add_series(
{
"values": "=Sheet1!$H$1:$H$3",
"pattern": {
"pattern": "small_check",
"fg_color": "#0070C0",
},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/classes5.py
|
{
"start": 5231,
"end": 5343
}
|
class ____(Protocol):
Config1: ClassVar[type[ConfigBase]]
Config2: ClassVar[type[ConfigBase]]
|
ParentClass3
|
python
|
chroma-core__chroma
|
chromadb/api/__init__.py
|
{
"start": 18544,
"end": 27014
}
|
class ____(BaseAPI, AdminAPI, Component):
"""An API instance that extends the relevant Base API methods by passing
in a tenant and database. This is the root component of the Chroma System"""
@abstractmethod
@override
def count_collections(
self, tenant: str = DEFAULT_TENANT, database: str = DEFAULT_DATABASE
) -> int:
pass
@abstractmethod
def list_collections(
self,
limit: Optional[int] = None,
offset: Optional[int] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> Sequence[CollectionModel]:
pass
@abstractmethod
def create_collection(
self,
name: str,
schema: Optional[Schema] = None,
configuration: Optional[CreateCollectionConfiguration] = None,
metadata: Optional[CollectionMetadata] = None,
get_or_create: bool = False,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
pass
@abstractmethod
def get_collection(
self,
name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
pass
@abstractmethod
def get_or_create_collection(
self,
name: str,
schema: Optional[Schema] = None,
configuration: Optional[CreateCollectionConfiguration] = None,
metadata: Optional[CollectionMetadata] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
pass
@abstractmethod
@override
def delete_collection(
self,
name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
pass
@abstractmethod
@override
def _modify(
self,
id: UUID,
new_name: Optional[str] = None,
new_metadata: Optional[CollectionMetadata] = None,
new_configuration: Optional[UpdateCollectionConfiguration] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
pass
@abstractmethod
def _fork(
self,
collection_id: UUID,
new_name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
pass
@abstractmethod
def _search(
self,
collection_id: UUID,
searches: List[Search],
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> SearchResult:
pass
@abstractmethod
@override
def _count(
self,
collection_id: UUID,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> int:
pass
@abstractmethod
@override
def _peek(
self,
collection_id: UUID,
n: int = 10,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> GetResult:
pass
@abstractmethod
@override
def _get(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = IncludeMetadataDocuments,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> GetResult:
pass
@abstractmethod
@override
def _add(
self,
ids: IDs,
collection_id: UUID,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
uris: Optional[URIs] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
pass
@abstractmethod
@override
def _update(
self,
collection_id: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
uris: Optional[URIs] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
pass
@abstractmethod
@override
def _upsert(
self,
collection_id: UUID,
ids: IDs,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
uris: Optional[URIs] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
pass
@abstractmethod
@override
def _query(
self,
collection_id: UUID,
query_embeddings: Embeddings,
ids: Optional[IDs] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = IncludeMetadataDocumentsDistances,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> QueryResult:
pass
@abstractmethod
@override
def _delete(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
pass
@abstractmethod
def attach_function(
self,
function_id: str,
name: str,
input_collection_id: UUID,
output_collection: str,
params: Optional[Dict[str, Any]] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> "AttachedFunction":
"""Attach a function to a collection.
Args:
function_id: Built-in function identifier
name: Unique name for this attached function
input_collection_id: Source collection that triggers the function
output_collection: Target collection where function output is stored
params: Optional dictionary with function-specific parameters
tenant: The tenant name
database: The database name
Returns:
AttachedFunction: Object representing the attached function
"""
pass
@abstractmethod
def get_attached_function(
self,
name: str,
input_collection_id: UUID,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> "AttachedFunction":
"""Get an attached function by name for a specific collection.
Args:
name: Name of the attached function
input_collection_id: The collection ID
tenant: The tenant name
database: The database name
Returns:
AttachedFunction: The attached function object
Raises:
NotFoundError: If the attached function doesn't exist
"""
pass
@abstractmethod
def detach_function(
self,
attached_function_id: UUID,
input_collection_id: UUID,
delete_output: bool = False,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> bool:
"""Detach a function and prevent any further runs.
Args:
attached_function_id: ID of the attached function to remove
input_collection_id: ID of the input collection
delete_output: Whether to also delete the output collection
tenant: The tenant name
database: The database name
Returns:
bool: True if successful
"""
pass
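    # Hedged usage sketch (illustrative; "api" is a concrete implementation of
    # this interface and the function id is hypothetical):
    #   col = api.get_or_create_collection("docs")
    #   api.attach_function(
    #       function_id="builtin-summarizer",   # hypothetical identifier
    #       name="docs-to-summaries",
    #       input_collection_id=col.id,
    #       output_collection="summaries",
    #   )
    #   api.get_attached_function("docs-to-summaries", input_collection_id=col.id)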
|
ServerAPI
|
python
|
walkccc__LeetCode
|
solutions/929. Unique Email Addresses/929.py
|
{
"start": 0,
"end": 263
}
|
class ____:
def numUniqueEmails(self, emails: list[str]) -> int:
seen = set()
for email in emails:
local, domain = email.split('@')
local = local.split('+')[0].replace('.', '')
seen.add(local + '@' + domain)
return len(seen)
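# Hedged usage example (not part of the original solution): the three classic
# addresses below normalize to two distinct mailboxes.
if __name__ == "__main__":
    emails = [
        "test.email+alex@leetcode.com",
        "test.e.mail+bob.cathy@leetcode.com",
        "testemail+david@lee.tcode.com",
    ]
    assert Solution().numUniqueEmails(emails) == 2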
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/generic/test_generic.py
|
{
"start": 10501,
"end": 17027
}
|
class ____:
# tests that don't fit elsewhere
@pytest.mark.parametrize(
"ser",
[
Series(range(10), dtype=np.float64),
Series([str(i) for i in range(10)], dtype=object),
],
)
def test_squeeze_series_noop(self, ser):
# noop
tm.assert_series_equal(ser.squeeze(), ser)
def test_squeeze_frame_noop(self):
# noop
df = DataFrame(np.eye(2))
tm.assert_frame_equal(df.squeeze(), df)
def test_squeeze_frame_reindex(self):
# squeezing
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
).reindex(columns=["A"])
tm.assert_series_equal(df.squeeze(), df["A"])
def test_squeeze_0_len_dim(self):
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name="five", dtype=np.float64)
empty_frame = DataFrame([empty_series])
tm.assert_series_equal(empty_series, empty_series.squeeze())
tm.assert_series_equal(empty_series, empty_frame.squeeze())
def test_squeeze_axis(self):
# axis argument
df = DataFrame(
np.random.default_rng(2).standard_normal((1, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=1, freq="B"),
).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis=2)
msg = "No axis named x for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.squeeze(axis="x")
def test_squeeze_axis_len_3(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((3, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=3, freq="B"),
)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = Series(range(2), dtype=np.float64)
tm.assert_series_equal(np.squeeze(s), s)
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
).reindex(columns=["A"])
tm.assert_series_equal(np.squeeze(df), df["A"])
@pytest.mark.parametrize(
"ser",
[
Series(range(10), dtype=np.float64),
Series([str(i) for i in range(10)], dtype=object),
],
)
def test_transpose_series(self, ser):
# calls implementation in pandas/core/base.py
tm.assert_series_equal(ser.transpose(), ser)
def test_transpose_frame(self):
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
tm.assert_frame_equal(df.transpose().transpose(), df)
def test_numpy_transpose(self, frame_or_series):
obj = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
obj = tm.get_obj(obj, frame_or_series)
if frame_or_series is Series:
# 1D -> np.transpose is no-op
tm.assert_series_equal(np.transpose(obj), obj)
# round-trip preserved
tm.assert_equal(np.transpose(np.transpose(obj)), obj)
msg = "the 'axes' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.transpose(obj, axes=1)
@pytest.mark.parametrize(
"ser",
[
Series(range(10), dtype=np.float64),
Series([str(i) for i in range(10)], dtype=object),
],
)
def test_take_series(self, ser):
indices = [1, 5, -2, 6, 3, -1]
out = ser.take(indices)
expected = Series(
data=ser.values.take(indices),
index=ser.index.take(indices),
dtype=ser.dtype,
)
tm.assert_series_equal(out, expected)
def test_take_frame(self):
indices = [1, 5, -2, 6, 3, -1]
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 4)),
columns=Index(list("ABCD"), dtype=object),
index=date_range("2000-01-01", periods=10, freq="B"),
)
out = df.take(indices)
expected = DataFrame(
data=df.values.take(indices, axis=0),
index=df.index.take(indices),
columns=df.columns,
)
tm.assert_frame_equal(out, expected)
def test_take_invalid_kwargs(self, frame_or_series):
indices = [-3, 2, 0, 1]
obj = DataFrame(range(5))
obj = tm.get_obj(obj, frame_or_series)
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
obj.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
obj.take(indices, mode="clip")
def test_axis_classmethods(self, frame_or_series):
box = frame_or_series
obj = box(dtype=object)
values = box._AXIS_TO_AXIS_NUMBER.keys()
for v in values:
assert obj._get_axis_number(v) == box._get_axis_number(v)
assert obj._get_axis_name(v) == box._get_axis_name(v)
assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
def test_flags_identity(self, frame_or_series):
obj = Series([1, 2])
if frame_or_series is DataFrame:
obj = obj.to_frame()
assert obj.flags is obj.flags
obj2 = obj.copy()
assert obj2.flags is not obj.flags
|
TestNDFrame
|
python
|
python__mypy
|
mypy/types.py
|
{
"start": 147295,
"end": 158512
}
|
class ____(BoolTypeQuery):
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_type_alias_type(self, t: TypeAliasType) -> bool:
return t.is_recursive or self.query_types(t.args)
# Use singleton since this is hot (note: call reset() before using)
_has_recursive_type: Final = HasRecursiveType()
def has_recursive_types(typ: Type) -> bool:
"""Check if a type contains any recursive aliases (recursively)."""
_has_recursive_type.reset()
return typ.accept(_has_recursive_type)
def split_with_prefix_and_suffix(
types: tuple[Type, ...], prefix: int, suffix: int
) -> tuple[tuple[Type, ...], tuple[Type, ...], tuple[Type, ...]]:
if len(types) <= prefix + suffix:
types = extend_args_for_prefix_and_suffix(types, prefix, suffix)
if suffix:
return types[:prefix], types[prefix:-suffix], types[-suffix:]
else:
return types[:prefix], types[prefix:], ()
def extend_args_for_prefix_and_suffix(
types: tuple[Type, ...], prefix: int, suffix: int
) -> tuple[Type, ...]:
"""Extend list of types by eating out from variadic tuple to satisfy prefix and suffix."""
idx = None
item = None
for i, t in enumerate(types):
if isinstance(t, UnpackType):
p_type = get_proper_type(t.type)
if isinstance(p_type, Instance) and p_type.type.fullname == "builtins.tuple":
item = p_type.args[0]
idx = i
break
if idx is None:
return types
assert item is not None
if idx < prefix:
start = (item,) * (prefix - idx)
else:
start = ()
if len(types) - idx - 1 < suffix:
end = (item,) * (suffix - len(types) + idx + 1)
else:
end = ()
return types[:idx] + start + (types[idx],) + end + types[idx + 1 :]
def flatten_nested_unions(
types: Sequence[Type], *, handle_type_alias_type: bool = True, handle_recursive: bool = True
) -> list[Type]:
"""Flatten nested unions in a type list."""
if not isinstance(types, list):
typelist = list(types)
else:
typelist = cast("list[Type]", types)
# Fast path: most of the time there is nothing to flatten
if not any(isinstance(t, (TypeAliasType, UnionType)) for t in typelist): # type: ignore[misc]
return typelist
flat_items: list[Type] = []
for t in typelist:
if handle_type_alias_type and isinstance(t, TypeAliasType):
if not handle_recursive and t.is_recursive:
tp: Type = t
else:
tp = get_proper_type(t)
else:
tp = t
if isinstance(tp, ProperType) and isinstance(tp, UnionType):
flat_items.extend(
flatten_nested_unions(
tp.items,
handle_type_alias_type=handle_type_alias_type,
handle_recursive=handle_recursive,
)
)
else:
# Must preserve original aliases when possible.
flat_items.append(t)
return flat_items
def find_unpack_in_list(items: Sequence[Type]) -> int | None:
unpack_index: int | None = None
for i, item in enumerate(items):
if isinstance(item, UnpackType):
# We cannot fail here, so we must check this in an earlier
# semanal phase.
# Funky code here avoids mypyc narrowing the type of unpack_index.
old_index = unpack_index
assert old_index is None
# Don't return so that we can also sanity check there is only one.
unpack_index = i
return unpack_index
def flatten_nested_tuples(types: Iterable[Type]) -> list[Type]:
"""Recursively flatten TupleTypes nested with Unpack.
For example this will transform
Tuple[A, Unpack[Tuple[B, Unpack[Tuple[C, D]]]]]
into
Tuple[A, B, C, D]
"""
res = []
for typ in types:
if not isinstance(typ, UnpackType):
res.append(typ)
continue
p_type = get_proper_type(typ.type)
if not isinstance(p_type, TupleType):
res.append(typ)
continue
if isinstance(typ.type, TypeAliasType):
items = []
for item in p_type.items:
if (
isinstance(item, ProperType)
and isinstance(item, Instance)
or isinstance(item, TypeAliasType)
):
if len(item.args) == 0:
item = item.copy_modified()
item.set_line(typ)
items.append(item)
else:
items = p_type.items
res.extend(flatten_nested_tuples(items))
return res
def is_literal_type(typ: ProperType, fallback_fullname: str, value: LiteralValue) -> bool:
"""Check if this type is a LiteralType with the given fallback type and value."""
if isinstance(typ, Instance) and typ.last_known_value:
typ = typ.last_known_value
return (
isinstance(typ, LiteralType)
and typ.fallback.type.fullname == fallback_fullname
and typ.value == value
)
names: Final = globals().copy()
names.pop("NOT_READY", None)
deserialize_map: Final = {
key: obj.deserialize
for key, obj in names.items()
if isinstance(obj, type) and issubclass(obj, Type) and obj is not Type
}
def callable_with_ellipsis(any_type: AnyType, ret_type: Type, fallback: Instance) -> CallableType:
"""Construct type Callable[..., ret_type]."""
return CallableType(
[any_type, any_type],
[ARG_STAR, ARG_STAR2],
[None, None],
ret_type=ret_type,
fallback=fallback,
is_ellipsis_args=True,
)
def remove_dups(types: list[T]) -> list[T]:
if len(types) <= 1:
return types
# Get unique elements in order of appearance
all_types: set[T] = set()
new_types: list[T] = []
for t in types:
if t not in all_types:
new_types.append(t)
all_types.add(t)
return new_types
def type_vars_as_args(type_vars: Sequence[TypeVarLikeType]) -> tuple[Type, ...]:
"""Represent type variables as they would appear in a type argument list."""
args: list[Type] = []
for tv in type_vars:
if isinstance(tv, TypeVarTupleType):
args.append(UnpackType(tv))
else:
args.append(tv)
return tuple(args)
# See docstring for mypy/cache.py for reserved tag ranges.
# Instance-related tags.
INSTANCE: Final[Tag] = 80
INSTANCE_SIMPLE: Final[Tag] = 81
INSTANCE_GENERIC: Final[Tag] = 82
INSTANCE_STR: Final[Tag] = 83
INSTANCE_FUNCTION: Final[Tag] = 84
INSTANCE_INT: Final[Tag] = 85
INSTANCE_BOOL: Final[Tag] = 86
INSTANCE_OBJECT: Final[Tag] = 87
# Other type tags.
TYPE_ALIAS_TYPE: Final[Tag] = 100
TYPE_VAR_TYPE: Final[Tag] = 101
PARAM_SPEC_TYPE: Final[Tag] = 102
TYPE_VAR_TUPLE_TYPE: Final[Tag] = 103
UNBOUND_TYPE: Final[Tag] = 104
UNPACK_TYPE: Final[Tag] = 105
ANY_TYPE: Final[Tag] = 106
UNINHABITED_TYPE: Final[Tag] = 107
NONE_TYPE: Final[Tag] = 108
DELETED_TYPE: Final[Tag] = 109
CALLABLE_TYPE: Final[Tag] = 110
OVERLOADED: Final[Tag] = 111
TUPLE_TYPE: Final[Tag] = 112
TYPED_DICT_TYPE: Final[Tag] = 113
LITERAL_TYPE: Final[Tag] = 114
UNION_TYPE: Final[Tag] = 115
TYPE_TYPE: Final[Tag] = 116
PARAMETERS: Final[Tag] = 117
def read_type(data: ReadBuffer, tag: Tag | None = None) -> Type:
if tag is None:
tag = read_tag(data)
# The branches here are ordered manually by type "popularity".
if tag == INSTANCE:
return Instance.read(data)
if tag == ANY_TYPE:
return AnyType.read(data)
if tag == TYPE_VAR_TYPE:
return TypeVarType.read(data)
if tag == CALLABLE_TYPE:
return CallableType.read(data)
if tag == NONE_TYPE:
return NoneType.read(data)
if tag == UNION_TYPE:
return UnionType.read(data)
if tag == LITERAL_TYPE:
return LiteralType.read(data)
if tag == TYPE_ALIAS_TYPE:
return TypeAliasType.read(data)
if tag == TUPLE_TYPE:
return TupleType.read(data)
if tag == TYPED_DICT_TYPE:
return TypedDictType.read(data)
if tag == TYPE_TYPE:
return TypeType.read(data)
if tag == OVERLOADED:
return Overloaded.read(data)
if tag == PARAM_SPEC_TYPE:
return ParamSpecType.read(data)
if tag == TYPE_VAR_TUPLE_TYPE:
return TypeVarTupleType.read(data)
if tag == UNPACK_TYPE:
return UnpackType.read(data)
if tag == PARAMETERS:
return Parameters.read(data)
if tag == UNINHABITED_TYPE:
return UninhabitedType.read(data)
if tag == UNBOUND_TYPE:
return UnboundType.read(data)
if tag == DELETED_TYPE:
return DeletedType.read(data)
assert False, f"Unknown type tag {tag}"
def read_function_like(data: ReadBuffer, tag: Tag) -> FunctionLike:
if tag == CALLABLE_TYPE:
return CallableType.read(data)
if tag == OVERLOADED:
return Overloaded.read(data)
assert False, f"Invalid type tag for FunctionLike {tag}"
def read_type_var_likes(data: ReadBuffer) -> list[TypeVarLikeType]:
"""Specialized version of read_type_list() for lists of type variables."""
assert read_tag(data) == LIST_GEN
ret: list[TypeVarLikeType] = []
for _ in range(read_int_bare(data)):
tag = read_tag(data)
if tag == TYPE_VAR_TYPE:
ret.append(TypeVarType.read(data))
elif tag == PARAM_SPEC_TYPE:
ret.append(ParamSpecType.read(data))
elif tag == TYPE_VAR_TUPLE_TYPE:
ret.append(TypeVarTupleType.read(data))
else:
assert False, f"Invalid type tag for TypeVarLikeType {tag}"
return ret
def read_type_opt(data: ReadBuffer) -> Type | None:
tag = read_tag(data)
if tag == LITERAL_NONE:
return None
return read_type(data, tag)
def write_type_opt(data: WriteBuffer, value: Type | None) -> None:
if value is not None:
value.write(data)
else:
write_tag(data, LITERAL_NONE)
def read_type_list(data: ReadBuffer) -> list[Type]:
assert read_tag(data) == LIST_GEN
size = read_int_bare(data)
return [read_type(data) for _ in range(size)]
def write_type_list(data: WriteBuffer, value: Sequence[Type]) -> None:
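    # Wire layout (descriptive note, added): a LIST_GEN tag, the bare item
    # count, then each type serialized with its own tag, mirroring what
    # read_type_list() above consumes in the same order.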
write_tag(data, LIST_GEN)
write_int_bare(data, len(value))
for item in value:
item.write(data)
def read_type_map(data: ReadBuffer) -> dict[str, Type]:
assert read_tag(data) == DICT_STR_GEN
size = read_int_bare(data)
return {read_str_bare(data): read_type(data) for _ in range(size)}
def write_type_map(data: WriteBuffer, value: dict[str, Type]) -> None:
write_tag(data, DICT_STR_GEN)
write_int_bare(data, len(value))
for key in sorted(value):
write_str_bare(data, key)
value[key].write(data)
# This cyclic import is unfortunate, but to avoid it we would need to move away all uses
# of get_proper_type() from types.py. The majority of them have been removed, but the few
# remaining ones are quite tricky to get rid of; ultimately we want to do that at some point.
from mypy.expandtype import ExpandTypeVisitor
|
HasRecursiveType
|
python
|
facebook__pyre-check
|
client/backend_arguments.py
|
{
"start": 1568,
"end": 2071
}
|
class ____:
elements: Sequence[search_path.Element] = dataclasses.field(default_factory=list)
def serialize(self) -> Dict[str, object]:
return {
"kind": "simple",
"paths": [element.command_line_argument() for element in self.elements],
}
def get_checked_directory_allowlist(self) -> Set[str]:
return {element.path() for element in self.elements}
def cleanup(self) -> None:
pass
@dataclasses.dataclass(frozen=True)
|
SimpleSourcePath
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/guides/dagster/development_to_production/resources/resources_v1.py
|
{
"start": 126,
"end": 849
}
|
class ____(ConfigurableResource):
"""Hacker News client that fetches live data."""
def fetch_item_by_id(self, item_id: int) -> Optional[dict[str, Any]]:
"""Fetches a single item from the Hacker News API by item id."""
item_url = f"https://hacker-news.firebaseio.com/v0/item/{item_id}.json"
item = requests.get(item_url, timeout=5).json()
return item
def fetch_max_item_id(self) -> int:
return requests.get(
"https://hacker-news.firebaseio.com/v0/maxitem.json", timeout=5
).json()
@property
def item_field_names(self) -> list:
# omitted for brevity, see full code example for implementation
return []
# end_resource
|
HNAPIClient
|
python
|
python-visualization__folium
|
tests/selenium/conftest.py
|
{
"start": 467,
"end": 1969
}
|
class ____(Chrome):
"""Selenium WebDriver wrapper that adds folium test specific features."""
def __init__(self):
options = ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-gpu")
options.add_argument("--headless")
options.add_argument("--window-size=1024,768")
super().__init__(options=options)
def get_file(self, filepath):
self.clean_window()
super().get("file://" + filepath)
def clean_window(self):
"""Make sure we have a fresh window (without restarting the browser)."""
# open new tab
self.execute_script("window.open();")
# close old tab
self.close()
# switch to new tab
self.switch_to.window(self.window_handles[0])
def verify_js_logs(self):
"""Raise an error if there are errors in the browser JS console."""
logs = self.get_log("browser")
for log in logs:
if log["level"] == "SEVERE":
msg = " ".join(log["message"].split()[2:])
raise RuntimeError(f'Javascript error: "{msg}".')
def wait_until(self, css_selector, timeout=10):
"""Wait for and return the element(s) selected by css_selector."""
wait = WebDriverWait(self, timeout=timeout)
is_visible = visibility_of_element_located((By.CSS_SELECTOR, css_selector))
return wait.until(is_visible)
|
DriverFolium
|
python
|
huggingface__transformers
|
tests/models/nougat/test_tokenization_nougat.py
|
{
"start": 5178,
"end": 6014
}
|
class ____(unittest.TestCase):
def test_two_level_lines(self):
input_str = "* Item 1 * Item 2"
expected_output = "* Item 1\n* Item 2\n"
self.assertEqual(normalize_list_like_lines(input_str), expected_output)
def test_three_level_lines(self):
input_str = "- I. Item 1 - II. Item 2 - III. Item 3"
expected_output = "- I. Item 1\n- II. Item 2\n- III. Item 3\n"
self.assertEqual(normalize_list_like_lines(input_str), expected_output)
def test_nested_lines(self):
input_str = "- I. Item 1 - I.1 Sub-item 1 - I.1.1 Sub-sub-item 1 - II. Item 2"
expected_output = "- I. Item 1\n\t- I.1 Sub-item 1\n\t\t- I.1.1 Sub-sub-item 1\n- II. Item 2\n"
self.assertEqual(normalize_list_like_lines(input_str), expected_output)
@require_tokenizers
|
TestNormalizeListLikeLines
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/queries/near_object/generate/executor.py
|
{
"start": 994,
"end": 19647
}
|
class ____(
Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType]
):
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GenerativeReturn[Properties, References]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GenerativeReturn[Properties, CrossReferences]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GenerativeReturn[Properties, TReferences]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GenerativeReturn[TProperties, References]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GenerativeReturn[TProperties, CrossReferences]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GenerativeReturn[TProperties, TReferences]]: ...
### GroupBy ###
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GenerativeGroupByReturn[Properties, References]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GenerativeGroupByReturn[Properties, CrossReferences]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GenerativeGroupByReturn[Properties, TReferences]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GenerativeGroupByReturn[TProperties, References]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GenerativeGroupByReturn[TProperties, CrossReferences]]: ...
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GenerativeGroupByReturn[TProperties, TReferences]]: ...
### Default ###
@overload
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
GenerativeSearchReturnType[Properties, References, TProperties, TReferences]
]: ...
def near_object(
self,
near_object: UUID,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
GenerativeSearchReturnType[Properties, References, TProperties, TReferences]
]:
"""Perform retrieval-augmented generation (RaG) on the results of a by-object object search in this collection using a vector-based similarity search.
See the [docs](https://weaviate.io/developers/weaviate/api/graphql/search-operators#nearobject) for a more detailed explanation.
Args:
near_object: The UUID of the object to search on, REQUIRED.
certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used.
distance: The maximum distance to search. If not specified, the default distance specified by the server is used.
limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned.
offset: The offset to start from. If not specified, the retrieval begins from the first object in the server.
auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied.
filters: The filters to apply to the search.
group_by: How the results should be grouped by a specific property.
rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work.
target_vector: The name of the vector space to search in for named vector configurations. Required if multiple spaces are configured.
include_vector: Whether to include the vector in the results. If not specified, this is set to False.
return_metadata: The metadata to return for each object, defaults to `None`.
return_properties: The properties to return for each object.
return_references: The references to return for each object.
NOTE:
- If `return_properties` is not provided then all properties are returned except for blob properties.
- If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata.
- If `return_references` is not provided then no references are provided.
Returns:
A `GenerativeReturn` or `GenerativeGroupByReturn` object that includes the searched objects.
If `group_by` is provided then a `GenerativeGroupByReturn` object is returned, otherwise a `GenerativeReturn` object is returned.
Raises:
weaviate.exceptions.WeaviateGRPCQueryError: If the request to the Weaviate server fails.
"""
def resp(
res: search_get_pb2.SearchReply,
) -> GenerativeSearchReturnType[Properties, References, TProperties, TReferences]:
return cast(
Any,
self._result_to_generative_return(
res,
_QueryOptions.from_input(
return_metadata,
return_properties,
include_vector,
self._references,
return_references,
rerank,
group_by,
),
),
)
request = self._query.near_object(
near_object=near_object,
certainty=certainty,
distance=distance,
limit=limit,
offset=offset,
autocut=auto_limit,
filters=filters,
group_by=_GroupBy.from_input(group_by),
rerank=rerank,
target_vector=target_vector,
generative=_Generative(
single=single_prompt,
grouped=grouped_task,
grouped_properties=grouped_properties,
generative_provider=generative_provider,
),
return_metadata=self._parse_return_metadata(return_metadata, include_vector),
return_properties=self._parse_return_properties(return_properties),
return_references=self._parse_return_references(return_references),
)
return executor.execute(
response_callback=resp, method=self._connection.grpc_search, request=request
)
|
_NearObjectGenerateExecutor
|
python
|
django__django
|
tests/admin_views/customadmin.py
|
{
"start": 2094,
"end": 2905
}
|
class ____(admin.ModelAdmin):
def get_deleted_objects(self, objs, request):
return ["a deletable object"], {"books": 1}, set(), []
site = Admin2(name="admin2")
site.register(models.Article, base_admin.ArticleAdmin)
site.register(models.Book, BookAdmin)
site.register(
models.Section, inlines=[base_admin.ArticleInline], search_fields=["name"]
)
site.register(models.Thing, base_admin.ThingAdmin)
site.register(models.Fabric, base_admin.FabricAdmin)
site.register(models.ChapterXtra1, base_admin.ChapterXtra1Admin)
site.register(User, UserLimitedAdmin)
site.register(models.UndeletableObject, base_admin.UndeletableObjectAdmin)
site.register(models.Simple, base_admin.AttributeErrorRaisingAdmin)
simple_site = Admin2(name="admin4")
simple_site.register(User, CustomPwdTemplateUserAdmin)
|
BookAdmin
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
|
{
"start": 8726,
"end": 8924
}
|
class ____(IncrementalShopifyStream):
data_field = "tender_transactions"
cursor_field = "processed_at"
filter_field = "processed_at_min"
order_field = "processed_at"
|
TenderTransactions
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-salesforce/source_salesforce/streams.py
|
{
"start": 21264,
"end": 37395
}
|
class ____(SalesforceStream):
def __init__(self, **kwargs) -> None:
self._stream_slicer_cursor = None
self._switch_from_bulk_to_rest = False
self._rest_stream = None
super().__init__(**kwargs)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""
This method needs to be there as `HttpStream.next_page_token` is abstract but it will never get called
"""
pass
def path(self, next_page_token: Mapping[str, Any] = None, **kwargs: Any) -> str:
"""
This method needs to be there as `HttpStream.path` is abstract but it will never get called
"""
pass
def _instantiate_declarative_stream(self, stream_slicer: StreamSlicer, has_bulk_parent: bool) -> None:
"""
For streams with a replication key and where filtering is supported, we need to have the cursor in order to instantiate the
DeclarativeStream hence why this isn't called in the __init__
"""
config = {}
parameters = {}
url_base = self.sf_api.instance_url
job_query_path = f"/services/data/{self.sf_api.version}/jobs/query"
decoder = JsonDecoder(parameters=parameters)
authenticator = BearerAuthenticator(
token_provider=InterpolatedStringTokenProvider(api_token=self.sf_api.access_token, config=config, parameters=parameters),
config=config,
parameters=parameters,
)
error_handler = SalesforceErrorHandler()
select_fields = self.get_query_select_fields()
query = f"SELECT {select_fields} FROM {self.name}" # FIXME "def request_params" is also handling `next_token` (I don't know why, I think it's always None) and parent streams
if self.cursor_field:
where_in_query = '{{ " WHERE " if stream_slice["start_date"] or stream_slice["end_date"] else "" }}'
lower_boundary_interpolation = (
'{{ "' f"{self.cursor_field}" ' >= " + stream_slice["start_date"] if stream_slice["start_date"] else "" }}'
)
and_keyword_interpolation = '{{" AND " if stream_slice["start_date"] and stream_slice["end_date"] else "" }}'
upper_boundary_interpolation = (
'{{ "' f"{self.cursor_field}" ' < " + stream_slice["end_date"] if stream_slice["end_date"] else "" }}'
)
query = query + where_in_query + lower_boundary_interpolation + and_keyword_interpolation + upper_boundary_interpolation
elif isinstance(stream_slicer, BulkParentStreamStreamSlicer):
where_in_query = " WHERE ContentDocumentId IN ('"
parents_interpolation = '{{ "\', \'".join(stream_slice["parents"]) }}'
closing_parenthesis = "')"
query = query + where_in_query + parents_interpolation + closing_parenthesis
creation_requester = HttpRequester(
name=f"{self.name} - creation requester",
url_base=url_base,
path=job_query_path,
authenticator=authenticator,
error_handler=error_handler,
http_method=HttpMethod.POST,
request_options_provider=InterpolatedRequestOptionsProvider(
request_body_data=None,
request_body_json={
"operation": "queryAll",
"query": query,
"contentType": "CSV",
"columnDelimiter": "COMMA",
"lineEnding": "LF",
},
request_headers=None,
request_parameters=None,
config=config,
parameters=parameters,
),
config=config,
parameters=parameters,
disable_retries=False,
message_repository=self._message_repository,
use_cache=False,
decoder=decoder,
stream_response=False,
)
polling_id_interpolation = "{{creation_response['id']}}"
polling_requester = HttpRequester(
name=f"{self.name} - polling requester",
url_base=url_base,
path=f"{job_query_path}/{polling_id_interpolation}",
authenticator=authenticator,
error_handler=error_handler,
http_method=HttpMethod.GET,
request_options_provider=InterpolatedRequestOptionsProvider(
request_body_data=None,
request_body_json=None,
request_headers=None,
request_parameters=None,
config=config,
parameters=parameters,
),
config=config,
parameters=parameters,
disable_retries=False,
message_repository=self._message_repository,
use_cache=False,
decoder=decoder,
stream_response=False,
)
# "GET", url, headers = {"Accept-Encoding": "gzip"}, request_kwargs = {"stream": True}
download_id_interpolation = "{{download_target}}"
job_download_components_name = f"{self.name} - download requester"
download_requester = HttpRequester(
name=job_download_components_name,
url_base=url_base,
path=f"{job_query_path}/{download_id_interpolation}/results",
authenticator=authenticator,
error_handler=error_handler,
http_method=HttpMethod.GET,
request_options_provider=InterpolatedRequestOptionsProvider(
request_body_data=None,
request_body_json=None,
request_headers={"Accept-Encoding": "gzip"},
request_parameters=None,
config=config,
parameters=parameters,
),
config=config,
parameters=parameters,
disable_retries=False,
message_repository=self._message_repository,
use_cache=False,
stream_response=True,
)
download_retriever = SimpleRetriever(
requester=download_requester,
record_selector=RecordSelector(
extractor=ResponseToFileExtractor(parameters={}),
record_filter=None,
transformations=[],
schema_normalization=TypeTransformer(TransformConfig.NoTransform),
config=config,
parameters={},
),
primary_key=None,
name=job_download_components_name,
paginator=DefaultPaginator(
decoder=NoopDecoder(),
page_size_option=None,
page_token_option=RequestOption(
field_name="locator",
inject_into=RequestOptionType.request_parameter,
parameters={},
),
pagination_strategy=CursorPaginationStrategy(
cursor_value="{{ headers['Sforce-Locator'] }}",
stop_condition="{{ headers.get('Sforce-Locator', None) == 'null' or not headers.get('Sforce-Locator', None) }}",
decoder=NoopDecoder(),
config=config,
parameters={},
),
url_base=url_base,
config=config,
parameters={},
),
config=config,
parameters={},
)
abort_requester = HttpRequester(
name=f"{self.name} - abort requester",
url_base=url_base,
path=f"{job_query_path}/{polling_id_interpolation}",
authenticator=authenticator,
error_handler=error_handler,
http_method=HttpMethod.PATCH,
request_options_provider=InterpolatedRequestOptionsProvider(
request_body_data=None,
request_body_json={"state": "Aborted"},
request_headers=None,
request_parameters=None,
config=config,
parameters=parameters,
),
config=config,
parameters=parameters,
disable_retries=False,
message_repository=self._message_repository,
use_cache=False,
stream_response=False,
)
delete_requester = HttpRequester(
name=f"{self.name} - delete requester",
url_base=url_base,
path=f"{job_query_path}/{polling_id_interpolation}",
authenticator=authenticator,
error_handler=error_handler,
http_method=HttpMethod.DELETE,
request_options_provider=None,
config=config,
parameters=parameters,
disable_retries=False,
message_repository=self._message_repository,
use_cache=False,
stream_response=False,
)
status_extractor = DpathExtractor(decoder=JsonDecoder(parameters={}), field_path=["state"], config={}, parameters={})
download_target_extractor = DpathExtractor(decoder=JsonDecoder(parameters={}), field_path=["id"], config={}, parameters={})
job_repository = AsyncHttpJobRepository(
creation_requester=creation_requester,
polling_requester=polling_requester,
download_retriever=download_retriever,
abort_requester=abort_requester,
delete_requester=delete_requester,
status_extractor=status_extractor,
status_mapping={
"InProgress": AsyncJobStatus.RUNNING,
"UploadComplete": AsyncJobStatus.RUNNING,
"JobComplete": AsyncJobStatus.COMPLETED,
"Aborted": AsyncJobStatus.FAILED,
"Failed": AsyncJobStatus.FAILED,
},
download_target_extractor=download_target_extractor,
job_timeout=self.DEFAULT_WAIT_TIMEOUT,
)
record_selector = RecordSelector(
extractor=None, # FIXME typing won't like that but it is not used
record_filter=None,
transformations=[],
schema_normalization=self.transformer,
config=config,
parameters=parameters,
)
self._bulk_job_stream = DeclarativeStream(
retriever=AsyncRetriever(
config={},
parameters={},
record_selector=record_selector,
stream_slicer=AsyncJobPartitionRouter(
job_orchestrator_factory=lambda stream_slices: AsyncJobOrchestrator(
job_repository,
stream_slices,
self._job_tracker,
self._message_repository,
exceptions_to_break_on=[BulkNotSupportedException],
has_bulk_parent=has_bulk_parent,
),
stream_slicer=stream_slicer,
config=config,
parameters={},
),
),
config={},
parameters={},
name=self.name,
primary_key=self.pk,
schema_loader=InlineSchemaLoader({}, {}), # FIXME call get_json_schema?
# the interface mentions that this is Optional,
# but I get `'NoneType' object has no attribute 'eval'` by passing None
stream_cursor_field="",
)
DEFAULT_WAIT_TIMEOUT = timedelta(hours=24)
MAX_CHECK_INTERVAL_SECONDS = 2.0
MAX_RETRY_NUMBER = 3
transformer = TypeTransformer(TransformConfig.CustomSchemaNormalization | TransformConfig.DefaultSchemaNormalization)
def get_query_select_fields(self) -> str:
return ", ".join(
{
key: value
for key, value in self.get_json_schema().get("properties", {}).items()
if value.get("format") != "base64" and "object" not in value["type"]
}
)
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
"""
Salesforce SOQL Query: https://developer.salesforce.com/docs/atlas.en-us.232.0.api_rest.meta/api_rest/dome_queryall.htm
"""
select_fields = self.get_query_select_fields()
query = f"SELECT {select_fields} FROM {self.name}"
if next_page_token:
query += next_page_token["next_token"]
if self.name in PARENT_SALESFORCE_OBJECTS:
# add where clause: " WHERE ContentDocumentId IN ('06905000000NMXXXXX', '06905000000Mxp7XXX', ...)"
parent_field = PARENT_SALESFORCE_OBJECTS[self.name]["field"]
parent_ids = [f"'{parent_record[parent_field]}'" for parent_record in stream_slice["parents"]]
query += f" WHERE ContentDocumentId IN ({','.join(parent_ids)})"
return {"q": query}
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
call_count: int = 0,
) -> Iterable[Mapping[str, Any]]:
if self._is_async_job_slice(stream_slice):
if self._switch_from_bulk_to_rest:
# ignore as we have switched to rest
pass
else:
yield from self._bulk_job_stream.read_records(sync_mode, cursor_field, stream_slice, stream_state)
else:
yield from self._rest_stream.read_records(sync_mode, cursor_field, stream_slice, stream_state)
def _is_async_job_slice(self, stream_slice):
return isinstance(stream_slice, StreamSlice) and "jobs" in stream_slice.extra_fields
def stream_slices(
self, *, sync_mode: SyncMode, cursor_field: Optional[List[str]] = None, stream_state: Optional[Mapping[str, Any]] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
self._instantiate_declarative_stream(BulkDatetimeStreamSlicer(self._stream_slicer_cursor), has_bulk_parent=False)
try:
yield from self._bulk_job_stream.stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)
except BulkNotSupportedException:
self.logger.warning(
"attempt to switch to STANDARD(non-BULK) sync. Because the SalesForce BULK job has returned a failed status"
)
self._switch_from_bulk_to_rest = True
self._rest_stream = self.get_standard_instance()
stream_is_available, error = SalesforceAvailabilityStrategy().check_availability(self._rest_stream, self.logger, None)
if not stream_is_available:
self.logger.warning(f"Skipped syncing stream '{self._rest_stream.name}' because it was unavailable. Error: {error}")
yield from []
else:
yield from self._rest_stream.stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)
def get_standard_instance(self) -> SalesforceStream:
"""Returns a instance of standard logic(non-BULK) with same settings"""
stream_kwargs = dict(
sf_api=self.sf_api,
pk=self.pk,
stream_name=self.stream_name,
schema=self.schema,
sobject_options=self.sobject_options,
authenticator=self._http_client._session.auth,
job_tracker=self._job_tracker,
message_repository=self._message_repository,
)
new_cls: Type[SalesforceStream] = RestSalesforceStream
if isinstance(self, BulkIncrementalSalesforceStream):
stream_kwargs.update({"replication_key": self.replication_key, "start_date": self.start_date})
new_cls = IncrementalRestSalesforceStream
standard_instance = new_cls(**stream_kwargs)
if hasattr(standard_instance, "set_cursor"):
standard_instance.set_cursor(self._stream_slicer_cursor)
return standard_instance
|
BulkSalesforceStream
|
python
|
pytorch__pytorch
|
torchgen/utils.py
|
{
"start": 1049,
"end": 3484
}
|
class ____(Enum):
# top level namespace (not including at)
DEFINITION = auto()
DECLARATION = auto()
# TORCH_LIBRARY(...) { ... }
REGISTRATION = auto()
# namespace { ... }
ANONYMOUS_DEFINITION = auto()
# namespace cpu { ... }
NAMESPACED_DEFINITION = auto()
NAMESPACED_DECLARATION = auto()
# Matches "foo" in "foo, bar" but not "foobar". Used to search for the
# occurrence of a parameter in the derivative formula
IDENT_REGEX = r"(^|\W){}($|\W)"
# TODO: Use a real parser here; this will get bamboozled
def split_name_params(schema: str) -> tuple[str, list[str]]:
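    # Illustrative note (added): split_name_params("add.Tensor(self, other)")
    # returns ("add", ["self", "other"]).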
m = re.match(r"(\w+)(\.\w+)?\((.*)\)", schema)
if m is None:
raise RuntimeError(f"Unsupported function schema: {schema}")
name, _, params = m.groups()
return name, params.split(", ")
T = TypeVar("T")
S = TypeVar("S")
# These two functions purposely return generators in analogy to map()
# so that you don't mix up when you need to list() them
# Map over function that may return None; omit Nones from output sequence
def mapMaybe(func: Callable[[T], S | None], xs: Iterable[T]) -> Iterator[S]:
for x in xs:
r = func(x)
if r is not None:
yield r
# Map over function that returns sequences and cat them all together
def concatMap(func: Callable[[T], Sequence[S]], xs: Iterable[T]) -> Iterator[S]:
for x in xs:
yield from func(x)
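# Illustrative examples (added): list(mapMaybe(lambda x: x + 1 if x else None, [0, 1, 2]))
# yields [2, 3], and list(concatMap(lambda x: [x, x], "ab")) yields ["a", "a", "b", "b"].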
# Conveniently add error context to exceptions raised. Lets us
# easily say that an error occurred while processing a specific
# context.
@contextlib.contextmanager
def context(msg_fn: Callable[[], str]) -> Iterator[None]:
try:
yield
except Exception as e:
# TODO: this does the wrong thing with KeyError
msg = msg_fn()
msg = textwrap.indent(msg, " ")
msg = f"{e.args[0]}\n{msg}" if e.args else msg
e.args = (msg,) + e.args[1:]
raise
@functools.cache
def _read_template(template_fn: str) -> CodeTemplate:
return CodeTemplate.from_file(template_fn)
# String hash that's stable across different executions, unlike builtin hash
def string_stable_hash(s: str) -> int:
sha1 = hashlib.sha1(s.encode("latin1"), usedforsecurity=False).digest()
return int.from_bytes(sha1, byteorder="little")
# A small abstraction for writing out generated files and keeping track
# of what files have been written (so you can write out a list of output
# files)
|
Target
|
python
|
doocs__leetcode
|
solution/3700-3799/3739.Count Subarrays With Majority Element II/Solution.py
|
{
"start": 404,
"end": 772
}
|
class ____:
def countMajoritySubarrays(self, nums: List[int], target: int) -> int:
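        # Approach note (added): keep a running "balance"
        # s = (#target seen) - (#others seen), shifted by n + 1 so it stays positive.
        # A subarray has target as a strict majority exactly when its balance
        # strictly increases over it, i.e. when the current s exceeds the balance
        # at its start, so for each prefix we count the earlier prefixes with a
        # smaller balance using a Fenwick (Binary Indexed) tree.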
n = len(nums)
tree = BinaryIndexedTree(n * 2 + 1)
s = n + 1
tree.update(s, 1)
ans = 0
for x in nums:
s += 1 if x == target else -1
ans += tree.query(s - 1)
tree.update(s, 1)
return ans
|
Solution
|
python
|
django-import-export__django-import-export
|
tests/core/tests/admin_integration/test_action_export.py
|
{
"start": 14812,
"end": 17499
}
|
class ____(AdminTestMixin, TestCase):
def setUp(self):
super().setUp()
self.cat1 = Category.objects.create(name="Cat 1")
self.change_url = reverse(
"%s:%s_%s_change"
% (
"admin",
"core",
"category",
),
args=[self.cat1.id],
)
self.target_str = (
'<input type="submit" value="Export" '
'class="default" name="_export-item">'
)
def test_export_button_on_change_form(self):
self._get_url_response(self.change_url, str_in_response=self.target_str)
response = self._post_url_response(
self.change_url, data={"_export-item": "Export", "name": self.cat1.name}
)
self.assertIn("Export 1 selected item", response.content.decode())
def test_export_button_on_change_form_for_custom_pk(self):
self.cat1 = UUIDCategory.objects.create(name="Cat 1")
self.change_url = reverse(
"%s:%s_%s_change"
% (
"admin",
"core",
"uuidcategory",
),
args=[self.cat1.pk],
)
response = self.client.get(self.change_url)
self.assertIn(self.target_str, response.content.decode())
response = self._post_url_response(
self.change_url, data={"_export-item": "Export", "name": self.cat1.name}
)
self.assertIn("Export 1 selected item", response.content.decode())
def test_save_button_on_change_form(self):
# test default behavior is retained when saving an instance ChangeForm
response = self._post_url_response(
self.change_url, data={"_save": "Save", "name": self.cat1.name}, follow=True
)
target_str = f"The category.*{self.cat1.name}.*was changed successfully."
self.assertRegex(response.content.decode(), target_str)
def test_export_button_on_change_form_disabled(self):
class MockCategoryAdmin(CategoryAdmin):
show_change_form_export = True
factory = RequestFactory()
category_admin = MockCategoryAdmin(Category, admin.site)
request = factory.get(self.change_url)
request.user = self.user
response = category_admin.change_view(request, str(self.cat1.id))
response.render()
self.assertIn(self.target_str, response.content.decode())
category_admin.show_change_form_export = False
response = category_admin.change_view(request, str(self.cat1.id))
response.render()
self.assertNotIn(self.target_str, response.content.decode())
|
TestExportButtonOnChangeForm
|
python
|
spack__spack
|
lib/spack/spack/cmd/mirror.py
|
{
"start": 18236,
"end": 25886
}
|
class ____:
def __init__(self, args):
self.exclude_specs = []
if args.exclude_file:
self.exclude_specs.extend(specs_from_text_file(args.exclude_file, concretize=False))
if args.exclude_specs:
self.exclude_specs.extend(spack.cmd.parse_specs(str(args.exclude_specs).split()))
self.private = args.private
def __call__(self, x):
return all([self._not_license_excluded(x), self._not_cmdline_excluded(x)])
def _not_license_excluded(self, x):
"""True if the spec is for a private mirror, or as long as the
package does not explicitly forbid redistributing source."""
if self.private:
return True
elif spack.repo.PATH.get_pkg_class(x.fullname).redistribute_source(x):
return True
else:
tty.debug(
"Skip adding {0} to mirror: the package.py file"
" indicates that a public mirror should not contain"
" it.".format(x.name)
)
return False
def _not_cmdline_excluded(self, x):
"""True if a spec was not explicitly excluded by the user."""
return not any(x.satisfies(y) for y in self.exclude_specs)
def concrete_specs_from_environment():
env = ev.active_environment()
assert env, "an active environment is required"
mirror_specs = env.all_specs()
mirror_specs = filter_externals(mirror_specs)
return mirror_specs
def all_specs_with_all_versions():
specs = [spack.spec.Spec(n) for n in spack.repo.all_package_names()]
mirror_specs = spack.mirrors.utils.get_all_versions(specs)
mirror_specs.sort(key=lambda s: (s.name, s.version))
return mirror_specs
def versions_per_spec(args):
"""Return how many versions should be mirrored per spec."""
if not args.versions_per_spec:
num_versions = 1
elif args.versions_per_spec == "all":
num_versions = "all"
else:
try:
num_versions = int(args.versions_per_spec)
except ValueError:
raise SpackError(
"'--versions-per-spec' must be a number or 'all',"
" got '{0}'".format(args.versions_per_spec)
)
return num_versions
def process_mirror_stats(present, mirrored, error):
p, m, e = len(present), len(mirrored), len(error)
tty.msg(
"Archive stats:",
" %-4d already present" % p,
" %-4d added" % m,
" %-4d failed to fetch." % e,
)
if error:
tty.error("Failed downloads:")
colify.colify(s.cformat("{name}{@version}") for s in error)
sys.exit(1)
def mirror_create(args):
"""create a directory to be used as a spack mirror, and fill it with package archives"""
if args.file and args.all:
raise SpackError(
"cannot specify specs with a file if you chose to mirror all specs with '--all'"
)
if args.file and args.specs:
raise SpackError("cannot specify specs with a file AND on command line")
if not args.specs and not args.file and not args.all:
raise SpackError(
"no packages were specified.",
"To mirror all packages, use the '--all' option "
"(this will require significant time and space).",
)
if args.versions_per_spec and args.all:
raise SpackError(
"cannot specify '--versions_per-spec' and '--all' together",
"The option '--all' already implies mirroring all versions for each package.",
)
# When no directory is provided, the source dir is used
path = args.directory or spack.caches.fetch_cache_location()
mirror_specs = _specs_to_mirror(args)
workers = args.jobs
if workers is None:
if args.all:
workers = min(
16, spack.config.determine_number_of_jobs(parallel=True), len(mirror_specs)
)
else:
workers = 1
create_mirror_for_all_specs(
mirror_specs,
path=path,
skip_unstable_versions=args.skip_unstable_versions,
workers=workers,
)
def _specs_to_mirror(args):
include_fn = IncludeFilter(args)
if args.all and not ev.active_environment():
mirror_specs = all_specs_with_all_versions()
elif args.all and ev.active_environment():
mirror_specs = concrete_specs_from_environment()
else:
mirror_specs = concrete_specs_from_user(args)
mirror_specs, _ = lang.stable_partition(mirror_specs, predicate_fn=include_fn)
return mirror_specs
def create_mirror_for_one_spec(candidate, mirror_cache):
pkg_cls = spack.repo.PATH.get_pkg_class(candidate.name)
pkg_obj = pkg_cls(spack.spec.Spec(candidate))
mirror_stats = spack.mirrors.utils.MirrorStatsForOneSpec(candidate)
spack.mirrors.utils.create_mirror_from_package_object(pkg_obj, mirror_cache, mirror_stats)
mirror_stats.finalize()
return mirror_stats
def create_mirror_for_all_specs(mirror_specs, path, skip_unstable_versions, workers):
mirror_cache = spack.mirrors.utils.get_mirror_cache(
path, skip_unstable_versions=skip_unstable_versions
)
mirror_stats = spack.mirrors.utils.MirrorStatsForAllSpecs()
with spack.util.parallel.make_concurrent_executor(jobs=workers) as executor:
# Submit tasks to the process pool
futures = [
executor.submit(create_mirror_for_one_spec, candidate, mirror_cache)
for candidate in mirror_specs
]
for mirror_future in as_completed(futures):
ext_mirror_stats = mirror_future.result()
mirror_stats.merge(ext_mirror_stats)
process_mirror_stats(*mirror_stats.stats())
return mirror_stats
def create(path, specs, skip_unstable_versions=False):
"""Create a directory to be used as a spack mirror, and fill it with
package archives.
Arguments:
path: Path to create a mirror directory hierarchy in.
specs: Any package versions matching these specs will be added \
to the mirror.
skip_unstable_versions: if true, this skips adding resources when
they do not have a stable archive checksum (as determined by
``fetch_strategy.stable_target``)
Returns:
A tuple of lists, each containing specs
* present: Package specs that were already present.
* mirrored: Package specs that were successfully mirrored.
* error: Package specs that failed to mirror due to some error.
"""
# automatically spec-ify anything in the specs array.
specs = [s if isinstance(s, spack.spec.Spec) else spack.spec.Spec(s) for s in specs]
mirror_stats = create_mirror_for_all_specs(specs, path, skip_unstable_versions, workers=1)
return mirror_stats.stats()
def mirror_destroy(args):
"""given a url, recursively delete everything under it"""
mirror_url = None
if args.mirror_name:
result = spack.mirrors.mirror.MirrorCollection().lookup(args.mirror_name)
mirror_url = result.push_url
elif args.mirror_url:
mirror_url = args.mirror_url
web_util.remove_url(mirror_url, recursive=True)
def mirror(parser, args):
action = {
"create": mirror_create,
"destroy": mirror_destroy,
"add": mirror_add,
"remove": mirror_remove,
"rm": mirror_remove,
"set-url": mirror_set_url,
"set": mirror_set,
"list": mirror_list,
"ls": mirror_list,
}
if args.no_checksum:
spack.config.set("config:checksum", False, scope="command_line")
action[args.mirror_command](args)
|
IncludeFilter
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeParams1.py
|
{
"start": 263,
"end": 295
}
|
class ____[T3, S1, T3]: ...
|
ClassC
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2/modeling_lfm2.py
|
{
"start": 2984,
"end": 5978
}
|
class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Lfm2Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Lfm2Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
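        # Illustrative note (added): with dim == 4 and rope_theta == 10000 this
        # gives inv_freq == [1.0, 0.01], one frequency per pair of head
        # dimensions, decaying geometrically with the pair index.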
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
Lfm2RotaryEmbedding
|
python
|
django__django
|
tests/admin_utils/admin.py
|
{
"start": 109,
"end": 543
}
|
class ____(forms.ModelForm):
nolabel_form_field = forms.BooleanField(required=False)
class Meta:
model = Article
fields = ["title"]
@property
def changed_data(self):
data = super().changed_data
if data:
# Add arbitrary name to changed_data to test
# change message construction.
return data + ["not_a_form_field"]
return data
|
ArticleAdminForm
|
python
|
plotly__plotly.py
|
plotly/graph_objs/icicle/_pathbar.py
|
{
"start": 233,
"end": 5683
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle"
_path_str = "icicle.pathbar"
_valid_props = {"edgeshape", "side", "textfont", "thickness", "visible"}
@property
def edgeshape(self):
"""
        Determines which shape is used for edges between `pathbar`
labels.
The 'edgeshape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['>', '<', '|', '/', '\\']
Returns
-------
Any
"""
return self["edgeshape"]
@edgeshape.setter
def edgeshape(self, val):
self["edgeshape"] = val
@property
def side(self):
"""
        Determines on which side of the treemap the `pathbar`
should be presented.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def textfont(self):
"""
Sets the font used inside `pathbar`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.pathbar.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.icicle.pathbar.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def thickness(self):
"""
Sets the thickness of `pathbar` (in px). If not specified the
        `pathbar.textfont.size` is used with 3 pixels extra padding on
each side.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [12, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def visible(self):
"""
Determines if the path bar is drawn i.e. outside the trace
`domain` and with one pixel gap.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
edgeshape
Determines which shape is used for edges between
            `pathbar` labels.
side
            Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
Sets the thickness of `pathbar` (in px). If not
specified the `pathbar.textfont.size` is used with 3
            pixels extra padding on each side.
visible
Determines if the path bar is drawn i.e. outside the
trace `domain` and with one pixel gap.
"""
def __init__(
self,
arg=None,
edgeshape=None,
side=None,
textfont=None,
thickness=None,
visible=None,
**kwargs,
):
"""
Construct a new Pathbar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.icicle.Pathbar`
edgeshape
Determines which shape is used for edges between
            `pathbar` labels.
side
            Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
Sets the thickness of `pathbar` (in px). If not
specified the `pathbar.textfont.size` is used with 3
            pixels extra padding on each side.
visible
Determines if the path bar is drawn i.e. outside the
trace `domain` and with one pixel gap.
Returns
-------
Pathbar
"""
super().__init__("pathbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.Pathbar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.Pathbar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("edgeshape", arg, edgeshape)
self._set_property("side", arg, side)
self._set_property("textfont", arg, textfont)
self._set_property("thickness", arg, thickness)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Pathbar
|
python
|
huggingface__transformers
|
src/transformers/models/git/modeling_git.py
|
{
"start": 4156,
"end": 8468
}
|
class ____(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
if config.num_image_with_embedding is not None:
self.image_patch_tokens *= config.num_image_with_embedding
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
pixel_values_present: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
cutoff = self.image_patch_tokens if pixel_values_present else 0
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
if past_key_values is not None:
# NOTE: like in other caches, we store the text component. In GIT it means we discard the image component.
key_layer_past, value_layer_past = past_key_values.update(
key_layer[:, :, cutoff:, :], value_layer[:, :, cutoff:, :], self.layer_idx
)
key_layer = torch.cat([key_layer[:, :, :cutoff, :], key_layer_past], dim=2)
value_layer = torch.cat([value_layer[:, :, :cutoff, :], value_layer_past], dim=2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in GitModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return context_layer, attention_probs
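# Conceptual sketch (not part of the model) of the `cutoff` slicing above: the
# first `cutoff` positions along the sequence axis are image-patch tokens and
# are kept out of the text-only cache. Shapes and values are illustrative.
import torch

batch, heads, img_tokens, text_tokens, head_dim = 1, 2, 3, 4, 8
cutoff = img_tokens
key_layer = torch.randn(batch, heads, img_tokens + text_tokens, head_dim)

image_part = key_layer[:, :, :cutoff, :]  # never stored in the cache
text_part = key_layer[:, :, cutoff:, :]   # what the cache update would receive
assert torch.equal(torch.cat([image_part, text_part], dim=2), key_layer)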
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
|
GitSelfAttention
|
python
|
openai__openai-python
|
src/openai/types/responses/easy_input_message.py
|
{
"start": 309,
"end": 817
}
|
class ____(BaseModel):
content: Union[str, ResponseInputMessageContentList]
"""
Text, image, or audio input to the model, used to generate a response. Can also
contain previous assistant responses.
"""
role: Literal["user", "assistant", "system", "developer"]
"""The role of the message input.
One of `user`, `assistant`, `system`, or `developer`.
"""
type: Optional[Literal["message"]] = None
"""The type of the message input. Always `message`."""
|
EasyInputMessage
|
python
|
huggingface__transformers
|
src/transformers/models/d_fine/modeling_d_fine.py
|
{
"start": 29948,
"end": 40207
}
|
class ____(DFinePreTrainedModel):
"""
D-FINE Decoder implementing Fine-grained Distribution Refinement (FDR).
This decoder refines object detection predictions through iterative updates across multiple layers,
utilizing attention mechanisms, location quality estimators, and distribution refinement techniques
to improve bounding box accuracy and robustness.
"""
def __init__(self, config: DFineConfig):
super().__init__(config)
self.eval_idx = config.eval_idx if config.eval_idx >= 0 else config.decoder_layers + config.eval_idx
self.dropout = config.dropout
self.layers = nn.ModuleList(
[DFineDecoderLayer(config) for _ in range(config.decoder_layers)]
+ [DFineDecoderLayer(config) for _ in range(config.decoder_layers - self.eval_idx - 1)]
)
self.query_pos_head = DFineMLPPredictionHead(config, 4, 2 * config.d_model, config.d_model, num_layers=2)
# hack implementation for iterative bounding box refinement and two-stage Deformable DETR
self.bbox_embed = None
self.class_embed = None
self.reg_scale = nn.Parameter(torch.tensor([config.reg_scale]), requires_grad=False)
self.max_num_bins = config.max_num_bins
self.d_model = config.d_model
self.layer_scale = config.layer_scale
self.pre_bbox_head = DFineMLP(config.hidden_size, config.hidden_size, 4, 3)
self.integral = DFineIntegral(config)
self.num_head = config.decoder_attention_heads
self.up = nn.Parameter(torch.tensor([config.up]), requires_grad=False)
self.lqe_layers = nn.ModuleList([DFineLQE(config) for _ in range(config.decoder_layers)])
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
encoder_hidden_states: torch.Tensor,
reference_points: torch.Tensor,
inputs_embeds: torch.Tensor,
spatial_shapes,
level_start_index=None,
spatial_shapes_list=None,
output_hidden_states=None,
encoder_attention_mask=None,
memory_mask=None,
output_attentions=None,
return_dict=None,
) -> DFineDecoderOutput:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
The query embeddings that are passed into the decoder.
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Position embeddings that are added to the queries and keys in each self-attention layer.
reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` if `as_two_stage` else `(batch_size, num_queries, 2)`, *optional*):
Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area.
spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of the feature maps.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
Indexes for the start of each feature level. In range `[0, sequence_length]`.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is not None:
hidden_states = inputs_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
intermediate = ()
intermediate_reference_points = ()
intermediate_logits = ()
intermediate_predicted_corners = ()
initial_reference_points = ()
output_detach = pred_corners_undetach = 0
project = weighting_function(self.max_num_bins, self.up, self.reg_scale)
ref_points_detach = F.sigmoid(reference_points)
for i, decoder_layer in enumerate(self.layers):
ref_points_input = ref_points_detach.unsqueeze(2)
query_pos_embed = self.query_pos_head(ref_points_detach).clamp(min=-10, max=10)
if output_hidden_states:
all_hidden_states += (hidden_states,)
output = decoder_layer(
hidden_states=hidden_states,
position_embeddings=query_pos_embed,
reference_points=ref_points_input,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
hidden_states = output[0]
if i == 0:
# Initial bounding box predictions with inverse sigmoid refinement
new_reference_points = F.sigmoid(self.pre_bbox_head(output[0]) + inverse_sigmoid(ref_points_detach))
ref_points_initial = new_reference_points.detach()
# Refine bounding box corners using FDR, integrating previous layer's corrections
if self.bbox_embed is not None:
pred_corners = self.bbox_embed[i](hidden_states + output_detach) + pred_corners_undetach
inter_ref_bbox = distance2bbox(
ref_points_initial, self.integral(pred_corners, project), self.reg_scale
)
pred_corners_undetach = pred_corners
ref_points_detach = inter_ref_bbox.detach()
output_detach = hidden_states.detach()
intermediate += (hidden_states,)
if self.class_embed is not None and (self.training or i == self.eval_idx):
scores = self.class_embed[i](hidden_states)
# Add initial logits and reference points with pre-bbox head
if i == 0:
intermediate_logits += (scores,)
intermediate_reference_points += (new_reference_points,)
# Lqe does not affect the performance here.
scores = self.lqe_layers[i](scores, pred_corners)
intermediate_logits += (scores,)
intermediate_reference_points += (inter_ref_bbox,)
initial_reference_points += (ref_points_initial,)
intermediate_predicted_corners += (pred_corners,)
if output_attentions:
all_self_attns += (output[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (output[2],)
# Keep batch_size as first dimension
intermediate = torch.stack(intermediate)
if self.class_embed is not None and self.bbox_embed is not None:
intermediate_logits = torch.stack(intermediate_logits, dim=1)
intermediate_predicted_corners = torch.stack(intermediate_predicted_corners, dim=1)
initial_reference_points = torch.stack(initial_reference_points, dim=1)
intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
intermediate,
intermediate_logits,
intermediate_reference_points,
intermediate_predicted_corners,
initial_reference_points,
all_hidden_states,
all_self_attns,
all_cross_attentions,
]
if v is not None
)
return DFineDecoderOutput(
last_hidden_state=hidden_states,
intermediate_hidden_states=intermediate,
intermediate_logits=intermediate_logits,
intermediate_reference_points=intermediate_reference_points,
intermediate_predicted_corners=intermediate_predicted_corners,
initial_reference_points=initial_reference_points,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
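# Conceptual sketch (independent of the model) of the logit-space residual
# update used for the initial box refinement above: a predicted delta is added
# to the inverse-sigmoid of the previous reference points and squashed back to
# [0, 1]. `inverse_sigmoid` here is a local stand-in, not the library helper.
import torch

def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

ref_points = torch.tensor([[0.25, 0.40, 0.10, 0.20]])  # cx, cy, w, h in [0, 1]
delta = torch.tensor([[0.30, -0.10, 0.05, 0.00]])      # predicted correction
refined = torch.sigmoid(delta + inverse_sigmoid(ref_points))
print(refined)  # stays inside [0, 1]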
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the RT-DETR encoder-decoder model.
"""
)
|
DFineDecoder
|
python
|
getsentry__sentry
|
tests/sentry/models/test_recentsearch.py
|
{
"start": 270,
"end": 651
}
|
class ____(TestCase):
def test_query_hash(self) -> None:
recent_search = RecentSearch.objects.create(
organization=self.organization, user_id=self.user.id, type=0, query="hello"
)
recent_search = RecentSearch.objects.get(id=recent_search.id)
assert recent_search.query_hash == md5_text(recent_search.query).hexdigest()
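# What the assertion above checks, sketched with the standard library; sentry's
# md5_text helper is assumed to hash the UTF-8 encoded query string.
import hashlib

query = "hello"
print(hashlib.md5(query.encode("utf-8")).hexdigest())
# 5d41402abc4b2a76b9719d911017c592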
|
RecentSearchTest
|
python
|
django-haystack__django-haystack
|
haystack/indexes.py
|
{
"start": 3198,
"end": 13939
}
|
class ____(threading.local, metaclass=DeclarativeMetaclass):
"""
Base class for building indexes.
An example might look like this::
import datetime
from haystack import indexes
from myapp.models import Note
class NoteIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='user')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return Note
def index_queryset(self, using=None):
return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())
"""
def __init__(self):
self.prepared_data = None
content_fields = []
self.field_map = {}
for field_name, field in self.fields.items():
# form field map
self.field_map[field.index_fieldname] = field_name
if field.document is True:
content_fields.append(field_name)
if not len(content_fields) == 1:
raise SearchFieldError(
"The index '%s' must have one (and only one) SearchField with document=True."
% self.__class__.__name__
)
def get_model(self):
"""
Should return the ``Model`` class (not an instance) that the rest of the
``SearchIndex`` should use.
This method is required & you must override it to return the correct class.
"""
raise NotImplementedError(
"You must provide a 'get_model' method for the '%r' index." % self
)
def index_queryset(self, using=None):
"""
Get the default QuerySet to index when doing a full update.
Subclasses can override this method to avoid indexing certain objects.
"""
return self.get_model()._default_manager.all()
def read_queryset(self, using=None):
"""
Get the default QuerySet for read actions.
Subclasses can override this method to work with other managers.
Useful when working with default managers that filter some objects.
"""
return self.index_queryset(using=using)
def build_queryset(self, using=None, start_date=None, end_date=None):
"""
Get the default QuerySet to index when doing an index update.
Subclasses can override this method to take into account related
model modification times.
The default is to use ``SearchIndex.index_queryset`` and filter
based on ``SearchIndex.get_updated_field``
"""
extra_lookup_kwargs = {}
model = self.get_model()
updated_field = self.get_updated_field()
update_field_msg = (
"No updated date field found for '%s' " "- not restricting by age."
) % model.__name__
if start_date:
if updated_field:
extra_lookup_kwargs["%s__gte" % updated_field] = start_date
else:
warnings.warn(update_field_msg)
if end_date:
if updated_field:
extra_lookup_kwargs["%s__lte" % updated_field] = end_date
else:
warnings.warn(update_field_msg)
index_qs = None
if hasattr(self, "get_queryset"):
warnings.warn(
"'SearchIndex.get_queryset' was deprecated in Haystack v2."
" Please rename the method 'index_queryset'."
)
index_qs = self.get_queryset()
else:
index_qs = self.index_queryset(using=using)
if not hasattr(index_qs, "filter"):
raise ImproperlyConfigured(
"The '%r' class must return a 'QuerySet' in the 'index_queryset' method."
% self
)
# `.select_related()` seems like a good idea here but can fail on
# nullable `ForeignKey` as well as what seems like other cases.
return index_qs.filter(**extra_lookup_kwargs).order_by(model._meta.pk.name)
def prepare(self, obj):
"""
Fetches and adds/alters data before indexing.
"""
self.prepared_data = {
ID: get_identifier(obj),
DJANGO_CT: get_model_ct(self.get_model()),
DJANGO_ID: force_str(obj.pk),
}
for field_name, field in self.fields.items():
# Use the possibly overridden name, which will default to the
# variable name of the field.
self.prepared_data[field.index_fieldname] = field.prepare(obj)
if hasattr(self, "prepare_%s" % field_name):
value = getattr(self, "prepare_%s" % field_name)(obj)
self.prepared_data[field.index_fieldname] = value
return self.prepared_data
def full_prepare(self, obj, with_string_facet=True):
self.prepared_data = self.prepare(obj)
for field_name, field in self.fields.items():
# Duplicate data for faceted fields.
if (
not with_string_facet
and field.field_type == "string"
and getattr(field, "facet_for", None) in self.fields
):
continue
if getattr(field, "facet_for", None):
source_field_name = self.fields[field.facet_for].index_fieldname
# If there's data there, leave it alone. Otherwise, populate it
# with whatever the related field has.
if (
self.prepared_data[field_name] is None
and source_field_name in self.prepared_data
):
self.prepared_data[field.index_fieldname] = self.prepared_data[
source_field_name
]
# Remove any fields that lack a value and are ``null=True``.
if field.null is True:
if self.prepared_data[field.index_fieldname] is None:
del self.prepared_data[field.index_fieldname]
return self.prepared_data
def get_content_field(self):
"""Returns the field that supplies the primary document to be indexed."""
for _, field in self.fields.items():
if field.document is True:
return field.index_fieldname
def get_field_weights(self):
"""Returns a dict of fields with weight values"""
weights = {}
for field_name, field in self.fields.items():
if field.boost:
weights[field_name] = field.boost
return weights
def _get_backend(self, using):
warnings.warn(
"SearchIndex._get_backend is deprecated; use SearchIndex.get_backend instead",
DeprecationWarning,
)
return self.get_backend(using)
def get_backend(self, using=None):
if using is None:
try:
using = connection_router.for_write(index=self)[0]
except IndexError:
# There's no backend to handle it. Bomb out.
return None
return connections[using].get_backend()
def update(self, using=None):
"""
Updates the entire index.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
backend = self.get_backend(using)
if backend is not None:
backend.update(self, self.index_queryset(using=using))
def update_object(self, instance, using=None, **kwargs):
"""
Update the index for a single object. Attached to the class's
post-save hook.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
# Check to make sure we want to index this first.
if self.should_update(instance, **kwargs):
backend = self.get_backend(using)
if backend is not None:
backend.update(self, [instance])
def remove_object(self, instance, using=None, **kwargs):
"""
Remove an object from the index. Attached to the class's
post-delete hook.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
backend = self.get_backend(using)
if backend is not None:
backend.remove(instance, **kwargs)
def clear(self, using=None):
"""
Clears the entire index.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
backend = self.get_backend(using)
if backend is not None:
backend.clear(models=[self.get_model()])
def reindex(self, using=None):
"""
Completely clear the index for this model and rebuild it.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
self.clear(using=using)
self.update(using=using)
def get_updated_field(self):
"""
Get the field name that represents the updated date for the model.
If specified, this is used by the reindex command to filter out results
from the QuerySet, enabling you to reindex only recent records. This
method should either return None (reindex everything always) or a
string of the Model's DateField/DateTimeField name.
"""
return None
def should_update(self, instance, **kwargs):
"""
Determine if an object should be updated in the index.
It's useful to override this when an object may save frequently and
cause excessive reindexing. You should check conditions on the instance
and return False if it is not to be indexed.
By default, returns True (always reindex).
"""
return True
def load_all_queryset(self):
"""
Provides the ability to override how objects get loaded in conjunction
with ``SearchQuerySet.load_all``.
This is useful for post-processing the results from the query, enabling
things like adding ``select_related`` or filtering certain data.
By default, returns ``all()`` on the model's default manager.
"""
return self.get_model()._default_manager.all()
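# Sketch of the per-field `prepare_<field>` hook that `prepare()` above looks up
# via hasattr(); NoteIndex/Note follow the hypothetical docstring example and
# assume the Note model has a `user` foreign key.
from haystack import indexes
from myapp.models import Note  # hypothetical model from the docstring example

class NoteIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    author = indexes.CharField(model_attr="user")

    def get_model(self):
        return Note

    def prepare_author(self, obj):
        # Overrides the value pulled from `model_attr` for this one field.
        return obj.user.get_full_name() or obj.user.username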
|
SearchIndex
|
python
|
apache__airflow
|
providers/redis/tests/unit/redis/triggers/test_redis_await_message.py
|
{
"start": 973,
"end": 3041
}
|
class ____:
def test_trigger_serialization(self):
trigger = AwaitMessageTrigger(
channels=["test_channel"],
redis_conn_id="redis_default",
poll_interval=30,
)
assert isinstance(trigger, AwaitMessageTrigger)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.redis.triggers.redis_await_message.AwaitMessageTrigger"
assert kwargs == dict(
channels=["test_channel"],
redis_conn_id="redis_default",
poll_interval=30,
)
@patch("airflow.providers.redis.hooks.redis.RedisHook.get_conn")
@pytest.mark.asyncio
async def test_trigger_run_succeed(self, mock_redis_conn):
trigger = AwaitMessageTrigger(
channels="test",
redis_conn_id="redis_default",
poll_interval=0.0001,
)
mock_redis_conn().pubsub().get_message.return_value = {
"type": "message",
"channel": "test",
"data": "d1",
}
trigger_gen = trigger.run()
task = asyncio.create_task(trigger_gen.__anext__())
event = await task
assert task.done() is True
assert event.payload["data"] == "d1"
assert event.payload["channel"] == "test"
asyncio.get_event_loop().stop()
@patch("airflow.providers.redis.hooks.redis.RedisHook.get_conn")
@pytest.mark.asyncio
async def test_trigger_run_fail(self, mock_redis_conn):
trigger = AwaitMessageTrigger(
channels="test",
redis_conn_id="redis_default",
poll_interval=0.01,
)
mock_redis_conn().pubsub().get_message.return_value = {
"type": "subscribe",
"channel": "test",
"data": "d1",
}
trigger_gen = trigger.run()
task = asyncio.create_task(trigger_gen.__anext__())
await asyncio.sleep(1.0)
assert task.done() is False
task.cancel()
asyncio.get_event_loop().stop()
|
TestAwaitMessageTrigger
|
python
|
python-openxml__python-docx
|
src/docx/oxml/xmlchemy.py
|
{
"start": 3677,
"end": 5103
}
|
class ____:
"""Base class for OptionalAttribute and RequiredAttribute.
Provides common methods.
"""
def __init__(self, attr_name: str, simple_type: Type[BaseXmlEnum] | Type[BaseSimpleType]):
super(BaseAttribute, self).__init__()
self._attr_name = attr_name
self._simple_type = simple_type
def populate_class_members(self, element_cls: MetaOxmlElement, prop_name: str) -> None:
"""Add the appropriate methods to `element_cls`."""
self._element_cls = element_cls
self._prop_name = prop_name
self._add_attr_property()
def _add_attr_property(self):
"""Add a read/write `.{prop_name}` property to the element class.
The property returns the interpreted value of this attribute on access and
changes the attribute value to its ST_* counterpart on assignment.
"""
property_ = property(self._getter, self._setter, None)
# -- assign unconditionally to overwrite element name definition --
setattr(self._element_cls, self._prop_name, property_)
@property
def _clark_name(self):
if ":" in self._attr_name:
return qn(self._attr_name)
return self._attr_name
@property
def _getter(self) -> Callable[[BaseOxmlElement], Any | None]: ...
@property
def _setter(
self,
) -> Callable[[BaseOxmlElement, Any | None], None]: ...
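# Small illustration of the `_clark_name` branch above: a namespace-prefixed
# attribute name is converted to Clark notation via `qn`, while a plain name
# would be returned unchanged.
from docx.oxml.ns import qn

print(qn("w:val"))
# '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val'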
|
BaseAttribute
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/lib/bedrock/_beta_messages.py
|
{
"start": 2287,
"end": 2510
}
|
class ____:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.create = _legacy_response.to_raw_response_wrapper(
messages.create,
)
|
MessagesWithRawResponse
|
python
|
python-pillow__Pillow
|
src/PIL/ImageStat.py
|
{
"start": 709,
"end": 5495
}
|
class ____:
def __init__(
self, image_or_list: Image.Image | list[int], mask: Image.Image | None = None
) -> None:
"""
Calculate statistics for the given image. If a mask is included,
only the regions covered by that mask are included in the
statistics. You can also pass in a previously calculated histogram.
:param image_or_list: A PIL image, or a precalculated histogram.
.. note::
For a PIL image, calculations rely on the
:py:meth:`~PIL.Image.Image.histogram` method. The pixel counts are
grouped into 256 bins, even if the image has more than 8 bits per
channel. So ``I`` and ``F`` mode images have a maximum ``mean``,
``median`` and ``rms`` of 255, and cannot have an ``extrema`` maximum
of more than 255.
:param mask: An optional mask.
"""
if isinstance(image_or_list, Image.Image):
self.h = image_or_list.histogram(mask)
elif isinstance(image_or_list, list):
self.h = image_or_list
else:
msg = "first argument must be image or list" # type: ignore[unreachable]
raise TypeError(msg)
self.bands = list(range(len(self.h) // 256))
@cached_property
def extrema(self) -> list[tuple[int, int]]:
"""
Min/max values for each band in the image.
.. note::
This relies on the :py:meth:`~PIL.Image.Image.histogram` method, and
simply returns the low and high bins used. This is correct for
images with 8 bits per channel, but fails for other modes such as
``I`` or ``F``. Instead, use :py:meth:`~PIL.Image.Image.getextrema` to
return per-band extrema for the image. This is more correct and
efficient because, for non-8-bit modes, the histogram method uses
:py:meth:`~PIL.Image.Image.getextrema` to determine the bins used.
"""
def minmax(histogram: list[int]) -> tuple[int, int]:
res_min, res_max = 255, 0
for i in range(256):
if histogram[i]:
res_min = i
break
for i in range(255, -1, -1):
if histogram[i]:
res_max = i
break
return res_min, res_max
return [minmax(self.h[i:]) for i in range(0, len(self.h), 256)]
@cached_property
def count(self) -> list[int]:
"""Total number of pixels for each band in the image."""
return [sum(self.h[i : i + 256]) for i in range(0, len(self.h), 256)]
@cached_property
def sum(self) -> list[float]:
"""Sum of all pixels for each band in the image."""
v = []
for i in range(0, len(self.h), 256):
layer_sum = 0.0
for j in range(256):
layer_sum += j * self.h[i + j]
v.append(layer_sum)
return v
@cached_property
def sum2(self) -> list[float]:
"""Squared sum of all pixels for each band in the image."""
v = []
for i in range(0, len(self.h), 256):
sum2 = 0.0
for j in range(256):
sum2 += (j**2) * float(self.h[i + j])
v.append(sum2)
return v
@cached_property
def mean(self) -> list[float]:
"""Average (arithmetic mean) pixel level for each band in the image."""
return [self.sum[i] / self.count[i] if self.count[i] else 0 for i in self.bands]
@cached_property
def median(self) -> list[int]:
"""Median pixel level for each band in the image."""
v = []
for i in self.bands:
s = 0
half = self.count[i] // 2
b = i * 256
for j in range(256):
s = s + self.h[b + j]
if s > half:
break
v.append(j)
return v
@cached_property
def rms(self) -> list[float]:
"""RMS (root-mean-square) for each band in the image."""
return [
math.sqrt(self.sum2[i] / self.count[i]) if self.count[i] else 0
for i in self.bands
]
@cached_property
def var(self) -> list[float]:
"""Variance for each band in the image."""
return [
(
(self.sum2[i] - (self.sum[i] ** 2.0) / self.count[i]) / self.count[i]
if self.count[i]
else 0
)
for i in self.bands
]
@cached_property
def stddev(self) -> list[float]:
"""Standard deviation for each band in the image."""
return [math.sqrt(self.var[i]) for i in self.bands]
Global = Stat # compatibility
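# A minimal usage sketch, assuming Pillow is installed; a solid grey test image
# makes the expected statistics easy to verify by hand.
from PIL import Image
from PIL.ImageStat import Stat

im = Image.new("L", (8, 8), color=128)
stat = Stat(im)
print(stat.mean, stat.stddev, stat.extrema)  # [128.0] [0.0] [(128, 128)]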
|
Stat
|
python
|
justquick__django-activity-stream
|
actstream/templatetags/activity_tags.py
|
{
"start": 286,
"end": 1133
}
|
class ____(Node):
def __init__(self, actor, actor_only=True, flag=''):
self.actor = Variable(actor)
self.actor_only = actor_only
self.flag = flag
def render(self, context):
actor_instance = self.actor.resolve(context)
content_type = ContentType.objects.get_for_model(actor_instance).pk
kwargs = {
'content_type_id': content_type,
'object_id': actor_instance.pk
}
if self.flag:
kwargs['flag'] = self.flag
if Follow.objects.is_following(context.get('user'), actor_instance, flag=self.flag):
return reverse('actstream_unfollow', kwargs=kwargs)
if self.actor_only:
return reverse('actstream_follow', kwargs=kwargs)
return reverse('actstream_follow_all', kwargs=kwargs)
|
DisplayActivityFollowUrl
|
python
|
apache__airflow
|
airflow-core/tests/unit/serialization/test_serde.py
|
{
"start": 4787,
"end": 5241
}
|
class ____:
__version__: ClassVar[int] = 1
def __init__(self, x):
self.x = x
def serialize(self) -> dict:
return dict({"x": self.x})
@staticmethod
def deserialize(data: dict, version: int):
if version != 1:
raise TypeError("version != 1")
return Z(data["x"])
def __eq__(self, other):
return self.x == other.x
def __hash__(self):
return hash(self.x)
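# Roundtrip sketch for the helper class above: serialize() feeds deserialize(),
# and the version guard rejects anything other than 1.
z = Z(10)
assert Z.deserialize(z.serialize(), version=1) == z
try:
    Z.deserialize(z.serialize(), version=2)
except TypeError as exc:
    print(exc)  # version != 1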
@attr.define
|
Z
|
python
|
django__django
|
tests/auth_tests/test_context_processors.py
|
{
"start": 620,
"end": 2087
}
|
class ____(SimpleTestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject:
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_repr(self):
perms = PermWrapper(MockUser())
self.assertEqual(repr(perms), "PermWrapper(MockUser())")
def test_permwrapper_in(self):
"""
'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertIn("mockapp", perms)
self.assertNotIn("nonexistent", perms)
self.assertIn("mockapp.someperm", perms)
self.assertNotIn("mockapp.nonexistent", perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), "mockapp")
with self.assertRaises(TypeError):
self.EQLimiterObject() in pldict
def test_iter(self):
with self.assertRaisesMessage(TypeError, "PermWrapper is not iterable."):
iter(PermWrapper(MockUser()))
@override_settings(ROOT_URLCONF="auth_tests.urls", TEMPLATES=AUTH_TEMPLATES)
|
PermWrapperTests
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/legacy_rnn/rnn_cell_wrapper_impl.py
|
{
"start": 16940,
"end": 20660
}
|
class ____(object):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device, **kwargs):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
**kwargs: dict of keyword arguments for base layer.
"""
super(DeviceWrapperBase, self).__init__(cell, **kwargs)
self._device = device
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope_v2(type(self).__name__ + "ZeroState"):
with ops.device(self._device):
return self.cell.zero_state(batch_size, dtype)
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Run the cell on specified device."""
with ops.device(self._device):
return cell_call_fn(inputs, state, **kwargs)
def get_config(self):
config = {"device": self._device}
base_config = super(DeviceWrapperBase, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(function):
"""Serialize the function for get_config()."""
if isinstance(function, python_types.LambdaType):
output = generic_utils.func_dump(function)
output_type = "lambda"
module = function.__module__
elif callable(function):
output = function.__name__
output_type = "function"
module = function.__module__
else:
raise ValueError("Unrecognized function type for input: {}".format(
type(function)))
return output, output_type, module
def _parse_config_to_function(config, custom_objects, func_attr_name,
func_type_attr_name, module_attr_name):
"""Reconstruct the function from the config."""
globs = globals()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn("{} is not loaded, but a layer uses it. "
"It may cause errors.".format(module), UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == "function":
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name="function in wrapper")
elif function_type == "lambda":
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
else:
raise TypeError("Unknown function type:", function_type)
return function
def _default_dropout_state_filter_visitor(substate):
from tensorflow.python.keras.layers.legacy_rnn.rnn_cell_impl import LSTMStateTuple # pylint: disable=g-import-not-at-top
if isinstance(substate, LSTMStateTuple):
# Do not perform dropout on the memory state.
return LSTMStateTuple(c=False, h=True)
elif isinstance(substate, tensor_array_ops.TensorArray):
return False
return True
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure_up_to(shallow_structure, enumerated_fn, *args,
**kwargs)
|
DeviceWrapperBase
|
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 134956,
"end": 135644
}
|
class ____(Operation):
def call(self, x):
return backend.numpy.log2(x)
def compute_output_spec(self, x):
dtype = (
backend.floatx()
if backend.standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.log2", "keras.ops.numpy.log2"])
def log2(x):
"""Base-2 logarithm of `x`, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise base-2 logarithm of `x`.
"""
if any_symbolic_tensors((x,)):
return Log2().symbolic_call(x)
return backend.numpy.log2(x)
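# A minimal usage sketch, assuming a Keras 3 backend is configured (the default
# TensorFlow backend is sufficient).
import numpy as np
from keras import ops

print(ops.log2(np.array([1.0, 2.0, 8.0])))  # ~[0., 1., 3.]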
|
Log2
|
python
|
huggingface__transformers
|
src/transformers/models/mobilevitv2/configuration_mobilevitv2.py
|
{
"start": 788,
"end": 6348
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a
MobileViTV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViTV2
[apple/mobilevitv2-1.0](https://huggingface.co/apple/mobilevitv2-1.0) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
expand_ratio (`float`, *optional*, defaults to 2.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViTV2 layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
aspp_out_channels (`int`, *optional*, defaults to 512):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
n_attn_blocks (`list[int]`, *optional*, defaults to `[2, 4, 3]`):
The number of attention blocks in each MobileViTV2Layer
base_attn_unit_dims (`list[int]`, *optional*, defaults to `[128, 192, 256]`):
The base multiplier for dimensions of attention blocks in each MobileViTV2Layer
width_multiplier (`float`, *optional*, defaults to 1.0):
The width multiplier for MobileViTV2.
ffn_multiplier (`int`, *optional*, defaults to 2):
The FFN multiplier for MobileViTV2.
attn_dropout (`float`, *optional*, defaults to 0.0):
The dropout in the attention layer.
ffn_dropout (`float`, *optional*, defaults to 0.0):
The dropout between FFN layers.
Example:
```python
>>> from transformers import MobileViTV2Config, MobileViTV2Model
>>> # Initializing a mobilevitv2-small style configuration
>>> configuration = MobileViTV2Config()
>>> # Initializing a model from the mobilevitv2-small style configuration
>>> model = MobileViTV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mobilevitv2"
def __init__(
self,
num_channels=3,
image_size=256,
patch_size=2,
expand_ratio=2.0,
hidden_act="swish",
conv_kernel_size=3,
output_stride=32,
classifier_dropout_prob=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
aspp_out_channels=512,
atrous_rates=[6, 12, 18],
aspp_dropout_prob=0.1,
semantic_loss_ignore_index=255,
n_attn_blocks=[2, 4, 3],
base_attn_unit_dims=[128, 192, 256],
width_multiplier=1.0,
ffn_multiplier=2,
attn_dropout=0.0,
ffn_dropout=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.expand_ratio = expand_ratio
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.n_attn_blocks = n_attn_blocks
self.base_attn_unit_dims = base_attn_unit_dims
self.width_multiplier = width_multiplier
self.ffn_multiplier = ffn_multiplier
self.ffn_dropout = ffn_dropout
self.attn_dropout = attn_dropout
self.classifier_dropout_prob = classifier_dropout_prob
# decode head attributes for semantic segmentation
self.aspp_out_channels = aspp_out_channels
self.atrous_rates = atrous_rates
self.aspp_dropout_prob = aspp_dropout_prob
self.semantic_loss_ignore_index = semantic_loss_ignore_index
__all__ = ["MobileViTV2Config"]
|
MobileViTV2Config
|
python
|
django__django
|
tests/apps/apps.py
|
{
"start": 612,
"end": 723
}
|
class ____(AppConfig):
name = "apps"
default_auto_field = "django.db.models.BigAutoField"
|
ModelPKAppsConfig
|
python
|
optuna__optuna
|
optuna/storages/_rdb/storage.py
|
{
"start": 3588,
"end": 45194
}
|
class ____(BaseStorage, BaseHeartbeat):
"""Storage class for RDB backend.
Note that library users can instantiate this class, but the attributes
provided by this class are not supposed to be directly accessed by them.
Example:
Create an :class:`~optuna.storages.RDBStorage` instance with customized
``pool_size`` and ``timeout`` settings.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
return x**2
storage = optuna.storages.RDBStorage(
url="sqlite:///:memory:",
engine_kwargs={"pool_size": 20, "connect_args": {"timeout": 10}},
)
study = optuna.create_study(storage=storage)
study.optimize(objective, n_trials=10)
Args:
url:
URL of the storage.
engine_kwargs:
A dictionary of keyword arguments that is passed to
`sqlalchemy.engine.create_engine`_ function.
skip_compatibility_check:
Flag to skip schema compatibility check if set to :obj:`True`.
heartbeat_interval:
Interval to record the heartbeat. It is recorded every ``interval`` seconds.
``heartbeat_interval`` must be :obj:`None` or a positive integer.
.. note::
Heartbeat mechanism is experimental. API would change in the future.
.. note::
The heartbeat is supposed to be used with :meth:`~optuna.study.Study.optimize`.
If you use :meth:`~optuna.study.Study.ask` and
:meth:`~optuna.study.Study.tell` instead, it will not work.
grace_period:
Grace period before a running trial is failed from the last heartbeat.
``grace_period`` must be :obj:`None` or a positive integer.
If it is :obj:`None`, the grace period will be `2 * heartbeat_interval`.
failed_trial_callback:
A callback function that is invoked after failing each stale trial.
The function must accept two parameters with the following types in this order:
:class:`~optuna.study.Study` and :class:`~optuna.trial.FrozenTrial`.
.. note::
The procedure to fail existing stale trials is called just before asking the
study for a new trial.
skip_table_creation:
Flag to skip table creation if set to :obj:`True`.
.. _sqlalchemy.engine.create_engine:
https://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine
.. note::
If you use MySQL, `pool_pre_ping`_ will be set to :obj:`True` by default to prevent
connection timeout. You can turn it off with ``engine_kwargs['pool_pre_ping']=False``, but
it is recommended to keep the setting if execution time of your objective function is
longer than the `wait_timeout` of your MySQL configuration.
.. _pool_pre_ping:
https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.
pool_pre_ping
.. note::
We would never recommend SQLite3 for parallel optimization.
Please see the FAQ :ref:`sqlite_concurrency` for details.
.. note::
Mainly in a cluster environment, running trials are often killed unexpectedly.
If you want to detect a failure of trials, please use the heartbeat
mechanism. Set ``heartbeat_interval``, ``grace_period``, and ``failed_trial_callback``
appropriately according to your use case. For more details, please refer to the
:ref:`tutorial <heartbeat_monitoring>` and `Example page
<https://github.com/optuna/optuna-examples/blob/main/pytorch/pytorch_checkpoint.py>`__.
.. seealso::
You can use :class:`~optuna.storages.RetryFailedTrialCallback` to automatically retry
failed trials detected by heartbeat.
"""
def __init__(
self,
url: str,
engine_kwargs: dict[str, Any] | None = None,
skip_compatibility_check: bool = False,
*,
heartbeat_interval: int | None = None,
grace_period: int | None = None,
failed_trial_callback: Callable[["optuna.study.Study", FrozenTrial], None] | None = None,
skip_table_creation: bool = False,
) -> None:
self.engine_kwargs = engine_kwargs or {}
self.url = self._fill_storage_url_template(url)
self.skip_compatibility_check = skip_compatibility_check
if heartbeat_interval is not None:
if heartbeat_interval <= 0:
raise ValueError("The value of `heartbeat_interval` should be a positive integer.")
else:
warn_experimental_argument("heartbeat_interval")
if grace_period is not None and grace_period <= 0:
raise ValueError("The value of `grace_period` should be a positive integer.")
self.heartbeat_interval = heartbeat_interval
self.grace_period = grace_period
self.failed_trial_callback = failed_trial_callback
self._set_default_engine_kwargs_for_mysql(url, self.engine_kwargs)
try:
self.engine = sqlalchemy.engine.create_engine(self.url, **self.engine_kwargs)
except ImportError as e:
raise ImportError(
"Failed to import DB access module for the specified storage URL. "
"Please install appropriate one."
) from e
self.scoped_session = sqlalchemy_orm.scoped_session(
sqlalchemy_orm.sessionmaker(bind=self.engine)
)
if not skip_table_creation:
models.BaseModel.metadata.create_all(self.engine)
self._version_manager = _VersionManager(self.url, self.engine, self.scoped_session)
if not skip_compatibility_check:
self._version_manager.check_table_schema_compatibility()
def __getstate__(self) -> dict[Any, Any]:
state = self.__dict__.copy()
del state["scoped_session"]
del state["engine"]
del state["_version_manager"]
return state
def __setstate__(self, state: dict[Any, Any]) -> None:
self.__dict__.update(state)
try:
self.engine = sqlalchemy.engine.create_engine(self.url, **self.engine_kwargs)
except ImportError as e:
raise ImportError(
"Failed to import DB access module for the specified storage URL. "
"Please install appropriate one."
) from e
self.scoped_session = sqlalchemy_orm.scoped_session(
sqlalchemy_orm.sessionmaker(bind=self.engine)
)
models.BaseModel.metadata.create_all(self.engine)
self._version_manager = _VersionManager(self.url, self.engine, self.scoped_session)
if not self.skip_compatibility_check:
self._version_manager.check_table_schema_compatibility()
def create_new_study(
self, directions: Sequence[StudyDirection], study_name: str | None = None
) -> int:
try:
with _create_scoped_session(self.scoped_session) as session:
if study_name is None:
study_name = self._create_unique_study_name(session)
direction_models = [
models.StudyDirectionModel(objective=objective, direction=d)
for objective, d in enumerate(list(directions))
]
session.add(models.StudyModel(study_name=study_name, directions=direction_models))
except sqlalchemy_exc.IntegrityError:
raise optuna.exceptions.DuplicatedStudyError(
f"Another study with name '{study_name}' already exists. "
"Please specify a different name, or reuse the existing one "
"by setting `load_if_exists` (for Python API) or "
"`--skip-if-exists` flag (for CLI)."
)
_logger.info(f"A new study created in RDB with name: {study_name}")
return self.get_study_id_from_name(study_name)
def delete_study(self, study_id: int) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
study = models.StudyModel.find_or_raise_by_id(study_id, session)
session.delete(study)
@staticmethod
def _create_unique_study_name(session: "sqlalchemy_orm.Session") -> str:
while True:
study_uuid = str(uuid.uuid4())
study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid
study = models.StudyModel.find_by_name(study_name, session)
if study is None:
break
return study_name
def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
study = models.StudyModel.find_or_raise_by_id(study_id, session)
attribute = models.StudyUserAttributeModel.find_by_study_and_key(study, key, session)
if attribute is None:
attribute = models.StudyUserAttributeModel(
study_id=study_id, key=key, value_json=json.dumps(value)
)
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
study = models.StudyModel.find_or_raise_by_id(study_id, session)
attribute = models.StudySystemAttributeModel.find_by_study_and_key(study, key, session)
if attribute is None:
attribute = models.StudySystemAttributeModel(
study_id=study_id, key=key, value_json=json.dumps(value)
)
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
def get_study_id_from_name(self, study_name: str) -> int:
with _create_scoped_session(self.scoped_session) as session:
study = models.StudyModel.find_or_raise_by_name(study_name, session)
study_id = study.study_id
return study_id
def get_study_name_from_id(self, study_id: int) -> str:
with _create_scoped_session(self.scoped_session) as session:
study = models.StudyModel.find_or_raise_by_id(study_id, session)
study_name = study.study_name
return study_name
def get_study_directions(self, study_id: int) -> list[StudyDirection]:
with _create_scoped_session(self.scoped_session) as session:
study = models.StudyModel.find_or_raise_by_id(study_id, session)
directions = [d.direction for d in study.directions]
return directions
def get_study_user_attrs(self, study_id: int) -> dict[str, Any]:
with _create_scoped_session(self.scoped_session) as session:
# Ensure that the study exists.
models.StudyModel.find_or_raise_by_id(study_id, session)
attributes = models.StudyUserAttributeModel.where_study_id(study_id, session)
user_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes}
return user_attrs
def get_study_system_attrs(self, study_id: int) -> dict[str, Any]:
with _create_scoped_session(self.scoped_session) as session:
# Ensure that the study exists.
models.StudyModel.find_or_raise_by_id(study_id, session)
attributes = models.StudySystemAttributeModel.where_study_id(study_id, session)
system_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes}
return system_attrs
def get_trial_user_attrs(self, trial_id: int) -> dict[str, Any]:
with _create_scoped_session(self.scoped_session) as session:
# Ensure trial exists.
models.TrialModel.find_or_raise_by_id(trial_id, session)
attributes = models.TrialUserAttributeModel.where_trial_id(trial_id, session)
user_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes}
return user_attrs
def get_trial_system_attrs(self, trial_id: int) -> dict[str, Any]:
with _create_scoped_session(self.scoped_session) as session:
# Ensure trial exists.
models.TrialModel.find_or_raise_by_id(trial_id, session)
attributes = models.TrialSystemAttributeModel.where_trial_id(trial_id, session)
system_attrs = {attr.key: json.loads(attr.value_json) for attr in attributes}
return system_attrs
def get_all_studies(self) -> list[FrozenStudy]:
with _create_scoped_session(self.scoped_session) as session:
studies = (
session.query(
models.StudyModel.study_id,
models.StudyModel.study_name,
)
.order_by(models.StudyModel.study_id)
.all()
)
_directions = defaultdict(list)
for direction_model in session.query(models.StudyDirectionModel).all():
_directions[direction_model.study_id].append(direction_model.direction)
_user_attrs = defaultdict(list)
for attribute_model in session.query(models.StudyUserAttributeModel).all():
_user_attrs[attribute_model.study_id].append(attribute_model)
_system_attrs = defaultdict(list)
for attribute_model in session.query(models.StudySystemAttributeModel).all():
_system_attrs[attribute_model.study_id].append(attribute_model)
frozen_studies = []
for study in studies:
directions = _directions[study.study_id]
user_attrs = _user_attrs.get(study.study_id, [])
system_attrs = _system_attrs.get(study.study_id, [])
frozen_studies.append(
FrozenStudy(
study_name=study.study_name,
direction=None,
directions=directions,
user_attrs={i.key: json.loads(i.value_json) for i in user_attrs},
system_attrs={i.key: json.loads(i.value_json) for i in system_attrs},
study_id=study.study_id,
)
)
return frozen_studies
def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int:
return self._create_new_trial(study_id, template_trial)._trial_id
def _create_new_trial(
self, study_id: int, template_trial: FrozenTrial | None = None
) -> FrozenTrial:
"""Create a new trial and returns a :class:`~optuna.trial.FrozenTrial`.
Args:
study_id:
Study id.
template_trial:
A :class:`~optuna.trial.FrozenTrial` with default values for trial attributes.
Returns:
A :class:`~optuna.trial.FrozenTrial` instance.
"""
def _create_frozen_trial(
trial: "models.TrialModel", template_trial: FrozenTrial | None
) -> FrozenTrial:
if template_trial:
frozen = copy.deepcopy(template_trial)
frozen.number = trial.number
frozen.datetime_start = trial.datetime_start
frozen._trial_id = trial.trial_id
return frozen
return FrozenTrial(
number=trial.number,
state=trial.state,
value=None,
values=None,
datetime_start=trial.datetime_start,
datetime_complete=None,
params={},
distributions={},
user_attrs={},
system_attrs={},
intermediate_values={},
trial_id=trial.trial_id,
)
# Retry maximum five times. Deadlocks may occur in distributed environments.
MAX_RETRIES = 5
for n_retries in range(1, MAX_RETRIES + 1):
try:
with _create_scoped_session(self.scoped_session) as session:
# This lock is necessary because the trial creation is not an atomic operation
# and the calculation of trial.number is prone to race conditions.
models.StudyModel.find_or_raise_by_id(study_id, session, for_update=True)
trial = self._get_prepared_new_trial(study_id, template_trial, session)
return _create_frozen_trial(trial, template_trial)
# sqlalchemy_exc.OperationalError is converted to ``StorageInternalError``.
except optuna.exceptions.StorageInternalError as e:
# ``OperationalError`` happens either by (1) invalid inputs, e.g., too long string,
# or (2) timeout error, which relates to deadlock. Although Error (1) is not
# intended to be caught here, it must be fixed to use RDBStorage anyways.
if n_retries == MAX_RETRIES:
raise e
# Optuna defers to the DB administrator to reduce DB server congestion, hence
# Optuna simply uses non-exponential backoff here for retries caused by deadlock.
time.sleep(random.random() * 2.0)
assert False, "Should not be reached."
def _get_prepared_new_trial(
self,
study_id: int,
template_trial: FrozenTrial | None,
session: "sqlalchemy_orm.Session",
) -> "models.TrialModel":
if template_trial is None:
trial = models.TrialModel(
study_id=study_id,
number=None,
state=TrialState.RUNNING,
datetime_start=datetime.now(),
)
else:
# Because only `RUNNING` trials can be updated,
# we temporarily set the state of the new trial to `RUNNING`.
# After all fields of the trial have been updated,
# the state is set to `template_trial.state`.
temp_state = TrialState.RUNNING
trial = models.TrialModel(
study_id=study_id,
number=None,
state=temp_state,
datetime_start=template_trial.datetime_start,
datetime_complete=template_trial.datetime_complete,
)
session.add(trial)
# Flush the session cache to reflect the above addition operation to
# the current RDB transaction.
#
# Without flushing, the following operations (e.g, `_set_trial_param_without_commit`)
# will fail because the target trial doesn't exist in the storage yet.
session.flush()
if template_trial is not None:
if template_trial.values is not None and len(template_trial.values) > 1:
for objective, value in enumerate(template_trial.values):
self._set_trial_value_without_commit(session, trial, objective, value)
elif template_trial.value is not None:
self._set_trial_value_without_commit(session, trial, 0, template_trial.value)
for param_name, param_value in template_trial.params.items():
distribution = template_trial.distributions[param_name]
param_value_in_internal_repr = distribution.to_internal_repr(param_value)
self._set_trial_param_without_commit(
session, trial.trial_id, param_name, param_value_in_internal_repr, distribution
)
for key, value in template_trial.user_attrs.items():
self._set_trial_attr_without_commit(
session, models.TrialUserAttributeModel, trial.trial_id, key, value
)
for key, value in template_trial.system_attrs.items():
self._set_trial_attr_without_commit(
session, models.TrialSystemAttributeModel, trial.trial_id, key, value
)
for step, intermediate_value in template_trial.intermediate_values.items():
self._set_trial_intermediate_value_without_commit(
session, trial.trial_id, step, intermediate_value
)
trial.state = template_trial.state
trial.number = trial.count_past_trials(session)
session.add(trial)
return trial
def set_trial_param(
self,
trial_id: int,
param_name: str,
param_value_internal: float,
distribution: distributions.BaseDistribution,
) -> None:
self._set_trial_param(trial_id, param_name, param_value_internal, distribution, None)
def _set_trial_param(
self,
trial_id: int,
param_name: str,
param_value_internal: float,
distribution: distributions.BaseDistribution,
previous_distribution: distributions.BaseDistribution | None,
) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
self._set_trial_param_without_commit(
session,
trial_id,
param_name,
param_value_internal,
distribution,
previous_distribution,
)
def _set_trial_param_without_commit(
self,
session: "sqlalchemy_orm.Session",
trial_id: int,
param_name: str,
param_value_internal: float,
distribution: distributions.BaseDistribution,
previous_distribution: distributions.BaseDistribution | None = None,
) -> None:
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
if previous_distribution is None:
previous_record = (
session.query(models.TrialParamModel)
.join(models.TrialModel)
.filter(models.TrialModel.study_id == trial.study_id)
.filter(models.TrialParamModel.param_name == param_name)
.first()
)
if previous_record is not None:
previous_distribution = distributions.json_to_distribution(
previous_record.distribution_json
)
if previous_distribution is not None:
distributions.check_distribution_compatibility(previous_distribution, distribution)
trial_param = models.TrialParamModel(
trial_id=trial_id,
param_name=param_name,
param_value=param_value_internal,
distribution_json=distributions.distribution_to_json(distribution),
)
session.add(trial_param)
def get_trial_param(self, trial_id: int, param_name: str) -> float:
with _create_scoped_session(self.scoped_session) as session:
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
trial_param = models.TrialParamModel.find_or_raise_by_trial_and_param_name(
trial, param_name, session
)
param_value = trial_param.param_value
return param_value
def set_trial_state_values(
self, trial_id: int, state: TrialState, values: Sequence[float] | None = None
) -> bool:
try:
with _create_scoped_session(self.scoped_session) as session:
trial = models.TrialModel.find_or_raise_by_id(trial_id, session, for_update=True)
self.check_trial_is_updatable(trial_id, trial.state)
if values is not None:
for objective, v in enumerate(values):
self._set_trial_value_without_commit(session, trial, objective, v)
if state == TrialState.RUNNING and trial.state != TrialState.WAITING:
return False
trial.state = state
if state == TrialState.RUNNING:
trial.datetime_start = datetime.now()
if state.is_finished():
trial.datetime_complete = datetime.now()
except sqlalchemy_exc.IntegrityError:
return False
return True
def _set_trial_value_without_commit(
self,
session: "sqlalchemy_orm.Session",
trial: models.TrialModel,
objective: int,
value: float,
) -> None:
self.check_trial_is_updatable(trial.trial_id, trial.state)
stored_value, value_type = models.TrialValueModel.value_to_stored_repr(value)
trial_value = models.TrialValueModel.find_by_trial_and_objective(trial, objective, session)
if trial_value is None:
trial_value = models.TrialValueModel(
trial_id=trial.trial_id,
objective=objective,
value=stored_value,
value_type=value_type,
)
session.add(trial_value)
else:
trial_value.value = stored_value
trial_value.value_type = value_type
def set_trial_intermediate_value(
self, trial_id: int, step: int, intermediate_value: float
) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
self._set_trial_intermediate_value_without_commit(
session, trial_id, step, intermediate_value
)
def _set_trial_intermediate_value_without_commit(
self,
session: "sqlalchemy_orm.Session",
trial_id: int,
step: int,
intermediate_value: float,
) -> None:
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
(
stored_value,
value_type,
) = models.TrialIntermediateValueModel.intermediate_value_to_stored_repr(
intermediate_value
)
trial_intermediate_value = models.TrialIntermediateValueModel.find_by_trial_and_step(
trial, step, session
)
if trial_intermediate_value is None:
trial_intermediate_value = models.TrialIntermediateValueModel(
trial_id=trial_id,
step=step,
intermediate_value=stored_value,
intermediate_value_type=value_type,
)
session.add(trial_intermediate_value)
else:
trial_intermediate_value.intermediate_value = stored_value
trial_intermediate_value.intermediate_value_type = value_type
def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
self._set_trial_attr_without_commit(
session,
models.TrialUserAttributeModel,
trial_id,
key,
value,
)
def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
self._set_trial_attr_without_commit(
session,
models.TrialSystemAttributeModel,
trial_id,
key,
value,
)
def _set_trial_attr_without_commit(
self,
session: "sqlalchemy_orm.Session",
model_cls: type[models.TrialUserAttributeModel | models.TrialSystemAttributeModel],
trial_id: int,
key: str,
value: Any,
) -> None:
trial = models.TrialModel.find_or_raise_by_id(trial_id, session)
self.check_trial_is_updatable(trial_id, trial.state)
if self.engine.name == "mysql":
mysql_insert_stmt = sqlalchemy_dialects_mysql.insert(model_cls).values(
trial_id=trial_id, key=key, value_json=json.dumps(value)
)
mysql_upsert_stmt = mysql_insert_stmt.on_duplicate_key_update(
value_json=mysql_insert_stmt.inserted.value_json
)
session.execute(mysql_upsert_stmt)
elif self.engine.name == "sqlite" and sqlite3.sqlite_version_info >= (3, 24, 0):
sqlite_insert_stmt = sqlalchemy_dialects_sqlite.insert(model_cls).values(
trial_id=trial_id, key=key, value_json=json.dumps(value)
)
sqlite_upsert_stmt = sqlite_insert_stmt.on_conflict_do_update(
index_elements=[model_cls.trial_id, model_cls.key],
set_=dict(value_json=sqlite_insert_stmt.excluded.value_json),
)
session.execute(sqlite_upsert_stmt)
elif self.engine.name == "postgresql":
pg_insert_stmt = sqlalchemy_dialects_postgresql.insert(model_cls).values(
trial_id=trial_id, key=key, value_json=json.dumps(value)
)
pg_upsert_stmt = pg_insert_stmt.on_conflict_do_update(
index_elements=[model_cls.trial_id, model_cls.key],
set_=dict(value_json=pg_insert_stmt.excluded.value_json),
)
session.execute(pg_upsert_stmt)
else:
# TODO(porink0424): Add support for other databases.
attribute = model_cls.find_by_trial_and_key(trial, key, session)
if attribute is None:
attribute = model_cls(trial_id=trial_id, key=key, value_json=json.dumps(value))
session.add(attribute)
else:
attribute.value_json = json.dumps(value)
def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int:
with _create_scoped_session(self.scoped_session) as session:
trial_id = (
session.query(models.TrialModel.trial_id)
.filter(
models.TrialModel.number == trial_number,
models.TrialModel.study_id == study_id,
)
.one_or_none()
)
if trial_id is None:
raise KeyError(
f"No trial with trial number {trial_number} "
f"exists in study with study_id {study_id}."
)
return trial_id[0]
def get_trial(self, trial_id: int) -> FrozenTrial:
with _create_scoped_session(self.scoped_session) as session:
trial_model = models.TrialModel.find_or_raise_by_id(trial_id, session)
frozen_trial = self._build_frozen_trial_from_trial_model(trial_model)
return frozen_trial
def get_all_trials(
self,
study_id: int,
deepcopy: bool = True,
states: Container[TrialState] | None = None,
) -> list[FrozenTrial]:
trials = self._get_trials(study_id, states, set(), -1)
return copy.deepcopy(trials) if deepcopy else trials
def _get_trials(
self,
study_id: int,
states: Container[TrialState] | None,
included_trial_ids: set[int],
trial_id_greater_than: int,
) -> list[FrozenTrial]:
included_trial_ids = set(
trial_id for trial_id in included_trial_ids if trial_id <= trial_id_greater_than
)
with _create_scoped_session(self.scoped_session) as session:
# Ensure that the study exists.
models.StudyModel.find_or_raise_by_id(study_id, session)
query = (
session.query(models.TrialModel)
.options(sqlalchemy_orm.selectinload(models.TrialModel.params))
.options(sqlalchemy_orm.selectinload(models.TrialModel.values))
.options(sqlalchemy_orm.selectinload(models.TrialModel.user_attributes))
.options(sqlalchemy_orm.selectinload(models.TrialModel.system_attributes))
.options(sqlalchemy_orm.selectinload(models.TrialModel.intermediate_values))
.filter(
models.TrialModel.study_id == study_id,
)
)
if states is not None:
# This assertion is for type checkers, since `states` is required to be Container
# in the base class while `models.TrialModel.state.in_` requires Iterable.
assert isinstance(states, Iterable)
query = query.filter(models.TrialModel.state.in_(states))
try:
if len(included_trial_ids) > 0 and trial_id_greater_than > -1:
_query = query.filter(
sqlalchemy.or_(
models.TrialModel.trial_id.in_(included_trial_ids),
models.TrialModel.trial_id > trial_id_greater_than,
)
)
elif trial_id_greater_than > -1:
_query = query.filter(models.TrialModel.trial_id > trial_id_greater_than)
else:
_query = query
trial_models = _query.order_by(models.TrialModel.trial_id).all()
except sqlalchemy_exc.OperationalError as e:
# Likely exceeding the number of maximum allowed variables using IN.
# This number differs between database dialects. For SQLite, for instance, see
# https://www.sqlite.org/limits.html and the section describing
# SQLITE_MAX_VARIABLE_NUMBER.
_logger.warning(
f"Caught an error from sqlalchemy: {e!s}. "
"Falling back to a slower alternative."
)
trial_models = query.order_by(models.TrialModel.trial_id).all()
trial_models = [
t
for t in trial_models
if t.trial_id in included_trial_ids or t.trial_id > trial_id_greater_than
]
trials = [self._build_frozen_trial_from_trial_model(trial) for trial in trial_models]
return trials
def _build_frozen_trial_from_trial_model(self, trial: "models.TrialModel") -> FrozenTrial:
values: list[float] | None
if trial.values:
values = [0 for _ in trial.values]
for value_model in trial.values:
values[value_model.objective] = models.TrialValueModel.stored_repr_to_value(
value_model.value, value_model.value_type
)
else:
values = None
params = sorted(trial.params, key=lambda p: p.param_id)
return FrozenTrial(
number=trial.number,
state=trial.state,
value=None,
values=values,
datetime_start=trial.datetime_start,
datetime_complete=trial.datetime_complete,
params={
p.param_name: distributions.json_to_distribution(
p.distribution_json
).to_external_repr(p.param_value)
for p in params
},
distributions={
p.param_name: distributions.json_to_distribution(p.distribution_json)
for p in params
},
user_attrs={attr.key: json.loads(attr.value_json) for attr in trial.user_attributes},
system_attrs={
attr.key: json.loads(attr.value_json) for attr in trial.system_attributes
},
intermediate_values={
v.step: models.TrialIntermediateValueModel.stored_repr_to_intermediate_value(
v.intermediate_value, v.intermediate_value_type
)
for v in trial.intermediate_values
},
trial_id=trial.trial_id,
)
def get_best_trial(self, study_id: int) -> FrozenTrial:
_directions = self.get_study_directions(study_id)
if len(_directions) > 1:
raise RuntimeError(
"Best trial can be obtained only for single-objective optimization."
)
direction = _directions[0]
trial_id = self._get_best_trial_id(study_id, direction)
return self.get_trial(trial_id)
def _get_best_trial_id(self, study_id: int, direction: StudyDirection) -> int:
with _create_scoped_session(self.scoped_session) as session:
if direction == StudyDirection.MAXIMIZE:
return models.TrialModel.find_max_value_trial_id(study_id, 0, session)
else:
return models.TrialModel.find_min_value_trial_id(study_id, 0, session)
@staticmethod
def _set_default_engine_kwargs_for_mysql(url: str, engine_kwargs: dict[str, Any]) -> None:
# Skip if RDB is not MySQL.
if not url.startswith("mysql"):
return
# Do not overwrite value.
if "pool_pre_ping" in engine_kwargs:
return
# If True, the connection pool checks liveness of connections at every checkout.
# Without this option, trials that take longer than `wait_timeout` may cause connection
# errors. For further details, please refer to the following document:
# https://docs.sqlalchemy.org/en/13/core/pooling.html#pool-disconnects-pessimistic
engine_kwargs["pool_pre_ping"] = True
_logger.debug("pool_pre_ping=True was set to engine_kwargs to prevent connection timeout.")
@staticmethod
def _fill_storage_url_template(template: str) -> str:
return template.format(SCHEMA_VERSION=models.SCHEMA_VERSION)
def remove_session(self) -> None:
"""Removes the current session.
A session is stored in SQLAlchemy's ThreadLocalRegistry for each thread. This method
closes and removes the session which is associated to the current thread. Particularly,
under multi-thread use cases, it is important to call this method *from each thread*.
Otherwise, all sessions and their associated DB connections may be destroyed by whichever
thread happens to invoke the garbage collector. By default, it is not allowed to touch
a SQLite connection from threads other than the thread that created the connection.
Therefore, we need to explicitly close the connection from each thread.
"""
self.scoped_session.remove()
def upgrade(self) -> None:
"""Upgrade the storage schema."""
self._version_manager.upgrade()
def get_current_version(self) -> str:
"""Return the schema version currently used by this storage."""
return self._version_manager.get_current_version()
def get_head_version(self) -> str:
"""Return the latest schema version."""
return self._version_manager.get_head_version()
def get_all_versions(self) -> list[str]:
"""Return the schema version list."""
return self._version_manager.get_all_versions()
def record_heartbeat(self, trial_id: int) -> None:
with _create_scoped_session(self.scoped_session, True) as session:
# Fetch heartbeat with read-only.
heartbeat = models.TrialHeartbeatModel.where_trial_id(trial_id, session)
if heartbeat is None: # heartbeat record does not exist.
heartbeat = models.TrialHeartbeatModel(trial_id=trial_id)
session.add(heartbeat)
else:
# Re-fetch the existing heartbeat with the write authorization.
heartbeat = models.TrialHeartbeatModel.where_trial_id(trial_id, session, True)
assert heartbeat is not None
heartbeat.heartbeat = session.execute(sqlalchemy.func.now()).scalar()
def _get_stale_trial_ids(self, study_id: int) -> list[int]:
assert self.heartbeat_interval is not None
if self.grace_period is None:
grace_period = 2 * self.heartbeat_interval
else:
grace_period = self.grace_period
stale_trial_ids = []
with _create_scoped_session(self.scoped_session, True) as session:
current_heartbeat = session.execute(sqlalchemy.func.now()).scalar()
assert current_heartbeat is not None
# Added the following line to prevent mixing of timezone-aware and timezone-naive
# `datetime` in PostgreSQL. See
# https://github.com/optuna/optuna/pull/2190#issuecomment-766605088 for details
current_heartbeat = current_heartbeat.replace(tzinfo=None)
running_trials = (
session.query(models.TrialModel)
.options(sqlalchemy_orm.selectinload(models.TrialModel.heartbeats))
.filter(models.TrialModel.state == TrialState.RUNNING)
.filter(models.TrialModel.study_id == study_id)
.all()
)
for trial in running_trials:
if len(trial.heartbeats) == 0:
continue
assert len(trial.heartbeats) == 1
heartbeat = trial.heartbeats[0].heartbeat
if current_heartbeat - heartbeat > timedelta(seconds=grace_period):
stale_trial_ids.append(trial.trial_id)
return stale_trial_ids
def get_heartbeat_interval(self) -> int | None:
return self.heartbeat_interval
def get_failed_trial_callback(
self,
) -> Callable[["optuna.study.Study", FrozenTrial], None] | None:
return self.failed_trial_callback
|
RDBStorage
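Note: the record above covers Optuna's RDB-backed storage, including its heartbeat and retry machinery. As a minimal, hedged usage sketch (assuming a local SQLite URL and a recent Optuna release where `RDBStorage`, `heartbeat_interval`, `grace_period`, and `RetryFailedTrialCallback` are available; the numbers are illustrative, not prescriptive):
import optuna
from optuna.storages import RDBStorage, RetryFailedTrialCallback
# Heartbeats let the storage detect stale RUNNING trials and mark them as failed;
# the callback then re-enqueues a retry for each failed trial.
storage = RDBStorage(
    url="sqlite:///example.db",
    heartbeat_interval=60,
    grace_period=120,
    failed_trial_callback=RetryFailedTrialCallback(max_retry=3),
)
study = optuna.create_study(storage=storage, direction="minimize")
study.optimize(lambda trial: trial.suggest_float("x", -10, 10) ** 2, n_trials=10)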
|
python
|
apache__airflow
|
airflow-core/src/airflow/callbacks/callback_requests.py
|
{
"start": 3531,
"end": 4142
}
|
class ____(BaseCallbackRequest):
"""A Class with information about the success/failure DAG callback to be executed."""
dag_id: str
run_id: str
context_from_server: DagRunContext | None = None
is_failure_callback: bool | None = True
"""Flag to determine whether it is a Failure Callback or Success Callback"""
type: Literal["DagCallbackRequest"] = "DagCallbackRequest"
CallbackRequest = Annotated[
DagCallbackRequest | TaskCallbackRequest | EmailRequest,
Field(discriminator="type"),
]
# Backwards compatibility alias
EmailNotificationRequest = EmailRequest
|
DagCallbackRequest
|
python
|
allegroai__clearml
|
clearml/backend_config/bucket_config.py
|
{
"start": 3945,
"end": 9909
}
|
class ____(BaseBucketConfigurations):
def __init__(
self,
buckets: Optional[List[S3BucketConfig]] = None,
default_key: str = "",
default_secret: str = "",
default_region: str = "",
default_use_credentials_chain: bool = False,
default_token: str = "",
default_extra_args: Optional[dict] = None,
default_verify: Optional[bool] = None,
default_profile: str = "",
default_secure: bool = True,
) -> None:
super(S3BucketConfigurations, self).__init__()
self._buckets = buckets if buckets else list()
self._default_key = default_key
self._default_secret = default_secret
self._default_token = default_token
self._default_region = default_region
self._default_multipart = True
self._default_use_credentials_chain = default_use_credentials_chain
self._default_extra_args = default_extra_args
self._default_verify = default_verify
self._default_profile = default_profile
self._default_secure = default_secure
@classmethod
def from_config(cls, s3_configuration: dict) -> "S3BucketConfigurations":
config_list = S3BucketConfig.from_list(s3_configuration.get("credentials", []))
default_key = s3_configuration.get("key", "") or getenv("AWS_ACCESS_KEY_ID", "")
default_secret = s3_configuration.get("secret", "") or getenv("AWS_SECRET_ACCESS_KEY", "")
default_token = s3_configuration.get("token", "") or getenv("AWS_SESSION_TOKEN", "")
default_region = s3_configuration.get("region", "") or getenv("AWS_DEFAULT_REGION", "")
default_use_credentials_chain = s3_configuration.get("use_credentials_chain") or False
default_extra_args = s3_configuration.get("extra_args")
default_verify = s3_configuration.get("verify", None)
default_profile = s3_configuration.get("profile", "") or getenv("AWS_PROFILE", "")
default_secure = s3_configuration.get("secure", True)
default_key = _none_to_empty_string(default_key).strip()
default_secret = _none_to_empty_string(default_secret).strip()
default_token = _none_to_empty_string(default_token).strip()
default_region = _none_to_empty_string(default_region).strip()
default_profile = _none_to_empty_string(default_profile).strip()
return cls(
config_list,
default_key,
default_secret,
default_region,
default_use_credentials_chain,
default_token,
default_extra_args,
default_verify,
default_profile,
default_secure,
)
def add_config(self, bucket_config: S3BucketConfig) -> None:
self._buckets.insert(0, bucket_config)
self._prefixes = None
def remove_config(self, bucket_config: S3BucketConfig) -> None:
self._buckets.remove(bucket_config)
self._prefixes = None
def get_config_by_bucket(self, bucket: str, host: str = None) -> S3BucketConfig:
try:
return next(
bucket_config for bucket_config in self._buckets if (bucket, host) == bucket_config.get_bucket_host()
)
except StopIteration:
pass
return None
def update_config_with_defaults(self, bucket_config: S3BucketConfig) -> None:
bucket_config.update(
key=self._default_key,
secret=self._default_secret,
region=bucket_config.region or self._default_region,
multipart=bucket_config.multipart or self._default_multipart,
use_credentials_chain=self._default_use_credentials_chain,
token=self._default_token,
extra_args=self._default_extra_args,
profile=self._default_profile,
secure=self._default_secure,
)
def _get_prefix_from_bucket_config(self, config: S3BucketConfig) -> str:
scheme = "s3"
prefix = furl.furl()
if config.host:
prefix.set(
scheme=scheme,
netloc=config.host.lower(),
path=config.bucket.lower() if config.bucket else "",
)
else:
prefix.set(scheme=scheme, path=config.bucket.lower())
bucket = prefix.path.segments[0]
prefix.path.segments.pop(0)
prefix.set(netloc=bucket)
return str(prefix)
def get_config_by_uri(self, uri: str) -> S3BucketConfig:
"""
Get the credentials for an AWS S3 bucket from the config
:param uri: URI of bucket, directory or file
:return: S3BucketConfig: bucket config
"""
def find_match(uri: str) -> Optional[S3BucketConfig]:
self._update_prefixes(refresh=False)
uri = uri.lower()
res = (config for config, prefix in self._prefixes if prefix is not None and uri.startswith(prefix))
try:
return next(res)
except StopIteration:
return None
match = find_match(uri)
if match:
return match
parsed = furl.furl(uri)
if parsed.port:
host = parsed.netloc
parts = parsed.path.segments
bucket = parts[0] if parts else None
else:
host = None
bucket = parsed.netloc
return S3BucketConfig(
key=self._default_key,
secret=self._default_secret,
region=self._default_region,
multipart=True,
use_credentials_chain=self._default_use_credentials_chain,
bucket=bucket,
host=host,
token=self._default_token,
extra_args=self._default_extra_args,
profile=self._default_profile,
secure=self._default_secure,
)
BucketConfigurations = S3BucketConfigurations
@attrs
|
S3BucketConfigurations
|
python
|
tornadoweb__tornado
|
tornado/test/routing_test.py
|
{
"start": 2385,
"end": 3368
}
|
class ____(AsyncHTTPTestCase):
def get_app(self):
return HTTPMethodRouter(Application())
def test_http_method_router(self):
response = self.fetch("/post_resource", method="POST", body="data")
self.assertEqual(response.code, 200)
response = self.fetch("/get_resource")
self.assertEqual(response.code, 404)
response = self.fetch("/post_resource")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"data")
def _get_named_handler(handler_name):
class Handler(RequestHandler):
def get(self, *args, **kwargs):
if self.application.settings.get("app_name") is not None:
self.write(self.application.settings["app_name"] + ": ")
self.finish(handler_name + ": " + self.reverse_url(handler_name))
return Handler
FirstHandler = _get_named_handler("first_handler")
SecondHandler = _get_named_handler("second_handler")
|
HTTPMethodRouterTestCase
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/controller/state.py
|
{
"start": 4403,
"end": 4673
}
|
class ____(TrainControllerState):
def __init__(
self,
training_failed_error: TrainingFailedError,
):
super().__init__(state_type=TrainControllerStateType.RESTARTING)
self.training_failed_error = training_failed_error
|
RestartingState
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_embed_image08.py
|
{
"start": 315,
"end": 903
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("embed_image08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.embed_image(
0, 0, self.image_dir + "red.png", {"description": "Some alt text"}
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django-compressor__django-compressor
|
compressor/filters/yui.py
|
{
"start": 548,
"end": 730
}
|
class ____(YUICompressorFilter):
type = "js"
options = (
("binary", settings.COMPRESS_YUI_BINARY),
("args", settings.COMPRESS_YUI_JS_ARGUMENTS),
)
|
YUIJSFilter
|
python
|
cython__cython
|
tests/run/methodmangling_T5.py
|
{
"start": 1246,
"end": 2135
}
|
class ____(CyTest):
"""
>>> cy = CyTestSub()
>>> '_CyTestSub__private' in dir(cy)
True
>>> cy._CyTestSub__private()
9
>>> '_CyTest__private' in dir(cy)
True
>>> cy._CyTest__private()
8
>>> '__private' in dir(cy)
False
>>> '_CyTestSub__x' in dir(cy)
False
>>> '_CyTestSub__y' in dir(cy)
True
>>> '_CyTest__x' in dir(cy)
True
>>> '__x' in dir(cy)
False
"""
__y = 2
def __private(self): return 9
def get(self):
"""
>>> CyTestSub().get()
(1, 2, 2, 9)
"""
return self._CyTest__x, self._CyTestSub__y, self.__y, self.__private()
def get_inner(self):
"""
>>> CyTestSub().get_inner()
(1, 2, 2, 9)
"""
def get(o):
return o._CyTest__x, o._CyTestSub__y, o.__y, o.__private()
return get(self)
|
CyTestSub
|
python
|
doocs__leetcode
|
solution/3100-3199/3179.Find the N-th Value After K Seconds/Solution.py
|
{
"start": 0,
"end": 249
}
|
class ____:
def valueAfterKSeconds(self, n: int, k: int) -> int:
a = [1] * n
mod = 10**9 + 7
for _ in range(k):
for i in range(1, n):
a[i] = (a[i] + a[i - 1]) % mod
return a[n - 1]
|
Solution
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/decorators/bash.py
|
{
"start": 1377,
"end": 4132
}
|
class ____(DecoratedOperator, BashOperator):
"""
Wraps a Python callable and uses the callable return value as the Bash command to be executed.
:param python_callable: A reference to an object that is callable.
:param op_kwargs: A dictionary of keyword arguments that will get unpacked
in your function (templated).
:param op_args: A list of positional arguments that will get unpacked when
calling your callable (templated).
"""
template_fields: Sequence[str] = (*DecoratedOperator.template_fields, *BashOperator.template_fields)
template_fields_renderers: ClassVar[dict[str, str]] = {
**DecoratedOperator.template_fields_renderers,
**BashOperator.template_fields_renderers,
}
custom_operator_name: str = "@task.bash"
overwrite_rtif_after_execution: bool = True
def __init__(
self,
*,
python_callable: Callable,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
**kwargs,
) -> None:
if kwargs.pop("multiple_outputs", None):
warnings.warn(
f"`multiple_outputs=True` is not supported in {self.custom_operator_name} tasks. Ignoring.",
UserWarning,
stacklevel=3,
)
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
bash_command=SET_DURING_EXECUTION,
multiple_outputs=False,
**kwargs,
)
def execute(self, context: Context) -> Any:
context_merge(context, self.op_kwargs)
kwargs = determine_kwargs(self.python_callable, self.op_args, context)
self.bash_command = self.python_callable(*self.op_args, **kwargs)
if not isinstance(self.bash_command, str) or self.bash_command.strip() == "":
raise TypeError("The returned value from the TaskFlow callable must be a non-empty string.")
self._is_inline_cmd = self._is_inline_command(bash_command=self.bash_command)
context["ti"].render_templates() # type: ignore[attr-defined]
return super().execute(context)
def bash_task(
python_callable: Callable | None = None,
**kwargs,
) -> TaskDecorator:
"""
Wrap a function into a BashOperator.
Accepts kwargs for operator kwargs. Can be reused in a single DAG. This function is only used
during type checking or auto-completion.
:param python_callable: Function to decorate.
:meta private:
"""
return task_decorator_factory(
python_callable=python_callable,
decorated_operator_class=_BashDecoratedOperator,
**kwargs,
)
|
_BashDecoratedOperator
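Note: the Airflow record above uses a callable's return value as the bash command to run. A small, hedged sketch of how the corresponding `@task.bash` decorator is typically used (assuming Airflow 2.9+ with the standard provider installed; the DAG id and command are hypothetical):
import pendulum
from airflow.decorators import dag, task
@dag(schedule=None, start_date=pendulum.datetime(2024, 1, 1), catchup=False)
def bash_taskflow_example():
    @task.bash
    def list_tmp() -> str:
        # The returned (non-empty) string becomes the bash command to execute.
        return "ls -la /tmp"
    list_tmp()
bash_taskflow_example()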
|
python
|
doocs__leetcode
|
solution/2000-2099/2053.Kth Distinct String in an Array/Solution.py
|
{
"start": 0,
"end": 248
}
|
class ____:
def kthDistinct(self, arr: List[str], k: int) -> str:
cnt = Counter(arr)
for s in arr:
if cnt[s] == 1:
k -= 1
if k == 0:
return s
return ""
|
Solution
|
python
|
pytorch__pytorch
|
torch/fx/experimental/symbolic_shapes.py
|
{
"start": 103845,
"end": 103906
}
|
class ____(ShapeGuardPythonPrinter):
pass
|
ShapeGuardPrinter
|
python
|
django__django
|
tests/i18n/test_extraction.py
|
{
"start": 37350,
"end": 38435
}
|
class ____(ExtractorTests):
def test_no_wrap_enabled(self):
management.call_command(
"makemessages", locale=[LOCALE], verbosity=0, no_wrap=True
)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE) as fp:
po_contents = fp.read()
self.assertMsgId(
"This literal should also be included wrapped or not wrapped "
"depending on the use of the --no-wrap option.",
po_contents,
)
def test_no_wrap_disabled(self):
management.call_command(
"makemessages", locale=[LOCALE], verbosity=0, no_wrap=False
)
self.assertTrue(os.path.exists(self.PO_FILE))
with open(self.PO_FILE) as fp:
po_contents = fp.read()
self.assertMsgId(
'""\n"This literal should also be included wrapped or not '
'wrapped depending on the "\n"use of the --no-wrap option."',
po_contents,
use_quotes=False,
)
|
NoWrapExtractorTests
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/metaclass9.py
|
{
"start": 431,
"end": 557
}
|
class ____(metaclass=Meta1, param2="", param1=1): ...
# This should generate an error because param1 is the wrong type.
|
Class1_2
|
python
|
python-openxml__python-docx
|
src/docx/oxml/text/parfmt.py
|
{
"start": 1292,
"end": 1523
}
|
class ____(BaseOxmlElement):
"""``<w:jc>`` element, specifying paragraph justification."""
val: WD_ALIGN_PARAGRAPH = RequiredAttribute( # pyright: ignore[reportAssignmentType]
"w:val", WD_ALIGN_PARAGRAPH
)
|
CT_Jc
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-duckdb-pandas/dagster_duckdb_pandas/duckdb_pandas_type_handler.py
|
{
"start": 408,
"end": 6023
}
|
class ____(DbTypeHandler[pd.DataFrame]):
"""Stores and loads Pandas DataFrames in DuckDB.
To use this type handler, return it from the ``type_handlers`` method of an I/O manager that inherits from ``DuckDBIOManager``.
Example:
.. code-block:: python
from dagster_duckdb import DuckDBIOManager
from dagster_duckdb_pandas import DuckDBPandasTypeHandler
class MyDuckDBIOManager(DuckDBIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [DuckDBPandasTypeHandler()]
@asset(
key_prefix=["my_schema"] # will be used as the schema in duckdb
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={"io_manager": MyDuckDBIOManager(database="my_db.duckdb")}
)
"""
def handle_output(
self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection
):
"""Stores the pandas DataFrame in duckdb."""
connection.execute(
f"create table if not exists {table_slice.schema}.{table_slice.table} as select * from"
" obj;"
)
if not connection.fetchall():
# table was not created, therefore already exists. Insert the data
connection.execute(
f"insert into {table_slice.schema}.{table_slice.table} select * from obj"
)
context.add_output_metadata(
{
# output object may be a slice/partition, so we output different metadata keys based on
# whether this output represents an entire table or just a slice/partition
**(
TableMetadataSet(partition_row_count=obj.shape[0])
if context.has_partition_key
else TableMetadataSet(row_count=obj.shape[0])
),
"dataframe_columns": MetadataValue.table_schema(
TableSchema(
columns=[
TableColumn(name=name, type=str(dtype)) # type: ignore # (bad stubs)
for name, dtype in obj.dtypes.items()
]
)
),
}
)
def load_input(
self, context: InputContext, table_slice: TableSlice, connection
) -> pd.DataFrame:
"""Loads the input as a Pandas DataFrame."""
if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:
return pd.DataFrame()
return connection.execute(DuckDbClient.get_select_statement(table_slice)).fetchdf()
@property
def supported_types(self):
return [pd.DataFrame]
duckdb_pandas_io_manager = build_duckdb_io_manager(
[DuckDBPandasTypeHandler()], default_load_type=pd.DataFrame
)
duckdb_pandas_io_manager.__doc__ = """
An I/O manager definition that reads inputs from and writes Pandas DataFrames to DuckDB. When
using the duckdb_pandas_io_manager, any inputs and outputs without type annotations will be loaded
as Pandas DataFrames.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_duckdb_pandas import duckdb_pandas_io_manager
@asset(
key_prefix=["my_schema"] # will be used as the schema in DuckDB
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={"io_manager": duckdb_pandas_io_manager.configured({"database": "my_db.duckdb"})}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the DuckDB I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
Definitions(
assets=[my_table],
resources={"io_manager": duckdb_pandas_io_manager.configured({"database": "my_db.duckdb", "schema": "my_schema"})}
)
On individual assets, you can also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in duckdb
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in duckdb
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
"""
|
DuckDBPandasTypeHandler
|
python
|
pytorch__pytorch
|
test/inductor/test_selective_lowering.py
|
{
"start": 406,
"end": 2918
}
|
class ____(InductorTestCase):
"""
Tests for user-controllable selective lowering using node.meta annotations.
"""
device = GPU_TYPE
def _mark_nodes_for_fallback(
self, gm: torch.fx.GraphModule, predicate: Callable[[torch.fx.Node], bool]
) -> torch.fx.GraphModule:
"""
Helper method to mark nodes with should_fallback metadata based on a predicate.
"""
for node in gm.graph.nodes:
if node.op == "call_function" and predicate(node):
node.meta["should_fallback"] = True
return gm
def test_basic_selective_lowering(self):
"""
Test that nodes marked for fallback use fallback handlers instead of lowerings.
"""
def foo(x, y):
a = x + y # This will be marked for fallback
b = a * 2 # This will use normal lowering
return b
x = torch.randn(10, device=self.device)
y = torch.randn(10, device=self.device)
def custom_backend(gm: torch.fx.GraphModule, example_inputs):
# Mark all add operations for fallback
def should_fallback_add(node: torch.fx.Node) -> bool:
return node.target == torch.ops.aten.add.Tensor
self._mark_nodes_for_fallback(gm, should_fallback_add)
from torch._inductor.compile_fx import compile_fx
return compile_fx(gm, example_inputs)
compiled_fn = torch.compile(foo, backend=custom_backend)
result = compiled_fn(x, y)
expected = foo(x, y)
self.assertTrue(torch.allclose(result, expected))
def test_no_fallback_when_unmarked(self):
"""
Test that operations without fallback annotation use normal lowering.
"""
def foo(x, y):
return x + y
x = torch.randn(10, device=self.device)
y = torch.randn(10, device=self.device)
def custom_backend(gm: torch.fx.GraphModule, example_inputs):
# Don't mark anything - all operations should use normal lowering
from torch._inductor.compile_fx import compile_fx
return compile_fx(gm, example_inputs)
compiled_fn = torch.compile(foo, backend=custom_backend)
result = compiled_fn(x, y)
expected = foo(x, y)
self.assertTrue(torch.allclose(result, expected))
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU:
run_tests(needs="filelock")
|
SelectiveLoweringTest
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 14786,
"end": 14853
}
|
class ____(TupleCompare):
pass
@infer_global(operator.lt)
|
TupleLe
|
python
|
sympy__sympy
|
sympy/combinatorics/prufer.py
|
{
"start": 281,
"end": 12061
}
|
class ____(Basic):
"""
The Prufer correspondence is an algorithm that describes the
bijection between labeled trees and the Prufer code. A Prufer
code of a labeled tree is unique up to isomorphism and has
a length of n - 2.
Prufer sequences were first used by Heinz Prufer to give a
proof of Cayley's formula.
References
==========
.. [1] https://mathworld.wolfram.com/LabeledTree.html
"""
_prufer_repr = None
_tree_repr = None
_nodes = None
_rank = None
@property
def prufer_repr(self):
"""Returns Prufer sequence for the Prufer object.
This sequence is found by removing the highest numbered vertex,
recording the node it was attached to, and continuing until only
two vertices remain. The Prufer sequence is the list of recorded nodes.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).prufer_repr
[3, 3, 3, 4]
>>> Prufer([1, 0, 0]).prufer_repr
[1, 0, 0]
See Also
========
to_prufer
"""
if self._prufer_repr is None:
self._prufer_repr = self.to_prufer(self._tree_repr[:], self.nodes)
return self._prufer_repr
@property
def tree_repr(self):
"""Returns the tree representation of the Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).tree_repr
[[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]
>>> Prufer([1, 0, 0]).tree_repr
[[1, 2], [0, 1], [0, 3], [0, 4]]
See Also
========
to_tree
"""
if self._tree_repr is None:
self._tree_repr = self.to_tree(self._prufer_repr[:])
return self._tree_repr
@property
def nodes(self):
"""Returns the number of nodes in the tree.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).nodes
6
>>> Prufer([1, 0, 0]).nodes
5
"""
return self._nodes
@property
def rank(self):
"""Returns the rank of the Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> p = Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]])
>>> p.rank
778
>>> p.next(1).rank
779
>>> p.prev().rank
777
See Also
========
prufer_rank, next, prev, size
"""
if self._rank is None:
self._rank = self.prufer_rank()
return self._rank
@property
def size(self):
"""Return the number of possible trees of this Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([0]*4).size == Prufer([6]*4).size == 1296
True
See Also
========
prufer_rank, rank, next, prev
"""
return self.prev(self.rank).prev().rank + 1
@staticmethod
def to_prufer(tree, n):
"""Return the Prufer sequence for a tree given as a list of edges where
``n`` is the number of nodes in the tree.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_repr
[0, 0]
>>> Prufer.to_prufer([[0, 1], [0, 2], [0, 3]], 4)
[0, 0]
See Also
========
prufer_repr: returns Prufer sequence of a Prufer object.
"""
d = defaultdict(int)
L = []
for edge in tree:
# Increment the value of the corresponding
# node in the degree list as we encounter an
# edge involving it.
d[edge[0]] += 1
d[edge[1]] += 1
for i in range(n - 2):
# find the smallest leaf
for x in range(n):
if d[x] == 1:
break
# find the node it was connected to
y = None
for edge in tree:
if x == edge[0]:
y = edge[1]
elif x == edge[1]:
y = edge[0]
if y is not None:
break
# record and update
L.append(y)
for j in (x, y):
d[j] -= 1
if not d[j]:
d.pop(j)
tree.remove(edge)
return L
@staticmethod
def to_tree(prufer):
"""Return the tree (as a list of edges) of the given Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([0, 2], 4)
>>> a.tree_repr
[[0, 1], [0, 2], [2, 3]]
>>> Prufer.to_tree([0, 2])
[[0, 1], [0, 2], [2, 3]]
References
==========
.. [1] https://hamberg.no/erlend/posts/2010-11-06-prufer-sequence-compact-tree-representation.html
See Also
========
tree_repr: returns tree representation of a Prufer object.
"""
tree = []
last = []
n = len(prufer) + 2
d = defaultdict(lambda: 1)
for p in prufer:
d[p] += 1
for i in prufer:
for j in range(n):
# find the smallest leaf (degree = 1)
if d[j] == 1:
break
# (i, j) is the new edge that we append to the tree
# and remove from the degree dictionary
d[i] -= 1
d[j] -= 1
tree.append(sorted([i, j]))
last = [i for i in range(n) if d[i] == 1] or [0, 1]
tree.append(last)
return tree
@staticmethod
def edges(*runs):
"""Return a list of edges and the number of nodes from the given runs
that connect nodes in an integer-labelled tree.
All node numbers will be shifted so that the minimum node is 0. It is
not a problem if edges are repeated in the runs; only unique edges are
returned. There is no assumption made about what the range of the node
labels should be, but all nodes from the smallest through the largest
must be present.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer.edges([1, 2, 3], [2, 4, 5]) # a T
([[0, 1], [1, 2], [1, 3], [3, 4]], 5)
Duplicate edges are removed:
>>> Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) # a K
([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7)
"""
e = set()
nmin = runs[0][0]
for r in runs:
for i in range(len(r) - 1):
a, b = r[i: i + 2]
if b < a:
a, b = b, a
e.add((a, b))
rv = []
got = set()
nmin = nmax = None
for ei in e:
got.update(ei)
nmin = min(ei[0], nmin) if nmin is not None else ei[0]
nmax = max(ei[1], nmax) if nmax is not None else ei[1]
rv.append(list(ei))
missing = set(range(nmin, nmax + 1)) - got
if missing:
missing = [i + nmin for i in missing]
if len(missing) == 1:
msg = 'Node %s is missing.' % missing.pop()
else:
msg = 'Nodes %s are missing.' % sorted(missing)
raise ValueError(msg)
if nmin != 0:
for i, ei in enumerate(rv):
rv[i] = [n - nmin for n in ei]
nmax -= nmin
return sorted(rv), nmax + 1
def prufer_rank(self):
"""Computes the rank of a Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_rank()
0
See Also
========
rank, next, prev, size
"""
r = 0
p = 1
for i in range(self.nodes - 3, -1, -1):
r += p*self.prufer_repr[i]
p *= self.nodes
return r
@classmethod
def unrank(self, rank, n):
"""Finds the unranked Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer.unrank(0, 4)
Prufer([0, 0])
"""
n, rank = as_int(n), as_int(rank)
L = defaultdict(int)
for i in range(n - 3, -1, -1):
L[i] = rank % n
rank = (rank - L[i])//n
return Prufer([L[i] for i in range(len(L))])
def __new__(cls, *args, **kw_args):
"""The constructor for the Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
A Prufer object can be constructed from a list of edges:
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_repr
[0, 0]
If the number of nodes is given, no checking of the nodes will
be performed; it will be assumed that nodes 0 through n - 1 are
present:
>>> Prufer([[0, 1], [0, 2], [0, 3]], 4)
Prufer([[0, 1], [0, 2], [0, 3]], 4)
A Prufer object can be constructed from a Prufer sequence:
>>> b = Prufer([1, 3])
>>> b.tree_repr
[[0, 1], [1, 3], [2, 3]]
"""
arg0 = Array(args[0]) if args[0] else Tuple()
args = (arg0,) + tuple(_sympify(arg) for arg in args[1:])
ret_obj = Basic.__new__(cls, *args, **kw_args)
args = [list(args[0])]
if args[0] and iterable(args[0][0]):
if not args[0][0]:
raise ValueError(
'Prufer expects at least one edge in the tree.')
if len(args) > 1:
nnodes = args[1]
else:
nodes = set(flatten(args[0]))
nnodes = max(nodes) + 1
if nnodes != len(nodes):
missing = set(range(nnodes)) - nodes
if len(missing) == 1:
msg = 'Node %s is missing.' % missing.pop()
else:
msg = 'Nodes %s are missing.' % sorted(missing)
raise ValueError(msg)
ret_obj._tree_repr = [list(i) for i in args[0]]
ret_obj._nodes = nnodes
else:
ret_obj._prufer_repr = args[0]
ret_obj._nodes = len(ret_obj._prufer_repr) + 2
return ret_obj
def next(self, delta=1):
"""Generates the Prufer sequence that is delta beyond the current one.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> b = a.next(1) # == a.next()
>>> b.tree_repr
[[0, 2], [0, 1], [1, 3]]
>>> b.rank
1
See Also
========
prufer_rank, rank, prev, size
"""
return Prufer.unrank(self.rank + delta, self.nodes)
def prev(self, delta=1):
"""Generates the Prufer sequence that is -delta before the current one.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [1, 2], [2, 3], [1, 4]])
>>> a.rank
36
>>> b = a.prev()
>>> b
Prufer([1, 2, 0])
>>> b.rank
35
See Also
========
prufer_rank, rank, next, size
"""
return Prufer.unrank(self.rank - delta, self.nodes)
|
Prufer
|
python
|
doocs__leetcode
|
solution/0900-0999/0985.Sum of Even Numbers After Queries/Solution.py
|
{
"start": 0,
"end": 408
}
|
class ____:
def sumEvenAfterQueries(
self, nums: List[int], queries: List[List[int]]
) -> List[int]:
s = sum(x for x in nums if x % 2 == 0)
ans = []
for v, i in queries:
if nums[i] % 2 == 0:
s -= nums[i]
nums[i] += v
if nums[i] % 2 == 0:
s += nums[i]
ans.append(s)
return ans
|
Solution
|
python
|
doocs__leetcode
|
solution/2300-2399/2382.Maximum Segment Sum After Removals/Solution.py
|
{
"start": 0,
"end": 759
}
|
class ____:
def maximumSegmentSum(self, nums: List[int], removeQueries: List[int]) -> List[int]:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
def merge(a, b):
pa, pb = find(a), find(b)
p[pa] = pb
s[pb] += s[pa]
n = len(nums)
p = list(range(n))
s = [0] * n
ans = [0] * n
mx = 0
for j in range(n - 1, 0, -1):
i = removeQueries[j]
s[i] = nums[i]
if i and s[find(i - 1)]:
merge(i, i - 1)
if i < n - 1 and s[find(i + 1)]:
merge(i, i + 1)
mx = max(mx, s[find(i)])
ans[j - 1] = mx
return ans
|
Solution
|
python
|
pezy__LeetCode
|
099. Same Tree/solution.py
|
{
"start": 129,
"end": 1052
}
|
class ____:
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
if p and q:
return p.val == q.val and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
return p is q
if __name__ == "__main__":
example1 = TreeNode(1)
example1.left = TreeNode(2)
example1.right = TreeNode(3)
assert(Solution().isSameTree(example1, example1) is True)
example2l = TreeNode(1)
example2l.left = TreeNode(2)
example2r = TreeNode(1)
example2r.right = TreeNode(2)
assert(Solution().isSameTree(example2l, example2r) is False)
example3l = TreeNode(1)
example3l.left = TreeNode(2)
example3l.right = TreeNode(1)
example3r = TreeNode(1)
example3r.left = TreeNode(1)
example3r.right = TreeNode(2)
assert(Solution().isSameTree(example3l, example3r) is False)
|
Solution
|