| id | content |
|---|---|
codereview_new_python_data_9371
|
def stem(self):
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
- def read_text(self, encoding=None, *args, **kwargs):
- encoding = io.text_encoding(encoding)
- with self.open('r', *args, encoding=encoding, **kwargs) as strm:
return strm.read()
def read_bytes(self):
I'm beginning to wonder now if this transform is even necessary, given that `self.open` already handles the parameter.
def stem(self):
def filename(self):
return pathlib.Path(self.root.filename).joinpath(self.at)
+ def read_text(self, *args, **kwargs):
+ with self.open('r', *args, **kwargs) as strm:
return strm.read()
def read_bytes(self):
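A minimal sketch of what `io.text_encoding` does with a `None` argument, which is why forwarding it is redundant if `self.open` already applies the same substitution (outputs assume UTF-8 mode is off):
```python
import io

# None is replaced by the "locale" placeholder (and may trigger an
# EncodingWarning under -X warn_default_encoding); anything else is
# passed through unchanged.
print(io.text_encoding(None))     # locale
print(io.text_encoding("utf-8"))  # utf-8
```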
|
codereview_new_python_data_9372
|
class SystemRandom(Random):
"""
def random(self):
- """Return the next random floating point number in the range ``0.0 <= X < 1.0``"""
return (int.from_bytes(_urandom(7)) >> 3) * RECIP_BPF
def getrandbits(self, k):
No backticks here. This is a docstring and doesn't use markup.
class SystemRandom(Random):
"""
def random(self):
+ """Get the next random number in the range 0.0 <= X < 1.0."""
return (int.from_bytes(_urandom(7)) >> 3) * RECIP_BPF
def getrandbits(self, k):
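For context, a standalone sketch of the bit arithmetic in `random()`: 7 bytes give 56 random bits, the shift drops 3 so exactly the 53 bits a C double can hold remain, and `RECIP_BPF` (2**-53) scales the integer into [0.0, 1.0):
```python
import os

BPF = 53                # bits of precision in a double's mantissa
RECIP_BPF = 2 ** -BPF

def sample():
    # 56 bits from the OS, truncated to 53, scaled into [0.0, 1.0)
    return (int.from_bytes(os.urandom(7), "big") >> 3) * RECIP_BPF

assert 0.0 <= sample() < 1.0
```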
|
codereview_new_python_data_9373
|
def handle_alt_loop(self, node: Alt, is_gather: bool, rulename: Optional[str]) -
self.print(
"void **_new_children = PyMem_Realloc(_children, _children_capacity*sizeof(void *));"
)
- self.out_of_memory_return(f"!new_children", cleanup_code="PyMem_Free(_children);")
self.print("_children = _new_children;")
self.print("}")
self.print("_children[_n++] = _res;")
```suggestion
self.out_of_memory_return(f"!_new_children", cleanup_code="PyMem_Free(_children);")
```
def handle_alt_loop(self, node: Alt, is_gather: bool, rulename: Optional[str]) -
self.print(
"void **_new_children = PyMem_Realloc(_children, _children_capacity*sizeof(void *));"
)
+ self.out_of_memory_return(f"!_new_children", cleanup_code="PyMem_Free(_children);")
self.print("_children = _new_children;")
self.print("}")
self.print("_children[_n++] = _res;")
|
codereview_new_python_data_9374
|
def _write_atomic(path, data, mode=0o666):
# Python 3.12a1 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR)
# Python 3.12a1 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE)
# Python 3.12a1 3515 (Embed jump mask in COMPARE_OP oparg)
-
# Python 3.12a1 3517 (Change YIELD_VALUE oparg to exception block depth)
# Python 3.13 will start with 3550
I'm guessing you have a 3516 PR open somewhere?
def _write_atomic(path, data, mode=0o666):
# Python 3.12a1 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR)
# Python 3.12a1 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE)
# Python 3.12a1 3515 (Embed jump mask in COMPARE_OP oparg)
+# Python 3.12a1 3516 (Add COMAPRE_AND_BRANCH instruction)
# Python 3.12a1 3517 (Change YIELD_VALUE oparg to exception block depth)
# Python 3.13 will start with 3550
|
codereview_new_python_data_9375
|
def _write_atomic(path, data, mode=0o666):
# Python 3.12a1 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR)
# Python 3.12a1 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE)
# Python 3.12a1 3515 (Embed jump mask in COMPARE_OP oparg)
-# Python 3.12a1 3516 (Add COMAPRE_AND_BRANCH instruction)
# Python 3.12a1 3517 (Change YIELD_VALUE oparg to exception block depth)
# Python 3.13 will start with 3550
Maybe fix the typo here (COMAPRE -> COMPARE)?
def _write_atomic(path, data, mode=0o666):
# Python 3.12a1 3513 (Add CALL_INTRINSIC_1 instruction, removed STOPITERATION_ERROR, PRINT_EXPR, IMPORT_STAR)
# Python 3.12a1 3514 (Remove ASYNC_GEN_WRAP, LIST_TO_TUPLE, and UNARY_POSITIVE)
# Python 3.12a1 3515 (Embed jump mask in COMPARE_OP oparg)
+# Python 3.12a1 3516 (Add COMPARE_AND_BRANCH instruction)
# Python 3.12a1 3517 (Change YIELD_VALUE oparg to exception block depth)
# Python 3.13 will start with 3550
|
codereview_new_python_data_9376
|
def splitroot(p):
empty; the root may be empty, a single slash, or two slashes. The tail
contains anything after the root. For example:
- splitdrive('foo/bar') == ('', '', 'foo/bar')
- splitdrive('/foo/bar') == ('', '/', 'foo/bar')
"""
p = os.fspath(p)
if isinstance(p, bytes):
```suggestion
splitroot('foo/bar') == ('', '', 'foo/bar')
splitroot('/foo/bar') == ('', '/', 'foo/bar')
```
I'd also add an example with the two-slashes case
def splitroot(p):
empty; the root may be empty, a single slash, or two slashes. The tail
contains anything after the root. For example:
+ splitroot('foo/bar') == ('', '', 'foo/bar')
+ splitroot('/foo/bar') == ('', '/', 'foo/bar')
+ splitroot('//foo/bar') == ('', '//', 'foo/bar')
+ splitroot('///foo/bar') == ('', '/', '//foo/bar')
"""
p = os.fspath(p)
if isinstance(p, bytes):
|
codereview_new_python_data_9377
|
def splitroot(p):
splitroot('//server/share/') == ('//server/share', '/', '')
splitroot('C:/Users/Barney') == ('C:', '/', 'Users/Barney')
- splitroot('Windows') == ('', '', 'Windows')
"""
p = os.fspath(p)
if isinstance(p, bytes):
Perhaps add an example showing that redundant slashes in the tail do not get removed.
def splitroot(p):
splitroot('//server/share/') == ('//server/share', '/', '')
splitroot('C:/Users/Barney') == ('C:', '/', 'Users/Barney')
+ splitroot('C:///spam///ham') == ('C:', '/', '//spam///ham')
+ splitroot('Windows/notepad') == ('', '', 'Windows/notepad')
"""
p = os.fspath(p)
if isinstance(p, bytes):
|
codereview_new_python_data_9378
|
def commonpath(paths):
drivesplits = [splitroot(p.replace(altsep, sep).lower()) for p in paths]
split_paths = [p.split(sep) for d, r, p in drivesplits]
- if len(set(r for d, r, p in drivesplits)) != 1:
raise ValueError("Can't mix absolute and relative paths")
# Check that all drive letters or UNC paths match. The check is made only
# now otherwise type errors for mixing strings and bytes would not be
# caught.
- if len(set(d for d, r, p in drivesplits)) != 1:
raise ValueError("Paths don't have the same drive")
drive, root, path = splitroot(paths[0].replace(altsep, sep))
Since we're touching this line anyway, we may as well modernise the code slightly by using a set comprehension
```suggestion
if len({r for d, r, p in drivesplits}) != 1:
```
def commonpath(paths):
drivesplits = [splitroot(p.replace(altsep, sep).lower()) for p in paths]
split_paths = [p.split(sep) for d, r, p in drivesplits]
+ if len({r for d, r, p in drivesplits}) != 1:
raise ValueError("Can't mix absolute and relative paths")
# Check that all drive letters or UNC paths match. The check is made only
# now otherwise type errors for mixing strings and bytes would not be
# caught.
+ if len({d for d, r, p in drivesplits}) != 1:
raise ValueError("Paths don't have the same drive")
drive, root, path = splitroot(paths[0].replace(altsep, sep))
|
codereview_new_python_data_9383
|
def cache_effect(self) -> CacheEffect | None:
@contextual
def stack_effect(self) -> StackEffect | None:
- # IDENTIFIER [':' IDENTIFIER]
# TODO: Conditions
if tkn := self.expect(lx.IDENTIFIER):
if self.expect(lx.COLON):
Update this comment?
def cache_effect(self) -> CacheEffect | None:
@contextual
def stack_effect(self) -> StackEffect | None:
+ # IDENTIFIER
+ # | IDENTIFIER ':' IDENTIFIER
+ # | IDENTIFIER '[' dimension ']'
# TODO: Conditions
if tkn := self.expect(lx.IDENTIFIER):
if self.expect(lx.COLON):
|
codereview_new_python_data_9384
|
def test_copy(self):
def test_deepcopy(self):
s = slice(1, 10)
c = copy.deepcopy(s)
- self.assertIsNot(s, c)
self.assertEqual(s, c)
s = slice(1, 10, 2)
c = copy.deepcopy(s)
- self.assertIsNot(s, c)
self.assertEqual(s, c)
# Corner case for mutable indices:
Nit: not sure we need this one. What behaviour or principle does this assertion demonstrate?
You could plausibly have an implementation of `deepcopy` where the exact same object was returned iff it was detected that it didn't contain any references to mutable objects, and I think that would be _fine_ — it's basically what we already do for deepcopying tuples: https://github.com/python/cpython/blob/e098137cd3250af05f19380590b8dec79dc5942f/Lib/copy.py#L200. But that alternative implementation would cause this assertion to fail. Which makes it seem to me like it's not a great assertion :)
```suggestion
```
def test_copy(self):
def test_deepcopy(self):
s = slice(1, 10)
c = copy.deepcopy(s)
self.assertEqual(s, c)
s = slice(1, 10, 2)
c = copy.deepcopy(s)
self.assertEqual(s, c)
# Corner case for mutable indices:
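The tuple behaviour the reviewer points to can be demonstrated directly: `copy.deepcopy` hands back the identical tuple when none of its contents needed copying, so an `assertIsNot` would fail under an equally valid implementation for slices:
```python
import copy

t = (1, "a", (2, 3))       # immutable all the way down
assert copy.deepcopy(t) is t

u = (1, [2, 3])            # reaches a mutable list
assert copy.deepcopy(u) is not u
```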
|
codereview_new_python_data_9385
|
def _next_external_frame(frame, skip_file_prefixes):
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None,
- *,
- skip_file_prefixes: tuple[str, ...] = ()):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
If you're going to add type hints (did we decide to start doing this?), should we add them to the other arguments?
def _next_external_frame(frame, skip_file_prefixes):
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None,
+ *, skip_file_prefixes=()):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
|
codereview_new_python_data_9386
|
def _next_external_frame(frame, skip_file_prefixes):
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None,
- *,
- skip_file_prefixes: tuple[str, ...] = ()):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
The stdlib used to proscribe annotations; now a few functions have them.
Here adding the annotation may help a little, but not as much as a full docstring would, for example to say whether the «prefixes» are true path prefixes or sub-strings (e.g. does `/usr/lib` match `/usr/lib/spam.py` and `/usr/lib64/spam.py`?).
Also, maybe it would be more common to use `None` as the default value.
(Very minor note: using a whole line just for the `*,` marker is not usual.)
def _next_external_frame(frame, skip_file_prefixes):
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None,
+ *, skip_file_prefixes=()):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
|
codereview_new_python_data_9387
|
def absolute(self):
if self.is_absolute():
return self
elif self._drv and _getfullpathname:
- try:
- cwd = _getfullpathname(self._drv)
- except (ValueError, OSError):
- cwd = os.getcwd()
else:
cwd = os.getcwd()
return self._from_parts([cwd] + self._parts)
```suggestion
# There is a CWD on each drive-letter drive.
cwd = _getfullpathname(self._drv)
```
def absolute(self):
if self.is_absolute():
return self
elif self._drv and _getfullpathname:
+ # There is a CWD on each drive-letter drive.
+ cwd = _getfullpathname(self._drv)
else:
cwd = os.getcwd()
return self._from_parts([cwd] + self._parts)
|
codereview_new_python_data_9388
|
def absolute(self):
if self.is_absolute():
return self
elif self._drv and _getfullpathname:
- try:
- cwd = _getfullpathname(self._drv)
- except (ValueError, OSError):
- cwd = os.getcwd()
else:
cwd = os.getcwd()
return self._from_parts([cwd] + self._parts)
I think the only `ValueError` exceptions raised by `_getfullpathname()` are either from a decoding error of a `bytes` path or from embedded null characters in the path. I think the only `OSError` that can occur is if the path is an empty string or just spaces. None of these should apply to `self._drv` in this case. If they do occur for some reason, do we really want to ignore an invalid drive here?
def absolute(self):
if self.is_absolute():
return self
elif self._drv and _getfullpathname:
+ # There is a CWD on each drive-letter drive.
+ cwd = _getfullpathname(self._drv)
else:
cwd = os.getcwd()
return self._from_parts([cwd] + self._parts)
|
codereview_new_python_data_9389
|
def test_absolute(self):
with os_helper.subst_drive(BASE) as other_drive:
other_cwd = f'{other_drive}\\dirA'
- with os_helper.change_cwd(other_cwd):
- pass
# Relative path on another drive
self.assertEqual(str(P(other_drive).absolute()), other_cwd)
I forgot the suggestion.
```suggestion
# set the working directory on the substitute drive
saved_dir = os.getcwd()
os.chdir(other_cwd)
os.chdir(saved_dir)
```
def test_absolute(self):
with os_helper.subst_drive(BASE) as other_drive:
other_cwd = f'{other_drive}\\dirA'
+ # set the working directory on the substitute drive
+ saved_dir = os.getcwd()
+ os.chdir(other_cwd)
+ os.chdir(saved_dir)
# Relative path on another drive
self.assertEqual(str(P(other_drive).absolute()), other_cwd)
|
codereview_new_python_data_9392
|
def test_field_recursive_repr(self):
rec_field.type = rec_field
rec_field.name = "id"
repr_output = repr(rec_field)
- expected_output = "Field(name='id',type=...," \
- f"default={MISSING!r},default_factory={MISSING!r}," \
- "init=True,repr=True,hash=None," \
- "compare=True,metadata=mappingproxy({})," \
- f"kw_only={MISSING!r}," \
- "_field_type=None)"
- self.assertEqual(repr_output, expected_output)
def test_dataclass_params_repr(self):
# Even though this is testing an internal implementation detail,
I think I'd just test that "type=..." is in the repr rather than checking all the other values; if new ones are added, this test would need to be modified.
Also, I think an additional test for the original problem:
```python
@dataclass
class D:
C: C = field()
```
would be good.
Otherwise looks good!
def test_field_recursive_repr(self):
rec_field.type = rec_field
rec_field.name = "id"
repr_output = repr(rec_field)
+ self.assertIn(",type=...,", repr_output)
+
+ def test_recursive_annotation(self):
+ class C:
+ pass
+
+ @dataclass
+ class D:
+ C: C = field()
+
+ self.assertIn(",type=...,", repr(D.__dataclass_fields__["C"]))
def test_dataclass_params_repr(self):
# Even though this is testing an internal implementation detail,
|
codereview_new_python_data_9393
|
def from_subprocess():
['uname', '-p'],
stderr=subprocess.DEVNULL,
text=True,
- encoding="locale",
).strip()
except (OSError, subprocess.CalledProcessError):
pass
I think `uname` output is not localized.
https://github.com/coreutils/coreutils/blob/8d4768c94d0fa9de545a6e1c370f9a6fae4cb3a7/src/uname.c#L314-L371
So we can use "utf-8" or "latin1" here.
def from_subprocess():
['uname', '-p'],
stderr=subprocess.DEVNULL,
text=True,
+ encoding="utf8",
).strip()
except (OSError, subprocess.CalledProcessError):
pass
|
codereview_new_python_data_9394
|
def from_subprocess():
['uname', '-p'],
stderr=subprocess.DEVNULL,
text=True,
- encoding="locale",
).strip()
except (OSError, subprocess.CalledProcessError):
pass
```suggestion
encoding="utf8",
```
def from_subprocess():
['uname', '-p'],
stderr=subprocess.DEVNULL,
text=True,
+ encoding="utf8",
).strip()
except (OSError, subprocess.CalledProcessError):
pass
|
codereview_new_python_data_9396
|
def Manager(self):
from .managers import SyncManager
ctx = self.get_context()
m = SyncManager(ctx=ctx)
- proc_class = ctx.Process
m.start()
return m
proc_class isn't used?
def Manager(self):
from .managers import SyncManager
ctx = self.get_context()
m = SyncManager(ctx=ctx)
m.start()
return m
|
codereview_new_python_data_9399
|
def get_event_loop(self):
except AttributeError:
pass
else:
while f:
module = f.f_globals.get('__name__')
if not (module == 'asyncio' or module.startswith('asyncio.')):
Can you add a comment explaining why this loop is needed? (IIUC it's so the warning is attached to a line outside asyncio itself?)
def get_event_loop(self):
except AttributeError:
pass
else:
+ # Move up the call stack so that the warning is attached
+ # to the line outside asyncio itself.
while f:
module = f.f_globals.get('__name__')
if not (module == 'asyncio' or module.startswith('asyncio.')):
|
codereview_new_python_data_9400
|
def getfqdn(name=''):
hostname from gethostname() is returned.
"""
name = name.strip()
- if not name or name == '0.0.0.0' or name == '::':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
```suggestion
if not name or name in ('0.0.0.0', '::'):
```
def getfqdn(name=''):
hostname from gethostname() is returned.
"""
name = name.strip()
+ if not name or name in ('0.0.0.0', '::'):
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
|
codereview_new_python_data_9401
|
def test_splitdrive(self):
# gh-96290: support partial/invalid UNC drives
tester('ntpath.splitdrive("//")', ("//", "")) # empty server & missing share
tester('ntpath.splitdrive("///")', ("//", "/")) # empty server & empty share
- tester('ntpath.splitdrive("///y")', ("///y", "")) # empty server & valid share
- tester('ntpath.splitdrive("//x")', ("//x", "")) # valid server & missing share
- tester('ntpath.splitdrive("//x/")', ("//x", "/")) # valid server & empty share
def test_split(self):
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
Maybe change the wording in the comments to "non-empty" instead of "valid".
```suggestion
tester('ntpath.splitdrive("///y")', ("///y", "")) # empty server & non-empty share
tester('ntpath.splitdrive("//x")', ("//x", "")) # non-empty server & missing share
tester('ntpath.splitdrive("//x/")', ("//x", "/")) # non-empty server & empty share
```
def test_splitdrive(self):
# gh-96290: support partial/invalid UNC drives
tester('ntpath.splitdrive("//")', ("//", "")) # empty server & missing share
tester('ntpath.splitdrive("///")', ("//", "/")) # empty server & empty share
+ tester('ntpath.splitdrive("///y")', ("///y", "")) # empty server & non-empty share
+ tester('ntpath.splitdrive("//x")', ("//x", "")) # non-empty server & missing share
+ tester('ntpath.splitdrive("//x/")', ("//x", "/")) # non-empty server & empty share
def test_split(self):
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
|
codereview_new_python_data_9402
|
def test_normpath(self):
tester("ntpath.normpath('//server/share/../..')", '\\\\server\\share\\')
tester("ntpath.normpath('//server/share/../../')", '\\\\server\\share\\')
- # gh-96290: don't normalize partial/invalid UNC drives
tester("ntpath.normpath('\\\\foo\\bar')", '\\\\foo\\bar')
tester("ntpath.normpath('\\\\foo\\')", '\\\\foo\\')
tester("ntpath.normpath('\\\\foo')", '\\\\foo')
An invalid UNC drive part is skipped instead of normalizing it as a rooted path, but it's still normalized to replace forward slashes with backslashes.
```suggestion
# gh-96290: don't normalize partial/invalid UNC drives as rooted paths
```
def test_normpath(self):
tester("ntpath.normpath('//server/share/../..')", '\\\\server\\share\\')
tester("ntpath.normpath('//server/share/../../')", '\\\\server\\share\\')
+ # gh-96290: don't normalize partial/invalid UNC drives as rooted paths
tester("ntpath.normpath('\\\\foo\\bar')", '\\\\foo\\bar')
tester("ntpath.normpath('\\\\foo\\')", '\\\\foo\\')
tester("ntpath.normpath('\\\\foo')", '\\\\foo')
|
codereview_new_python_data_9403
|
def current_task(loop=None):
loop = events.get_running_loop()
try:
return _current_tasks[loop]
- except:
return None
It _shouldn't_ matter, but preferably we should not blanket-silence all exceptions here.
```suggestion
except KeyError:
```
def current_task(loop=None):
loop = events.get_running_loop()
try:
return _current_tasks[loop]
+ except KeyError:
return None
|
codereview_new_python_data_9405
|
def test_default(self):
self.dumps(repr(type)))
def test_ordereddict(self):
- od = collections.OrderedDict(a=1, b=2)
- od.move_to_end('a')
self.assertEqual(
self.dumps(od),
- '{"b": 2, "a": 1}')
self.assertEqual(
self.dumps(od, sort_keys=True),
- '{"a": 1, "b": 2}')
class TestPyDefault(TestDefault, PyTest): pass
I suggest adding a few more pairs to make this a more convincing test.
def test_default(self):
self.dumps(repr(type)))
def test_ordereddict(self):
+ od = collections.OrderedDict(a=1, b=2, c=3, d=4)
+ od.move_to_end('b')
self.assertEqual(
self.dumps(od),
+ '{"a": 1, "c": 3, "d": 4, "b": 2}')
self.assertEqual(
self.dumps(od, sort_keys=True),
+ '{"a": 1, "b": 2, "c": 3, "d": 4}')
class TestPyDefault(TestDefault, PyTest): pass
|
codereview_new_python_data_9406
|
def test_create_server_trsock(self):
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
dup = sock.dup()
I would add a comment here that `sock` is an `asyncio.trsock.TransportSocket` instance -- this was the root of my confusion.
def test_create_server_trsock(self):
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
+ self.assertIsInstance(sock, asyncio.trsock.TransportSocket)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
dup = sock.dup()
|
codereview_new_python_data_9407
|
def dummy():
frame = _testcapi.frame_new(dummy.__code__, globals(), locals())
# The following line should not cause a segmentation fault.
- self.assertEqual(frame.f_back, None)
if __name__ == "__main__":
unittest.main()
```suggestion
self.assertIsNone(frame.f_back)
```
def dummy():
frame = _testcapi.frame_new(dummy.__code__, globals(), locals())
# The following line should not cause a segmentation fault.
+ self.assertIsNone(frame.f_back)
if __name__ == "__main__":
unittest.main()
|
codereview_new_python_data_9408
|
async def get_command_stdout(cmd, *args):
async def main():
outputs = [f'foo{i}' for i in range(10)]
res = await asyncio.gather(*[get_command_stdout(sys.executable, '-c',
- f'import sys; print({out!r})') for out in outputs])
self.assertEqual(res, outputs)
self.loop.run_until_complete(main())
```suggestion
f'print({out!r})') for out in outputs])
```
To be committed after buildbots are done.
async def get_command_stdout(cmd, *args):
async def main():
outputs = [f'foo{i}' for i in range(10)]
res = await asyncio.gather(*[get_command_stdout(sys.executable, '-c',
+ f'print({out!r})') for out in outputs])
self.assertEqual(res, outputs)
self.loop.run_until_complete(main())
|
codereview_new_python_data_9419
|
def get_ci_stage(event_name):
elif event_name == PUSH_EVENT_NAME:
return "postsubmit"
elif event_name == SCHEDULE_EVENT_NAME:
- return "schedule"
elif event_name == WORKFLOW_DISPATCH_EVENT_NAME:
return "unknown"
raise ValueError(f"Unrecognized event name '{event_name}'")
Schedule should probably use "postsubmit" as the stage since it runs on already merged code.
```suggestion
return "postsubmit"
```
def get_ci_stage(event_name):
elif event_name == PUSH_EVENT_NAME:
return "postsubmit"
elif event_name == SCHEDULE_EVENT_NAME:
+ return "postsubmit"
elif event_name == WORKFLOW_DISPATCH_EVENT_NAME:
return "unknown"
raise ValueError(f"Unrecognized event name '{event_name}'")
|
codereview_new_python_data_9426
|
def build_run_flags_for_execution_config(
gpu_id: str = "0") -> List[str]:
"""Returns the IREE run module flags of the execution config."""
- run_flags = list(module_execution_config.extra_flags)
- run_flags.append("--device_allocator=caching")
driver = module_execution_config.driver
if driver == RuntimeDriver.CUDA:
run_flags.append(f"--device=cuda://{gpu_id}")
This feels a bit ad-hoc. Basically we're saying that we *always* run with the caching allocator. I don't think that we want to hardcode this. It should be part of the execution configuration.
def build_run_flags_for_execution_config(
gpu_id: str = "0") -> List[str]:
"""Returns the IREE run module flags of the execution config."""
+ run_flags = list(module_execution_config.global_flags)
+ run_flags.extend(module_execution_config.extra_flags)
driver = module_execution_config.driver
if driver == RuntimeDriver.CUDA:
run_flags.append(f"--device=cuda://{gpu_id}")
|
codereview_new_python_data_9427
|
def generate_rules() -> List[str]:
# TODO(#11136): Currently the DRIVER is a separate field in the CMake rule (
# and has effect on test labels). Generates the flags without the driver.
runner_args += run_module_utils.build_run_flags_for_execution_config(
- test_config.execution_config, without_driver=True)
cmake_rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(
target_name=test_config.name,
model=f"{model.id}_{model.name}",
Isn't this just
```suggestion
runner_args += test_config.execution_config.extra_flags
```
def generate_rules() -> List[str]:
# TODO(#11136): Currently the DRIVER is a separate field in the CMake rule (
# and has effect on test labels). Generates the flags without the driver.
runner_args += run_module_utils.build_run_flags_for_execution_config(
+ test_config.execution_config, with_driver=False)
cmake_rule = cmake_builder.rules.build_iree_benchmark_suite_module_test(
target_name=test_config.name,
model=f"{model.id}_{model.name}",
|
codereview_new_python_data_9440
|
def get_test_shapes(shapes_id: ShapesId):
# (see get_test_generators).
]
if shapes_id == ShapesId.GPU_LARGE:
- return [TestShape(m=512, k=128, n=256)]
raise ValueError(shapes_id)
why do we need this change?
def get_test_shapes(shapes_id: ShapesId):
# (see get_test_generators).
]
if shapes_id == ShapesId.GPU_LARGE:
+ return [TestShape(m=256, k=128, n=512)]
raise ValueError(shapes_id)
|
codereview_new_python_data_9442
|
def test_get_previous_comment_on_pr(self):
},
"body": "comment id: 1234"
}]
-
- def _handle_get(endpoint: str, payload: Any):
- if payload["page"] == 1:
- return first_mock_response
- if payload["page"] == 2:
- return second_mock_response
- raise ValueError("Unexpected page")
-
mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
- mock_requester.get.side_effect = _handle_get
client = post_benchmark_comment.GithubClient(mock_requester)
comment_id = client.get_previous_comment_on_pr(pr_number=23,
FYI, you can give side_effect an iterable:
```suggestion
requester.get.side_effect = [first_response, second_response]
```
It's a bit less robust because it doesn't actually ensure you're calling with the correct page numbers, but it's much less verbose. Writing good tests is hard...
def test_get_previous_comment_on_pr(self):
},
"body": "comment id: 1234"
}]
mock_requester = mock.create_autospec(post_benchmark_comment.APIRequester)
+ mock_requester.get.side_effect = [first_mock_response, second_mock_response]
client = post_benchmark_comment.GithubClient(mock_requester)
comment_id = client.get_previous_comment_on_pr(pr_number=23,
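A minimal illustration of the iterable `side_effect` behaviour the reviewer mentions: each call consumes the next item in order, and an exhausted iterable raises `StopIteration`:
```python
from unittest import mock

api = mock.Mock()
api.get.side_effect = [{"page": 1}, {"page": 2}]
assert api.get() == {"page": 1}   # first call -> first item
assert api.get() == {"page": 2}   # second call -> second item
# A third api.get() would raise StopIteration.
```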
|
codereview_new_python_data_9443
|
def real_path_or_none(
capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
if args.e2e_test_artifacts_dir is not None:
- if args.run_config is None:
- raise ValueError(
- "--e2e_test_artifacts_dir only supports using with --run_config.")
-
root_benchmark_dir = args.e2e_test_artifacts_dir
else:
# TODO(#11076): Remove legacy path.
- if args.build_dir is None:
- raise ValueError(
- "Either --e2e_test_artifacts_dir or <build-dir> must be specified.")
-
build_dir = args.build_dir.resolve()
if args.run_config is not None:
root_benchmark_dir = build_dir / E2E_TEST_ARTIFACTS_REL_PATH
Grammar is a bit weird here. Maybe this is what you mean?
```suggestion
"--e2e_test_artifacts_dir requires --run_config.")
```
def real_path_or_none(
capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
if args.e2e_test_artifacts_dir is not None:
root_benchmark_dir = args.e2e_test_artifacts_dir
else:
# TODO(#11076): Remove legacy path.
build_dir = args.build_dir.resolve()
if args.run_config is not None:
root_benchmark_dir = build_dir / E2E_TEST_ARTIFACTS_REL_PATH
|
codereview_new_python_data_9444
|
def real_path_or_none(
capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
if args.e2e_test_artifacts_dir is not None:
- if args.run_config is None:
- raise ValueError(
- "--e2e_test_artifacts_dir only supports using with --run_config.")
-
root_benchmark_dir = args.e2e_test_artifacts_dir
else:
# TODO(#11076): Remove legacy path.
- if args.build_dir is None:
- raise ValueError(
- "Either --e2e_test_artifacts_dir or <build-dir> must be specified.")
-
build_dir = args.build_dir.resolve()
if args.run_config is not None:
root_benchmark_dir = build_dir / E2E_TEST_ARTIFACTS_REL_PATH
Don't we also want to check that both aren't specified? Could use a mutually exclusive group for that: https://docs.python.org/3/library/argparse.html#mutual-exclusion
def real_path_or_none(
capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
if args.e2e_test_artifacts_dir is not None:
root_benchmark_dir = args.e2e_test_artifacts_dir
else:
# TODO(#11076): Remove legacy path.
build_dir = args.build_dir.resolve()
if args.run_config is not None:
root_benchmark_dir = build_dir / E2E_TEST_ARTIFACTS_REL_PATH
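A hedged sketch of that idea against this script's two inputs (argument names copied from the messages; other details are assumptions): a required mutually exclusive group makes argparse reject both none-given and both-given before any path handling runs. Note a positional can only join such a group when it is optional (`nargs='?'`):
```python
import argparse
import pathlib

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--e2e_test_artifacts_dir", type=pathlib.Path)
group.add_argument("build_dir", type=pathlib.Path, nargs="?")
# Giving neither input, or both, now exits with a usage error.
args = parser.parse_args()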
|
codereview_new_python_data_9445
|
def real_path_or_none(
capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
if args.e2e_test_artifacts_dir is not None:
- if args.run_config is None:
- raise ValueError(
- "--e2e_test_artifacts_dir only supports using with --run_config.")
-
root_benchmark_dir = args.e2e_test_artifacts_dir
else:
# TODO(#11076): Remove legacy path.
- if args.build_dir is None:
- raise ValueError(
- "Either --e2e_test_artifacts_dir or <build-dir> must be specified.")
-
build_dir = args.build_dir.resolve()
if args.run_config is not None:
root_benchmark_dir = build_dir / E2E_TEST_ARTIFACTS_REL_PATH
This sort of validation seems like it would be better suited to the argument parser itself
def real_path_or_none(
capture_tmp_dir=per_commit_tmp_dir / CAPTURES_REL_PATH)
if args.e2e_test_artifacts_dir is not None:
root_benchmark_dir = args.e2e_test_artifacts_dir
else:
# TODO(#11076): Remove legacy path.
build_dir = args.build_dir.resolve()
if args.run_config is not None:
root_benchmark_dir = build_dir / E2E_TEST_ARTIFACTS_REL_PATH
|
codereview_new_python_data_9446
|
def main(args: argparse.Namespace):
)
host_environment = host_environments.pop()
- module_dir_paths = sort_and_dedup_paths([
- iree_artifacts.get_module_dir_path(config.module_generation_config)
for config in run_configs
- ])
output_map[device_name] = {
"host_environment": dataclasses.asdict(host_environment),
If you made the function accept a generator, you could avoid even creating this list in the first place.
def main(args: argparse.Namespace):
)
host_environment = host_environments.pop()
+ module_dir_paths = sorted(set(
+ str(iree_artifacts.get_module_dir_path(config.module_generation_config))
for config in run_configs
+ ))
output_map[device_name] = {
"host_environment": dataclasses.asdict(host_environment),
|
codereview_new_python_data_9447
|
def main(args: argparse.Namespace):
)
host_environment = host_environments.pop()
- module_dir_paths = sort_and_dedup_paths([
- iree_artifacts.get_module_dir_path(config.module_generation_config)
for config in run_configs
- ])
output_map[device_name] = {
"host_environment": dataclasses.asdict(host_environment),
Per other comments, this looks a lot simpler to me:
```suggestion
module_dir_paths = sorted(set(
str(iree_artifacts.get_module_dir_path(config.module_generation_config))
for config in run_configs
))
```
def main(args: argparse.Namespace):
)
host_environment = host_environments.pop()
+ module_dir_paths = sorted(set(
+ str(iree_artifacts.get_module_dir_path(config.module_generation_config))
for config in run_configs
+ ))
output_map[device_name] = {
"host_environment": dataclasses.asdict(host_environment),
|
codereview_new_python_data_9448
|
def main(args: argparse.Namespace):
)
host_environment = host_environments.pop()
- module_dir_paths = sort_and_dedup_paths([
- iree_artifacts.get_module_dir_path(config.module_generation_config)
for config in run_configs
- ])
output_map[device_name] = {
"host_environment": dataclasses.asdict(host_environment),
Huh, would be nice if the path was just naturally serializable
def main(args: argparse.Namespace):
)
host_environment = host_environments.pop()
+ module_dir_paths = sorted(set(
+ str(iree_artifacts.get_module_dir_path(config.module_generation_config))
for config in run_configs
+ ))
output_map[device_name] = {
"host_environment": dataclasses.asdict(host_environment),
|
codereview_new_python_data_9450
|
def get_table_title() -> str:
class TotalArtifactSizeToTable(MetricsToTableMapper[CompilationMetrics]):
- """Helper to map CompilationMetrics to total dispatch size column."""
def update_base_value(self, compile_metrics: CompilationMetrics,
base_value: Any) -> CompilationMetrics:
nit: ... to total artifact size column.
def get_table_title() -> str:
class TotalArtifactSizeToTable(MetricsToTableMapper[CompilationMetrics]):
+ """Helper to map CompilationMetrics to total artifact size column."""
def update_base_value(self, compile_metrics: CompilationMetrics,
base_value: Any) -> CompilationMetrics:
|
codereview_new_python_data_9465
|
def get_ci_stage(event_name):
def get_benchmark_presets(trailers: Mapping[str, str]) -> str:
trailer = trailers.get(BENCHMARK_PRESET_KEY)
if trailer is None:
return ""
I think it's worth a comment explaining why the output here is a string.
def get_ci_stage(event_name):
def get_benchmark_presets(trailers: Mapping[str, str]) -> str:
+ """Parses the benchmark presets from trailers.
+
+ Args:
+ trailers: trailers from PR description.
+
+ Returns:
+ A comma separated preset string, which later will be parsed by
+ build_tools/benchmarks/export_benchmark_config.py.
+ """
trailer = trailers.get(BENCHMARK_PRESET_KEY)
if trailer is None:
return ""
|
codereview_new_python_data_9469
|
def adb_start_cmd(cmd_args: Sequence[str],
def get_vmfb_full_path_for_benchmark_case(
benchmark_case_dir: pathlib.Path) -> pathlib.Path:
flagfile_path = benchmark_case_dir / MODEL_FLAGFILE_NAME
- with flagfile_path.open("r") as flagfile:
- flagfile_lines = flagfile.readlines()
- for line in flagfile_lines:
flag_name, flag_value = line.strip().split("=")
if flag_name == "--module_file":
# Realpath canonicalization matters. The caller may rely on that to track
Since you're reading all the lines at once anyway, maybe this is simpler?
```suggestion
for line in flagfile_path.read_text().splitlines():
```
(note that this also drops the trailing newline).
Alternatively, if you don't want to bring the whole file into memory at once, you could bring the for loop into the with clause and just iterate over `flagfile`
def adb_start_cmd(cmd_args: Sequence[str],
def get_vmfb_full_path_for_benchmark_case(
benchmark_case_dir: pathlib.Path) -> pathlib.Path:
flagfile_path = benchmark_case_dir / MODEL_FLAGFILE_NAME
+ for line in flagfile_path.read_text().splitlines():
flag_name, flag_value = line.strip().split("=")
if flag_name == "--module_file":
# Realpath canonicalization matters. The caller may rely on that to track
|
codereview_new_python_data_9470
|
def adb_start_cmd(cmd_args: Sequence[str],
def get_vmfb_full_path_for_benchmark_case(
benchmark_case_dir: pathlib.Path) -> pathlib.Path:
flagfile_path = benchmark_case_dir / MODEL_FLAGFILE_NAME
- with flagfile_path.open("r") as flagfile:
- flagfile_lines = flagfile.readlines()
- for line in flagfile_lines:
flag_name, flag_value = line.strip().split("=")
if flag_name == "--module_file":
# Realpath canonicalization matters. The caller may rely on that to track
I think the variable names don't need to be so verbose. It's clear what all these things are here.
```suggestion
flagfile = benchmark_case_dir / MODEL_FLAGFILE_NAME
with flagfile.open("r") as f:
lines = f.readlines()
```
def adb_start_cmd(cmd_args: Sequence[str],
def get_vmfb_full_path_for_benchmark_case(
benchmark_case_dir: pathlib.Path) -> pathlib.Path:
flagfile_path = benchmark_case_dir / MODEL_FLAGFILE_NAME
+ for line in flagfile_path.read_text().splitlines():
flag_name, flag_value = line.strip().split("=")
if flag_name == "--module_file":
# Realpath canonicalization matters. The caller may rely on that to track
|
codereview_new_python_data_9471
|
def __create_bench(dir_path: pathlib.Path, model_name: str,
if len(model_tags) > 0:
model_name_with_tags += f"-{','.join(model_tags)}"
bench_path = dir_path / model_name_with_tags / case_name
- os.makedirs(bench_path)
(bench_path / "tool").write_text(tool)
return BenchmarkCase(model_name=model_name,
```suggestion
bench_path.mkdir(parents=True)
```
def __create_bench(dir_path: pathlib.Path, model_name: str,
if len(model_tags) > 0:
model_name_with_tags += f"-{','.join(model_tags)}"
bench_path = dir_path / model_name_with_tags / case_name
+ bench_path.mkdir(parents=True)
(bench_path / "tool").write_text(tool)
return BenchmarkCase(model_name=model_name,
|
codereview_new_python_data_9479
|
def main(args: argparse.Namespace):
artifacts_root = (
e2e_test_artifacts.artifacts.generate_default_artifacts_root())
- root_path = pathlib.PurePath(f"${{{ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE}}}")
package_name = f"${{{PACKAGE_NAME_CMAKE_VARIABLE}}}"
model_rule_map = model_rule_generator.generate_model_rule_map(
root_path=root_path, artifacts_root=artifacts_root.model_artifacts_root)
output_dir = pathlib.Path(args.output_dir)
fetch_models_cmake_file = output_dir / GENERATED_E2E_TEST_FETCH_MODELS_CMAKE_FILENAMAE
- with fetch_models_cmake_file.open("w") as output_file:
- cmake_rules = itertools.chain.from_iterable(
- rule.cmake_rules for rule in model_rule_map.values())
- output_file.write("\n".join(cmake_rules))
iree_cmake_rules = iree_rule_generator.generate_rules(
package_name=package_name,
root_path=root_path,
artifacts_root=artifacts_root.iree_artifacts_root,
model_rule_map=model_rule_map)
- iree_artifacts_cmake_file = output_dir / GENERATED_E2E_TEST_IREE_ARTIFACTS_CMAKE_FILENAME
- with iree_artifacts_cmake_file.open("w") as output_file:
- output_file.write("\n".join(iree_cmake_rules))
if __name__ == "__main__":
Given the use of curly braces here, maybe it would be better to use something other than f-strings for clarity?
```suggestion
root_path = pathlib.PurePath("${%s}" % ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE)
```
It's inconsistent, but I think the reason for the inconsistency is clear (same way you use single-quotes if you need a double-quote in the string, for instance)
def main(args: argparse.Namespace):
artifacts_root = (
e2e_test_artifacts.artifacts.generate_default_artifacts_root())
+ root_path = pathlib.PurePath("${%s}" % ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE)
package_name = f"${{{PACKAGE_NAME_CMAKE_VARIABLE}}}"
model_rule_map = model_rule_generator.generate_model_rule_map(
root_path=root_path, artifacts_root=artifacts_root.model_artifacts_root)
output_dir = pathlib.Path(args.output_dir)
fetch_models_cmake_file = output_dir / GENERATED_E2E_TEST_FETCH_MODELS_CMAKE_FILENAMAE
+ cmake_rules = itertools.chain.from_iterable(
+ rule.cmake_rules for rule in model_rule_map.values())
+ fetch_models_cmake_file.write_text("\n".join(cmake_rules))
iree_cmake_rules = iree_rule_generator.generate_rules(
package_name=package_name,
root_path=root_path,
artifacts_root=artifacts_root.iree_artifacts_root,
model_rule_map=model_rule_map)
+ (output_dir / GENERATED_E2E_TEST_IREE_ARTIFACTS_CMAKE_FILENAME).write_text(
+ "\n".join(iree_cmake_rules))
if __name__ == "__main__":
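Both spellings produce the same text; the `%`-form just avoids the tripled braces that motivated the comment (the variable's value here is illustrative):
```python
ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE = "ROOT_ARTIFACTS_DIR"

f_string = f"${{{ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE}}}"   # {{ -> {, }} -> }
percent = "${%s}" % ROOT_ARTIFACTS_DIR_CMAKE_VARIABLE
assert f_string == percent == "${ROOT_ARTIFACTS_DIR}"
```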
|
codereview_new_python_data_9480
|
class BenchmarkCase:
def _find_driver_info_by_execution_config(
module_execution_config: iree_definitions.ModuleExecutionConfig
) -> Optional[DriverInfo]:
- """Finds the matched driver info by the module exeuction config.
Args:
module_execution_config: module execution config to match.
Nit: typo
```suggestion
"""Finds the matched driver info by the module execution config.
```
class BenchmarkCase:
def _find_driver_info_by_execution_config(
module_execution_config: iree_definitions.ModuleExecutionConfig
) -> Optional[DriverInfo]:
+ """Finds the matched driver info by the module execution config.
Args:
module_execution_config: module execution config to match.
|
codereview_new_python_data_9481
|
CUDA_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_CUDA,
tags=["full-inference", "default-flags"],
- loader=iree_definitions.RuntimeLoader.CUDA,
driver=iree_definitions.RuntimeDriver.CUDA)
VULKAN_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN,
tags=["full-inference", "default-flags"],
- loader=iree_definitions.RuntimeLoader.VULKAN,
driver=iree_definitions.RuntimeDriver.VULKAN)
VULKAN_BATCH_SIZE_16_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_16,
tags=["full-inference", "experimental-flags"],
- loader=iree_definitions.RuntimeLoader.VULKAN,
driver=iree_definitions.RuntimeDriver.VULKAN,
extra_flags=["--batch_size=16"])
VULKAN_BATCH_SIZE_32_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_32,
tags=["full-inference", "experimental-flags"],
- loader=iree_definitions.RuntimeLoader.VULKAN,
driver=iree_definitions.RuntimeDriver.VULKAN,
extra_flags=["--batch_size=32"])
There may be some terminology confusion here. Only the 'local' (CPU) HAL currently has a concept of "loader": https://github.com/iree-org/iree/tree/main/runtime/src/iree/hal/local/loaders. There isn't an equivalent concept for the other HAL drivers. Maybe these should be `RuntimeLoader.NONE` or `NOT_APPLICABLE`?
https://github.com/iree-org/iree/blob/a09e5644504542578fb79851b243a943af748687/CMakeLists.txt#L126-L159
CUDA_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_CUDA,
tags=["full-inference", "default-flags"],
+ loader=iree_definitions.RuntimeLoader.NONE,
driver=iree_definitions.RuntimeDriver.CUDA)
VULKAN_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN,
tags=["full-inference", "default-flags"],
+ loader=iree_definitions.RuntimeLoader.NONE,
driver=iree_definitions.RuntimeDriver.VULKAN)
VULKAN_BATCH_SIZE_16_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_16,
tags=["full-inference", "experimental-flags"],
+ loader=iree_definitions.RuntimeLoader.NONE,
driver=iree_definitions.RuntimeDriver.VULKAN,
extra_flags=["--batch_size=16"])
VULKAN_BATCH_SIZE_32_CONFIG = iree_definitions.ModuleExecutionConfig(
id=unique_ids.IREE_MODULE_EXECUTION_CONFIG_VULKAN_BATCH_SIZE_32,
tags=["full-inference", "experimental-flags"],
+ loader=iree_definitions.RuntimeLoader.NONE,
driver=iree_definitions.RuntimeDriver.VULKAN,
extra_flags=["--batch_size=32"])
|
codereview_new_python_data_9483
|
def main():
mlir = torch_mlir.compile(graph,
train_args,
- output_type=torch_mlir.OutputType.LINALG_ON_TENSORS,
- use_tracing=False)
vmfb = iree_torch.compile_to_vmfb(mlir, args.iree_backend)
with open(args.output_file, "wb") as f:
btw, we support multiple exported methods now if you want to use that:
def main():
mlir = torch_mlir.compile(graph,
train_args,
+ output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)
vmfb = iree_torch.compile_to_vmfb(mlir, args.iree_backend)
with open(args.output_file, "wb") as f:
|
codereview_new_python_data_9484
|
def main():
mlir = torch_mlir.compile(graph,
train_args,
- output_type=torch_mlir.OutputType.LINALG_ON_TENSORS,
- use_tracing=False)
vmfb = iree_torch.compile_to_vmfb(mlir, args.iree_backend)
with open(args.output_file, "wb") as f:
use_tracing=False is the default value, so it can be omitted.
def main():
mlir = torch_mlir.compile(graph,
train_args,
+ output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)
vmfb = iree_torch.compile_to_vmfb(mlir, args.iree_backend)
with open(args.output_file, "wb") as f:
|
codereview_new_python_data_9487
|
def __build_tool_cmds(self, benchmark_case: BenchmarkCase,
cmds: List[Any] = run_module_utils.build_linux_wrapper_cmds_for_device_spec(
run_config.target_device_spec)
- cmds += [tool_path]
module_path = iree_artifacts.get_module_path(
run_config.module_generation_config,
append?
```suggestion
cmds.append(tool_path)
```
(same below)
def __build_tool_cmds(self, benchmark_case: BenchmarkCase,
cmds: List[Any] = run_module_utils.build_linux_wrapper_cmds_for_device_spec(
run_config.target_device_spec)
+ cmds.append(tool_path)
module_path = iree_artifacts.get_module_path(
run_config.module_generation_config,
|
codereview_new_python_data_9488
|
def build_model_import_rule(
model = imported_model.model
if model.source_type == common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR:
- if pathlib.Path(source_model_rule.file_path) != output_file_path:
raise ValueError("Separate path for Linalg model isn't supported ('" +
source_model_rule.file_path + "' != '" +
str(output_file_path) + "')")
Any reason not to use `pathlib.PurePath` here? (The file path can contain CMake variable names, so they are not real paths on the filesystem.)
def build_model_import_rule(
model = imported_model.model
if model.source_type == common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR:
+ if pathlib.PurePath(source_model_rule.file_path) != output_file_path:
raise ValueError("Separate path for Linalg model isn't supported ('" +
source_model_rule.file_path + "' != '" +
str(output_file_path) + "')")
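The distinction the reviewer is drawing, in brief: `PurePath` only does string manipulation and never touches the filesystem, so it is safe for paths that embed CMake variables and don't exist on disk. A small sketch:
```python
import pathlib

p = pathlib.PurePath("${ROOT_ARTIFACTS_DIR}/models/model.mlir")
print(p.name)    # model.mlir -- pure computation, no disk access
print(p.parent)  # ${ROOT_ARTIFACTS_DIR}/models
# p.exists() would fail: PurePath deliberately has no I/O methods.
```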
|
codereview_new_python_data_9492
|
def generate_rules(
artifacts_root=artifacts_root.iree_artifacts_root,
model_rule_map=model_rule_map)
- # Currently the rules are simple so the common rules can be always put at the
# top. Need a topological sort once the dependency gets complicated.
return model_cmake_rules + iree_cmake_rules
```suggestion
# Currently the rules are simple so the model rules can be always put at the
```
def generate_rules(
artifacts_root=artifacts_root.iree_artifacts_root,
model_rule_map=model_rule_map)
+ # Currently the rules are simple so the model rules can be always put at the
# top. Need a topological sort once the dependency gets complicated.
return model_cmake_rules + iree_cmake_rules
|
codereview_new_python_data_9496
|
-## copyright 2022 the iree authors
#
-# licensed under the apache license v2.0 with llvm exceptions.
-# see https://llvm.org/license.txt for license information.
-# spdx-license-identifier: apache-2.0 with llvm-exception
"""Defines the collections of device specs and provides query methods."""
from typing import List, Sequence, Set
Something went weird with casing here
+# Copyright 2022 The IREE Authors
#
+# Licensed under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Defines the collections of device specs and provides query methods."""
from typing import List, Sequence, Set
|
codereview_new_python_data_9502
|
def generate(
default_run_configs = cls._generate_default_run_configs()
- # Generate compile specs for mobile models.
- mobile_model_compile_specs = [
iree_definitions.CompileSpec(
compile_config=cls.CASCADELAKE_COMPILE_CONFIG, model=model)
- for model in model_groups.MOBILE
]
- # Generate compile specs for workstation models.
- workstation_model_compile_specs = [
- iree_definitions.CompileSpec(
- compile_config=cls.CASCADELAKE_COMPILE_CONFIG, model=model)
- for model in model_groups.WORKSTATION
- ]
-
- # Generate run specs for all models.
- compile_specs = (mobile_model_compile_specs +
- workstation_model_compile_specs)
run_specs = _generate_run_specs(
compile_specs=compile_specs,
run_configs=default_run_configs,
These comments seem superfluous, especially the last one, where you're clearly just adding two lists together (same with the "...mobile..." one above).
```suggestion
workstation_model_compile_specs = [
iree_definitions.CompileSpec(
compile_config=cls.CASCADELAKE_COMPILE_CONFIG, model=model)
for model in model_groups.WORKSTATION
]
compile_specs = (mobile_model_compile_specs +
```
def generate(
default_run_configs = cls._generate_default_run_configs()
+ compile_specs = [
iree_definitions.CompileSpec(
compile_config=cls.CASCADELAKE_COMPILE_CONFIG, model=model)
+ for model in model_groups.SMALL + model_groups.LARGE
]
run_specs = _generate_run_specs(
compile_specs=compile_specs,
run_configs=default_run_configs,
|
codereview_new_python_data_9509
|
def setUp(self):
return
self.workdir = _setup_artifacts_dir("download")
print(f"TMPDIR = {self.workdir}")
- self.tflite_file = '/'.join([self.workdir, 'model.bc'])
- self.tflite_ir = '/'.join([self.workdir, 'tflite.bc'])
- self.iree_ir = '/'.join([self.workdir, 'tosa.bc'])
if os.path.exists(self.model_path):
self.tflite_file = self.model_path
else:
is the canonical mlir extension bc? (I hope not, as that's llvm's extension!)
def setUp(self):
return
self.workdir = _setup_artifacts_dir("download")
print(f"TMPDIR = {self.workdir}")
+ self.tflite_file = '/'.join([self.workdir, 'model.mlirbc'])
+ self.tflite_ir = '/'.join([self.workdir, 'tflite.mlirbc'])
+ self.iree_ir = '/'.join([self.workdir, 'tosa.mlirbc'])
if os.path.exists(self.model_path):
self.tflite_file = self.model_path
else:
|
codereview_new_python_data_9511
|
def main(args):
print(f"Updating {mig.name} to new versions:"
f" {summarize_versions(new_versions)}")
if not args.dry_run:
- migs_client.patch(
- project=args.project,
- region=region,
- instance_group_manager=mig.name,
- instance_group_manager_resource=compute.InstanceGroupManager(
- versions=new_versions, update_policy=update_policy))
print(f"Successfully updated {mig.name}")
Seeing "Successfully updated" in the output could be confusing in dry run mode. Maybe print something different for dry run mode?
def main(args):
print(f"Updating {mig.name} to new versions:"
f" {summarize_versions(new_versions)}")
+ request = compute.PatchRegionInstanceGroupManagerRequest(
+ project=args.project,
+ region=region,
+ instance_group_manager=mig.name,
+ instance_group_manager_resource=compute.InstanceGroupManager(
+ versions=new_versions, update_policy=update_policy))
+
if not args.dry_run:
+ migs_client.patch(request)
+ else:
+ print(f"Dry run, so not sending this patch request:\n```\n{request}```")
print(f"Successfully updated {mig.name}")
|
codereview_new_python_data_9585
|
def get_addons_stats(hass):
@callback
@bind_hass
def get_core_stats(hass):
- """Return Addons stats.
Async friendly.
"""
```suggestion
"""Return core stats.
```
def get_addons_stats(hass):
@callback
@bind_hass
def get_core_stats(hass):
+ """Return core stats.
Async friendly.
"""
|
codereview_new_python_data_9586
|
async def test_restore_state(mock_heat_meter, hass: HomeAssistant) -> None:
assert state.attributes.get(ATTR_STATE_CLASS) is None
-@patch(API_HEAT_METER_SERVICE)
-async def test_exception_during_setup(mock_heat_meter, hass: HomeAssistant) -> None:
- """Test sensor."""
- entry_data = {
- "device": "/dev/USB0",
- "model": "LUGCUH50",
- "device_number": "123456789",
- }
- mock_entry = MockConfigEntry(domain=DOMAIN, unique_id=DOMAIN, data=entry_data)
- mock_heat_meter().read.side_effect = serial.serialutil.SerialException
- mock_heat_meter.reset_mock()
- mock_entry.add_to_hass(hass)
-
- await hass.config_entries.async_setup(mock_entry.entry_id)
- await async_setup_component(hass, HA_DOMAIN, {})
- await hass.async_block_till_done()
-
- mock_heat_meter.assert_called_once()
-
-
@patch(API_HEAT_METER_SERVICE)
async def test_exception_on_polling(mock_heat_meter, hass: HomeAssistant) -> None:
"""Test sensor."""
I don't think this test is correct.
You are setting up the mock, but then resetting it straight away and finally only asserting that the mock was created?!?
Or maybe I am just not understanding this test?
async def test_restore_state(mock_heat_meter, hass: HomeAssistant) -> None:
assert state.attributes.get(ATTR_STATE_CLASS) is None
@patch(API_HEAT_METER_SERVICE)
async def test_exception_on_polling(mock_heat_meter, hass: HomeAssistant) -> None:
"""Test sensor."""
|
codereview_new_python_data_9587
|
async def _async_shutdown(self, event: Event) -> None:
*(
asyncio.create_task(
entry.async_shutdown(),
- name=f"shutdown config entry {entry.title} {entry.domain} {entry.entry_id}",
)
for entry in self._entries.values()
)
```suggestion
name=f"config entry shutdown {entry.title} {entry.domain} {entry.entry_id}",
```
async def _async_shutdown(self, event: Event) -> None:
*(
asyncio.create_task(
entry.async_shutdown(),
+ name=f"config entry shutdown {entry.title} {entry.domain} {entry.entry_id}",
)
for entry in self._entries.values()
)
|
codereview_new_python_data_9588
|
def native_value(self) -> float | int | str | None:
"""Return current state."""
descr = self.entity_description
state: float | int | str | None = self.coordinator.data.get(descr.key)
- if state is not None and descr.factor and isinstance(state, float | int):
return state / descr.factor
return state
Side note: the union syntax is 50-100% slower than the tuple syntax on Python 3.10 and Python 3.11, according to the issue for this feature in ruff.
def native_value(self) -> float | int | str | None:
"""Return current state."""
descr = self.entity_description
state: float | int | str | None = self.coordinator.data.get(descr.key)
+ if state is not None and descr.factor and isinstance(state, (float, int)):
return state / descr.factor
return state
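The two spellings are equivalent checks; the gap reportedly comes from `float | int` constructing a `types.UnionType` object each time the expression is evaluated, while the tuple is cheaper to build. A sketch:
```python
state = 3.5

# Equivalent membership checks; the tuple form avoids constructing
# a types.UnionType on every evaluation of `float | int`.
assert isinstance(state, (float, int))
assert isinstance(state, float | int)   # Python 3.10+
```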
|
codereview_new_python_data_9589
|
def _rgbx_received(
if self._topic[CONF_BRIGHTNESS_STATE_TOPIC] is None:
rgb = convert_color(*color)
brightness = max(rgb)
- self._attr_brightness = min(round(brightness), 255)
# Normalize the color to 100% brightness
color = tuple(
min(round(channel / brightness * 255), 255) for channel in color
There's no reason to round when the division is removed
```suggestion
self._attr_brightness = brightness
```
def _rgbx_received(
if self._topic[CONF_BRIGHTNESS_STATE_TOPIC] is None:
rgb = convert_color(*color)
brightness = max(rgb)
+ self._attr_brightness = brightness
# Normalize the color to 100% brightness
color = tuple(
min(round(channel / brightness * 255), 255) for channel in color
|
codereview_new_python_data_9590
|
class ReolinkNumberEntityDescription(
value=lambda api, ch: api.get_focus(ch),
method=lambda api, ch, value: api.set_zoom(ch, int(value)),
),
- # "Floodlight turn on brightness" controlles the brightness of the floodlight when
# it is turned on internally by the camera (see "select.floodlight_mode" entity)
# or when using the "light.floodlight" entity.
ReolinkNumberEntityDescription(
```suggestion
# "Floodlight turn on brightness" controls the brightness of the floodlight when
```
class ReolinkNumberEntityDescription(
value=lambda api, ch: api.get_focus(ch),
method=lambda api, ch, value: api.set_zoom(ch, int(value)),
),
+ # "Floodlight turn on brightness" controls the brightness of the floodlight when
# it is turned on internally by the camera (see "select.floodlight_mode" entity)
# or when using the "light.floodlight" entity.
ReolinkNumberEntityDescription(
|
codereview_new_python_data_9591
|
async def async_attach_trigger(
armed_entities = set()
period: dict = {}
attribute = config.get(CONF_ATTRIBUTE)
- job = HassJob(action, f"numeric_state trigger {trigger_info}")
trigger_data = trigger_info["trigger_data"]
_variables = trigger_info["variables"] or {}
```suggestion
job = HassJob(action, f"numeric state trigger {trigger_info}")
```
async def async_attach_trigger(
armed_entities = set()
period: dict = {}
attribute = config.get(CONF_ATTRIBUTE)
+ job = HassJob(action, f"numeric state trigger {trigger_info}")
trigger_data = trigger_info["trigger_data"]
_variables = trigger_info["variables"] or {}
|
codereview_new_python_data_9592
|
def reolink_connect_fixture(mock_get_source_ip):
"homeassistant.components.reolink.host.Host", autospec=True
) as host_mock_class:
host_mock = host_mock_class.return_value
- host_mock.get_host_data = AsyncMock(return_value=None)
- host_mock.get_states = AsyncMock(return_value=None)
- host_mock.check_new_firmware = AsyncMock(return_value=False)
- host_mock.unsubscribe = AsyncMock(return_value=True)
- host_mock.logout = AsyncMock(return_value=True)
host_mock.mac_address = TEST_MAC
host_mock.onvif_enabled = True
host_mock.rtmp_enabled = True
```suggestion
host_mock.get_host_data.return_value = None
host_mock.get_states.return_value = None
host_mock.check_new_firmware.return_value = False
host_mock.unsubscribe.return_value = True
host_mock.logout.return_value = True
```
def reolink_connect_fixture(mock_get_source_ip):
"homeassistant.components.reolink.host.Host", autospec=True
) as host_mock_class:
host_mock = host_mock_class.return_value
+ host_mock.get_host_data.return_value = None
+ host_mock.get_states.return_value = None
+ host_mock.check_new_firmware.return_value = False
+ host_mock.unsubscribe.return_value = True
+ host_mock.logout.return_value = True
host_mock.mac_address = TEST_MAC
host_mock.onvif_enabled = True
host_mock.rtmp_enabled = True
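Why the suggestion works, sketched against a stand-in class (the real `Host` comes from `homeassistant.components.reolink.host`): with autospeccing, async methods are already `AsyncMock`s, so assigning `.return_value` is enough:
```python
from unittest import mock

class Host:                       # illustrative stand-in
    async def get_states(self):
        ...

host_mock = mock.create_autospec(Host, instance=True)
host_mock.get_states.return_value = None   # no AsyncMock(...) needed
assert isinstance(host_mock.get_states, mock.AsyncMock)
```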
|
codereview_new_python_data_9593
|
def process_write_state_requests(self, msg: MQTTMessage) -> None:
entity.async_write_ha_state()
except Exception: # pylint: disable=broad-except
_LOGGER.error(
- "Exception on handling write state request to %s for msg on "
"'%s' with payload: %s",
entity.entity_id,
msg.topic,
```suggestion
"Exception raised when updating state of %s, topic: "
```
def process_write_state_requests(self, msg: MQTTMessage) -> None:
entity.async_write_ha_state()
except Exception: # pylint: disable=broad-except
_LOGGER.error(
+ "Exception raised when updating state of %s, topic: "
"'%s' with payload: %s",
entity.entity_id,
msg.topic,
|
codereview_new_python_data_9594
|
def __init__(
manufacturer = device["manufacturer"]
device_type = device["type"]
- room_id: str = device.get("location")
room_name: str | None = None
if room_id is not None:
room_name = coordinator.rooms.get(room_id)
```suggestion
room_id: str | None = device.get("location")
```
def __init__(
manufacturer = device["manufacturer"]
device_type = device["type"]
+ room_id: str | None = device.get("location")
room_name: str | None = None
if room_id is not None:
room_name = coordinator.rooms.get(room_id)
|
codereview_new_python_data_9595
|
def __init__(
config_entry: ConfigEntry,
coordinator: LivisiDataUpdateCoordinator,
device: dict[str, Any],
- use_room_as_device_name=False,
) -> None:
"""Initialize the common properties of a Livisi device."""
self.config_details: Mapping[str, Any] = device["config"]
I suggest making this a keyword-only argument, which will give more flexibility:
```suggestion
*,
use_room_as_device_name: bool = False,
```
def __init__(
config_entry: ConfigEntry,
coordinator: LivisiDataUpdateCoordinator,
device: dict[str, Any],
+ use_room_as_device_name: bool = False,
) -> None:
"""Initialize the common properties of a Livisi device."""
self.config_details: Mapping[str, Any] = device["config"]
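A small illustration of the keyword-only marker suggested above, using a hypothetical `Device` stand-in rather than the Livisi entity: everything after the bare `*` must be passed by name, which keeps call sites explicit and lets the positional parameters be reordered later without breaking callers.
```python
class Device:
    """Hypothetical stand-in, not the Livisi entity."""

    def __init__(self, name: str, *, use_room_as_device_name: bool = False) -> None:
        # The bare * makes every parameter after it keyword-only.
        self.name = name
        self.use_room_as_device_name = use_room_as_device_name

Device("lamp", use_room_as_device_name=True)  # OK: passed by keyword
try:
    Device("lamp", True)  # TypeError: positional use is rejected
except TypeError as err:
    print(err)
```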
|
codereview_new_python_data_9596
|
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
-from .const import (
- DOMAIN,
- LIVISI_STATE_CHANGE,
- LOGGER,
- PSS_DEVICE_TYPE,
-)
from .coordinator import LivisiDataUpdateCoordinator
from .entity import LivisiEntity
Did you miss this `isort` issue?
`from .const import DOMAIN, LIVISI_STATE_CHANGE, LOGGER, PSS_DEVICE_TYPE`
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
+from .const import DOMAIN, LIVISI_STATE_CHANGE, LOGGER, PSS_DEVICE_TYPE
from .coordinator import LivisiDataUpdateCoordinator
from .entity import LivisiEntity
|
codereview_new_python_data_9597
|
def _update_state(msg: ReceiveMessage) -> None:
return
try:
if (payload_datetime := dt_util.parse_datetime(new_value)) is None:
- _LOGGER.warning(
- "Invalid state message '%s' from '%s'", msg.payload, msg.topic
- )
- return
except ValueError:
_LOGGER.warning(
"Invalid state message '%s' from '%s'", msg.payload, msg.topic
We should not change the native value if there is an error in the format.
def _update_state(msg: ReceiveMessage) -> None:
return
try:
if (payload_datetime := dt_util.parse_datetime(new_value)) is None:
+ raise ValueError
except ValueError:
_LOGGER.warning(
"Invalid state message '%s' from '%s'", msg.payload, msg.topic
|
codereview_new_python_data_9598
|
async def test_reauth(
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_PASSWORD: "password"}
)
- assert result["type"] == data_entry_flow.FlowResultType.ABORT
- assert result["reason"] == "reauth_successful"
- assert len(hass.config_entries.async_entries()) == 1
-
# Block to ensure the setup_config_entry fixture does not
# get undone before hass is shutdown so we do not try
# to setup the config entry via reload.
await hass.async_block_till_done()
We normally put the block till done directly after the function call that spawns the task.
```suggestion
# Block to ensure the setup_config_entry fixture does not
# get undone before hass is shutdown so we do not try
# to setup the config entry via reload.
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.FlowResultType.ABORT
assert result["reason"] == "reauth_successful"
assert len(hass.config_entries.async_entries()) == 1
```
async def test_reauth(
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_PASSWORD: "password"}
)
# Block to ensure the setup_config_entry fixture does not
# get undone before hass is shutdown so we do not try
# to setup the config entry via reload.
await hass.async_block_till_done()
+
+ assert result["type"] == data_entry_flow.FlowResultType.ABORT
+ assert result["reason"] == "reauth_successful"
+ assert len(hass.config_entries.async_entries()) == 1
|
codereview_new_python_data_9599
|
async def test_reauth(
assert result["type"] == data_entry_flow.FlowResultType.ABORT
assert result["reason"] == "reauth_successful"
- assert len(hass.config_entries.async_entries()) == 1
```suggestion
assert len(hass.config_entries.async_entries()) == 1
```
async def test_reauth(
assert result["type"] == data_entry_flow.FlowResultType.ABORT
assert result["reason"] == "reauth_successful"
+ assert len(hass.config_entries.async_entries()) == 1
|
codereview_new_python_data_9600
|
def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
- if len(context_id) == 32:
return UUID(context_id).bytes
if len(context_id) == 26:
return ulid_to_bytes(context_id)
This needs a test to make sure we can handle empty context_ids, the old-format 32-character UUIDs, and the 26-character ULIDs.
```python
>>> uuid.uuid4().hex
'34449f26f2514677bd1cdb2019071a76'
>>> len(uuid.uuid4().hex)
32
>>> uuid.UUID('34449f26f2514677bd1cdb2019071a76')
UUID('34449f26-f251-4677-bd1c-db2019071a76')
>>> uuid.UUID('34449f26f2514677bd1cdb2019071a76').hex
'34449f26f2514677bd1cdb2019071a76'
>>> uuid.UUID('34449f26f2514677bd1cdb2019071a76').bytes
b'4D\x9f&\xf2QFw\xbd\x1c\xdb \x19\x07\x1av'
```
def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
+ if len(context_id) == 36:
return UUID(context_id).bytes
if len(context_id) == 26:
return ulid_to_bytes(context_id)
|
codereview_new_python_data_9601
|
def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
- if len(context_id) == 32:
return UUID(context_id).bytes
if len(context_id) == 26:
return ulid_to_bytes(context_id)
```suggestion
if len(context_id) == 36:
```
def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
+ if len(context_id) == 36:
return UUID(context_id).bytes
if len(context_id) == 26:
return ulid_to_bytes(context_id)
|
codereview_new_python_data_9602
|
def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
- if len(context_id) == 36:
return UUID(context_id).bytes
if len(context_id) == 26:
return ulid_to_bytes(context_id)
```suggestion
if len(context_id) == 32:
```
def _context_id_to_bytes(context_id: str | None) -> bytes | None:
"""Convert a context_id to bytes."""
if context_id is None:
return None
+ if len(context_id) == 32:
return UUID(context_id).bytes
if len(context_id) == 26:
return ulid_to_bytes(context_id)
|
codereview_new_python_data_9603
|
async def test_camera_fail(hass, init_integration, mock_install, caplog):
return_value=b"ABC", side_effect=ProsegurException()
)
- with caplog.at_level(logging.ERROR, logger="homeassistant.components.prosegur"):
- with pytest.raises(HomeAssistantError) as exc:
- await camera.async_get_image(hass, "camera.test_cam")
assert "Unable to get image" in str(exc.value)
assert "Image test_cam doesn't exist" in caplog.text
I think we can merge the two `with` statements by using a comma between the context managers.
async def test_camera_fail(hass, init_integration, mock_install, caplog):
return_value=b"ABC", side_effect=ProsegurException()
)
+ with caplog.at_level(
+ logging.ERROR, logger="homeassistant.components.prosegur"
+ ), pytest.raises(HomeAssistantError) as exc:
+ await camera.async_get_image(hass, "camera.test_cam")
+
assert "Unable to get image" in str(exc.value)
assert "Image test_cam doesn't exist" in caplog.text
|
codereview_new_python_data_9604
|
async def async_validate_creds(hass: HomeAssistant, user_input: dict[str, Any]) -> bool:
"""Manage Obihai options."""
- if await hass.async_add_executor_job(
validate_auth,
user_input[CONF_HOST],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
- ):
- return True
-
- return False
class ObihaiFlowHandler(ConfigFlow, domain=DOMAIN):
```suggestion
return await hass.async_add_executor_job(
validate_auth,
user_input[CONF_HOST],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
)
```
async def async_validate_creds(hass: HomeAssistant, user_input: dict[str, Any]) -> bool:
"""Manage Obihai options."""
+ return await hass.async_add_executor_job(
validate_auth,
user_input[CONF_HOST],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
+ )
class ObihaiFlowHandler(ConfigFlow, domain=DOMAIN):
|
codereview_new_python_data_9605
|
def __init__(self, pyobihai, serial):
entity_category=EntityCategory.CONFIG,
)
- def press(
- self,
- **kwargs: Any,
- ) -> None:
"""Press button."""
try:
There are no arguments to a button press.
```suggestion
def press(self) -> None:
```
def __init__(self, pyobihai, serial):
entity_category=EntityCategory.CONFIG,
)
+ def press(self) -> None:
"""Press button."""
try:
|
codereview_new_python_data_9606
|
def __init__(self, pyobihai, serial):
entity_category=EntityCategory.CONFIG,
)
- def press(
- self,
- **kwargs: Any,
- ) -> None:
"""Press button."""
try:
This is a constant. It should be defined outside the `__init__` method.
I suggest that you create a module constant BUTTON_DESCRIPTION and assign it as a class attribute:
```python
class ObihaiButton(ButtonEntity):
"""Obihai Reboot button."""
entity_description = BUTTON_DESCRIPTION
def __init__(self, pyobihai, serial):
```
def __init__(self, pyobihai, serial):
entity_category=EntityCategory.CONFIG,
)
+ def press(self) -> None:
"""Press button."""
try:
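A rough sketch of the suggested shape, using hypothetical stand-ins for the Home Assistant `ButtonEntityDescription`/`ButtonEntity` classes (the real ones have more fields): the description is built once at module import and shared by every instance through a class attribute.
```python
from dataclasses import dataclass

@dataclass(frozen=True)
class ButtonEntityDescription:
    """Stand-in for the Home Assistant dataclass (which has more fields)."""
    key: str
    name: str

# Module-level constant: built once at import time, immutable, shared.
BUTTON_DESCRIPTION = ButtonEntityDescription(key="reboot", name="Reboot")

class ObihaiButton:
    """Obihai reboot button (sketch)."""

    # Class attribute instead of a per-instance assignment in __init__.
    entity_description = BUTTON_DESCRIPTION

    def __init__(self, serial: str) -> None:
        self._attr_unique_id = f"{serial}-reboot"

button = ObihaiButton("OBI123")
assert button.entity_description.key == "reboot"
```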
|
codereview_new_python_data_9607
|
async def async_set_temperature(self, **kwargs: Any) -> None:
OverkizCommandParam.FURTHER_NOTICE,
)
- async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
return
A second thing that I don't really like here is that we have a compulsory "empty" function.
I suggested that we raise a HomeAssistantError in https://github.com/home-assistant/core/pull/86691/files#r1118390937 but now I wonder...
I'll ask for a second opinion...
async def async_set_temperature(self, **kwargs: Any) -> None:
OverkizCommandParam.FURTHER_NOTICE,
)
+ async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
"""Set new target hvac mode."""
return
|
codereview_new_python_data_9608
|
def async_setup_light_services(hass: HomeAssistant) -> None:
)
-@callback
-def async_setup_lock_services(hass: HomeAssistant) -> None:
- """Create lock-specific services for the ISY Integration."""
- platform = entity_platform.async_get_current_platform()
-
- platform.async_register_entity_service(
- SERVICE_SET_ZWAVE_LOCK_USER_CODE,
- SERVICE_SET_USER_CODE_SCHEMA,
- "async_set_zwave_lock_user_code",
- )
- platform.async_register_entity_service(
- SERVICE_DELETE_ZWAVE_LOCK_USER_CODE,
- SERVICE_DELETE_USER_CODE_SCHEMA,
- "async_delete_zwave_lock_user_code",
- )
-
-
@callback
def async_log_deprecated_service_call(
hass: HomeAssistant,
We should keep these in `lock.py` since they are platform-specific services and do not apply to the whole integration.
def async_setup_light_services(hass: HomeAssistant) -> None:
)
@callback
def async_log_deprecated_service_call(
hass: HomeAssistant,
|
codereview_new_python_data_9609
|
async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
def _async_log_running_tasks(self, stage: int) -> None:
"""Log all running tasks."""
for task in self._tasks:
- if not task.done():
- _LOGGER.warning("Shutdown stage %s: still running: %s", stage, task)
class Context:
This shouldn't be possible, as there is a done callback to remove it from the set.
async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
def _async_log_running_tasks(self, stage: int) -> None:
"""Log all running tasks."""
for task in self._tasks:
+ _LOGGER.warning("Shutdown stage %s: still running: %s", stage, task)
class Context:
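The reviewer's point rests on a standard asyncio pattern: each created task registers a done callback that removes it from the tracking set, so by the time the set is iterated every member is, by construction, still running. A minimal, self-contained sketch of that pattern:
```python
import asyncio

class Tracker:
    """Sketch of the task-tracking pattern the reviewer refers to."""

    def __init__(self) -> None:
        self._tasks: set[asyncio.Task] = set()

    def create(self, coro) -> asyncio.Task:
        task = asyncio.get_running_loop().create_task(coro)
        self._tasks.add(task)
        # Finished tasks remove themselves, so _tasks only ever
        # holds tasks that are still running.
        task.add_done_callback(self._tasks.discard)
        return task

async def main() -> None:
    tracker = Tracker()
    task = tracker.create(asyncio.sleep(0))
    assert task in tracker._tasks
    await task
    await asyncio.sleep(0)  # let any remaining done-callbacks run
    assert task not in tracker._tasks  # removed by the done callback

asyncio.run(main())
```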
|
codereview_new_python_data_9610
|
def _apply_update( # noqa: C901
# Add name column to StatisticsMeta
_add_columns(session_maker, "statistics_meta", ["name VARCHAR(255)"])
elif new_version == 24:
- # This used to create the unique indices for start and metadata_id
# but we changed the format in schema 34 which will now take care
# of removing any duplicate if they still exist.
pass
```suggestion
# This used to create the unique indices for start and statistic_id
```
def _apply_update( # noqa: C901
# Add name column to StatisticsMeta
_add_columns(session_maker, "statistics_meta", ["name VARCHAR(255)"])
elif new_version == 24:
+ # This used to create the unique indices for start and statistic_id
# but we changed the format in schema 34 which will now take care
# of removing any duplicate if they still exist.
pass
|
codereview_new_python_data_9611
|
def __init__(
self.entity_description = entity_description
self._attr_unique_id = (
- f"{self._host.unique_id}_{self._channel}_{entity_description.key}"
)
async def async_press(self) -> None:
```suggestion
f"{self._host.unique_id}_{channel}_{entity_description.key}"
```
def __init__(
self.entity_description = entity_description
self._attr_unique_id = (
+ f"{self._host.unique_id}_{channel}_{entity_description.key}"
)
async def async_press(self) -> None:
|
codereview_new_python_data_9612
|
async def async_turn_off(self) -> None:
async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
"""Set the HVAC Mode and State."""
if hvac_mode == HVACMode.OFF:
- await self.async_turn_on()
- else:
await self.async_turn_off()
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set the Temperature."""
Why do we turn on when the passed hvac mode is `off`?
async def async_turn_off(self) -> None:
async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
"""Set the HVAC Mode and State."""
if hvac_mode == HVACMode.OFF:
await self.async_turn_off()
+ else:
+ await self.async_turn_on()
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set the Temperature."""
|
codereview_new_python_data_9613
|
def mock_hass_config(
with `hass_config` as parameterized.
"""
if hass_config:
- hass.config_entries = ConfigEntries(
- hass,
- hass_config,
- )
with patch("homeassistant.config.load_yaml_config_file", return_value=hass_config):
yield
Small style suggestion:
```suggestion
hass.config_entries = ConfigEntries(hass, hass_config)
```
def mock_hass_config(
with `hass_config` as parameterized.
"""
if hass_config:
+ hass.config_entries = ConfigEntries(hass, hass_config)
with patch("homeassistant.config.load_yaml_config_file", return_value=hass_config):
yield
|
codereview_new_python_data_9614
|
async def setup_again(*_: Any) -> None:
await self._async_process_on_unload()
return
- except BaseException: # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
Should we instead do this?
```suggestion
except (CancelledError, Exception): # pylint: disable=broad-except
```
async def setup_again(*_: Any) -> None:
await self._async_process_on_unload()
return
+ except (asyncio.CancelledError, Exception): # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
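Background for the suggestion: since Python 3.8, `asyncio.CancelledError` derives from `BaseException`, so a plain `except Exception` no longer catches cancellation; listing both catches cancellation and ordinary errors without swallowing `KeyboardInterrupt`/`SystemExit` the way `except BaseException` does. A quick check:
```python
import asyncio

# CancelledError left the Exception hierarchy in Python 3.8:
assert not issubclass(asyncio.CancelledError, Exception)
assert issubclass(asyncio.CancelledError, BaseException)

try:
    raise asyncio.CancelledError
except (asyncio.CancelledError, Exception) as err:
    # Catches cancellation and ordinary errors, while still letting
    # KeyboardInterrupt and SystemExit propagate, unlike BaseException.
    print(type(err).__name__, "handled")
```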
|
codereview_new_python_data_9615
|
async def async_attach_trigger(
) -> CALLBACK_TYPE:
"""Listen for events based on configuration."""
trigger_data = trigger_info["trigger_data"]
- number = cast(int, config.get(CONF_NUMBER))
held_more_than = config.get(CONF_HELD_MORE_THAN)
held_less_than = config.get(CONF_HELD_LESS_THAN)
pressed_time = None
This is wrong. It seems CONF_NUMBER is required:
```suggestion
number = cast(int, config[CONF_NUMBER])
```
async def async_attach_trigger(
) -> CALLBACK_TYPE:
"""Listen for events based on configuration."""
trigger_data = trigger_info["trigger_data"]
+ number = cast(int, config[CONF_NUMBER])
held_more_than = config.get(CONF_HELD_MORE_THAN)
held_less_than = config.get(CONF_HELD_LESS_THAN)
pressed_time = None
|
codereview_new_python_data_9616
|
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
-from homeassistant.helpers.typing import ConfigType
-from .const import CONF_OBIHAI_HOST, PLATFORMS
-
-__all__ = [
- "CONF_OBIHAI_HOST",
-]
-
-
-def setup(hass: HomeAssistant, config: ConfigType) -> bool:
- """Set up the Obihai integration."""
- return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
- # Reload entry when its updated.
- entry.async_on_unload(entry.add_update_listener(async_reload_entry))
-
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
-
-
-async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
- """Reload the config entry when it changed."""
- await hass.config_entries.async_reload(entry.entry_id)
Can you keep the options flow for a follow-up PR?
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
+from .const import PLATFORMS
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
codereview_new_python_data_9617
|
from . import USER_INPUT
-async def test_user_form(hass: HomeAssistant) -> None:
"""Test we get the user initiated form."""
result = await hass.config_entries.flow.async_init(
```suggestion
async def test_user_form(hass: HomeAssistant, mock_setup_entry: AsyncMock) -> None:
```
from . import USER_INPUT
+async def test_user_form(hass: HomeAssistant, mock_setup_entry: AsyncMock) -> None:
"""Test we get the user initiated form."""
result = await hass.config_entries.flow.async_init(
|
codereview_new_python_data_9618
|
DATA_SCHEMA = vol.Schema(
{
- vol.Required(CONF_HOST, default=""): str,
vol.Optional(
CONF_USERNAME,
default=DEFAULT_USERNAME,
You don't need an empty default.
```suggestion
vol.Required(CONF_HOST): str,
```
DATA_SCHEMA = vol.Schema(
{
+ vol.Required(CONF_HOST): str,
vol.Optional(
CONF_USERNAME,
default=DEFAULT_USERNAME,
|
codereview_new_python_data_9619
|
def update(self) -> None:
if self._service_name in call_direction:
self._state = call_direction.get(self._service_name)
-
- self._state = None
I think this line shouldn't be here. Leftover?
def update(self) -> None:
if self._service_name in call_direction:
self._state = call_direction.get(self._service_name)
|
codereview_new_python_data_9620
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
- await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
-
- return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
```suggestion
return await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
```
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
+ return await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
|
codereview_new_python_data_9621
|
async def async_setup_platform(
translation_key="manual_migration",
)
- if discovery_info:
- config = PLATFORM_SCHEMA(discovery_info)
-
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
I am a little confused by this line.
When does it get used?
async def async_setup_platform(
translation_key="manual_migration",
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
|
codereview_new_python_data_9622
|
def __init__(
def update(self) -> bool:
"""Validate connection and retrieve a list of sensors."""
- self.pyobihai = get_pyobihai(self.host, self.username, self.password)
- if not self.pyobihai.check_account():
- return False
self.serial = self.pyobihai.get_device_serial()
self.services = self.pyobihai.get_state()
Should this not check if `pyobihai` already exists?
```suggestion
if not self.pyobihai:
self.pyobihai = get_pyobihai(self.host, self.username, self.password)
```
And maybe `check_account` can also be guarded?
def __init__(
def update(self) -> bool:
"""Validate connection and retrieve a list of sensors."""
+ if not self.pyobihai:
+ self.pyobihai = get_pyobihai(self.host, self.username, self.password)
+ if not self.pyobihai.check_account():
+ return False
self.serial = self.pyobihai.get_device_serial()
self.services = self.pyobihai.get_state()
|
codereview_new_python_data_9623
|
async def test_yaml_import(hass: HomeAssistant) -> None:
)
await hass.async_block_till_done()
- assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
assert "errors" not in result
```suggestion
assert result["type"] == FlowResultType.CREATE_ENTRY
```
async def test_yaml_import(hass: HomeAssistant) -> None:
)
await hass.async_block_till_done()
+ assert result["type"] == FlowResultType.CREATE_ENTRY
assert "errors" not in result
|
codereview_new_python_data_9624
|
def pytest_configure(config: pytest.Config) -> None:
"markers", "no_fail_on_log_exception: mark test to not fail on logged exception"
)
if config.getoption("verbose") > 0:
- logging.getLogger().level = logging.DEBUG
def pytest_runtest_setup() -> None:
```suggestion
logging.getLogger().setLevel(logging.DEBUG)
```
def pytest_configure(config: pytest.Config) -> None:
"markers", "no_fail_on_log_exception: mark test to not fail on logged exception"
)
if config.getoption("verbose") > 0:
+ logging.getLogger().setLevel(logging.DEBUG)
def pytest_runtest_setup() -> None:
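Why `setLevel()` over assigning `.level` directly: the setter validates the value and clears the logging module's internal `isEnabledFor` cache, so other loggers pick up the change immediately, while a raw attribute write skips both. A small demonstration (the cache behavior is a CPython implementation detail):
```python
import logging

root = logging.getLogger()
child = logging.getLogger("demo")

root.setLevel(logging.WARNING)
child.isEnabledFor(logging.DEBUG)  # populates the per-logger level cache

root.level = logging.DEBUG                 # raw write: cache not invalidated
print(child.isEnabledFor(logging.DEBUG))   # False - stale cached answer

root.setLevel(logging.DEBUG)               # validates and clears the cache
print(child.isEnabledFor(logging.DEBUG))   # True
```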
|
codereview_new_python_data_9625
|
async def async_matching_config_entries(
if not type_filter:
return [entry_json(entry) for entry in entries]
- # Fetch all the integrations so we can check their type
integrations = {}
domains = {entry.domain for entry in entries}
for domain_key, integration_or_exc in (
```suggestion
integrations = {}
# Fetch all the integrations so we can check their type
```
async def async_matching_config_entries(
if not type_filter:
return [entry_json(entry) for entry in entries]
integrations = {}
domains = {entry.domain for entry in entries}
for domain_key, integration_or_exc in (
|
codereview_new_python_data_9626
|
async def test_get_config_parameters(
@pytest.mark.parametrize(
- ("include_target"),
- [(True), (False)],
)
async def test_firmware_upload_view(
hass: HomeAssistant,
```suggestion
("firmware_data", "expected_data"),
[({"target": "1"}, {"firmware_target": 1}), ({}, {})],
```
async def test_get_config_parameters(
@pytest.mark.parametrize(
+ ("firmware_data", "expected_data"),
+ [({"target": "1"}, {"firmware_target": 1}), ({}, {})],
)
async def test_firmware_upload_view(
hass: HomeAssistant,
|
codereview_new_python_data_9627
|
class DeviceAutomationTriggerProtocol(TriggerProtocol, Protocol):
"""Define the format of device_trigger modules.
- Each module must define either TRIGGER_SCHEMA or async_validate_trigger_config.
"""
async def async_get_trigger_capabilities(
Does this need double inheritance?
```suggestion
class DeviceAutomationTriggerProtocol(TriggerProtocol):
```
class DeviceAutomationTriggerProtocol(TriggerProtocol, Protocol):
"""Define the format of device_trigger modules.
+ Each module must define either TRIGGER_SCHEMA or async_validate_trigger_config
+ from TriggerProtocol.
"""
async def async_get_trigger_capabilities(
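The explicit `Protocol` base is what keeps the subclass a protocol at all: under `typing`, a class that inherits from a protocol without re-listing `Protocol` becomes an ordinary concrete class and loses structural checking. A minimal sketch with hypothetical protocols (not the Home Assistant ones):
```python
from typing import Protocol, runtime_checkable

@runtime_checkable
class TriggerProtocol(Protocol):
    def attach(self) -> None: ...

@runtime_checkable
class DeviceTriggerProtocol(TriggerProtocol, Protocol):
    # Re-listing Protocol keeps this class a protocol; without it,
    # it would become a concrete subclass of TriggerProtocol.
    def capabilities(self) -> dict: ...

class Impl:
    def attach(self) -> None: ...
    def capabilities(self) -> dict:
        return {}

# Structural isinstance check works because it is still a protocol:
assert isinstance(Impl(), DeviceTriggerProtocol)

class Concrete(TriggerProtocol):
    """No Protocol in the bases -> an ordinary class, not a protocol."""

# _is_protocol is a typing-module internal, shown only to illustrate:
print(getattr(DeviceTriggerProtocol, "_is_protocol", False))  # True
print(getattr(Concrete, "_is_protocol", False))               # False
```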
|
codereview_new_python_data_9628
|
class DeviceAutomationTriggerProtocol(TriggerProtocol, Protocol):
"""Define the format of device_trigger modules.
- Each module must define either TRIGGER_SCHEMA or async_validate_trigger_config.
"""
async def async_get_trigger_capabilities(
Should this docstring be adjusted?
class DeviceAutomationTriggerProtocol(TriggerProtocol, Protocol):
"""Define the format of device_trigger modules.
+ Each module must define either TRIGGER_SCHEMA or async_validate_trigger_config
+ from TriggerProtocol.
"""
async def async_get_trigger_capabilities(
|