Columns: id — string (30 to 32 characters); content — string (139 to 2.8k characters)
codereview_new_python_data_9729
from homeassistant import core
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry as dr
-from .const import CLIMATE_PLATFORM, DOMAIN, SWITCH_PLATFORM
from .coordinator import LivisiDataUpdateCoordinator

-PLATFORMS: Final = [SWITCH_PLATFORM, CLIMATE_PLATFORM]

async def async_setup_entry(hass: core.HomeAssistant, entry: ConfigEntry) -> bool:

Please import and use the Platform enum
```suggestion
from .const import DOMAIN, Platform
```

from homeassistant import core
from homeassistant.config_entries import ConfigEntry
+from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry as dr
+from .const import DOMAIN
from .coordinator import LivisiDataUpdateCoordinator

+PLATFORMS: Final = [Platform.CLIMATE, Platform.SWITCH]

async def async_setup_entry(hass: core.HomeAssistant, entry: ConfigEntry) -> bool:
codereview_new_python_data_9730
from homeassistant import core
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry as dr
-from .const import CLIMATE_PLATFORM, DOMAIN, SWITCH_PLATFORM
from .coordinator import LivisiDataUpdateCoordinator

-PLATFORMS: Final = [SWITCH_PLATFORM, CLIMATE_PLATFORM]

async def async_setup_entry(hass: core.HomeAssistant, entry: ConfigEntry) -> bool:

```suggestion
PLATFORMS: Final = [Platform.CLIMATE, Platform.SWITCH]
```

from homeassistant import core
from homeassistant.config_entries import ConfigEntry
+from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry as dr
+from .const import DOMAIN
from .coordinator import LivisiDataUpdateCoordinator

+PLATFORMS: Final = [Platform.CLIMATE, Platform.SWITCH]

async def async_setup_entry(hass: core.HomeAssistant, entry: ConfigEntry) -> bool:
codereview_new_python_data_9731
LIVISI_STATE_CHANGE: Final = "livisi_state_change"
LIVISI_REACHABILITY_CHANGE: Final = "livisi_reachability_change"

-SWITCH_PLATFORM: Final = "switch"
-CLIMATE_PLATFORM: Final = "climate"
-
PSS_DEVICE_TYPE: Final = "PSS"
VRCC_DEVICE_TYPE: Final = "VRCC"

I don't think these two constants are any use.
```suggestion
```

LIVISI_STATE_CHANGE: Final = "livisi_state_change"
LIVISI_REACHABILITY_CHANGE: Final = "livisi_reachability_change"

PSS_DEVICE_TYPE: Final = "PSS"
VRCC_DEVICE_TYPE: Final = "VRCC"
codereview_new_python_data_9732
    async def async_set_temperature(self, **kwargs: Any) -> None:

    def set_hvac_mode(self, hvac_mode: HVACMode) -> None:
        """Do nothing as LIVISI devices do not support changing the hvac mode."""

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""

I don't think it should do nothing as that will not tell the users why nothing is happening. I think you should instead raise a `HomeAssistantError` to indicate that it is not supported.

    async def async_set_temperature(self, **kwargs: Any) -> None:

    def set_hvac_mode(self, hvac_mode: HVACMode) -> None:
        """Do nothing as LIVISI devices do not support changing the hvac mode."""
+        raise HomeAssistantError(
+            "This feature is not supported with the LIVISI climate devices"
+        )

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
codereview_new_python_data_9733
    def __init__(

        self._humidity_capability = humidity_capability
        self.aio_livisi = coordinator.aiolivisi
        self._attr_available = False
-        self._attr_hvac_modes = [HVACMode.HEAT]
-        self._attr_hvac_mode = HVACMode.HEAT
-        self._attr_temperature_unit = UnitOfTemperature.CELSIUS
-        self._attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE
-        self._attr_target_temperature_high = MAX_TEMPERATURE
-        self._attr_target_temperature_low = MIN_TEMPERATURE
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, unique_id)},
            manufacturer=manufacturer,

Since these 6 attributes are fixed, they should be set as class attributes, not instance attributes:
```python
class LivisiClimate(CoordinatorEntity[LivisiDataUpdateCoordinator], ClimateEntity):
    """Represents the Livisi Climate."""

    _attr_hvac_modes = [HVACMode.HEAT]
    _attr_hvac_mode = HVACMode.HEAT
    _attr_temperature_unit = UnitOfTemperature.CELSIUS
    _attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE
    _attr_target_temperature_high = MAX_TEMPERATURE
    _attr_target_temperature_low = MIN_TEMPERATURE
```

    def __init__(

        self._humidity_capability = humidity_capability
        self.aio_livisi = coordinator.aiolivisi
        self._attr_available = False
+        _attr_hvac_modes = [HVACMode.HEAT]
+        _attr_hvac_mode = HVACMode.HEAT
+        _attr_temperature_unit = UnitOfTemperature.CELSIUS
+        _attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE
+        _attr_target_temperature_high = MAX_TEMPERATURE
+        _attr_target_temperature_low = MIN_TEMPERATURE
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, unique_id)},
            manufacturer=manufacturer,
codereview_new_python_data_9734
    async def async_will_remove_from_hass(self) -> None:

        self._lj.unsubscribe(self._on_switch_released)
        self._lj.unsubscribe(self._on_connected_changed)

-    def _on_switch_pressed(self):
        self._attr_is_on = True
        self.async_write_ha_state()

-    def _on_switch_released(self):
        self._attr_is_on = False
        self.async_write_ha_state()

```suggestion
    def _on_switch_released(self) -> None:
```

    async def async_will_remove_from_hass(self) -> None:

        self._lj.unsubscribe(self._on_switch_released)
        self._lj.unsubscribe(self._on_connected_changed)

+    def _on_switch_pressed(self) -> None:
        self._attr_is_on = True
        self.async_write_ha_state()

+    def _on_switch_released(self) -> None:
        self._attr_is_on = False
        self.async_write_ha_state()
codereview_new_python_data_9735
    async def async_will_remove_from_hass(self) -> None:

        self._lj.unsubscribe(self._on_switch_released)
        self._lj.unsubscribe(self._on_connected_changed)

-    def _on_switch_pressed(self):
        self._attr_is_on = True
        self.async_write_ha_state()

-    def _on_switch_released(self):
        self._attr_is_on = False
        self.async_write_ha_state()

```suggestion
    def _on_switch_pressed(self) -> None:
```

    async def async_will_remove_from_hass(self) -> None:

        self._lj.unsubscribe(self._on_switch_released)
        self._lj.unsubscribe(self._on_connected_changed)

+    def _on_switch_pressed(self) -> None:
        self._attr_is_on = True
        self.async_write_ha_state()

+    def _on_switch_released(self) -> None:
        self._attr_is_on = False
        self.async_write_ha_state()
codereview_new_python_data_9736
    def on_connected_changed(callback):

    mock_lj.last_delta = timedelta(0)
    mock_lj.connected = True

-    def connected_changed(connected: bool, reason: str):
        mock_lj.connected = connected
        for callback in mock_lj.connected_changed_callbacks:
            callback(connected, reason)

```suggestion
    def connected_changed(connected: bool, reason: str) -> None:
```

    def on_connected_changed(callback):

    mock_lj.last_delta = timedelta(0)
    mock_lj.connected = True

+    def connected_changed(connected: bool, reason: str) -> None:
        mock_lj.connected = connected
        for callback in mock_lj.connected_changed_callbacks:
            callback(connected, reason)
codereview_new_python_data_9737
def _migrate_columns_to_timestamp(

    elif engine.dialect.name == SupportedDialect.MYSQL:
        # With MySQL we do this in chunks to avoid hitting the `innodb_buffer_pool_size` limit
        # We also need to do this in a loop since we can't be sure that we have
-        # updated all rows in the table.
        result: CursorResult | None = None
        while result is None or result.rowcount > 0:
            result = connection.execute(

```suggestion
        # updated all rows in the table until the rowcount is 0
```

def _migrate_columns_to_timestamp(

    elif engine.dialect.name == SupportedDialect.MYSQL:
        # With MySQL we do this in chunks to avoid hitting the `innodb_buffer_pool_size` limit
        # We also need to do this in a loop since we can't be sure that we have
+        # updated all rows in the table until the rowcount is 0
        result: CursorResult | None = None
        while result is None or result.rowcount > 0:
            result = connection.execute(
codereview_new_python_data_9738
def _migrate_columns_to_timestamp(

        result = session.connection().execute(
            text(
                "UPDATE states set last_updated_ts= "
-                "IF(last_updated is NULL,0,UNIX_TIMESTAMP(last_updated)) "
                " last_changed_ts="
                "IF(last_changed is NULL,0,UNIX_TIMESTAMP(last_changed)) "
                " where last_updated_ts is NULL "

The 0 is a safety in case `last_updated` is somehow NULL so we don't loop forever (this should never actually be possible unless the db has previous corruption that was repaired)

def _migrate_columns_to_timestamp(

        result = session.connection().execute(
            text(
                "UPDATE states set last_updated_ts= "
+                "IF(last_updated is NULL,0,UNIX_TIMESTAMP(last_updated)), "
                " last_changed_ts="
                "IF(last_changed is NULL,0,UNIX_TIMESTAMP(last_changed)) "
                " where last_updated_ts is NULL "
codereview_new_python_data_9739
def cluster_command_schema_to_vol_schema(schema: CommandSchema) -> vol.Schema:

    def schema_type_to_vol(field_type: Any) -> Any:
        """Convert a schema type to a voluptuous type."""
-        if issubclass(field_type, enum.Flag) and len(field_type.__members__):
            return cv.multi_select(
                [key.replace("_", " ") for key in field_type.__members__]
            )
-        if issubclass(field_type, enum.Enum) and len(field_type.__members__):
            return vol.In([key.replace("_", " ") for key in field_type.__members__])
        if (
            issubclass(field_type, zigpy.types.FixedIntType)

```suggestion
        if issubclass(field_type, enum.Flag) and field_type.__members__:
```

def cluster_command_schema_to_vol_schema(schema: CommandSchema) -> vol.Schema:

    def schema_type_to_vol(field_type: Any) -> Any:
        """Convert a schema type to a voluptuous type."""
+        if issubclass(field_type, enum.Flag) and field_type.__members__:
            return cv.multi_select(
                [key.replace("_", " ") for key in field_type.__members__]
            )
+        if issubclass(field_type, enum.Enum) and field_type.__members__:
            return vol.In([key.replace("_", " ") for key in field_type.__members__])
        if (
            issubclass(field_type, zigpy.types.FixedIntType)
codereview_new_python_data_9740
def cluster_command_schema_to_vol_schema(schema: CommandSchema) -> vol.Schema:

    def schema_type_to_vol(field_type: Any) -> Any:
        """Convert a schema type to a voluptuous type."""
-        if issubclass(field_type, enum.Flag) and len(field_type.__members__):
            return cv.multi_select(
                [key.replace("_", " ") for key in field_type.__members__]
            )
-        if issubclass(field_type, enum.Enum) and len(field_type.__members__):
            return vol.In([key.replace("_", " ") for key in field_type.__members__])
        if (
            issubclass(field_type, zigpy.types.FixedIntType)

```suggestion
        if issubclass(field_type, enum.Enum) and field_type.__members__:
```

def cluster_command_schema_to_vol_schema(schema: CommandSchema) -> vol.Schema:

    def schema_type_to_vol(field_type: Any) -> Any:
        """Convert a schema type to a voluptuous type."""
+        if issubclass(field_type, enum.Flag) and field_type.__members__:
            return cv.multi_select(
                [key.replace("_", " ") for key in field_type.__members__]
            )
+        if issubclass(field_type, enum.Enum) and field_type.__members__:
            return vol.In([key.replace("_", " ") for key in field_type.__members__])
        if (
            issubclass(field_type, zigpy.types.FixedIntType)
codereview_new_python_data_9741
async def test_plex_play_media(hass, async_autosetup_sonos):

    mock_lookup.reset_mock()
    mock_add_to_queue.reset_mock()
    shuffle_media_content_id = (
-        '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album",'
-        ' "shuffle": 1}'
    )
    assert await hass.services.async_call(

```suggestion
        '{"library_name": "Music", "artist_name": "Artist", '
        '"album_name": "Album", "shuffle": 1}'
```

async def test_plex_play_media(hass, async_autosetup_sonos):

    mock_lookup.reset_mock()
    mock_add_to_queue.reset_mock()
    shuffle_media_content_id = (
+        '{"library_name": "Music", "artist_name": "Artist", '
+        '"album_name": "Album", "shuffle": 1}'
    )
    assert await hass.services.async_call(
codereview_new_python_data_9742
async def test_plex_play_media(hass, async_autosetup_sonos):

    mock_lookup.reset_mock()
    mock_add_to_queue.reset_mock()
    shuffle_media_content_id = (
-        '{"library_name": "Music", "artist_name": "Artist", "album_name": "Album",'
-        ' "shuffle": 1}'
    )
    assert await hass.services.async_call(

```suggestion
        '{"library_name": "Music", "artist_name": "Artist", '
        '"album_name": "Album", "shuffle": 1}'
```

async def test_plex_play_media(hass, async_autosetup_sonos):

    mock_lookup.reset_mock()
    mock_add_to_queue.reset_mock()
    shuffle_media_content_id = (
+        '{"library_name": "Music", "artist_name": "Artist", '
+        '"album_name": "Album", "shuffle": 1}'
    )
    assert await hass.services.async_call(
codereview_new_python_data_9743
@pytest.fixture(name="mock_config_entry")
-async def mock_config_entry_fixture(prepare_config_entry):
    """Mock config entry and setup entry."""
    with patch("homeassistant.components.axis.async_setup_entry", return_value=True):
-        yield await prepare_config_entry()


async def test_flow_manual_configuration(hass, setup_default_vapix_requests):

Maybe not compulsory, but I find that it helps to make it "autouse=True" in `test_config_flow.py`. Then if tests require a "real" setup then I split or move them to `test_init.py` (see #86601)

@pytest.fixture(name="mock_config_entry")
+async def mock_config_entry_fixture(hass, config_entry):
    """Mock config entry and setup entry."""
    with patch("homeassistant.components.axis.async_setup_entry", return_value=True):
+        assert await hass.config_entries.async_setup(config_entry.entry_id)
+        await hass.async_block_till_done()
+        yield config_entry


async def test_flow_manual_configuration(hass, setup_default_vapix_requests):
codereview_new_python_data_9744
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:

        await hass.async_add_executor_job(
            partial(openai.Engine.list, request_timeout=10)
        )
    except error.OpenAIError as err:
        raise ConfigEntryNotReady(err) from err

Should `error.AuthenticationError` be handled here too?

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:

        await hass.async_add_executor_job(
            partial(openai.Engine.list, request_timeout=10)
        )
+    except error.AuthenticationError as err:
+        _LOGGER.error("Invalid API key: %s", err)
+        return False
    except error.OpenAIError as err:
        raise ConfigEntryNotReady(err) from err
codereview_new_python_data_9745
    async def async_will_remove_from_hass(self) -> None:

    def _on_connected_changed(self, connected: bool, reason: str) -> None:
        self._attr_available = connected
-        self.async_write_ha_state()

    @property
    def extra_state_attributes(self):

Let's move this to a separate PR which also adds tests of availability 👍

    async def async_will_remove_from_hass(self) -> None:

    def _on_connected_changed(self, connected: bool, reason: str) -> None:
        self._attr_available = connected
+        self.schedule_update_ha_state()

    @property
    def extra_state_attributes(self):
codereview_new_python_data_9746
    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:

    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        _tilt = self.parsed_data["tilt"]
-        _moving_up = (
-            self.parsed_data["motionDirection"]["up"] and self.parsed_data["inMotion"]
-        )
-        _moving_down = (
-            self.parsed_data["motionDirection"]["down"] and self.parsed_data["inMotion"]
-        )
-        # NOTE: when motion is down, motion up is also set to true for some reason
-        if _moving_up:
-            _opening = bool(_tilt > 50)
-            _closing = not _opening
-        elif _moving_down:
-            _opening = bool(_tilt < 50)
-            _closing = not _opening
-        else:
-            _opening = _closing = False
        self._attr_current_cover_tilt_position = _tilt
        self._attr_is_closed = (_tilt < self.CLOSED_DOWN_THRESHOLD) or (
            _tilt > self.CLOSED_UP_THRESHOLD
        )
-        self._attr_is_opening = _opening
-        self._attr_is_closing = _closing
        self.async_write_ha_state()

@BelowAverageDev After some debugging today I made https://github.com/Danielhiversen/pySwitchbot/pull/188

    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:

    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        _tilt = self.parsed_data["tilt"]
        self._attr_current_cover_tilt_position = _tilt
        self._attr_is_closed = (_tilt < self.CLOSED_DOWN_THRESHOLD) or (
            _tilt > self.CLOSED_UP_THRESHOLD
        )
+        self._attr_is_opening = self.parsed_data["motionDirection"]["opening"]
+        self._attr_is_closing = self.parsed_data["motionDirection"]["closing"]
        self.async_write_ha_state()
codereview_new_python_data_9747
def get_block_entity_name(

    channel_name = get_block_channel_name(device, block)

    if description:
-        return f"{channel_name} {description}".capitalize()

    return channel_name

```suggestion
        return f"{channel_name} {description.lower()}"
```
Wouldn't it be better just to lowercase the description and leave the name as it is? After all, the name arrives from the user/device and we don't want to change it

def get_block_entity_name(

    channel_name = get_block_channel_name(device, block)

    if description:
+        return f"{channel_name} {description.lower()}"

    return channel_name
codereview_new_python_data_9748
    async def async_setup(self):

        await self.async_update()
        self.config_entry.add_update_listener(self.async_options_updated)
-        self.hass.data.setdefault(DOMAIN, self)

        await self.hass.config_entries.async_forward_entry_setups(
            self.config_entry, PLATFORMS

I had to move this because the platform would try to use `hass.data[DOMAIN]` but it wasn't set yet

    async def async_setup(self):

        await self.async_update()
        self.config_entry.add_update_listener(self.async_options_updated)

        await self.hass.config_entries.async_forward_entry_setups(
            self.config_entry, PLATFORMS
codereview_new_python_data_9749
    async def async_added_to_hass(self) -> None:

        LOGGER.info("Restoring entity %s", self.name)
        last_state = await self.async_get_last_state()
-        last_extra_data = await self.async_get_last_extra_data()
-        if last_extra_data is not None:
-            self._last_target_temp = last_extra_data.as_dict()["last_target_temp"]
-
        if last_state is not None:
            self.last_state = last_state
            self.last_state_attributes = self.last_state.attributes
            self._preset_modes = cast(
                list, self.last_state.attributes.get("preset_modes")
            )

        await super().async_added_to_hass()

    @callback

Maybe move this to above line 332 so that the `last_state` handling is not broken in the middle, to improve readability?

    async def async_added_to_hass(self) -> None:

        LOGGER.info("Restoring entity %s", self.name)
        last_state = await self.async_get_last_state()
        if last_state is not None:
            self.last_state = last_state
            self.last_state_attributes = self.last_state.attributes
            self._preset_modes = cast(
                list, self.last_state.attributes.get("preset_modes")
            )

+        last_extra_data = await self.async_get_last_extra_data()
+        if last_extra_data is not None:
+            self._last_target_temp = last_extra_data.as_dict()["last_target_temp"]
+
        await super().async_added_to_hass()

    @callback
codereview_new_python_data_9750
    async def async_added_to_hass(self) -> None:

        LOGGER.info("Restoring entity %s", self.name)
        last_state = await self.async_get_last_state()
-        last_extra_data = await self.async_get_last_extra_data()
-        if last_extra_data is not None:
-            self._last_target_temp = last_extra_data.as_dict()["last_target_temp"]
-
        if last_state is not None:
            self.last_state = last_state
            self.last_state_attributes = self.last_state.attributes
            self._preset_modes = cast(
                list, self.last_state.attributes.get("preset_modes")
            )

        await super().async_added_to_hass()

    @callback

```suggestion
            self._last_target_temp = last_extra_data.last_target_temp
```

    async def async_added_to_hass(self) -> None:

        LOGGER.info("Restoring entity %s", self.name)
        last_state = await self.async_get_last_state()
        if last_state is not None:
            self.last_state = last_state
            self.last_state_attributes = self.last_state.attributes
            self._preset_modes = cast(
                list, self.last_state.attributes.get("preset_modes")
            )

+        last_extra_data = await self.async_get_last_extra_data()
+        if last_extra_data is not None:
+            self._last_target_temp = last_extra_data.as_dict()["last_target_temp"]
+
        await super().async_added_to_hass()

    @callback
codereview_new_python_data_9751
("radio_browser", "media_source.py"), ("system_bridge", "media_source.py"), ("tuya", "scene.py"), - ("velux", "scene.py"), ("upb", "scene.py"), ("xbox", "media_source.py"), } ```suggestion ("upb", "scene.py"), ("velux", "scene.py"), ``` ("radio_browser", "media_source.py"), ("system_bridge", "media_source.py"), ("tuya", "scene.py"), ("upb", "scene.py"), + ("velux", "scene.py"), ("xbox", "media_source.py"), }
codereview_new_python_data_9752
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
-        self.schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
-        self.schedule_update_ha_state()

This needs to be run in the executor

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
+        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
+        self.async_schedule_update_ha_state()
codereview_new_python_data_9753
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
-        self.schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
-        self.schedule_update_ha_state()

This needs to be run in the executor

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
+        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
+        self.async_schedule_update_ha_state()
codereview_new_python_data_9754
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
-        self.schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
-        self.schedule_update_ha_state()

This should use the async api instead

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
+        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
+        self.async_schedule_update_ha_state()
codereview_new_python_data_9755
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
-        self.schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
-        self.schedule_update_ha_state()

This should use the async api instead

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn off-grid mode on."""
        self._attr_is_on = True
        self.power_wall.set_island_mode(IslandMode.OFFGRID)
+        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off-grid mode off (return to on-grid usage)."""
        self._attr_is_on = False
        self.power_wall.set_island_mode(IslandMode.ONGRID)
+        self.async_schedule_update_ha_state()
codereview_new_python_data_9756
async def async_setup_entry(

class PowerwallOffGridEnabledEntity(PowerWallEntity, SwitchEntity):
    """Representation of a Switch entity for Powerwall Off-grid operation."""

-    _attr_name = "Off-Grid Operation"
    _attr_entity_category = EntityCategory.CONFIG
    _attr_device_class = SwitchDeviceClass.SWITCH

Operation should be lowercase and we can set _attr_has_entity_name

async def async_setup_entry(

class PowerwallOffGridEnabledEntity(PowerWallEntity, SwitchEntity):
    """Representation of a Switch entity for Powerwall Off-grid operation."""

+    _attr_name = "Off-Grid operation"
+    _attr_has_entity_name = True
    _attr_entity_category = EntityCategory.CONFIG
    _attr_device_class = SwitchDeviceClass.SWITCH
codereview_new_python_data_9757
    def __init__(

    async def async_added_to_hass(self) -> None:
        """Connect aircon to the cloud."""
-        await self._aircon.register_attr_callback(self.async_write_ha_state)
        await self._aircon.connect()

    async def async_will_remove_from_hass(self) -> None:

According to the library, this method on the appliance is not a coroutine function.
https://github.com/abmantis/whirlpool-sixth-sense/blob/ae12505942b0b43cc3c9d4bb97e44ff0356e8bf1/whirlpool/appliance.py#L37

It looks like it's the test that mocks the library incorrectly.
https://github.com/home-assistant/core/blob/255611238bddd83b0f6949f31cdfd8ee359b4b6c/tests/components/whirlpool/conftest.py#L66

That should just be a `MagicMock` instead of an `AsyncMock`.

    def __init__(

    async def async_added_to_hass(self) -> None:
        """Connect aircon to the cloud."""
+        self._aircon.register_attr_callback(self.async_write_ha_state)
        await self._aircon.connect()

    async def async_will_remove_from_hass(self) -> None:
codereview_new_python_data_9758
    async def guard_func(*args, **kwargs):

@pytest.fixture(name="caplog")
def caplog_fixture(caplog):
-    """Test that capture logs are likely needing debug level."""
    caplog.set_level(logging.DEBUG)
    yield caplog

If the idea is to keep this fixture, we should improve the docstring, maybe:
```suggestion
    """Set log level to debug for tests using the caplog fixture."""
```

    async def guard_func(*args, **kwargs):

@pytest.fixture(name="caplog")
def caplog_fixture(caplog):
+    """Set log level to debug for tests using the caplog fixture."""
    caplog.set_level(logging.DEBUG)
    yield caplog
codereview_new_python_data_9759
    async def async_update(self) -> None:

        ):
            try:
                await self._data.client.login()
-                self._attr_available = False

            except AIOSomecomfort.device.SomeComfortError:
                self.hass.async_create_task(
                    self.hass.config_entries.async_reload(self._data.entry_id)
                )

Will reloading set _attr_available back to True?

    async def async_update(self) -> None:

        ):
            try:
                await self._data.client.login()

            except AIOSomecomfort.device.SomeComfortError:
+                self._attr_available = False
                self.hass.async_create_task(
                    self.hass.config_entries.async_reload(self._data.entry_id)
                )
codereview_new_python_data_9760
    async def async_step_import(
        self, import_data: dict[str, str] | None = None
    ) -> FlowResult:
        """Set up by import from async_setup."""
-        if self._async_current_entries():
-            return self.async_abort(reason="single_instance_allowed")
-
        return self.async_create_entry(title="Thread", data={})

Can we check this in `async_setup` to avoid instantiating all this just to abort?

    async def async_step_import(
        self, import_data: dict[str, str] | None = None
    ) -> FlowResult:
        """Set up by import from async_setup."""
        return self.async_create_entry(title="Thread", data={})
codereview_new_python_data_9761
async def test_create_entry(hass: HomeAssistant):
    """Test an entry is created by async_setup."""
    assert len(hass.config_entries.async_entries(thread.DOMAIN)) == 0
-    hass.async_create_task(async_setup_component(hass, thread.DOMAIN, {}))
    await hass.async_block_till_done()
    assert len(hass.config_entries.async_entries(thread.DOMAIN)) == 1

Do we need to create a task instead of just awaiting the call?

async def test_create_entry(hass: HomeAssistant):
    """Test an entry is created by async_setup."""
    assert len(hass.config_entries.async_entries(thread.DOMAIN)) == 0
+    assert await async_setup_component(hass, thread.DOMAIN, {})
    await hass.async_block_till_done()
    assert len(hass.config_entries.async_entries(thread.DOMAIN)) == 1
codereview_new_python_data_9762
class NetatmoSensorEntityDescription(SensorEntityDescription, NetatmoRequiredKey

        entity_registry_enabled_default=True,
        native_unit_of_measurement=UnitOfPressure.HPA,
        state_class=SensorStateClass.MEASUREMENT,
-        device_class=SensorDeviceClass.PRESSURE,
    ),
    NetatmoSensorEntityDescription(
        key="pressure_trend",

This is a breaking change. Is that needed? As in, if you prefer hPa, you can change the unit for your entity to your liking.

class NetatmoSensorEntityDescription(SensorEntityDescription, NetatmoRequiredKey

        entity_registry_enabled_default=True,
        native_unit_of_measurement=UnitOfPressure.HPA,
        state_class=SensorStateClass.MEASUREMENT,
+        device_class=SensorDeviceClass.ATMOSPHERIC_PRESSURE,
    ),
    NetatmoSensorEntityDescription(
        key="pressure_trend",
codereview_new_python_data_9763
class NetatmoSensorEntityDescription(SensorEntityDescription, NetatmoRequiredKey

        name="Pressure",
        netatmo_name="pressure",
        entity_registry_enabled_default=True,
-        native_unit_of_measurement=UnitOfPressure.HPA,
        state_class=SensorStateClass.MEASUREMENT,
        device_class=SensorDeviceClass.ATMOSPHERIC_PRESSURE,
    ),

```suggestion
        native_unit_of_measurement=UnitOfPressure.MBAR,
```

class NetatmoSensorEntityDescription(SensorEntityDescription, NetatmoRequiredKey

        name="Pressure",
        netatmo_name="pressure",
        entity_registry_enabled_default=True,
+        native_unit_of_measurement=UnitOfPressure.MBAR,
        state_class=SensorStateClass.MEASUREMENT,
        device_class=SensorDeviceClass.ATMOSPHERIC_PRESSURE,
    ),
codereview_new_python_data_9764
async def test_connection_error(hass: HomeAssistant, client: MagicMock) -> None:

async def test_auth_error(hass: HomeAssistant, client: MagicMock) -> None:
    """Test that an error message is shown on login fail."""
    client.login.side_effect = AIOSomecomfort.AuthError
-    with patch("AIOSomecomfort.AIOSomeComfort", return_value=client,), patch(
        "homeassistant.components.honeywell.async_setup_entry",
        return_value=True,
    ):

We normally only need to patch the integration setup function on create entry results or other results that may lead to setting up the config entry again. It's not a bug to do it anyway, but it won't have any effect so we can remove it.

async def test_connection_error(hass: HomeAssistant, client: MagicMock) -> None:

async def test_auth_error(hass: HomeAssistant, client: MagicMock) -> None:
    """Test that an error message is shown on login fail."""
    client.login.side_effect = AIOSomecomfort.AuthError
+    with patch(
        "homeassistant.components.honeywell.async_setup_entry",
        return_value=True,
    ):
codereview_new_python_data_9765
    def __init__(
        self,
        data: HoneywellData,
        device: AIOSomecomfort.device.Device,
-        cool_away_temp: str | None,
-        heat_away_temp: str | None,
    ) -> None:
        """Initialize the thermostat."""
        self._data = data

Aren't these integers? They are validated as such here:
https://github.com/home-assistant/core/blob/914704e45974824acaa5b26433a013c5b779c759/homeassistant/components/honeywell/config_flow.py#L91-L109

    def __init__(
        self,
        data: HoneywellData,
        device: AIOSomecomfort.device.Device,
+        cool_away_temp: int | None,
+        heat_away_temp: int | None,
    ) -> None:
        """Initialize the thermostat."""
        self._data = data
codereview_new_python_data_9766
async def async_set_dashboard_info(

    dashboard = ESPHomeDashboard(hass, addon_slug, url, async_get_clientsession(hass))
    try:
        await dashboard.async_request_refresh()
-    except UpdateFailed:
        return

    hass.data[KEY_DASHBOARD] = dashboard

Isn't it odd to ignore this? As in, it will be hard to debug if it doesn't work, right?

async def async_set_dashboard_info(

    dashboard = ESPHomeDashboard(hass, addon_slug, url, async_get_clientsession(hass))
    try:
        await dashboard.async_request_refresh()
+    except UpdateFailed as err:
+        logging.getLogger(__name__).error("Ignoring dashboard info: %s", err)
        return

    hass.data[KEY_DASHBOARD] = dashboard
codereview_new_python_data_9767
    async def async_step_reauth_confirm(

            return self.async_abort(reason="error_pairing")
        except WEBOSTV_EXCEPTIONS:
            return self.async_abort(reason="reauth_unsuccessful")
-        else:
-            update_client_key(self.hass, self._entry, client)
-            await self.hass.config_entries.async_reload(self._entry.entry_id)
-            return self.async_abort(reason="reauth_successful")

        return self.async_show_form(step_id="reauth_confirm")

We can remove this `else:` and outdent below.

    async def async_step_reauth_confirm(

            return self.async_abort(reason="error_pairing")
        except WEBOSTV_EXCEPTIONS:
            return self.async_abort(reason="reauth_unsuccessful")
+
+        update_client_key(self.hass, self._entry, client)
+        await self.hass.config_entries.async_reload(self._entry.entry_id)
+        return self.async_abort(reason="reauth_successful")

        return self.async_show_form(step_id="reauth_confirm")
codereview_new_python_data_9768
def _datetime_or_none(value: str) -> datetime | None:

def build_mysqldb_conv() -> dict:
    """Build a MySQLDB conv dict that uses ciso8601 to parse datetimes."""
    # Late imports since we only call this if they are using mysqldb
-    # pylint: disable-next=import-outside-toplevel,import-error
    from MySQLdb.constants import FIELD_TYPE
-
-    # pylint: disable-next=import-outside-toplevel,import-error
    from MySQLdb.converters import conversions

    return {**conversions, FIELD_TYPE.DATETIME: _datetime_or_none}

I would combine these into one `pylint: disable` for the whole block.

def _datetime_or_none(value: str) -> datetime | None:

def build_mysqldb_conv() -> dict:
    """Build a MySQLDB conv dict that uses ciso8601 to parse datetimes."""
    # Late imports since we only call this if they are using mysqldb
+    # pylint: disable=import-outside-toplevel,import-error
    from MySQLdb.constants import FIELD_TYPE
    from MySQLdb.converters import conversions

    return {**conversions, FIELD_TYPE.DATETIME: _datetime_or_none}
codereview_new_python_data_9769
async def test_data_manager_webhook_subscription(

        WebhookConfig(id="1234", url="http://localhost/api/webhook/1234", enabled=True),
    )

-    # pylint: disable-next=protected-access
    data_manager._notify_subscribe_delay = datetime.timedelta(seconds=0)
-    # pylint: disable-next=protected-access
    data_manager._notify_unsubscribe_delay = datetime.timedelta(seconds=0)

    api.notify_list.return_value = NotifyListResponse(
        profiles=(

Another case where I'm not sure it's really necessary to use `disable-next` for `protected-access`.

async def test_data_manager_webhook_subscription(

        WebhookConfig(id="1234", url="http://localhost/api/webhook/1234", enabled=True),
    )

+    # pylint: disable=protected-access
    data_manager._notify_subscribe_delay = datetime.timedelta(seconds=0)
    data_manager._notify_unsubscribe_delay = datetime.timedelta(seconds=0)
+    # pylint: enable=protected-access

    api.notify_list.return_value = NotifyListResponse(
        profiles=(
codereview_new_python_data_9770
def _create_device_registry(

    elif not name and device_type_instances:
        # use the productName if no node label is present
        name = basic_info.productName

        node_device_id = get_device_id(
            server_info,
            node_device,

```suggestion
    node_device_id = get_device_id(
```

def _create_device_registry(

    elif not name and device_type_instances:
        # use the productName if no node label is present
        name = basic_info.productName

+    node_device_id = get_device_id(
        server_info,
        node_device,
codereview_new_python_data_9771
LOGGER = logging.getLogger(__package__)

# prefixes to identify device identifier id types
-ID_TYPE_DEVICE_ID = "device_id"
ID_TYPE_SERIAL = "serial"

To make it easier to match the type id and not have an underscore before the underscore that separates the type id from the rest of the identifier.
```suggestion
ID_TYPE_DEVICE_ID = "deviceid"
```

LOGGER = logging.getLogger(__package__)

# prefixes to identify device identifier id types
+ID_TYPE_DEVICE_ID = "deviceid"
ID_TYPE_SERIAL = "serial"
codereview_new_python_data_9772
async def websocket_info(

        connection.send_error(msg["id"], "get_dataset_failed", str(exc))
        return

    connection.send_result(
        msg["id"],
        {
            "url": data.url,
-            "active_dataset_tlvs": dataset.hex(),
        },
    )

Don't duplicate `async_get_active_dataset_tlvs`, call it instead.

async def websocket_info(

        connection.send_error(msg["id"], "get_dataset_failed", str(exc))
        return

+    if dataset:
+        dataset = dataset.hex()
+
    connection.send_result(
        msg["id"],
        {
            "url": data.url,
+            "active_dataset_tlvs": dataset,
        },
    )
codereview_new_python_data_9773
class AccuWeatherSensorDescription(

        key="Precipitation",
        device_class=SensorDeviceClass.PRECIPITATION,
        name="Precipitation",
-        state_class=SensorStateClass.TOTAL_INCREASING,
        metric_unit=UnitOfPrecipitationDepth.MILLIMETERS,
        us_customary_unit=UnitOfPrecipitationDepth.INCHES,
        value_fn=lambda data, unit: cast(float, data[unit][ATTR_VALUE]),

Is this "past xx h precipitation"? Or "precipitation since xx:xx"? If it is the first, then I think it should be `SensorStateClass.TOTAL` not increasing.

class AccuWeatherSensorDescription(

        key="Precipitation",
        device_class=SensorDeviceClass.PRECIPITATION,
        name="Precipitation",
+        state_class=SensorStateClass.TOTAL,
        metric_unit=UnitOfPrecipitationDepth.MILLIMETERS,
        us_customary_unit=UnitOfPrecipitationDepth.INCHES,
        value_fn=lambda data, unit: cast(float, data[unit][ATTR_VALUE]),
codereview_new_python_data_9774
class HomeWizardSensorEntityDescription(

        key="active_tariff",
        name="Active tariff",
        icon="mdi:calendar-clock",
-        value_fn=lambda data: str(data.active_tariff)
-        if data.active_tariff is not None
-        else None,
        device_class=SensorDeviceClass.ENUM,
        options=["1", "2", "3", "4"],
    ),

Multi-line lambdas should be avoided. Maybe you can create a `_ensure_string` method that `return None if value is None else str(value)`
```suggestion
        value_fn=lambda data: _ensure_string(data.active_tariff),
```

class HomeWizardSensorEntityDescription(

        key="active_tariff",
        name="Active tariff",
        icon="mdi:calendar-clock",
+        value_fn=lambda data: (
+            None if data.active_tariff is None else str(data.active_tariff)
+        ),
        device_class=SensorDeviceClass.ENUM,
        options=["1", "2", "3", "4"],
    ),
codereview_new_python_data_9775
    def precision(self) -> int | None:

            return self._sensor_option_precision

        # Second priority, native precision
-        if self.native_precision is None:
            return None

        device_class = self.device_class
        native_unit_of_measurement = self.native_unit_of_measurement
        unit_of_measurement = self.unit_of_measurement
-        precision = self.native_precision
        if (
            native_unit_of_measurement != unit_of_measurement

No need to call the property method twice. As it is an integration implementation detail, it may actually involve data processing that is not needed twice.
```suggestion
        if precision := self.native_precision is None:
```

    def precision(self) -> int | None:

            return self._sensor_option_precision

        # Second priority, native precision
+        if (precision := self.native_precision) is None:
            return None

        device_class = self.device_class
        native_unit_of_measurement = self.native_unit_of_measurement
        unit_of_measurement = self.unit_of_measurement
        if (
            native_unit_of_measurement != unit_of_measurement
codereview_new_python_data_9776
    def __init__(

        # create unique_id based on "Operational Instance Name" and endpoint/device type
        fab_id_hex = f"{server_info.compressed_fabric_id:016X}"
        node_id_hex = f"{node.node_id:016X}"
-        operational_instance_name = f"{fab_id_hex.upper()}-{node_id_hex.upper()}"
        # NOTE: use 'operational instance name' property of the node instance later
        self._attr_unique_id = (
            f"{operational_instance_name}-"

Upper should be guaranteed by the capital `X` in the formatting string. So this can be:
```suggestion
        fab_id_hex = f"{server_info.compressed_fabric_id:016X}"
        node_id_hex = f"{node.node_id:016X}"
        operational_instance_name = f"{fab_id_hex}-{node_id_hex}"
```

    def __init__(

        # create unique_id based on "Operational Instance Name" and endpoint/device type
        fab_id_hex = f"{server_info.compressed_fabric_id:016X}"
        node_id_hex = f"{node.node_id:016X}"
+        operational_instance_name = f"{fab_id_hex}-{node_id_hex}"
        # NOTE: use 'operational instance name' property of the node instance later
        self._attr_unique_id = (
            f"{operational_instance_name}-"
codereview_new_python_data_9777
def get_operational_instance_id(
    node: MatterNode,
) -> str:
    """Return `Operational Instance Name` for given MatterNode."""
-    fab_id_hex = f"{server_info.compressed_fabric_id:016X}"
    node_id_hex = f"{node.node_id:016X}"
    # operational instance id matches the mdns advertisement for the node
    # this is the recommended ID to recognize a unique matter node (within a fabric)
-    return f"{fab_id_hex}-{node_id_hex}"


def get_device_id(

```suggestion
    fabric_id_hex = f"{server_info.compressed_fabric_id:016X}"
```

def get_operational_instance_id(
    node: MatterNode,
) -> str:
    """Return `Operational Instance Name` for given MatterNode."""
+    fabric_id_hex = f"{server_info.compressed_fabric_id:016X}"
    node_id_hex = f"{node.node_id:016X}"
    # operational instance id matches the mdns advertisement for the node
    # this is the recommended ID to recognize a unique matter node (within a fabric)
+    return f"{fabric_id_hex}-{node_id_hex}"


def get_device_id(
codereview_new_python_data_9778
async def test_camera_with_stream_profile(hass, setup_config_entry):
    )


-@patch("axis.vapix.vapix.Params.image_format", return_value=None)
-async def test_camera_disabled(hass, setup_config_entry):
    """Test that Axis camera platform is loaded properly but does not create camera entity."""
-    assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 0

I recommend not mixing pytest fixtures with patch decorators. It's easy to make a mistake. How do we know the order of patch application vs fixture injection here? Does it matter?

async def test_camera_with_stream_profile(hass, setup_config_entry):
    )


+async def test_camera_disabled(hass, prepare_config_entry):
    """Test that Axis camera platform is loaded properly but does not create camera entity."""
+    with patch("axis.vapix.vapix.Params.image_format", new=None):
+        await prepare_config_entry()
+    assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 0
codereview_new_python_data_9779
async def test_camera_disabled(hass, prepare_config_entry):
    """Test that Axis camera platform is loaded properly but does not create camera entity."""
    with patch("axis.vapix.vapix.Params.image_format", new=None):
        await prepare_config_entry()
-        assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 0

```suggestion

    assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 0
```

async def test_camera_disabled(hass, prepare_config_entry):
    """Test that Axis camera platform is loaded properly but does not create camera entity."""
    with patch("axis.vapix.vapix.Params.image_format", new=None):
        await prepare_config_entry()
+
+    assert len(hass.states.async_entity_ids(CAMERA_DOMAIN)) == 0
codereview_new_python_data_9780
    async def async_step_discovery_confirm(

        errors: dict[str, str] | None = None
        if user_input is not None:
            try:
-                await self._async_try_connect(str(self.discovery.ip))
            except RecoverableError as ex:
                _LOGGER.error(ex)
                errors = {"base": ex.error_code}

```suggestion
                await self._async_try_connect(self.discovery.ip)
```

    async def async_step_discovery_confirm(

        errors: dict[str, str] | None = None
        if user_input is not None:
            try:
+                await self._async_try_connect(self.discovery.ip)
            except RecoverableError as ex:
                _LOGGER.error(ex)
                errors = {"base": ex.error_code}
codereview_new_python_data_9781
from .const import CONTENT_AUTH_EXPIRY_TIME, MediaClass, MediaType

# Paths that we don't need to sign
-PATHS_WITHOUT_AUTH = (
-    "/api/google_assistant_sdk/audio/",
-    "/api/tts_proxy/",
-)

@callback

While there is not a super high security bar here, uuid1 paths seem below the bar as my impression is those are on the easier side to predict. Is there a technical motivation for opting out of signed paths?

from .const import CONTENT_AUTH_EXPIRY_TIME, MediaClass, MediaType

# Paths that we don't need to sign
+PATHS_WITHOUT_AUTH = ("/api/tts_proxy/",)

@callback
codereview_new_python_data_9782
    async def async_step_user(

    async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:
        """Handle configuration by re-auth."""
-        self._reauth_entry = await self.async_set_unique_id(entry_data[CONF_UNIQUE_ID])
        return await self.async_step_user()

```suggestion
        self._reauth_entry = self.hass.config_entries.async_get_entry(
            self.context["entry_id"]
        )
```

    async def async_step_user(

    async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:
        """Handle configuration by re-auth."""
+        self._reauth_entry = self.hass.config_entries.async_get_entry(
+            self.context["entry_id"]
+        )
        return await self.async_step_user()
codereview_new_python_data_9783
    async def async_will_remove_from_hass(self) -> None:

    @async_log_errors
    async def async_update(self) -> None:
        """Get the latest status information from device."""
        await self._receiver.async_update()
        if self._update_audyssey:
            await self._receiver.async_update_audyssey()

When the telnet connection is up and running, we don't need to update the entire receiver anymore, but only attributes like interpret, title etc. which are not covered by telnet. This would save some API calls.
```suggestion
        """Get the latest status information from device."""
        if self._receiver.telnet_connected is True and self._receiver.telnet_healthy is True
            await self._receiver.input.async_update_media_state()
            return
```

    async def async_will_remove_from_hass(self) -> None:

    @async_log_errors
    async def async_update(self) -> None:
        """Get the latest status information from device."""
+        if (
+            self._receiver.telnet_connected is True
+            and self._receiver.telnet_healthy is True
+        ):
+            await self._receiver.input.async_update_media_state()
+            return
+
        await self._receiver.async_update()
        if self._update_audyssey:
            await self._receiver.async_update_audyssey()
codereview_new_python_data_9784
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

-    async def disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
-        if (
-            entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
-            and receiver is not None
-        ):
            await receiver.async_telnet_disconnect()

-    if entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET):
        entry.async_on_unload(
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect)
        )

    return True

```suggestion
    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

    use_telnet = entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
```

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

+    use_telnet = entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
+
+    async def _async_disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
+        if use_telnet and receiver is not None:
            await receiver.async_telnet_disconnect()

+    if use_telnet:
        entry.async_on_unload(
+            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect)
        )

    return True
codereview_new_python_data_9785
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

-    async def disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
-        if (
-            entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
-            and receiver is not None
-        ):
            await receiver.async_telnet_disconnect()

-    if entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET):
        entry.async_on_unload(
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect)
        )

    return True

```suggestion
    use_telnet
```

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

+    use_telnet = entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
+
+    async def _async_disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
+        if use_telnet and receiver is not None:
            await receiver.async_telnet_disconnect()

+    if use_telnet:
        entry.async_on_unload(
+            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect)
        )

    return True
codereview_new_python_data_9786
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

-    async def disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
-        if (
-            entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
-            and receiver is not None
-        ):
            await receiver.async_telnet_disconnect()

-    if entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET):
        entry.async_on_unload(
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect)
        )

    return True

```suggestion
    async def _async_disconnect(event: Event) -> None:
```
Since Home Assistant has both sync and async functions in the codebase, we prefix ones that are safe to run in the event loop and do not do any blocking I/O with `async_`

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

+    use_telnet = entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
+
+    async def _async_disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
+        if use_telnet and receiver is not None:
            await receiver.async_telnet_disconnect()

+    if use_telnet:
        entry.async_on_unload(
+            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect)
        )

    return True
codereview_new_python_data_9787
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

-    async def disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
-        if (
-            entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
-            and receiver is not None
-        ):
            await receiver.async_telnet_disconnect()

-    if entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET):
        entry.async_on_unload(
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect)
        )

    return True

```suggestion
            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect)
```

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

+    use_telnet = entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
+
+    async def _async_disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
+        if use_telnet and receiver is not None:
            await receiver.async_telnet_disconnect()

+    if use_telnet:
        entry.async_on_unload(
+            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect)
        )

    return True
codereview_new_python_data_9788
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

-    async def disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
-        if (
-            entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
-            and receiver is not None
-        ):
            await receiver.async_telnet_disconnect()

-    if entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET):
        entry.async_on_unload(
-            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, disconnect)
        )

    return True

```suggestion
    if use_telnet:
```

async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    }

    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)

+    use_telnet = entry.options.get(CONF_USE_TELNET, DEFAULT_USE_TELNET)
+
+    async def _async_disconnect(event: Event) -> None:
        """Disconnect from Telnet."""
+        if use_telnet and receiver is not None:
            await receiver.async_telnet_disconnect()

+    if use_telnet:
        entry.async_on_unload(
+            hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_disconnect)
        )

    return True
codereview_new_python_data_9789
    def __init__(

        self._component_key = component_key
        self._key = key
        self._state_type = state_type
-        if entry_data.device_info is not None:
-            self._attr_has_entity_name = entry_data.device_info.friendly_name not in (
-                None,
-                "",
-            )

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""

```suggestion
        if entry_data.device_info is not None and entry_data.device_info.friendly_name:
            self._attr_has_entity_name = entry_data.device_info.friendly_name
```
Is a truth check ok here?

    def __init__(

        self._component_key = component_key
        self._key = key
        self._state_type = state_type
+        if entry_data.device_info is not None and entry_data.device_info.friendly_name:
+            self._attr_has_entity_name = entry_data.device_info.friendly_name

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
codereview_new_python_data_9790
    def __init__(

        self._key = key
        self._state_type = state_type
        if entry_data.device_info is not None and entry_data.device_info.friendly_name:
-            self._attr_has_entity_name = entry_data.device_info.friendly_name

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""

`friendly_name` is typed to be a `str`. I'm not sure where the mypy error is coming from.

    def __init__(

        self._key = key
        self._state_type = state_type
        if entry_data.device_info is not None and entry_data.device_info.friendly_name:
+            self._attr_has_entity_name = True

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
codereview_new_python_data_9791
    def __init__(

        self._key = key
        self._state_type = state_type
        if entry_data.device_info is not None and entry_data.device_info.friendly_name:
-            self._attr_has_entity_name = entry_data.device_info.friendly_name

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""

```suggestion
            self._attr_has_entity_name = True
```

    def __init__(

        self._key = key
        self._state_type = state_type
        if entry_data.device_info is not None and entry_data.device_info.friendly_name:
+            self._attr_has_entity_name = True

    async def async_added_to_hass(self) -> None:
        """Register callbacks."""
codereview_new_python_data_9792
    def icon(self) -> str:

class ISYEnableSwitchEntity(ISYAuxControlEntity, SwitchEntity):
-    """A representation of an ISY program switch."""

    def __init__(
        self,

```suggestion
    """A representation of an ISY enable/disable switch."""
```

    def icon(self) -> str:

class ISYEnableSwitchEntity(ISYAuxControlEntity, SwitchEntity):
+    """A representation of an ISY enable/disable switch."""

    def __init__(
        self,
codereview_new_python_data_9793
async def async_modbus_setup(

    hubs = hass.data[DOMAIN]
    for name in hubs:
        if not await hubs[name].async_setup():
-            return False  # pragma: no cover

    hass.data[DOMAIN] = hub_collect = {}
    for conf_hub in config[DOMAIN]:

I don't think we accept `no cover`. You should add a test instead, or leave uncovered.

async def async_modbus_setup(

    hubs = hass.data[DOMAIN]
    for name in hubs:
        if not await hubs[name].async_setup():
+            return False

    hass.data[DOMAIN] = hub_collect = {}
    for conf_hub in config[DOMAIN]:
codereview_new_python_data_9794
    def __init__(

        )
        self.entity_description = description
        self._data_service = data_service
-        self._attr_available = False

    def update(self) -> None:
        """Get the status report from APCUPSd and set this entity's state."""

This makes the entity unavailable during startup, which seems incorrect? It should be available, but unknown?

    def __init__(

        )
        self.entity_description = description
        self._data_service = data_service

    def update(self) -> None:
        """Get the status report from APCUPSd and set this entity's state."""
codereview_new_python_data_9967
def _RenderConfig(vm,

  # scale with the cluster.
  num_reduce_tasks = reduces_per_node * num_workers

  if _BLOCKSIZE_OVERRIDE.value:
-    block_size = _BLOCKSIZE_OVERRIDE * 1024 * 1024

  if vm.scratch_disks:
    # TODO(pclay): support multiple scratch disks. A current suboptimal

This should just be `.value:` (no parentheses).

def _RenderConfig(vm,

  # scale with the cluster.
  num_reduce_tasks = reduces_per_node * num_workers

  if _BLOCKSIZE_OVERRIDE.value:
+    block_size = _BLOCKSIZE_OVERRIDE.value * 1024 * 1024

  if vm.scratch_disks:
    # TODO(pclay): support multiple scratch disks. A current suboptimal
codereview_new_python_data_9996
    def _create_workspace(self, ws_2D=True, sample=True, xAx=True, yAxSpec=True, yAx

        if not yAxMt and not yAxSpec:
            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
            LoadInstrument(ws, True, InstrumentName="TOFTOF")
-            ConvertSpectrumAxis(InputWorkspace=ws, OutputWorkspace="ws2", Target="theta", EMode="Direct")
-            ws2 = mtd["ws2"]
-            return ws2
        if not yAxSpec and yAxMt:
            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
            LoadInstrument(ws, True, InstrumentName="TOFTOF")

```suggestion
            output_ws = ConvertSpectrumAxis(InputWorkspace=ws, Target="theta", EMode="Direct")
            return output_ws
```

    def _create_workspace(self, ws_2D=True, sample=True, xAx=True, yAxSpec=True, yAx

        if not yAxMt and not yAxSpec:
            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
            LoadInstrument(ws, True, InstrumentName="TOFTOF")
+            output_ws = ConvertSpectrumAxis(InputWorkspace=ws, Target="theta", EMode="Direct")
+            return output_ws
        if not yAxSpec and yAxMt:
            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
            LoadInstrument(ws, True, InstrumentName="TOFTOF")
codereview_new_python_data_9998
def PyInit(self): ) def PyExec(self): - ok2run = "" - try: - import matplotlib - except ImportError: - ok2run = "Problem importing matplotlib" - - if ok2run != "": - raise RuntimeError(ok2run) matplotlib.use("agg") import matplotlib.pyplot as plt I think we don't really need any of this `ok2run` stuff here. If it can't import `matplotlib` then a useful enough exception will be raised anyway. def PyInit(self): ) def PyExec(self): + import matplotlib + matplotlib.use("agg") import matplotlib.pyplot as plt
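One ordering detail the simplified code preserves: the backend has to be selected before `pyplot` is first imported, which is why `matplotlib.use("agg")` sits between the two imports. A standalone illustration of the pattern:

```python
import matplotlib
matplotlib.use("agg")  # pick a headless backend before pyplot is imported
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
fig.savefig("plot.png")  # no display needed with the agg backend
```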
codereview_new_python_data_10000
def _urljoin(base, url):
    """
    Join relative URLs to base URLs like urllib.parse.urljoin but support
    arbitrary URIs (esp. 'http+unix://').
    """
    parsed = urlparse(base)
    scheme = parsed.scheme
    return urlparse(
-        urljoin(parsed._replace(scheme='http').geturl(), url if parsed.path == '' else parsed.path + url)
    )._replace(scheme=scheme).geturl()
I'm not sure I understand the end result impact here. I also don't understand why you do `parsed.path + url` instead of `url + parsed.path`. It would be preferred if you could provide a unittest in https://github.com/spotify/luigi/blob/master/test/rpc_test.py to show this behavior.
def _urljoin(base, url):
    """
    Join relative URLs to base URLs like urllib.parse.urljoin but support
    arbitrary URIs (esp. 'http+unix://').
+    The base part is fixed (e.g. a mount point); every url contains the full base part.
    """
    parsed = urlparse(base)
    scheme = parsed.scheme
    return urlparse(
+        urljoin(parsed._replace(scheme='http').geturl(), parsed.path + (url if url[0] == '/' else '/' + url))
    )._replace(scheme=scheme).geturl()
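A unittest along the lines the reviewer asks for might look like the sketch below. The import location and the asserted strings are assumptions derived from the patched implementation above, so they would need to be checked against the real module before landing in `test/rpc_test.py`:

```python
import unittest

from luigi.rpc import _urljoin  # assumed import location


class UrlJoinTest(unittest.TestCase):
    def test_plain_http_base(self):
        self.assertEqual(
            _urljoin('http://localhost:8082', '/api/graph'),
            'http://localhost:8082/api/graph')

    def test_base_with_mount_point(self):
        # The mounted path must be preserved in every joined URL.
        self.assertEqual(
            _urljoin('http://localhost:8082/luigi', '/api/graph'),
            'http://localhost:8082/luigi/api/graph')

    def test_http_unix_scheme_is_preserved(self):
        self.assertEqual(
            _urljoin('http+unix://%2Fvar%2Frun%2Fluigi.sock', '/api/graph'),
            'http+unix://%2Fvar%2Frun%2Fluigi.sock/api/graph')
```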
codereview_new_python_data_10001
def custom_complete(complete_fn): """ def __init__(self, requirements, custom_complete=None): - super(DynamicRequirements, self).__init__() # store attributes self.requirements = requirements since Luigi only officially supports Python 3 now, this could be written `super().__init__()`. No need to change. Just pointing out. def custom_complete(complete_fn): """ def __init__(self, requirements, custom_complete=None): + super().__init__() # store attributes self.requirements = requirements
codereview_new_python_data_10067
def __init__(self,
                 'use `dist_backend` instead.')
        ioa_thrs = kwargs.pop('ioa_thrs', None)
-        if ioa_thrs is not None and 'iof_thrs' not in kwargs:
-            kwargs['iof_thrs'] = ioa_thrs
            warnings.warn(
                'DeprecationWarning: The `ioa_thrs` parameter of '
                '`OpenImagesMetric` is deprecated, use `iof_thrs` instead!')
`iof_thrs` is an argument, not in `**kwargs`.
def __init__(self,
                 'use `dist_backend` instead.')
        ioa_thrs = kwargs.pop('ioa_thrs', None)
+        if ioa_thrs is not None:
+            iof_thrs = ioa_thrs
            warnings.warn(
                'DeprecationWarning: The `ioa_thrs` parameter of '
                '`OpenImagesMetric` is deprecated, use `iof_thrs` instead!')
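Because `iof_thrs` is a named parameter, the deprecated `ioa_thrs` spelling has to be rebound to the local name rather than pushed back into `**kwargs`. A condensed sketch of the resulting pattern (the signature and default shown are abbreviations, not the metric's real interface):

```python
import warnings


class OpenImagesMetric:
    def __init__(self, iof_thrs=0.5, **kwargs):
        # Accept the deprecated alias, but warn and map it onto the
        # properly named argument.
        ioa_thrs = kwargs.pop('ioa_thrs', None)
        if ioa_thrs is not None:
            iof_thrs = ioa_thrs
            warnings.warn(
                'DeprecationWarning: The `ioa_thrs` parameter of '
                '`OpenImagesMetric` is deprecated, use `iof_thrs` instead!')
        self.iof_thrs = iof_thrs
```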
codereview_new_python_data_10068
class DumpDetResults(DumpResults): segmentation masks into RLE format. Args: - keep_gt (bool): Whether dumped `gt_instances` simultaneously. It should be True if offline VOCMetric is used. Defaults to False. keep_gt_ignore (bool): Whether dumped `ignored_instances` simultaneously. It should be True if offline VOCMetric is used. ```suggestion keep_gt_ignore (bool): Whether to dump `ignored_instances` ``` class DumpDetResults(DumpResults): segmentation masks into RLE format. Args: + keep_gt (bool): Whether to dump `gt_instances` simultaneously. It should be True if offline VOCMetric is used. Defaults to False. keep_gt_ignore (bool): Whether dumped `ignored_instances` simultaneously. It should be True if offline VOCMetric is used.
codereview_new_python_data_10069
def parse_args(): help='Checkpoint file root path. If set, load checkpoint before test.') parser.add_argument('--img', default='demo/demo.jpg', help='Image file') parser.add_argument('--models', nargs='+', help='models name to inference') - - parser.add_argument( - '--inference-time', - action='store_true', - help='Test inference time by run 10 times for each model.') parser.add_argument( '--batch-size', type=int, Is this a redundant argument? def parse_args(): help='Checkpoint file root path. If set, load checkpoint before test.') parser.add_argument('--img', default='demo/demo.jpg', help='Image file') parser.add_argument('--models', nargs='+', help='models name to inference') parser.add_argument( '--batch-size', type=int,
codereview_new_python_data_10070
class DetDataPreprocessor(ImgDataPreprocessor): boxtype2tensor (bool): Whether to keep the ``BaseBoxes`` type of bboxes data or not. Defaults to True. non_blocking (bool): Whether block current process - when transferring data to device. New in version v0.3.0. - Defaults to False. batch_augments (list[dict], optional): Batch-level augmentations """ mmdet 3.0.0rc6 or mmengine v0.3.0? class DetDataPreprocessor(ImgDataPreprocessor): boxtype2tensor (bool): Whether to keep the ``BaseBoxes`` type of bboxes data or not. Defaults to True. non_blocking (bool): Whether block current process + when transferring data to device. Defaults to False. batch_augments (list[dict], optional): Batch-level augmentations """
codereview_new_python_data_10071
def __init__(self, def forward(self, x): x = self.conv1(x) - if x.dtype == torch.float16: - x = x.float() - x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1).half() - else: x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) x = self.conv2(x) return x ```suggestion with torch.autocast(enabled=False): x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) ``` def __init__(self, def forward(self, x): x = self.conv1(x) + with torch.autocast(enabled=False): x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1) x = self.conv2(x) return x
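The suggestion works by locally disabling automatic mixed precision, so the concatenation runs on the tensors' real dtypes rather than autocast-promoted ones. A standalone sketch of the mechanism (requires a CUDA device; note that current PyTorch spells the context manager with an explicit `device_type`, e.g. `torch.autocast('cuda', enabled=False)`):

```python
import torch

a = torch.randn(4, 4, device='cuda')
b = torch.randn(4, 4, device='cuda')

with torch.autocast('cuda'):
    c = a @ b                      # autocast runs this matmul in float16
    assert c.dtype == torch.float16
    with torch.autocast('cuda', enabled=False):
        # Autocast is off here: ops see the tensors' actual dtypes, so
        # half-precision intermediates must be cast back explicitly.
        d = torch.cat([c.float(), c.float()], dim=1)
assert d.dtype == torch.float32
```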
codereview_new_python_data_10072
# Copyright (c) OpenMMLab. All rights reserved. - -# ======================================== -# Modified by Shoufa Chen -# ======================================== -# Modified by Peize Sun, Rufeng Zhang -# Contact: {sunpeize, cxrfzhang}@foxmail.com -# -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - from mmdet.models import SingleStageDetector from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig This file seems to be created by us, thus, we only need to keep the copyright of open-mmlab. # Copyright (c) OpenMMLab. All rights reserved. from mmdet.models import SingleStageDetector from mmdet.registry import MODELS from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
codereview_new_python_data_10073
def init_weights(self) -> None:
                nn.init.xavier_normal_(p)
        # init_weights defined in MultiScaleDeformableAttention
-        for layer in self.encoder.layers:
-            attn = layer.self_attn
-            if isinstance(attn, MultiScaleDeformableAttention):
-                attn.init_weights()
    def forward(self, feats: List[Tensor]) -> Tuple[Tensor, Tensor]:
        """
I recommend using
```
for m in self.encoder.layers.modules():
    if isinstance(m, MultiScaleDeformableAttention):
        m.init_weights()
```
def init_weights(self) -> None:
                nn.init.xavier_normal_(p)
        # init_weights defined in MultiScaleDeformableAttention
+        for m in self.encoder.layers.modules():
+            if isinstance(m, MultiScaleDeformableAttention):
+                m.init_weights()
    def forward(self, feats: List[Tensor]) -> Tuple[Tensor, Tensor]:
        """
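The practical difference is that iterating `self.encoder.layers` only visits the direct children, while `Module.modules()` walks the whole subtree (including the container itself), so nested attention blocks are found wherever they live. A small illustration with plain `nn` modules standing in for the attention layers:

```python
import torch.nn as nn

layers = nn.Sequential(
    nn.Sequential(nn.Linear(4, 4), nn.ReLU()),
    nn.Linear(4, 4),
)

# Direct iteration sees only the two top-level children.
print([type(m).__name__ for m in layers])
# ['Sequential', 'Linear']

# .modules() recurses and also yields the container itself.
print([type(m).__name__ for m in layers.modules()])
# ['Sequential', 'Sequential', 'Linear', 'ReLU', 'Linear']
```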
codereview_new_python_data_10074
def calc_region(bbox: Tensor,
    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
-        featmap_size (tuple, Optional): Feature map size used for
-            clipping the boundary. Defaults to None.
    Returns:
        tuple: x1, y1, x2, y2
It would be better if we indicate that the featmap_size is in (height, width) order.
def calc_region(bbox: Tensor,
    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
+        featmap_size (tuple, Optional): Feature map size in (height, width)
+            order used for clipping the boundary. Defaults to None.
    Returns:
        tuple: x1, y1, x2, y2
codereview_new_python_data_10075
def _init_layers(self) -> None:
        # in DAB-DETR (prelu in transformer and relu in reg_branch)
        self.fc_reg = Linear(self.embed_dims, 4)
-        # Note function _load_from_state_dict is deleted without
-        # supporting refactor-DETR in mmdetection 2.x
    def forward(self, hidden_states: Tensor) -> Tuple[Tensor]:
        """"Forward function.
I cannot remember. What does this mean?
def _init_layers(self) -> None:
        # in DAB-DETR (prelu in transformer and relu in reg_branch)
        self.fc_reg = Linear(self.embed_dims, 4)
    def forward(self, hidden_states: Tensor) -> Tuple[Tensor]:
        """"Forward function.
codereview_new_python_data_10076
def main(): save_path = args.save_path suffix = os.path.splitext(save_path)[-1] - assert suffix in ['py', 'json', 'yml'] if not os.path.exists(os.path.split(save_path)[0]): os.makedirs(os.path.split(save_path)[0]) ```suggestion assert suffix in ['.py', '.json', '.yml'] ``` def main(): save_path = args.save_path suffix = os.path.splitext(save_path)[-1] + assert suffix in ['.py', '.json', '.yml'] if not os.path.exists(os.path.split(save_path)[0]): os.makedirs(os.path.split(save_path)[0])
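The root cause is that `os.path.splitext` keeps the leading dot in the extension it returns, so the membership test has to compare dotted suffixes. A quick demonstration:

```python
import os.path

print(os.path.splitext('configs/save.json'))   # ('configs/save', '.json')

suffix = os.path.splitext('configs/save.json')[-1]
assert suffix in ['.py', '.json', '.yml']      # 'json' alone would never match
```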
codereview_new_python_data_10077
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
Are these two lines, `model.to(device)` and `model.eval()`, unnecessary?
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
+
+    if device == 'npu':
+        from mmcv.device.npu import NPUDataParallel
+        model = NPUDataParallel(model)
+        model.cfg = config
+
    return model
codereview_new_python_data_10078
import mmcv
import numpy as np
import torch
-from mmengine.registry import init_default_scope
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
-init_default_scope('mmdet')
class MMdetHandler(BaseHandler):
    threshold = 0.5
This is a bit strange. Is it needed?
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
    threshold = 0.5
codereview_new_python_data_10079
def main():
    dataset = DATASETS.build(cfg.test_dataloader.dataset)
    predictions = mmengine.load(args.pkl_results)
-    evaluator = Evaluator(cfg.test_evaluator)
    evaluator.dataset_meta = dataset.metainfo
    eval_results = evaluator.offline_evaluate(predictions)
    print(eval_results)
The test set generally has no labels, so offline evaluation on the test set is not reasonable; it should be val.
def main():
    dataset = DATASETS.build(cfg.test_dataloader.dataset)
    predictions = mmengine.load(args.pkl_results)
+    evaluator = Evaluator(cfg.val_evaluator)
    evaluator.dataset_meta = dataset.metainfo
    eval_results = evaluator.offline_evaluate(predictions)
    print(eval_results)
codereview_new_python_data_10080
def transform(self, results: dict) -> dict: img_meta = {} for key in self.meta_keys: - assert key in results, f'`{key}` is not found in `results`' img_meta[key] = results[key] data_sample.set_metainfo(img_meta) Can also print the valid keys. ```suggestion assert key in results, f'`{key}` is not found in `results`, the valid keys are {list(results)}.' ``` def transform(self, results: dict) -> dict: img_meta = {} for key in self.meta_keys: + assert key in results, f'`{key}` is not found in `results`, the valid keys are {list(results)}.' img_meta[key] = results[key] data_sample.set_metainfo(img_meta)
codereview_new_python_data_10081
_base_ = [
-    '../common/ms_3x_coco_instance.py',
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py'
]
![image](https://user-images.githubusercontent.com/26483343/186651496-1cd7ae39-d5a7-4290-afba-5c478f697cfb.png)
Why `ms_3x_coco_instance.py`? Should it be `ms-3x_coco-instance`?
_base_ = [
+    '../common/ms_3x_coco-instance.py',
    '../_base_/models/cascade-mask-rcnn_r50_fpn.py'
]
codereview_new_python_data_10082
def add_datasample( Args: name (str): The image identifier. image (np.ndarray): The image to draw. - data_sample (:obj:`DetDataSample`, optional):The - annotation and prediction data of every samples. Defaults to None. draw_gt (bool): Whether to draw GT DetDataSample. Default to True. draw_pred (bool): Whether to draw Prediction DetDataSample. ```suggestion A batch of data samples that contain annotations and predictions. ``` def add_datasample( Args: name (str): The image identifier. image (np.ndarray): The image to draw. + data_sample (:obj:`DetDataSample`, optional): A data + sample that contain annotations and predictions. Defaults to None. draw_gt (bool): Whether to draw GT DetDataSample. Default to True. draw_pred (bool): Whether to draw Prediction DetDataSample.
codereview_new_python_data_10083
def main(args): visualizer.add_datasample( 'result', img, - pred_sample=result, draw_gt=False, show=args.out_file is None, wait_time=0, `pred_sample=result` -> `data_sample=result` def main(args): visualizer.add_datasample( 'result', img, + data_sample=result, draw_gt=False, show=args.out_file is None, wait_time=0,
codereview_new_python_data_10084
class DetDataPreprocessor(ImgDataPreprocessor):
    rgb_to_bgr (bool): whether to convert image from RGB to BGR.
        Defaults to False.
    with_box_wrapped (bool): Whether to keep the ``BaseBoxes`` wrapper of
-        bboxes data.
    batch_augments (list[dict], optional): Batch-level augmentations
    """
```suggestion
        bboxes data. Defaults to False.
```
class DetDataPreprocessor(ImgDataPreprocessor):
    rgb_to_bgr (bool): whether to convert image from RGB to BGR.
        Defaults to False.
    with_box_wrapped (bool): Whether to keep the ``BaseBoxes`` wrapper of
+        bboxes data. Defaults to False.
    batch_augments (list[dict], optional): Batch-level augmentations
    """
codereview_new_python_data_10085
def loss_by_feat(self, mlvl_kernel_preds: List[Tensor], num_pos = 0 for img_pos_masks in pos_masks: for lvl_img_pos_masks in img_pos_masks: num_pos += lvl_img_pos_masks.sum() loss_mask = [] for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds, Are these two operations equivalent and is it safe? def loss_by_feat(self, mlvl_kernel_preds: List[Tensor], num_pos = 0 for img_pos_masks in pos_masks: for lvl_img_pos_masks in img_pos_masks: + # Fix `Tensor` object has no attribute `count_nonzero()` + # in PyTorch 1.6, the type of `lvl_img_pos_masks` + # should be `torch.bool`. num_pos += lvl_img_pos_masks.sum() loss_mask = [] for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds,
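The two operations do agree here: on a boolean (or 0/1) tensor, `sum()` counts exactly the nonzero elements, which is what `count_nonzero()` returns; the rewrite only exists because `Tensor.count_nonzero` is missing in PyTorch 1.6. A quick check of the equivalence:

```python
import torch

mask = torch.tensor([True, False, True, True])

assert mask.sum().item() == 3            # bool True counts as 1
assert mask.count_nonzero().item() == 3  # same result on PyTorch >= 1.7
```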
codereview_new_python_data_10086
def get_ignores(self, dt_boxes, gt_boxes): class Image(object): - """Data structure for evaluation of CrowdHuman. Please refer to - https://github.com/Purkialo/CrowdDet for more details. Args: mode (int): Select the mode of evaluate. Valid mode include The reference here is not enough. Seems that the code is modified from https://github.com/Purkialo/CrowdDet/blob/master/lib/evaluate/APMRToolkits/image.py. If so, we should add a note here: ``` """Data structure for evaluation of CrowdHuman. Note: This implementation is modified from https://github.com/Purkialo/CrowdDet/blob/master/lib/evaluate/APMRToolkits/image.py Args: def get_ignores(self, dt_boxes, gt_boxes): class Image(object): + """Data structure for evaluation of CrowdHuman. + + Note: + This implementation is modified from https://github.com/Purkialo/ + CrowdDet/blob/master/lib/evaluate/APMRToolkits/image.py Args: mode (int): Select the mode of evaluate. Valid mode include
codereview_new_python_data_10087
def transform(self, results: dict) -> Union[dict, None]:
        tests.append((w > self.min_gt_bbox_wh[0])
                     & (h > self.min_gt_bbox_wh[1]))
        if self.by_mask:
            gt_masks = results['gt_masks']
            tests.append(gt_masks.areas >= self.min_gt_mask_area)
If we delete L671-678, we may keep the asserting logic in `self.by_mask`?
def transform(self, results: dict) -> Union[dict, None]:
        tests.append((w > self.min_gt_bbox_wh[0])
                     & (h > self.min_gt_bbox_wh[1]))
        if self.by_mask:
+            assert 'gt_masks' in results
            gt_masks = results['gt_masks']
            tests.append(gt_masks.areas >= self.min_gt_mask_area)
codereview_new_python_data_10116
def _read_next_step(self) -> EDRStep: StopIteration When end of auxiliary data set is reached. """ - SINGLE_FRAME = True if self.n_steps == 1 else False auxstep = self.auxstep new_step = self.step + 1 if new_step < self.n_steps: ```suggestion SINGLE_FRAME = self.n_steps == 1 ``` def _read_next_step(self) -> EDRStep: StopIteration When end of auxiliary data set is reached. """ + SINGLE_FRAME = self.n_steps == 1 auxstep = self.auxstep new_step = self.step + 1 if new_step < self.n_steps:
codereview_new_python_data_10117
def rmsd(a, b, weights=None, center=False, superposition=False):
    >>> u = mda.Universe(PSF, DCD)
    >>> bb = u.select_atoms('backbone')
    >>> A = bb.positions.copy()  # coordinates of first frame
-    >>> last_frame = u.trajectory[-1]     # forward to last frame
    >>> B = bb.positions.copy()  # coordinates of last frame
    >>> rmsd(A, B, center=True)
    6.838544558398293
Don't assign to `last_frame`; just go forward in place. Other than that, it looks great!
def rmsd(a, b, weights=None, center=False, superposition=False):
    >>> u = mda.Universe(PSF, DCD)
    >>> bb = u.select_atoms('backbone')
    >>> A = bb.positions.copy()  # coordinates of first frame
+    >>> _ = u.trajectory[-1]  # forward to last frame
    >>> B = bb.positions.copy()  # coordinates of last frame
    >>> rmsd(A, B, center=True)
    6.838544558398293
codereview_new_python_data_10118
def rmsd(a, b, weights=None, center=False, superposition=False):
    >>> rmsd(A, B, center=True)
    6.838544558398293
    .. versionchanged: 0.8.1
       *center* keyword added
    .. versionchanged: 0.14.0
```suggestion
    .. versionchanged: 0.8.1
```
This is a slightly separate issue, but it looks like the versionchanged entries are being ignored because doctests need a blank line and so do versionchanged entries. I'll self merge this fix and see if it fixes the problem.
def rmsd(a, b, weights=None, center=False, superposition=False):
    >>> rmsd(A, B, center=True)
    6.838544558398293
+
    .. versionchanged: 0.8.1
       *center* keyword added
    .. versionchanged: 0.14.0
codereview_new_python_data_10119
class TestLammpsDataPairIJ(LammpsBase):
    """Tests the reading of lammps .data topology file with a PairIJ Coeffs
    section
    """
-
    expected_attrs = ['types', 'resids', 'masses', 'bonds', 'angles',
                      'dihedrals', 'impropers']
    ref_filename = LAMMPSdata_PairIJ
```suggestion
```
No worries about the darker linting; as long as it's not breaching PEP8, it ends up being just `black` trying to enforce its style. That being said, the above "whitespace on an empty line" is a PEP8 violation (I suspect the suggested edit here won't work well; essentially the empty line is fine, but it can't have any whitespace on it). Sorry about this: as @orbeckst explains re: keeping maintenance costs down, we end up requiring a lot of weird formatting rules that do tend to be rather unfriendly to new contributors :(
class TestLammpsDataPairIJ(LammpsBase):
    """Tests the reading of lammps .data topology file with a PairIJ Coeffs
    section
    """
+
    expected_attrs = ['types', 'resids', 'masses', 'bonds', 'angles',
                      'dihedrals', 'impropers']
    ref_filename = LAMMPSdata_PairIJ
codereview_new_python_data_10120
def sequence_alignment(mobile, reference, match_score=2, mismatch_penalty=-1, AlignmentTuple = collections.namedtuple( "Alignment", ["seqA", "seqB", "score", "start", "end"]) - # extract sequences (there's no obvious way to get the character - # representation with gaps by other means from the new - # Bio.Align.PairwiseAlignment instance) - seqA, _, seqB, _ = topalignment.format().split("\n") - # start/stop are not particularly meaningful and there's no obvious way to # get the old pairwise2 start/stop from the new PairwiseAligner output. - return AlignmentTuple(seqA, seqB, topalignment.score, 0, max(reference.n_residues, mobile.n_residues)) def fasta2select(fastafilename, is_aligned=False, This only worked until Bio 1.79. Then they changed the format. I knew that parsing str was iffy but luckily our tests showed me right away _how_ iffy that was. I am now trying to find another way to get our old output. def sequence_alignment(mobile, reference, match_score=2, mismatch_penalty=-1, AlignmentTuple = collections.namedtuple( "Alignment", ["seqA", "seqB", "score", "start", "end"]) # start/stop are not particularly meaningful and there's no obvious way to # get the old pairwise2 start/stop from the new PairwiseAligner output. + return AlignmentTuple(topalignment[0], topalignment[1], + topalignment.score, 0, max(reference.n_residues, mobile.n_residues)) def fasta2select(fastafilename, is_aligned=False,
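The fix leans on the newer Biopython API, where indexing the alignment object yields the gapped sequence strings directly, so there is no need to parse `format()` output. A minimal sketch of that behavior (row indexing on pairwise alignments is available in recent Biopython releases; the exact version cutoff should be checked against the installed package):

```python
from Bio import Align

aligner = Align.PairwiseAligner()
alignments = aligner.align("ACCGT", "ACGT")
top = alignments[0]          # best-scoring alignment

seqA = top[0]                # gapped first sequence, e.g. 'ACCGT'
seqB = top[1]                # gapped second sequence, e.g. 'AC-GT'
print(seqA, seqB, top.score)
```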
codereview_new_python_data_10121
def _read_frame(self, i): timestep = self._read_next_timestep() return timestep - def _read_next_timestep(self, ts=None): - # NOTE: TRR implements its own version - """copy next frame into timestep""" - if self._frame == self.n_frames - 1: - raise IOError(errno.EIO, 'trying to go over trajectory limit') - if ts is None: - ts = self.ts - if ts.has_positions: - frame = self._xdr.read_direct(ts.positions) - else: - frame = self._xdr.read() - self._frame += 1 - self._frame_to_ts(frame, ts) - return ts - def Writer(self, filename, n_atoms=None, **kwargs): """Return writer for trajectory format""" if n_atoms is None: ditch and implement individually in XTC and TRR def _read_frame(self, i): timestep = self._read_next_timestep() return timestep def Writer(self, filename, n_atoms=None, **kwargs): """Return writer for trajectory format""" if n_atoms is None:
codereview_new_python_data_10122
def _read_next_timestep(self, ts=None): if self._frame == self.n_frames - 1: raise IOError('trying to go over trajectory limit') if ts is None: - # use a copy to avoid that ts always points to the same reference - # removing this breaks lammps reader - ts = self.ts # why is this copy required ?? frame = self._file.read() self._frame += 1 self._frame_to_ts(frame, ts) Do the other readers not set self.ts? def _read_next_timestep(self, ts=None): if self._frame == self.n_frames - 1: raise IOError('trying to go over trajectory limit') if ts is None: + ts = self.ts frame = self._file.read() self._frame += 1 self._frame_to_ts(frame, ts)
codereview_new_python_data_10123
def _read_next_timestep(self, ts=None): if self._frame == self.n_frames - 1: raise IOError('trying to go over trajectory limit') if ts is None: - # use a copy to avoid that ts always points to the same reference - # removing this breaks lammps reader - ts = self.ts # why is this copy required ?? frame = self._file.read() self._frame += 1 self._frame_to_ts(frame, ts) Can this comment be removed? def _read_next_timestep(self, ts=None): if self._frame == self.n_frames - 1: raise IOError('trying to go over trajectory limit') if ts is None: + ts = self.ts frame = self._file.read() self._frame += 1 self._frame_to_ts(frame, ts)