| id | content |
|---|---|
codereview_new_python_data_12875
|
def log_error(msg: str) -> None:
*(
asyncio.create_task(
entry.async_setup(hass, integration=integration),
- name=f"setup entry {entry.title} {entry.domain} {entry.entry_id}",
)
for entry in hass.config_entries.async_entries(domain)
)
```suggestion
name=f"config entry setup {entry.title} {entry.domain} {entry.entry_id}",
```
def log_error(msg: str) -> None:
*(
asyncio.create_task(
entry.async_setup(hass, integration=integration),
+ name=f"config entry setup {entry.title} {entry.domain} {entry.entry_id}",
)
for entry in hass.config_entries.async_entries(domain)
)
|
codereview_new_python_data_12876
|
def __init__(
)
self._listeners: dict[CALLBACK_TYPE, tuple[CALLBACK_TYPE, object | None]] = {}
- job_name = f"DataUpdateCoordinator {name}"
if entry := self.config_entry:
job_name += f" {entry.title} {entry.domain} {entry.entry_id}"
self._job = HassJob(self._handle_refresh_interval, job_name)
Should we include `type(self).__name__`? Data update coordinators are often also extended using inheritance.
def __init__(
)
self._listeners: dict[CALLBACK_TYPE, tuple[CALLBACK_TYPE, object | None]] = {}
+ job_name = "DataUpdateCoordinator"
+ type_name = type(self).__name__
+ if type_name != job_name:
+ job_name += f" {type_name}"
+ job_name += f" {name}"
if entry := self.config_entry:
job_name += f" {entry.title} {entry.domain} {entry.entry_id}"
self._job = HassJob(self._handle_refresh_interval, job_name)
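For context, a runnable toy sketch (class names hypothetical, not the Home Assistant classes) of why `type(self).__name__` helps here: subclasses report their own name, and the equality check avoids repeating the base name.
```python
# Toy sketch: type(self).__name__ resolves to the subclass name.
class DataUpdateCoordinator:
    def job_name(self) -> str:
        name = "DataUpdateCoordinator"
        if (type_name := type(self).__name__) != name:
            name += f" {type_name}"
        return name


class WeatherCoordinator(DataUpdateCoordinator):
    pass


assert DataUpdateCoordinator().job_name() == "DataUpdateCoordinator"
assert WeatherCoordinator().job_name() == "DataUpdateCoordinator WeatherCoordinator"
```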
|
codereview_new_python_data_12877
|
def __init__(
device: dict[str, Any],
) -> None:
"""Initialize the Livisi Climate."""
- super().__init__(config_entry, coordinator, device)
self._target_temperature_capability = self.capabilities["RoomSetpoint"]
self._temperature_capability = self.capabilities["RoomTemperature"]
self._humidity_capability = self.capabilities["RoomHumidity"]
- # For the livisi climate entities, the device should have the room name from
- # the livisi setup, as each livisi room gets exactly one VRCC device. The entity
- # name will always be some localized value of "Climate", so the full element name
- # in homeassistent will be in the form of "Bedroom Climate"
- self._attr_device_info["name"] = self._attr_device_info["suggested_area"]
-
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperature."""
response = await self.aio_livisi.async_vrcc_set_temperature(
This is confusing.
I think you should instead add an argument (e.g. `*, use_room_as_device_name: bool = False`) to the entity `__init__` method and move the comment there.
Then here you can simply use:
`super().__init__(config_entry, coordinator, device, use_room_as_device_name=True)`
def __init__(
device: dict[str, Any],
) -> None:
"""Initialize the Livisi Climate."""
+ super().__init__(
+ config_entry, coordinator, device, use_room_as_device_name=True
+ )
self._target_temperature_capability = self.capabilities["RoomSetpoint"]
self._temperature_capability = self.capabilities["RoomTemperature"]
self._humidity_capability = self.capabilities["RoomHumidity"]
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new target temperature."""
response = await self.aio_livisi.async_vrcc_set_temperature(
|
codereview_new_python_data_12878
|
def device_class(self) -> SensorDeviceClass | None:
def _numeric_state_expected(self) -> bool:
"""Return true if the sensor must be numeric."""
device_class = try_parse_enum(SensorDeviceClass, self.device_class)
- if device_class in {*NON_NUMERIC_DEVICE_CLASSES}:
return False
if (
self.state_class is not None
You don't need to make a copy if you're not adding `None` as a valid option.
```suggestion
if device_class in NON_NUMERIC_DEVICE_CLASSES:
```
def device_class(self) -> SensorDeviceClass | None:
def _numeric_state_expected(self) -> bool:
"""Return true if the sensor must be numeric."""
device_class = try_parse_enum(SensorDeviceClass, self.device_class)
+ if device_class in NON_NUMERIC_DEVICE_CLASSES:
return False
if (
self.state_class is not None
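A quick standalone check of the review point (container contents hypothetical): unpacking into a fresh set only copies, so the membership test can use the original container directly.
```python
# Membership works the same on the original frozenset; {*...} just copies it.
NON_NUMERIC_DEVICE_CLASSES = frozenset({"date", "enum", "timestamp"})
assert ("enum" in {*NON_NUMERIC_DEVICE_CLASSES}) == ("enum" in NON_NUMERIC_DEVICE_CLASSES)
```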
|
codereview_new_python_data_12879
|
def device_class(self) -> SensorDeviceClass | None:
@property
def _numeric_state_expected(self) -> bool:
"""Return true if the sensor must be numeric."""
device_class = try_parse_enum(SensorDeviceClass, self.device_class)
if device_class in NON_NUMERIC_DEVICE_CLASSES:
return False
```suggestion
# Note: the order of the checks needs to be kept aligned
# with the checks in `state` property.
device_class = try_parse_enum(SensorDeviceClass, self.device_class)
```
def device_class(self) -> SensorDeviceClass | None:
@property
def _numeric_state_expected(self) -> bool:
"""Return true if the sensor must be numeric."""
+ # Note: the order of the checks needs to be kept aligned
+ # with the checks in `state` property.
device_class = try_parse_enum(SensorDeviceClass, self.device_class)
if device_class in NON_NUMERIC_DEVICE_CLASSES:
return False
|
codereview_new_python_data_12880
|
class AuroraSensor(AuroraEntity, SensorEntity):
"""Implementation of an aurora sensor."""
_attr_native_unit_of_measurement = PERCENTAGE
@property
def native_value(self):
"""Return % chance the aurora is visible."""
return self.coordinator.data
-
- @property
- def state_class(self):
- """Return state class for measurement."""
- return SensorStateClass.MEASUREMENT
This should be a class attribute, like the unit of measurement.
class AuroraSensor(AuroraEntity, SensorEntity):
"""Implementation of an aurora sensor."""
_attr_native_unit_of_measurement = PERCENTAGE
+ _attr_state_class = SensorStateClass.MEASUREMENT
@property
def native_value(self):
"""Return % chance the aurora is visible."""
return self.coordinator.data
|
codereview_new_python_data_12881
|
LOGGER = logging.getLogger(__package__)
-PLATFORMS: Final = [Platform.SENSOR, Platform.BUTTON]
We usually try to keep these in alphabetical order.
```suggestion
PLATFORMS: Final = [Platform.BUTTON, Platform.SENSOR]
```
LOGGER = logging.getLogger(__package__)
+PLATFORMS: Final = [Platform.BUTTON, Platform.SENSOR]
|
codereview_new_python_data_12882
|
async def async_set_temperature(self, **kwargs: Any) -> None:
OverkizCommandParam.FURTHER_NOTICE,
)
- async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
return
`hvac_mode` should be of type `HVACMode`
```suggestion
async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
```
It seems that pylint doesn't pick it up because we are inside the `climate_entities` namespace, but I think it should still be implemented.
async def async_set_temperature(self, **kwargs: Any) -> None:
OverkizCommandParam.FURTHER_NOTICE,
)
+ async def async_set_hvac_mode(self, hvac_mode: HVACMode) -> None:
"""Set new target hvac mode."""
return
|
codereview_new_python_data_12883
|
def __init__(
reolink_data: ReolinkData,
coordinator: DataUpdateCoordinator[_T],
) -> None:
- """Initialize ReolinkBaseCoordinatorEntity for a NVR entity without a channel."""
super().__init__(coordinator)
self._host = reolink_data.host
```suggestion
"""Initialize ReolinkBaseCoordinatorEntity."""
```
def __init__(
reolink_data: ReolinkData,
coordinator: DataUpdateCoordinator[_T],
) -> None:
+ """Initialize ReolinkBaseCoordinatorEntity."""
super().__init__(coordinator)
self._host = reolink_data.host
|
codereview_new_python_data_12884
|
def async_create_entity(event: ItemEvent, obj_id: str) -> None:
@callback
def async_options_updated() -> None:
- """Load new entities based on changed options.."""
async_add_unifi_entity(list(api_handler))
self.config_entry.async_on_unload(
```suggestion
"""Load new entities based on changed options."""
```
def async_create_entity(event: ItemEvent, obj_id: str) -> None:
@callback
def async_options_updated() -> None:
+ """Load new entities based on changed options."""
async_add_unifi_entity(list(api_handler))
self.config_entry.async_on_unload(
|
codereview_new_python_data_12885
|
def _async_init_flow(
# as ones in progress as it may cause additional device probing
# which can overload devices since zeroconf/ssdp updates can happen
# multiple times in the same minute
- if hass.config_entries.flow.async_has_matching_flow(domain, context, data):
- return None
-
- if hass.is_stopping:
return None
return hass.config_entries.flow.async_init(domain, context=context, data=data)
Let's move this up; it's a tiny check and faster than `has_matching_flow`.
def _async_init_flow(
# as ones in progress as it may cause additional device probing
# which can overload devices since zeroconf/ssdp updates can happen
# multiple times in the same minute
+ if hass.is_stopping or hass.config_entries.flow.async_has_matching_flow(
+ domain, context, data
+ ):
return None
return hass.config_entries.flow.async_init(domain, context=context, data=data)
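A minimal sketch (stand-in names, not the real APIs) of why the ordering matters with `or`: the cheap flag check short-circuits, so the more expensive lookup is skipped entirely when HA is stopping.
```python
def expensive_matching_flow_check() -> bool:  # stand-in for async_has_matching_flow
    raise AssertionError("never evaluated when the cheap check is True")

is_stopping = True  # stand-in for hass.is_stopping
if is_stopping or expensive_matching_flow_check():
    result = None  # short-circuit: the expensive check never ran
```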
|
codereview_new_python_data_12886
|
def iter_schemas() -> Generator[MatterDiscoverySchema, None, None]:
"""Iterate over all available discovery schemas."""
for platform_schemas in DISCOVERY_SCHEMAS.values():
- for schema in platform_schemas:
- yield schema
@callback
```suggestion
yield from platform_schemas
```
def iter_schemas() -> Generator[MatterDiscoverySchema, None, None]:
"""Iterate over all available discovery schemas."""
for platform_schemas in DISCOVERY_SCHEMAS.values():
+ yield from platform_schemas
@callback
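A self-contained sketch of the `yield from` refactor with toy data: delegation to the inner iterable replaces the explicit inner loop, with identical behavior.
```python
# Toy schemas dict standing in for DISCOVERY_SCHEMAS.
SCHEMAS = {"sensor": ["s1", "s2"], "light": ["l1"]}

def iter_schemas():
    for platform_schemas in SCHEMAS.values():
        yield from platform_schemas  # same items as the nested for/yield loop

assert list(iter_schemas()) == ["s1", "s2", "l1"]
```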
|
codereview_new_python_data_12887
|
async def setup_again(*_: Any) -> None:
await self._async_process_on_unload()
return
- except (asyncio.CancelledError, Exception): # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
So, if the goal is to prevent an integration from killing HA, there are more things to catch.
For example, an integration could now also raise a `SystemExit`, which would kill HA.
Shouldn't this just catch all? And thus have used `BaseException` instead of `Exception` in the first place?
```suggestion
except BaseException: # pylint: disable=broad-except
```
async def setup_again(*_: Any) -> None:
await self._async_process_on_unload()
return
+ # pylint: disable-next=broad-except
+ except (asyncio.CancelledError, SystemExit, Exception):
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
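A runnable illustration of the exception-hierarchy point: since Python 3.8, `asyncio.CancelledError` (like `SystemExit`) derives from `BaseException`, so a plain `except Exception` misses both.
```python
import asyncio

for exc_type in (asyncio.CancelledError, SystemExit, ValueError):
    try:
        raise exc_type()
    except Exception:
        print(f"{exc_type.__name__}: caught by `except Exception`")
    except BaseException:
        print(f"{exc_type.__name__}: needs `except BaseException`")
```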
|
codereview_new_python_data_12888
|
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
-from homeassistant.helpers.typing import ConfigType
-from .const import CONF_OBIHAI_HOST, PLATFORMS
-
-__all__ = [
- "CONF_OBIHAI_HOST",
-]
-
-
-def setup(hass: HomeAssistant, config: ConfigType) -> bool:
- """Set up the Obihai integration."""
- return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
- # Reload entry when its updated.
- entry.async_on_unload(entry.add_update_listener(async_reload_entry))
-
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
-
-
-async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
- """Reload the config entry when it changed."""
- await hass.config_entries.async_reload(entry.entry_id)
I don't think this is needed
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
+from .const import PLATFORMS
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
codereview_new_python_data_12889
|
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
-from homeassistant.helpers.typing import ConfigType
-from .const import CONF_OBIHAI_HOST, PLATFORMS
-
-__all__ = [
- "CONF_OBIHAI_HOST",
-]
-
-
-def setup(hass: HomeAssistant, config: ConfigType) -> bool:
- """Set up the Obihai integration."""
- return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
- # Reload entry when its updated.
- entry.async_on_unload(entry.add_update_listener(async_reload_entry))
-
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
-
-
-async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
- """Reload the config entry when it changed."""
- await hass.config_entries.async_reload(entry.entry_id)
No longer necessary
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
+from .const import PLATFORMS
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up from a config entry."""
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
|
codereview_new_python_data_12890
|
class ObihaiFlowHandler(ConfigFlow, domain=DOMAIN):
VERSION = 1
- def __init__(self) -> None:
- """Initialize."""
- self._host: str | None = None
-
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
`self._host` is never set
```suggestion
```
class ObihaiFlowHandler(ConfigFlow, domain=DOMAIN):
VERSION = 1
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
|
codereview_new_python_data_12891
|
LOGGER = logging.getLogger(__package__)
-CONF_OBIHAI_HOST: Final = "host"
PLATFORMS: Final = [Platform.SENSOR]
Why not use the global `CONF_HOST` constant?
LOGGER = logging.getLogger(__package__)
PLATFORMS: Final = [Platform.SENSOR]
|
codereview_new_python_data_12892
|
def icon(self):
return "mdi:restart-alert"
return "mdi:phone"
- def update(self) -> bool:
"""Update the sensor."""
if self._pyobihai.check_account():
services = self._pyobihai.get_state()
Why return a bool?
```suggestion
def update(self) -> None:
```
def icon(self):
return "mdi:restart-alert"
return "mdi:phone"
+ def update(self) -> None:
"""Update the sensor."""
if self._pyobihai.check_account():
services = self._pyobihai.get_state()
|
codereview_new_python_data_12893
|
def icon(self):
return "mdi:restart-alert"
return "mdi:phone"
- def update(self) -> bool:
"""Update the sensor."""
if self._pyobihai.check_account():
services = self._pyobihai.get_state()
You can return early here instead, and un-indent:
```suggestion
if not self._pyobihai.check_account():
self._state = None
return
```
def icon(self):
return "mdi:restart-alert"
return "mdi:phone"
+ def update(self) -> None:
"""Update the sensor."""
if self._pyobihai.check_account():
services = self._pyobihai.get_state()
|
codereview_new_python_data_12894
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
otbrdata = OTBRData(entry.data["url"], api)
try:
dataset = await otbrdata.get_active_dataset_tlvs()
- except (asyncio.TimeoutError, aiohttp.ClientError) as err:
raise ConfigEntryNotReady("Unable to connect") from err
if dataset:
await async_add_dataset(hass, entry.title, dataset.hex())
We should catch `OTBRError` too + update tests
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
otbrdata = OTBRData(entry.data["url"], api)
try:
dataset = await otbrdata.get_active_dataset_tlvs()
+ except (
+ HomeAssistantError,
+ aiohttp.ClientError,
+ asyncio.TimeoutError,
+ ) as err:
raise ConfigEntryNotReady("Unable to connect") from err
if dataset:
await async_add_dataset(hass, entry.title, dataset.hex())
|
codereview_new_python_data_12895
|
async def _node_id_from_ha_device_id(ha_device_id: str) -> int | None:
device = dev_reg.async_get(ha_device_id)
if device is None:
return None
- return await get_node_from_device_entry(hass, device)
async def open_commissioning_window(call: ServiceCall) -> None:
"""Open commissioning window on specific node."""
This returns the node, not the node id.
async def _node_id_from_ha_device_id(ha_device_id: str) -> int | None:
device = dev_reg.async_get(ha_device_id)
if device is None:
return None
+ if node := await get_node_from_device_entry(hass, device):
+ return node.node_id
+ return None
async def open_commissioning_window(call: ServiceCall) -> None:
"""Open commissioning window on specific node."""
|
codereview_new_python_data_12896
|
def __init__(self, entry_id: str, system: LiteJet, i: int, name: str) -> None:
identifiers={(DOMAIN, f"{entry_id}_keypad_{keypad_number}")},
name=f"Keypad #{keypad_number}",
manufacturer="Centralite",
)
async def async_added_to_hass(self) -> None:
```suggestion
manufacturer="Centralite",
via_device=(DOMAIN, f"{entry_id}_mcp")
```
def __init__(self, entry_id: str, system: LiteJet, i: int, name: str) -> None:
identifiers={(DOMAIN, f"{entry_id}_keypad_{keypad_number}")},
name=f"Keypad #{keypad_number}",
manufacturer="Centralite",
+ via_device=(DOMAIN, f"{entry_id}_mcp"),
)
async def async_added_to_hass(self) -> None:
|
codereview_new_python_data_12897
|
@callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
- """Exclude event_id from being recorded in the database."""
return {ATTR_EVENT_ID, ATTR_EVENT_SCORE}
```suggestion
"""Exclude event_id and event_score from being recorded in the database."""
```
@callback
def exclude_attributes(hass: HomeAssistant) -> set[str]:
+ """Exclude event_id and event_score from being recorded in the database."""
return {ATTR_EVENT_ID, ATTR_EVENT_SCORE}
|
codereview_new_python_data_12898
|
def exclude_attributes(hass: HomeAssistant) -> set[str]:
"Time to Empty",
"Time to Full Charge",
# Android mobile app
"Free internal storage",
"current",
"voltage",
Ideally we would exclude `android.*`, but we don't support wildcards here, and I'm not 100% sure we want to do that, so adding `exclude_wildcard_attributes` might be something for a future PR.
We may want to limit it to `startswith` for performance (or maybe even a pre-compiled RE), but that's not for this PR.
def exclude_attributes(hass: HomeAssistant) -> set[str]:
"Time to Empty",
"Time to Full Charge",
# Android mobile app
+ "in_vehicle",
+ "on_bicycle",
+ "on_foot",
+ "running",
+ "still",
+ "unknown",
+ "walking",
"Free internal storage",
"current",
"voltage",
|
codereview_new_python_data_12899
|
async def test_lru_increases_with_many_entities(
"""Test that the recorder's internal LRU cache increases with many entities."""
# We do not actually want to record 4096 entities, so we mock the recorder
# to not actually record anything.
- with patch(
- "homeassistant.components.recorder.Recorder._process_state_changed_event_into_session"
):
- for entity_idx in range(4096):
- hass.states.async_set(f"test.entity_{entity_idx}", "any")
- await async_wait_recording_done(hass)
-
- async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=10))
- await hass.async_block_till_done()
- current_entities_count = hass.states.async_entity_ids_count()
- assert recorder_mock._state_attributes_ids.get_size() == current_entities_count * 2
Why not patch `hass.states.async_entity_ids_count()`?
async def test_lru_increases_with_many_entities(
"""Test that the recorder's internal LRU cache increases with many entities."""
# We do not actually want to record 4096 entities, so we mock the recorder
# to not actually record anything.
+ mock_entity_count = 4096
+ with patch.object(
+ hass.states, "async_entity_ids_count", return_value=mock_entity_count
):
+ async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=10))
+ await hass.async_block_till_done()
+ assert recorder_mock._state_attributes_ids.get_size() == mock_entity_count * 2
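A self-contained sketch of the `patch.object` pattern the revised test relies on, with a stand-in object instead of `hass.states`:
```python
from unittest.mock import patch

class States:  # stand-in for hass.states
    def async_entity_ids_count(self) -> int:
        return 0

states = States()
with patch.object(states, "async_entity_ids_count", return_value=4096):
    assert states.async_entity_ids_count() == 4096
assert states.async_entity_ids_count() == 0  # the patch is undone on exit
```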
|
codereview_new_python_data_12900
|
CONF_IS_APPARENT_POWER = "is_apparent_power"
CONF_IS_AQI = "is_aqi"
-CONF_IS_ATMOSPHERIC_PRESSURE = "atmospheric_pressure"
CONF_IS_BATTERY_LEVEL = "is_battery_level"
CONF_IS_CO = "is_carbon_monoxide"
CONF_IS_CO2 = "is_carbon_dioxide"
The value for "atmospheric_pressure" is incorrect.
It is being adjusted in #88320
CONF_IS_APPARENT_POWER = "is_apparent_power"
CONF_IS_AQI = "is_aqi"
+CONF_IS_ATMOSPHERIC_PRESSURE = "is_atmospheric_pressure"
CONF_IS_BATTERY_LEVEL = "is_battery_level"
CONF_IS_CO = "is_carbon_monoxide"
CONF_IS_CO2 = "is_carbon_dioxide"
|
codereview_new_python_data_12901
|
JSON_ENCODE_EXCEPTIONS = (TypeError, ValueError)
JSON_DECODE_EXCEPTIONS = (orjson.JSONDecodeError,)
json_loads: Callable[[bytes | bytearray | memoryview | str], JsonValueType]
json_loads = orjson.loads
"""Parse JSON data."""
Maybe we should keep `WriteError` declared inside util to avoid a breaking change
JSON_ENCODE_EXCEPTIONS = (TypeError, ValueError)
JSON_DECODE_EXCEPTIONS = (orjson.JSONDecodeError,)
+
+class SerializationError(HomeAssistantError):
+ """Error serializing the data to JSON."""
+
+
+class WriteError(HomeAssistantError):
+ """Error writing the data."""
+
+
json_loads: Callable[[bytes | bytearray | memoryview | str], JsonValueType]
json_loads = orjson.loads
"""Parse JSON data."""
|
codereview_new_python_data_12902
|
async def test_washer_sensor_values(
assert entry
assert entry.disabled
assert entry.disabled_by is entity_registry.RegistryEntryDisabler.INTEGRATION
- update_entry = registry.async_update_entity(
- entry.entity_id, **{"disabled_by": None}
- )
await hass.async_block_till_done()
assert update_entry != entry
assert update_entry.disabled is False
state = hass.states.get(state_id)
assert state is None
# Test the washer cycle states
mock_instance.get_machine_state.return_value = MachineState.RunningMainCycle
mock_instance.get_cycle_status_filling.return_value = True
Reload the config entry so we can test the state with the entity enabled below.
async def test_washer_sensor_values(
assert entry
assert entry.disabled
assert entry.disabled_by is entity_registry.RegistryEntryDisabler.INTEGRATION
+
+ update_entry = registry.async_update_entity(entry.entity_id, disabled_by=None)
await hass.async_block_till_done()
+
assert update_entry != entry
assert update_entry.disabled is False
state = hass.states.get(state_id)
assert state is None
+ await hass.config_entries.async_reload(entry.config_entry_id)
+ state = hass.states.get(state_id)
+ assert state is not None
+ assert state.state == "50"
+
# Test the washer cycle states
mock_instance.get_machine_state.return_value = MachineState.RunningMainCycle
mock_instance.get_cycle_status_filling.return_value = True
|
codereview_new_python_data_12903
|
# stubbing out for integrations that have
# not yet been updated for python 3.11
# but can still run on python 3.10
from asyncio import base_futures, constants, format_helpers
from asyncio.coroutines import _is_coroutine
import collections.abc
```suggestion
# but can still run on python 3.10
#
# Remove this once rflink, fido, and blackbird
# have had their libraries updated to remove
# asyncio.coroutine
```
# stubbing out for integrations that have
# not yet been updated for python 3.11
# but can still run on python 3.10
+#
+# Remove this once rflink, fido, and blackbird
+# have had their libraries updated to remove
+# asyncio.coroutine
from asyncio import base_futures, constants, format_helpers
from asyncio.coroutines import _is_coroutine
import collections.abc
|
codereview_new_python_data_12904
|
async def async_setup_entry(
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
- """Set up the lock platform for Dormakaba dKey."""
data: DormakabaDkeyData = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
DormakabaDkeyBinarySensor(data.coordinator, data.lock, description)
```suggestion
"""Set up the binary sensor platform for Dormakaba dKey."""
```
async def async_setup_entry(
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
+ """Set up the binary sensor platform for Dormakaba dKey."""
data: DormakabaDkeyData = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
DormakabaDkeyBinarySensor(data.coordinator, data.lock, description)
|
codereview_new_python_data_12905
|
def mock_storage(
orig_load = storage.Store._async_load
- async def mock_async_load(store: storage.Store) -> None:
"""Mock version of load."""
if store._data is None:
# No data to load
This will return the storage generic type of the stored data.
https://github.com/home-assistant/core/blob/9404a107b2c052660ce40bced894ec515b238938/homeassistant/helpers/storage.py#L119
def mock_storage(
orig_load = storage.Store._async_load
+ async def mock_async_load(
+ store: storage.Store,
+ ) -> dict[str, Any] | list[Any] | None:
"""Mock version of load."""
if store._data is None:
# No data to load
|
codereview_new_python_data_12906
|
class ClassTypeHintMatch:
"mock_device_tracker_conf": "list[Device]",
"mock_get_source_ip": "None",
"mock_hass_config": "None",
- "mock_zeroconf": "None",
"mock_hass_config_yaml": "None",
"mqtt_client_mock": "MqttMockPahoClient",
"mqtt_mock": "MqttMockHAClient",
"mqtt_mock_entry_no_yaml_config": "MqttMockHAClientGenerator",
```suggestion
"mock_hass_config": "None",
"mock_hass_config_yaml": "None",
"mock_zeroconf": "None",
```
class ClassTypeHintMatch:
"mock_device_tracker_conf": "list[Device]",
"mock_get_source_ip": "None",
"mock_hass_config": "None",
"mock_hass_config_yaml": "None",
+ "mock_zeroconf": "None",
"mqtt_client_mock": "MqttMockPahoClient",
"mqtt_mock": "MqttMockHAClient",
"mqtt_mock_entry_no_yaml_config": "MqttMockHAClientGenerator",
|
codereview_new_python_data_12907
|
async def test_wait_for_trigger_variables(hass: HomeAssistant) -> None:
actions = [
{
"alias": "variables",
- "variables": {"seconds": 0.01},
},
{
"alias": wait_alias,
"wait_for_trigger": {
- "platform": "state",
- "entity_id": "switch.test",
- "to": "off",
- "for": {"seconds": "{{ seconds }}"},
},
},
]
`async_wait_for_task_count` was really overly complicated, so it's good to see it go.
I think this change means the test is now stalling for 10ms, which is short enough that the test may be flaky?
The point of the test is to make sure trigger variables work. Can't we achieve that by templating something in the `wait_for_action` other than a friggin timeout?
async def test_wait_for_trigger_variables(hass: HomeAssistant) -> None:
actions = [
{
"alias": "variables",
+ "variables": {"state": "off"},
},
{
"alias": wait_alias,
"wait_for_trigger": {
+ "platform": "template",
+ "value_template": "{{ states.switch.test.state == state }}",
},
},
]
|
codereview_new_python_data_12908
|
class ReolinkSelectEntityDescription(
key="ptz_preset",
name="PTZ preset",
icon="mdi:pan",
- get_options=lambda api, ch: list(api.ptz_presets(ch).keys()),
supported=lambda api, ch: api.supported(ch, "ptz_presets"),
method=lambda api, ch, name: api.set_ptz_command(ch, preset=name),
),
Calling `.keys()` should not be needed.
```suggestion
get_options=lambda api, ch: list(api.ptz_presets(ch)),
```
class ReolinkSelectEntityDescription(
key="ptz_preset",
name="PTZ preset",
icon="mdi:pan",
+ get_options=lambda api, ch: list(api.ptz_presets(ch)),
supported=lambda api, ch: api.supported(ch, "ptz_presets"),
method=lambda api, ch, name: api.set_ptz_command(ch, preset=name),
),
|
codereview_new_python_data_12909
|
def _update_tilt(self, result):
@property
def is_closed(self) -> bool | None:
"""Return if the cover is closed."""
- if not self._position_template and not self._template and not self._optimistic:
return None
-
return self._position == 0
@property
Should this check instead be like this, since the templates may return an invalid response, in which case the position is set to None:
```suggestion
if self._position is None:
return None
```
def _update_tilt(self, result):
@property
def is_closed(self) -> bool | None:
"""Return if the cover is closed."""
+ if self._position is None:
return None
return self._position == 0
@property
|
codereview_new_python_data_12910
|
async def async_turn_off(self, **kwargs: Any) -> None:
key="stowed",
name="Stowed",
device_class=SwitchDeviceClass.SWITCH,
- entity_registry_enabled_default=False,
value_fn=lambda data: data.status["state"] == "STOWED",
turn_on_fn=lambda coordinator: coordinator.async_stow_starlink(True),
turn_off_fn=lambda coordinator: coordinator.async_stow_starlink(False),
This one seems like it's not going to update frequently?
When we think about "frequently" we are generally thinking about time in seconds.
The ones we really want disabled by default are things like RSSI, voltage, etc. sensors that are updating every few seconds.
async def async_turn_off(self, **kwargs: Any) -> None:
key="stowed",
name="Stowed",
device_class=SwitchDeviceClass.SWITCH,
value_fn=lambda data: data.status["state"] == "STOWED",
turn_on_fn=lambda coordinator: coordinator.async_stow_starlink(True),
turn_off_fn=lambda coordinator: coordinator.async_stow_starlink(False),
|
codereview_new_python_data_12911
|
async def async_setup_entry(
class ReolinkUpdateEntity(ReolinkBaseCoordinatorEntity, UpdateEntity):
"""Update entity for a Netgear device."""
- _attr_has_entity_name = True
_attr_device_class = UpdateDeviceClass.FIRMWARE
_attr_supported_features = UpdateEntityFeature.INSTALL
You could move `_attr_has_entity_name` to the base class, I would think
async def async_setup_entry(
class ReolinkUpdateEntity(ReolinkBaseCoordinatorEntity, UpdateEntity):
"""Update entity for a Netgear device."""
_attr_device_class = UpdateDeviceClass.FIRMWARE
_attr_supported_features = UpdateEntityFeature.INSTALL
|
codereview_new_python_data_12912
|
async def async_handle(self, intent_obj: Intent) -> IntentResponse:
if not states:
raise IntentHandleError(
- "No entities matched for: " f"name={name}, " f"area={area}, ",
- f"domains={domains}, " f"device_classes={device_classes}",
)
response = await self.async_handle_states(intent_obj, states, area)
Weird formatting here.
async def async_handle(self, intent_obj: Intent) -> IntentResponse:
if not states:
raise IntentHandleError(
+ f"No entities matched for: name={name}, area={area}, domains={domains}, device_classes={device_classes}",
)
response = await self.async_handle_states(intent_obj, states, area)
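A small sketch of the pitfall behind the "weird formatting": adjacent string literals concatenate implicitly, while the stray comma in the original turned the message into two separate call arguments.
```python
one_string = "No entities matched for: " "name=x"  # implicit concatenation
two_args = ("No entities matched for: ", "name=x")  # comma makes a tuple
assert one_string == "No entities matched for: name=x"
assert len(two_args) == 2
```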
|
codereview_new_python_data_12913
|
def _ha_orm_quote(mixed, ident):
made_url = sa.make_url(db_url)
db = made_url.database
engine = sa.create_engine(db_url)
- # Kill any open connections to the database before dropping it
# to ensure that InnoDB does not deadlock.
with engine.begin() as connection:
query = sa.text(
```suggestion
# Check for any open connections to the database before dropping it
```
def _ha_orm_quote(mixed, ident):
made_url = sa.make_url(db_url)
db = made_url.database
engine = sa.create_engine(db_url)
+ # Check for any open connections to the database before dropping it
# to ensure that InnoDB does not deadlock.
with engine.begin() as connection:
query = sa.text(
|
codereview_new_python_data_12914
|
def override_async_setup_entry() -> Generator[AsyncMock, None, None]:
@pytest.fixture
-def filled_device_registry(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dr.DeviceRegistry:
"""Fill device registry with mock devices."""
We're using the async API, so we should make sure the fixture runs inside the event loop.
```suggestion
async def filled_device_registry(
```
def override_async_setup_entry() -> Generator[AsyncMock, None, None]:
@pytest.fixture
+async def filled_device_registry(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dr.DeviceRegistry:
"""Fill device registry with mock devices."""
|
codereview_new_python_data_12915
|
class ClassTypeHintMatch:
"caplog": "pytest.LogCaptureFixture",
"device_registry": "DeviceRegistry",
"entity_registry": "EntityRegistry",
- "issue_registry": "IssueRegistry",
"hass_client": "ClientSessionGenerator",
"hass_client_no_auth": "ClientSessionGenerator",
"hass_ws_client": "WebSocketGenerator",
"mqtt_client_mock": "MqttMockPahoClient",
"mqtt_mock": "MqttMockHAClient",
"mqtt_mock_entry_no_yaml_config": "MqttMockHAClientGenerator",
I think we should keep these in alphabetical order.
class ClassTypeHintMatch:
"caplog": "pytest.LogCaptureFixture",
"device_registry": "DeviceRegistry",
"entity_registry": "EntityRegistry",
"hass_client": "ClientSessionGenerator",
"hass_client_no_auth": "ClientSessionGenerator",
"hass_ws_client": "WebSocketGenerator",
+ "issue_registry": "IssueRegistry",
"mqtt_client_mock": "MqttMockPahoClient",
"mqtt_mock": "MqttMockHAClient",
"mqtt_mock_entry_no_yaml_config": "MqttMockHAClientGenerator",
|
codereview_new_python_data_12916
|
async def async_process_audio_stream(
@pytest.fixture
-def test_provider() -> None:
"""Test provider fixture."""
return TestProvider()
This fixture does not return `None`
async def async_process_audio_stream(
@pytest.fixture
+def test_provider():
"""Test provider fixture."""
return TestProvider()
|
codereview_new_python_data_12917
|
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
self._check_function(node, match, annotations)
# Check method matchers.
- if node.is_method():
- for match in _METHOD_MATCH["__any_class__"]:
- if not match.need_to_check_function(node):
- continue
- self._check_function(node, match, annotations)
visit_asyncfunctiondef = visit_functiondef
I'd extract the `not is_method()` check from here.
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
self._check_function(node, match, annotations)
# Check method matchers.
+ for match in _METHOD_MATCH["__any_class__"]:
+ if not match.need_to_check_function(node) or not node.is_method():
+ continue
+ self._check_function(node, match, annotations)
visit_asyncfunctiondef = visit_functiondef
|
codereview_new_python_data_12918
|
async def async_setup_entry(
) -> None:
"""Set up the lock platform for Dormakaba dKey."""
data: DormakabaDkeyData = hass.data[DOMAIN][entry.entry_id]
- async_add_entities([DormakabaDkeyLock(data.coordinator, data.lock, entry.title)])
-class DormakabaDkeyLock(CoordinatorEntity, LockEntity):
"""Representation of Dormakaba dKey lock."""
_attr_has_entity_name = True
def __init__(
- self, coordinator: DataUpdateCoordinator, lock: DKEYLock, name: str
) -> None:
"""Initialize a Dormakaba dKey lock."""
super().__init__(coordinator)
```suggestion
self, coordinator: DataUpdateCoordinator[None], lock: DKEYLock, name: str
```
async def async_setup_entry(
) -> None:
"""Set up the lock platform for Dormakaba dKey."""
data: DormakabaDkeyData = hass.data[DOMAIN][entry.entry_id]
+ async_add_entities([DormakabaDkeyLock(data.coordinator, data.lock)])
+class DormakabaDkeyLock(CoordinatorEntity[DataUpdateCoordinator[None]], LockEntity):
"""Representation of Dormakaba dKey lock."""
_attr_has_entity_name = True
def __init__(
+ self, coordinator: DataUpdateCoordinator[None], lock: DKEYLock
) -> None:
"""Initialize a Dormakaba dKey lock."""
super().__init__(coordinator)
|
codereview_new_python_data_12919
|
async def async_setup_entry(
) -> None:
"""Set up the lock platform for Dormakaba dKey."""
data: DormakabaDkeyData = hass.data[DOMAIN][entry.entry_id]
- async_add_entities([DormakabaDkeyLock(data.coordinator, data.lock, entry.title)])
-class DormakabaDkeyLock(CoordinatorEntity, LockEntity):
"""Representation of Dormakaba dKey lock."""
_attr_has_entity_name = True
def __init__(
- self, coordinator: DataUpdateCoordinator, lock: DKEYLock, name: str
) -> None:
"""Initialize a Dormakaba dKey lock."""
super().__init__(coordinator)
The `name` parameter isn't used.
async def async_setup_entry(
) -> None:
"""Set up the lock platform for Dormakaba dKey."""
data: DormakabaDkeyData = hass.data[DOMAIN][entry.entry_id]
+ async_add_entities([DormakabaDkeyLock(data.coordinator, data.lock)])
+class DormakabaDkeyLock(CoordinatorEntity[DataUpdateCoordinator[None]], LockEntity):
"""Representation of Dormakaba dKey lock."""
_attr_has_entity_name = True
def __init__(
+ self, coordinator: DataUpdateCoordinator[None], lock: DKEYLock
) -> None:
"""Initialize a Dormakaba dKey lock."""
super().__init__(coordinator)
|
codereview_new_python_data_12920
|
class DormakabaDkeyData:
title: str
lock: DKEYLock
- coordinator: DataUpdateCoordinator
```suggestion
coordinator: DataUpdateCoordinator[None]
```
class DormakabaDkeyData:
title: str
lock: DKEYLock
+ coordinator: DataUpdateCoordinator[None]
|
codereview_new_python_data_12921
|
async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
- entities = list[ScreenLogicSensorEntity]()
coordinator = hass.data[DOMAIN][config_entry.entry_id]
equipment_flags = coordinator.gateway.get_data()[SL_DATA.KEY_CONFIG][
"equipment_flags"
This typing doesn't look right
async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
+ entities: list[ScreenLogicSensorEntity] = []
coordinator = hass.data[DOMAIN][config_entry.entry_id]
equipment_flags = coordinator.gateway.get_data()[SL_DATA.KEY_CONFIG][
"equipment_flags"
|
codereview_new_python_data_12922
|
async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
- entities = list[ScreenLogicBinarySensorEntity]()
coordinator = hass.data[DOMAIN][config_entry.entry_id]
# Generic binary sensor
This typing doesn't look right
async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
+ entities: list[ScreenLogicBinarySensorEntity] = []
coordinator = hass.data[DOMAIN][config_entry.entry_id]
# Generic binary sensor
|
codereview_new_python_data_12923
|
async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
- entities = list[ScreenLogicBinarySensorEntity]()
coordinator = hass.data[DOMAIN][config_entry.entry_id]
# Generic binary sensor
```suggestion
entities: list[ScreenLogicBinarySensorEntity] = []
```
async def async_setup_entry(
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up entry."""
+ entities: list[ScreenLogicBinarySensorEntity] = []
coordinator = hass.data[DOMAIN][config_entry.entry_id]
# Generic binary sensor
|
codereview_new_python_data_12924
|
# C=0: General purpose objects
# D=0: Free ID-numbers for utilities
# E=0 Ownership ID
- SensorEntityDescription(key="1-0:0.0.0*255", name="Ownership ID", icon="mdi:flash"),
# E=9: Electricity ID
SensorEntityDescription(
key="1-0:0.0.9*255", name="Electricity ID", icon="mdi:flash"
Is this value interesting for most users or should we disable it by default?
https://developers.home-assistant.io/docs/entity_registry_disabled_by#integrations-setting-default-value-of-disabled_by-for-new-entity-registry-entries
# C=0: General purpose objects
# D=0: Free ID-numbers for utilities
# E=0 Ownership ID
+ SensorEntityDescription(
+ key="1-0:0.0.0*255",
+ name="Ownership ID",
+ icon="mdi:flash",
+ entity_registry_enabled_default=False,
+ ),
# E=9: Electricity ID
SensorEntityDescription(
key="1-0:0.0.9*255", name="Electricity ID", icon="mdi:flash"
|
codereview_new_python_data_12925
|
def __init__(
) -> None:
"""Initialize the Yale Lock Device."""
super().__init__(coordinator, data)
- self._attr_code_format = "^\\d{" + str(code_format) + "}$"
self.lock_name: str = data["name"]
async def async_unlock(self, **kwargs: Any) -> None:
Please use f-strings...
```suggestion
self._attr_code_format = f"^\\d{{{code_format}}}$"
```
def __init__(
) -> None:
"""Initialize the Yale Lock Device."""
super().__init__(coordinator, data)
+ self._attr_code_format = f"^\\d{{{code_format}}}$"
self.lock_name: str = data["name"]
async def async_unlock(self, **kwargs: Any) -> None:
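A standalone check of the brace-escaping rule used in the suggestion: inside an f-string, doubled braces are literal and single braces interpolate, so `{{{code_format}}}` renders as `{<value>}`.
```python
code_format = 4  # hypothetical digit count
pattern = f"^\\d{{{code_format}}}$"
assert pattern == "^\\d{4}$"  # a regex matching exactly four digits
```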
|
codereview_new_python_data_12926
|
async def async_step_hassio(self, discovery_info: HassioServiceInfo) -> FlowResu
api = python_otbr_api.OTBR(url, async_get_clientsession(self.hass), 10)
try:
if await api.get_active_dataset_tlvs() is None:
- await api.async_create_active_dataset(
python_otbr_api.OperationalDataSet(network_name="home-assistant")
)
- await api.async_set_enabled(True)
except python_otbr_api.OTBRError as exc:
_LOGGER.warning("Failed to communicate with OTBR@%s: %s", url, exc)
return self.async_abort(reason="unknown")
We should first ask the Thread integration if it has a preferred one. Do you want to do that in a follow-up PR?
async def async_step_hassio(self, discovery_info: HassioServiceInfo) -> FlowResu
api = python_otbr_api.OTBR(url, async_get_clientsession(self.hass), 10)
try:
if await api.get_active_dataset_tlvs() is None:
+ await api.create_active_dataset(
python_otbr_api.OperationalDataSet(network_name="home-assistant")
)
+ await api.set_enabled(True)
except python_otbr_api.OTBRError as exc:
_LOGGER.warning("Failed to communicate with OTBR@%s: %s", url, exc)
return self.async_abort(reason="unknown")
|
codereview_new_python_data_12927
|
class NAMSensorEntityDescription(SensorEntityDescription, NAMSensorRequiredKeysM
device_class=SensorDeviceClass.TIMESTAMP,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
- value=lambda sensors: utcnow()
- - timedelta(seconds=getattr(sensors, "uptime", 0)),
),
)
Multi-line lambdas should be avoided. Does this work?
```suggestion
value=lambda sensors: utcnow() - timedelta(seconds=sensors.uptime or 0),
```
class NAMSensorEntityDescription(SensorEntityDescription, NAMSensorRequiredKeysM
device_class=SensorDeviceClass.TIMESTAMP,
entity_registry_enabled_default=False,
entity_category=EntityCategory.DIAGNOSTIC,
+ value=lambda sensors: utcnow() - timedelta(seconds=sensors.uptime or 0),
),
)
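A quick check of the `or 0` fallback that makes the one-line lambda work when the uptime is unset (values hypothetical):
```python
from datetime import timedelta

uptime = None  # stand-in for sensors.uptime when unset
assert timedelta(seconds=uptime or 0) == timedelta(0)
uptime = 120
assert timedelta(seconds=uptime or 0) == timedelta(seconds=120)
```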
|
codereview_new_python_data_12928
|
def is_numeric(self) -> bool:
"""Return true if the sensor is numeric."""
if (
self.state_class is not None
- or self.unit_of_measurement is not None
or self.native_precision is not None
):
return True
I think this should use the native unit.
```suggestion
or self.native_unit_of_measurement is not None
```
def is_numeric(self) -> bool:
"""Return true if the sensor is numeric."""
if (
self.state_class is not None
+ or self.native_unit_of_measurement is not None
or self.native_precision is not None
):
return True
|
codereview_new_python_data_12929
|
async def async_added_to_hass(self) -> None:
def message_received(message):
"""Handle new MQTT messages."""
if message.payload == "":
- # On empty payload, reset the sensor value or ignore update
- if self._attr_native_value is not None:
- self._attr_native_value = None
- else:
- return
elif self.entity_description.state is not None:
# Perform optional additional parsing
self._attr_native_value = self.entity_description.state(message.payload)
This can be simplified to just this:
```suggestion
self._attr_native_value = None
```
The state can always be written; it won't cause changes if the state is already the same (unless forced, which doesn't seem to be the case here).
../Frenck
async def async_added_to_hass(self) -> None:
def message_received(message):
"""Handle new MQTT messages."""
if message.payload == "":
+ self._attr_native_value = None
elif self.entity_description.state is not None:
# Perform optional additional parsing
self._attr_native_value = self.entity_description.state(message.payload)
|
codereview_new_python_data_12930
|
async def async_setup_platform(
[
GeniusSwitch(broker, z)
for z in broker.client.zone_objs
- if 'type' in z.data and z.data["type"] == GH_ON_OFF_ZONE
]
)
```suggestion
if z.data.get("type") == GH_ON_OFF_ZONE
```
async def async_setup_platform(
[
GeniusSwitch(broker, z)
for z in broker.client.zone_objs
+ if z.data.get("type") == GH_ON_OFF_ZONE
]
)
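A one-liner confirming the suggested simplification (toy dict): `dict.get` collapses the membership-plus-equality check into a single expression.
```python
data = {"type": "on_off"}  # toy zone data
assert ("type" in data and data["type"] == "on_off") == (data.get("type") == "on_off")
assert {}.get("type") != "on_off"  # missing key: get returns None, so the comparison is False
```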
|
codereview_new_python_data_12931
|
def system_set(cloud_enabled):
async def test_switch_handles_requesterror(
hass, mock_config_entry_data, mock_config_entry
):
- """Test entity pytest.raises HomeAssistantError when RequestError was raised."""
api = get_mock_device(product_type="HWE-SKT", firmware_version="3.02")
api.state = AsyncMock(
```suggestion
"""Test entity raises HomeAssistantError when RequestError was raised."""
```
def system_set(cloud_enabled):
async def test_switch_handles_requesterror(
hass, mock_config_entry_data, mock_config_entry
):
+ """Test entity raises HomeAssistantError when RequestError was raised."""
api = get_mock_device(product_type="HWE-SKT", firmware_version="3.02")
api.state = AsyncMock(
|
codereview_new_python_data_12932
|
def _migrate_columns_to_timestamp(
)
)
elif engine.dialect.name == SupportedDialect.MYSQL:
- # With MySQL we do this in chunks to avoid locking the table for too long.
# We also need to do this in a loop since we can't be sure that we have
# updated all rows in the table.
result: CursorResult | None = None
```suggestion
# With MySQL we do this in chunks to avoid hitting the `innodb_buffer_pool_size` limit
```
def _migrate_columns_to_timestamp(
)
)
elif engine.dialect.name == SupportedDialect.MYSQL:
+ # With MySQL we do this in chunks to avoid hitting the `innodb_buffer_pool_size` limit
# We also need to do this in a loop since we can't be sure that we have
# updated all rows in the table.
result: CursorResult | None = None
|
codereview_new_python_data_12933
|
class AttributeEnumMapping(NamedTuple):
attr_name=ATTR_MODE,
name="Mode",
set_method="set_mode",
- set_method_error_message="Setting the mode of the fan",
icon="mdi:fan",
translation_key="mode",
options=["silent", "auto", "favorite"],
```suggestion
set_method_error_message="Setting the mode of the fan failed.",
```
class AttributeEnumMapping(NamedTuple):
attr_name=ATTR_MODE,
name="Mode",
set_method="set_mode",
+ set_method_error_message="Setting the mode of the fan failed.",
icon="mdi:fan",
translation_key="mode",
options=["silent", "auto", "favorite"],
|
codereview_new_python_data_12934
|
class AttributeEnumMapping(NamedTuple):
attr_name=ATTR_MODE,
name="Mode",
set_method="set_mode",
- set_method_error_message="Setting the mode of the fan",
icon="mdi:fan",
translation_key="mode",
options=["silent", "auto", "favorite"],
Should these also be uppercased? Or rather, stay lowercase and have the translated strings be uppercased?
class AttributeEnumMapping(NamedTuple):
attr_name=ATTR_MODE,
name="Mode",
set_method="set_mode",
+ set_method_error_message="Setting the mode of the fan failed.",
icon="mdi:fan",
translation_key="mode",
options=["silent", "auto", "favorite"],
|
codereview_new_python_data_12935
|
async def async_step_user(
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
- return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
```suggestion
return self.async_create_entry(title="OpenAI Conversation", data=user_input)
```
async def async_step_user(
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
+ return self.async_create_entry(title="OpenAI Conversation", data=user_input)
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
|
codereview_new_python_data_12936
|
You are a conversational AI for a smart home named {{ ha_name }}.
If a user wants to control a device, reject the request and suggest using the Home Assistant UI.
-The people living in this smart home are:
-
-{% for state in states.person -%}
-- {{ state.name }} is {{state.state}}
-{%- endfor %}
-
An overview of the areas and the devices in this smart home:
{% for area in areas %}
{{ area.name }}:
Won't this mean we send the names of our users to a cloud service?
You are a conversational AI for a smart home named {{ ha_name }}.
If a user wants to control a device, reject the request and suggest using the Home Assistant UI.
An overview of the areas and the devices in this smart home:
{% for area in areas %}
{{ area.name }}:
|
codereview_new_python_data_12937
|
def _handle_coordinator_update(self) -> None:
class SwitchBotBlindTiltEntity(SwitchbotEntity, CoverEntity, RestoreEntity):
"""Representation of a Switchbot."""
- _device: switchbot.SwitchbotCurtain
_attr_device_class = CoverDeviceClass.CURTAIN
_attr_supported_features = (
CoverEntityFeature.OPEN_TILT
```suggestion
_device: switchbot.SwitchbotBlindTilt
```
def _handle_coordinator_update(self) -> None:
class SwitchBotBlindTiltEntity(SwitchbotEntity, CoverEntity, RestoreEntity):
"""Representation of a Switchbot."""
+ _device: switchbot.SwitchbotBlindTilt
_attr_device_class = CoverDeviceClass.CURTAIN
_attr_supported_features = (
CoverEntityFeature.OPEN_TILT
|
codereview_new_python_data_12938
|
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
- """Set up the Thread integration."""
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
Extra space in the docstring.
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
+ """Set up the Thread integration."""
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
|
codereview_new_python_data_12939
|
class HoneywellSensorEntityDescription(
SENSOR_TYPES: tuple[HoneywellSensorEntityDescription, ...] = (
HoneywellSensorEntityDescription(
key=TEMPERATURE_STATUS_KEY,
- name="Outdoor Temperature",
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
value_fn=lambda device: device.outdoor_temperature,
```suggestion
name="Outdoor temperature",
```
class HoneywellSensorEntityDescription(
SENSOR_TYPES: tuple[HoneywellSensorEntityDescription, ...] = (
HoneywellSensorEntityDescription(
key=TEMPERATURE_STATUS_KEY,
+ name="Outdoor temperature",
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
value_fn=lambda device: device.outdoor_temperature,
|
codereview_new_python_data_12940
|
async def test_scene_activate(hass, scene):
assert state.attributes["icon"] == scene.icon
assert state.attributes["color"] == scene.color
assert state.attributes["location_id"] == scene.location_id
- # pylint: disable-next=protected-access
assert scene.execute.call_count == 1 # type: ignore
Not sure this would actually raise `protected-access`. Haven't checked though.
async def test_scene_activate(hass, scene):
assert state.attributes["icon"] == scene.icon
assert state.attributes["color"] == scene.color
assert state.attributes["location_id"] == scene.location_id
assert scene.execute.call_count == 1 # type: ignore
|
codereview_new_python_data_12941
|
async def async_step_user(
await self.async_set_unique_id(DOMAIN)
return self.async_create_entry(
title="Thread",
- data={"url": url},
)
data_schema = vol.Schema({CONF_URL: str})
Already guarded by the schema, so we can directly use it.
```suggestion
data=user_input,
```
async def async_step_user(
await self.async_set_unique_id(DOMAIN)
return self.async_create_entry(
title="Thread",
+ data=user_input,
)
data_schema = vol.Schema({CONF_URL: str})
|
codereview_new_python_data_12942
|
def __init__(
self.entity_description = entity_description
self._unsubscribes: list[Callable] = []
# for fast lookups we create a mapping to the attribute paths
self._attributes_map: dict[type, str] = {}
server_info = cast(ServerInfo, self.matter_client.server_info)
- # The server info is set when the client connects to the server.
- assert server_info is not None
# create unique_id based on "Operational Instance Name" and endpoint/device type
self._attr_unique_id = (
f"{get_operational_instance_id(server_info, self._node_device.node())}-"
We shouldn't both need to cast and assert.
def __init__(
self.entity_description = entity_description
self._unsubscribes: list[Callable] = []
# for fast lookups we create a mapping to the attribute paths
+ # The server info is set when the client connects to the server.
self._attributes_map: dict[type, str] = {}
server_info = cast(ServerInfo, self.matter_client.server_info)
# create unique_id based on "Operational Instance Name" and endpoint/device type
self._attr_unique_id = (
f"{get_operational_instance_id(server_info, self._node_device.node())}-"
|
codereview_new_python_data_12943
|
def get_operational_instance_id(
node: MatterNode,
) -> str:
"""Return `Operational Instance Name` for given MatterNode."""
- fab_id_hex = f"{server_info.compressed_fabric_id:016X}"
node_id_hex = f"{node.node_id:016X}"
# operational instance id matches the mdns advertisement for the node
# this is the recommended ID to recognize a unique matter node (within a fabric)
- return f"{fab_id_hex}-{node_id_hex}"
def get_device_id(
```suggestion
return f"{fabric_id_hex}-{node_id_hex}"
```
def get_operational_instance_id(
node: MatterNode,
) -> str:
"""Return `Operational Instance Name` for given MatterNode."""
+ fabric_id_hex = f"{server_info.compressed_fabric_id:016X}"
node_id_hex = f"{node.node_id:016X}"
# operational instance id matches the mdns advertisement for the node
# this is the recommended ID to recognize a unique matter node (within a fabric)
+ return f"{fabric_id_hex}-{node_id_hex}"
def get_device_id(
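A small check of the format spec used here: `016X` means zero-padded to 16 characters, uppercase hexadecimal.
```python
assert f"{0xDEAD:016X}" == "000000000000DEAD"  # 16 chars, upper-case hex
```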
|
codereview_new_python_data_12944
|
def __init__(
description: NumberEntityDescription,
device_info: DeviceInfo | None,
) -> None:
- """Initialize the ISY Backlight Select entity."""
super().__init__(node, control, unique_id, description, device_info)
self._attr_native_value = 0
```suggestion
"""Initialize the ISY Backlight number entity."""
```
def __init__(
description: NumberEntityDescription,
device_info: DeviceInfo | None,
) -> None:
+ """Initialize the ISY Backlight number entity."""
super().__init__(node, control, unique_id, description, device_info)
self._attr_native_value = 0
|
codereview_new_python_data_12945
|
async def _telnet_callback(self, zone, event, parameter):
if zone != self._receiver.zone:
return
- self.async_schedule_update_ha_state(False)
async def async_will_remove_from_hass(self) -> None:
"""Clean up the entity."""
```suggestion
self.async_write_ha_state()
```
Assuming the object already has the new state, we don't need to schedule an update and can just write the state.
async def _telnet_callback(self, zone, event, parameter):
if zone != self._receiver.zone:
return
+ self.async_write_ha_state()
async def async_will_remove_from_hass(self) -> None:
"""Clean up the entity."""
|
codereview_new_python_data_12946
|
def device_class(self) -> SensorDeviceClass | None:
if self.entity_description.device_class_fn:
# Note: using self.state could infloop here.
return self.entity_description.device_class_fn(self.native_value)
- return self.entity_description.device_class
@property
def last_reset(self) -> datetime | None:
```suggestion
return super().device_class
```
def device_class(self) -> SensorDeviceClass | None:
if self.entity_description.device_class_fn:
# Note: using self.state could infloop here.
return self.entity_description.device_class_fn(self.native_value)
+ return super().device_class
@property
def last_reset(self) -> datetime | None:
|
codereview_new_python_data_12947
|
async def test_integration_reload(
@pytest.mark.parametrize("do_config", [{}])
async def test_integration_reload_failed(hass, caplog, mock_modbus) -> None:
- """Test setup fails."""
caplog.set_level(logging.INFO)
caplog.clear()
Minor tweak - looks good otherwise.
```suggestion
"""Run test for integration connect failure on reload."""
```
async def test_integration_reload(
@pytest.mark.parametrize("do_config", [{}])
async def test_integration_reload_failed(hass, caplog, mock_modbus) -> None:
+ """Run test for integration connect failure on reload."""
caplog.set_level(logging.INFO)
caplog.clear()
|
codereview_new_python_data_13011
|
def _RenderConfig(vm,
# scale with the cluster.
num_reduce_tasks = reduces_per_node * num_workers
if _BLOCKSIZE_OVERRIDE.value:
- block_size = _BLOCKSIZE_OVERRIDE * 1024 * 1024
if vm.scratch_disks:
# TODO(pclay): support multiple scratch disks. A current suboptimal
```suggestion
block_size = _BLOCKSIZE_OVERRIDE.value * 1024 * 1024
```
def _RenderConfig(vm,
# scale with the cluster.
num_reduce_tasks = reduces_per_node * num_workers
if _BLOCKSIZE_OVERRIDE.value:
+ block_size = _BLOCKSIZE_OVERRIDE.value * 1024 * 1024
if vm.scratch_disks:
# TODO(pclay): support multiple scratch disks. A current suboptimal
|
codereview_new_python_data_13012
|
def _PullJobMetrics(self, force_refresh=False):
return
# Raise exception if job id not available
if self.job_id is None:
- raise ValueError('Unable to pull job metrics. Job ID not available')
cmd = util.GcloudCommand(self, 'dataflow', 'metrics',
'list', self.job_id)
Let's raise this error in the SubmitJob method instead, where you parse the ID (to make it fail fast). Also, I wouldn't use ValueError here; it implies that the error is raised because a bad argument was passed (which I don't think is necessarily the case). A generic Exception would do fine here.
def _PullJobMetrics(self, force_refresh=False):
return
# Raise exception if job id not available
if self.job_id is None:
+ raise Exception('Unable to pull job metrics. Job ID not available')
cmd = util.GcloudCommand(self, 'dataflow', 'metrics',
'list', self.job_id)
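A hedged sketch of the fail-fast shape the reviewer suggests; the class, method, and parsing details below are hypothetical stand-ins, not the benchmark's actual code:
```python
# Hypothetical sketch: validate the job ID where it is parsed, so a bad
# submission fails immediately rather than when metrics are first pulled.
import re


class JobSubmissionError(Exception):
  """Raised when a submitted job cannot be tracked."""


class DataflowService:

  def SubmitJob(self, submission_output: str) -> None:
    # The pattern is illustrative; the real output format may differ.
    match = re.search(r'Submitted job: (\S+)', submission_output)
    if match is None:
      raise JobSubmissionError(
          'Unable to parse job ID from submission output.')
    self.job_id = match.group(1)
```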
|
codereview_new_python_data_13019
|
def _create_workspace(self, ws_2D=True, sample=True, xAx=True, yAxSpec=True, yAx
if not yAxMt and not yAxSpec:
ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
LoadInstrument(ws, True, InstrumentName="TOFTOF")
- ouput_ws = ConvertSpectrumAxis(InputWorkspace=ws, Target="theta", EMode="Direct")
- return ouput_ws
if not yAxSpec and yAxMt:
ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
LoadInstrument(ws, True, InstrumentName="TOFTOF")
```suggestion
output_ws = ConvertSpectrumAxis(InputWorkspace=ws, Target="theta", EMode="Direct")
return output_ws
```
My spelling mistake!
def _create_workspace(self, ws_2D=True, sample=True, xAx=True, yAxSpec=True, yAx
if not yAxMt and not yAxSpec:
ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
LoadInstrument(ws, True, InstrumentName="TOFTOF")
+ output_ws = ConvertSpectrumAxis(InputWorkspace=ws, Target="theta", EMode="Direct")
+ return output_ws
if not yAxSpec and yAxMt:
ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
LoadInstrument(ws, True, InstrumentName="TOFTOF")
|
codereview_new_python_data_13022
|
def waterfall_reverse_line_order(self):
for cap in errorbar_cap_lines:
ax.add_line(cap)
- if LooseVersion("3.7") > LooseVersion(matplotlib.__version__) >= LooseVersion("3.2"):
- for line_fill in fills:
- if line_fill not in ax.collections:
- ax.add_collection(line_fill)
- else:
- raise NotImplementedError(
- "ArtistList will become an immutable tuple in matplotlib 3.7 and thus, " "this code doesn't work anymore."
- )
ax.collections.reverse()
ax.update_waterfall(x, y)
I wonder if we can just remove the version checks here and replace them with a comment. Presumably the code will break in 3.7 anyway, so we don't need to raise our own exception.
def waterfall_reverse_line_order(self):
for cap in errorbar_cap_lines:
ax.add_line(cap)
+
+ """ArtistList will become an immutable tuple in matplotlib 3.7 which will prevent iterating through line_fill"""
+ for line_fill in fills:
+ if line_fill not in ax.collections:
+ ax.add_collection(line_fill)
+
ax.collections.reverse()
ax.update_waterfall(x, y)
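If forward compatibility with matplotlib 3.7 is wanted, one option (a sketch against matplotlib's public API, not code from this PR) is to detach the artists and re-add them in the desired order instead of mutating `ax.collections`, which becomes an immutable `ArtistList` view there:
```python
# Sketch: reorder collections without mutating ax.collections, which is
# immutable from matplotlib 3.7 onwards.
def reverse_collections(ax):
    artists = list(ax.collections)   # snapshot the (possibly immutable) view
    for artist in artists:
        artist.remove()              # detach each collection from the axes
    for artist in reversed(artists):
        ax.add_collection(artist)    # re-add in reversed draw order
```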
|
codereview_new_python_data_13028
|
def plot(
legend = ax.get_legend()
if legend is not None:
legend.set_visible(show_legend)
- # Stop legend messing with the tight layout
legend.set_in_layout(False)
# Can't have a waterfall plot with only one line.
```suggestion
# Stop legend interfering with the tight layout
```
def plot(
legend = ax.get_legend()
if legend is not None:
legend.set_visible(show_legend)
+ # Stop legend interfering with the tight layout
legend.set_in_layout(False)
# Can't have a waterfall plot with only one line.
|
codereview_new_python_data_13031
|
from qtpy.QtCore import Slot
from qtpy.QtGui import QIcon
class DetachableTabWidget(QtWidgets.QTabWidget):
"""
I think we should keep this acknowledgement of the "borrowed" code
from qtpy.QtCore import Slot
from qtpy.QtGui import QIcon
+"""
+Original code by user Blackwood, Jan 2018.
+https://stackoverflow.com/questions/47267195/in-pyqt4-is-it-possible-to-detach-tabs-from-a-qtabwidget
+"""
+
class DetachableTabWidget(QtWidgets.QTabWidget):
"""
|
codereview_new_python_data_13037
|
def _cut_names(self, cut: str):
return xcut_name, ycut_name, help_msg
- @staticmethod
- def get_dim_indices(slice_point: List[float], qdims: List[int], transpose: bool) -> Tuple[int]:
- """Returns the indices of the selected x and y axes. It also returns the index of the first non x or y axis."""
- xdim, ydim = WorkspaceInfo.display_indices(slice_point, transpose)
- # Get the index of the first axis which is not x or y. Here it is called z.
- zdim = next(i for i, v in enumerate(slice_point) if v is not None and i in qdims)
- return xdim, ydim, zdim
-
- def get_hkl_from_full_point(self, full_point: List[float], qdims: List[int], xdim: int, ydim: int, zdim: int):
"""Gets the values of h, k and l from a full point which can include 3 or more dimensions."""
basis_transform = self.get_proj_matrix()
if basis_transform is None:
return 0.0, 0.0, 0.0
- # Get the values for h, k and l
- hkl_point = tuple(full_point[q_i] for q_i in qdims)
- h_i, k_i, l_i = np.argsort([xdim, ydim, zdim])
- return (
- basis_transform[:, h_i] * hkl_point[h_i] + basis_transform[:, k_i] * hkl_point[k_i] + basis_transform[:, l_i] * hkl_point[l_i]
- )
# private functions
Not sure this is working as expected - this assumes there is only one Q-dim with a slicepoint (see screenshot).
`xdim` and `ydim` may be non-Q dimensions as well - but I think that's handled OK...
def _cut_names(self, cut: str):
return xcut_name, ycut_name, help_msg
+ def get_hkl_from_full_point(self, full_point: List[float], qdims_i: List[int]):
"""Gets the values of h, k and l from a full point which can include 3 or more dimensions."""
basis_transform = self.get_proj_matrix()
if basis_transform is None:
return 0.0, 0.0, 0.0
+ # Get the values for the h, k and l projections
+ hkl_projection = tuple(full_point[q_i] for q_i in qdims_i)
+ return np.matmul(basis_transform, hkl_projection)
# private functions
|
codereview_new_python_data_13038
|
def get_hkl_from_full_point(self, full_point: List[float], qdims_i: List[int]):
if basis_transform is None:
return 0.0, 0.0, 0.0
- # Get the values for the h, k and l projections
- hkl_projection = tuple(full_point[q_i] for q_i in qdims_i)
- return np.matmul(basis_transform, hkl_projection)
# private functions
Really minor but I think `hkl_projection` is not a good name, perhaps `q_xyz`? Recall this is the Q vector in the frame of the sliceviewer axes.
def get_hkl_from_full_point(self, full_point: List[float], qdims_i: List[int]):
if basis_transform is None:
return 0.0, 0.0, 0.0
+ # Get the x, y, z values for the q dimensions
+ q_xyz = tuple(full_point[q_i] for q_i in qdims_i)
+ return np.matmul(basis_transform, q_xyz)
# private functions
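The `np.matmul` call is exactly the column-wise sum the previous version spelled out by hand: for a 3x3 basis `B` and components `q`, `B @ q` equals the sum over `i` of `B[:, i] * q[i]`. A quick check with illustrative numbers:
```python
# Quick check (illustrative numbers): the matmul equals the explicit
# column-by-column sum it replaced.
import numpy as np

basis_transform = np.array([[1.0, 1.0, 0.0],
                            [1.0, -1.0, 0.0],
                            [0.0, 0.0, 1.0]])
q_xyz = np.array([0.5, 2.0, -1.0])

by_columns = sum(basis_transform[:, i] * q_xyz[i] for i in range(3))
assert np.allclose(np.matmul(basis_transform, q_xyz), by_columns)
```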
|
codereview_new_python_data_13043
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
-# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
```suggestion
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
```
# Mantid Repository : https://github.com/mantidproject/mantid
#
+# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
|
codereview_new_python_data_13044
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
-# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
```suggestion
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
```
# Mantid Repository : https://github.com/mantidproject/mantid
#
+# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
|
codereview_new_python_data_13045
|
def _loadRun(self, run: str) -> MatrixWorkspace:
self.log().information('Loaded workspace ')
return ws, monitor_ws
- def _applyCalibration(self, ws, calibration_filepath):
alg = self.createChildAlgorithm('ReflectometryISISCalibration')
alg.setProperty("InputWorkspace", ws)
alg.setProperty("CalibrationFile", calibration_filepath)
Could we add some type annotations (like on Line 77) to this method?
def _loadRun(self, run: str) -> MatrixWorkspace:
self.log().information('Loaded workspace ')
return ws, monitor_ws
+ def _applyCalibration(self, ws: MatrixWorkspace, calibration_filepath: str) -> MatrixWorkspace:
alg = self.createChildAlgorithm('ReflectometryISISCalibration')
alg.setProperty("InputWorkspace", ws)
alg.setProperty("CalibrationFile", calibration_filepath)
|
codereview_new_python_data_13046
|
def test_algorithm_fails_for_invalid_block_names(self):
Instrument='INTER',
GetLiveValueAlgorithm='GetFakeLiveInstrumentValueInvalidNames')
- def test_algorithm_fails_if_theta_is_zero(self):
workspace = self._run_algorithm_with_zero_theta()
expected = ['CloneWorkspace', 'LoadInstrument', 'GetFakeLiveInstrumentValuesWithZeroTheta',
'GetFakeLiveInstrumentValuesWithZeroTheta', 'GetFakeLiveInstrumentValuesWithZeroTheta',
I think this should say something like `test_algorithm_passes_if_theta_is_zero` or similar.
def test_algorithm_fails_for_invalid_block_names(self):
Instrument='INTER',
GetLiveValueAlgorithm='GetFakeLiveInstrumentValueInvalidNames')
+ def test_reduction_works_if_theta_is_zero(self):
workspace = self._run_algorithm_with_zero_theta()
expected = ['CloneWorkspace', 'LoadInstrument', 'GetFakeLiveInstrumentValuesWithZeroTheta',
'GetFakeLiveInstrumentValuesWithZeroTheta', 'GetFakeLiveInstrumentValuesWithZeroTheta',
|
codereview_new_python_data_13047
|
def plot_real_lattice_vectors(plot_axes, real_lattice, colors):
def calculate_lattice_vectors(workspace):
ub_matrix = np.array(workspace.sample().getOrientedLattice().getUB())
- hkl = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
q_sample = np.matmul(ub_matrix, hkl)
goniometer = workspace.getRun().getGoniometer().getR()
reciprocal_lattice_vectors = np.matmul(goniometer, q_sample) # QLab
Perhaps worth a comment here that the lattice vectors are in columns. Not sure the numpy syntax helps, because it suggests they're in rows. Later on you pull out individual columns when plotting the arrows.
def plot_real_lattice_vectors(plot_axes, real_lattice, colors):
def calculate_lattice_vectors(workspace):
ub_matrix = np.array(workspace.sample().getOrientedLattice().getUB())
+ hkl = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) # h, k and l are column vectors in this matrix
q_sample = np.matmul(ub_matrix, hkl)
goniometer = workspace.getRun().getGoniometer().getR()
reciprocal_lattice_vectors = np.matmul(goniometer, q_sample) # QLab
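To see why the column convention needs calling out: numpy array literals are written row by row, so the matrix above visually reads as rows of vectors even though each lattice vector lives in a column and must be sliced accordingly:
```python
# Illustrative slicing: with vectors stored as columns, slice columns.
import numpy as np

vectors = np.array([[1.0, 4.0, 7.0],
                    [2.0, 5.0, 8.0],
                    [3.0, 6.0, 9.0]])
first_vector = vectors[:, 0]   # -> [1., 2., 3.], the first column
# vectors[0] would instead give the first *row*: [1., 4., 7.]
```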
|
codereview_new_python_data_13048
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
-# Copyright © 202 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
I'm getting pedantic. Small typo in the year here
# Mantid Repository : https://github.com/mantidproject/mantid
#
+# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
|
codereview_new_python_data_13051
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
-# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
Since this is a new file, the copyright could be 2022.
# Mantid Repository : https://github.com/mantidproject/mantid
#
+# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
|
codereview_new_python_data_13052
|
def test_hide_button_hides_window(self, mock_qappthread):
fig_manager = FigureManagerWorkbench(canvas, 1)
# This is only called when show() is called on the figure manager, so we have to manually call it here.
fig_manager.toolbar.set_buttons_visibility(fig)
fig_manager.toolbar.hide_plot()
self.assertTrue(fig_manager.window.isHidden())
```suggestion
fig_manager.window.show()
fig_manager.toolbar.hide_plot()
```
At the moment this test will always pass, even if figuremanager line 360 is removed, as the window seems to be hidden by default in the tests here. I think you need to show the window for this to actually check that hide on the toolbar takes effect.
def test_hide_button_hides_window(self, mock_qappthread):
fig_manager = FigureManagerWorkbench(canvas, 1)
# This is only called when show() is called on the figure manager, so we have to manually call it here.
fig_manager.toolbar.set_buttons_visibility(fig)
+ fig_manager.window.show()
fig_manager.toolbar.hide_plot()
self.assertTrue(fig_manager.window.isHidden())
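A standalone illustration of why the `show()` matters: a QWidget starts out hidden, so asserting `isHidden()` without showing the window first proves nothing. The snippet below uses plain qtpy widgets rather than the figure manager:
```python
# A new, parentless QWidget is hidden until show() is called, so the
# original assertion passed vacuously.
from qtpy.QtWidgets import QApplication, QWidget

app = QApplication([])
window = QWidget()
assert window.isHidden()  # hidden by default: the old assertion was vacuous
window.show()
assert not window.isHidden()
window.hide()             # stands in for toolbar.hide_plot()
assert window.isHidden()  # now the hide path is genuinely exercised
```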
|
codereview_new_python_data_13054
|
def keyPressEvent(self, event):
QTableView.keyPressEvent(self, event)
self._key_handler._row_selected()
- def set_concise(self, bl):
- self.is_concise = bl
-
def enable_sorting(self, sort_role: int):
"""
Turn on column sorting by clicking headers
Might be mistaken but this function and the attribute `is_concise` don't appear to be used?
def keyPressEvent(self, event):
QTableView.keyPressEvent(self, event)
self._key_handler._row_selected()
def enable_sorting(self, sort_role: int):
"""
Turn on column sorting by clicking headers
|
codereview_new_python_data_13055
|
def on_visibility_changed(self, visible):
def on_normalise_checked(self, checked):
"""
Transmit the normalisation checkbox state to the model.
"""
self._model.normalise(checked)
self._update_plot(True)
Docstring for `checked` is missing here.
def on_visibility_changed(self, visible):
def on_normalise_checked(self, checked):
"""
Transmit the normalisation checkbox state to the model.
+
+ Args:
+ checked (bool): True when the checkbox is checked
"""
self._model.normalise(checked)
self._update_plot(True)
|
codereview_new_python_data_13056
|
def block_end(file_obj: BufferedReader, *, msg: Sequence[str]) -> bool:
Checks for msg which terminates block.
:param file_obj: file object from which we read
- :param msg: list of messages which can end kpoint block.
:returns: True if end of block otherwise False
"""
for item in msg:
```suggestion
:param msg: list of messages that can end kpoint block.
```
def block_end(file_obj: BufferedReader, *, msg: Sequence[str]) -> bool:
Checks for msg which terminates block.
:param file_obj: file object from which we read
+ :param msg: list of messages that can end kpoint block.
:returns: True if end of block otherwise False
"""
for item in msg:
|
codereview_new_python_data_13057
|
def is_normalised(self) -> bool:
if not np.allclose(np.ones(displacements.shape[1]),
norm(norm(displacements, axis=0), axis=1)):
return False
- else:
- return True
def extract(self):
extracted = {"unit_cell": self.unit_cell,
This `else` does not match up with anything.
def is_normalised(self) -> bool:
if not np.allclose(np.ones(displacements.shape[1]),
norm(norm(displacements, axis=0), axis=1)):
return False
+
+ return True
def extract(self):
extracted = {"unit_cell": self.unit_cell,
|
codereview_new_python_data_13058
|
def update_line_plot_limits(self):
"""
# ensure plot labels are in sync with main axes
self._axx.relim()
self._axy.relim()
def update_line_plot_labels(self):
"""
How were these causing a problem? It seems to behave fine on a nightly version for me.
def update_line_plot_limits(self):
"""
# ensure plot labels are in sync with main axes
self._axx.relim()
+ self._axx.autoscale(axis='y')
self._axy.relim()
+ self._axy.autoscale(axis='x')
def update_line_plot_labels(self):
"""
|
codereview_new_python_data_13059
|
def eiWavelengthUpdateEvent(self):
self.dimensionWidget.set_editMax4(upperBound)
def instrumentUpdateEvent(self):
- changeToElastic = True if self.masterDict['instrument'] in ['WAND\u00B2'] else False
- if changeToElastic:
- self.dimensionWidget.toggleDeltaE(False)
- self.instrumentElastic = True
- elif self.instrumentElastic:
- self.dimensionWidget.toggleDeltaE(True)
- self.instrumentElastic = False
self.eiWavelengthUpdateEvent()
@QtCore.Slot(dict)
This can be written as `changeToElastic = self.masterDict['instrument'] in ['WAND\u00B2']`
def eiWavelengthUpdateEvent(self):
self.dimensionWidget.set_editMax4(upperBound)
def instrumentUpdateEvent(self):
+ changeToElastic = self.masterDict['instrument'] in ['WAND\u00B2']
+ if changeToElastic != self.instrumentElastic:
+ self.instrumentElastic = changeToElastic
+ self.dimensionWidget.toggleDeltaE(not changeToElastic)
self.eiWavelengthUpdateEvent()
@QtCore.Slot(dict)
|
codereview_new_python_data_13060
|
def eiWavelengthUpdateEvent(self):
self.dimensionWidget.set_editMax4(upperBound)
def instrumentUpdateEvent(self):
- changeToElastic = True if self.masterDict['instrument'] in ['WAND\u00B2'] else False
- if changeToElastic:
- self.dimensionWidget.toggleDeltaE(False)
- self.instrumentElastic = True
- elif self.instrumentElastic:
- self.dimensionWidget.toggleDeltaE(True)
- self.instrumentElastic = False
self.eiWavelengthUpdateEvent()
@QtCore.Slot(dict)
All this if/else is unnecessary. Try something like:
```
self.instrumentElastic = changeToElastic
self.dimensionWidget.toggleDeltaE(not changeToElastic)
```
def eiWavelengthUpdateEvent(self):
self.dimensionWidget.set_editMax4(upperBound)
def instrumentUpdateEvent(self):
+ changeToElastic = self.masterDict['instrument'] in ['WAND\u00B2']
+ if changeToElastic != self.instrumentElastic:
+ self.instrumentElastic = changeToElastic
+ self.dimensionWidget.toggleDeltaE(not changeToElastic)
self.eiWavelengthUpdateEvent()
@QtCore.Slot(dict)
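Two things are going on in the fix: a comparison already yields a bool (so the `True if ... else False` ternary is redundant), and comparing against the stored flag before acting avoids redundant toggles. A minimal sketch of the toggle-on-change pattern, with simplified stand-in names:
```python
# Sketch of the toggle-on-change pattern from the fix; names are
# simplified stand-ins for the widget attributes.
class Picker:
    def __init__(self):
        self.elastic = False

    def on_instrument_changed(self, instrument: str) -> None:
        change_to_elastic = instrument in ('WAND\u00B2',)  # plain bool, no ternary
        if change_to_elastic != self.elastic:              # only act on a real change
            self.elastic = change_to_elastic
            print('toggle deltaE ->', not change_to_elastic)


p = Picker()
p.on_instrument_changed('WAND\u00B2')  # toggles once
p.on_instrument_changed('WAND\u00B2')  # no-op: state unchanged
```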
|
codereview_new_python_data_13061
|
def get_proj_matrix(self):
# for histo try to find axes from log
try:
expt_info = ws.getExperimentInfo(0)
- trans_matrix = np.array(expt_info.run().get(PROJ_MATRIX_LOG_NAME).value, dtype=float).reshape(3, 3)
- ndims = ws.getNumDims()
- i_qdims = [idim for idim in range(ndims) if ws.getDimension(idim).getMDFrame().isQ()]
- irow_end = min(3, len(i_qdims))
- for icol, idim in enumerate(i_qdims):
- # copy first irow_end components of basis vec are always Q
- proj_matrix[0:irow_end, icol] = trans_matrix[0:irow_end, icol]
except (AttributeError, KeyError, ValueError):
# revert back to orthogonal projection
proj_matrix = np.eye(3)
I'm not sure this check is required if `W_MATRIX` is only set on 3D workspaces with 3 Q-dimensions (as in `ConvertWANDSCDToQ`). If it is necessary (or just to be safe) then I believe the `ndim`, `i_qdims` and `irow_end` variables are all still valid as set above L489 (and don't need to be set again).
def get_proj_matrix(self):
# for histo try to find axes from log
try:
expt_info = ws.getExperimentInfo(0)
+ proj_matrix = np.array(expt_info.run().get(PROJ_MATRIX_LOG_NAME).value, dtype=float).reshape(3, 3)
except (AttributeError, KeyError, ValueError):
# revert back to orthogonal projection
proj_matrix = np.eye(3)
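For reference, `reshape(3, 3)` fills row-major (numpy's default order), so the simplified fix assumes the nine log entries are stored row by row. A tiny round-trip with made-up values:
```python
# Illustrative round-trip for the reshape, assuming the log stores the
# nine matrix entries row-major (numpy's default).
import numpy as np

log_value = [1.0, 1.0, 0.0,
             1.0, -1.0, 0.0,
             0.0, 0.0, 1.0]
proj_matrix = np.array(log_value, dtype=float).reshape(3, 3)
assert proj_matrix[0, 1] == 1.0 and proj_matrix[1, 1] == -1.0
```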
|
codereview_new_python_data_13062
|
def test_calculate_axes_angles_uses_W_if_available_MDEvent(self):
axes_angles = model.get_axes_angles(force_orthogonal=True)
self.assertAlmostEqual(axes_angles[1, 2], np.pi / 2, delta=1e-10)
- def test_calculate_axes_angles_uses_W_if_available_MDHisto(self):
#test MD histo
ws = _create_mock_workspace(IMDHistoWorkspace,
SpecialCoordinateSystem.HKL,
So in this test it takes the values from the basis vectors of the workspace, is that correct (because it checks the basis vectors first, and in this case they are valid)? If so, this test would probably be better named something similar to
`test_calculate_axes_angles_uses_basis_vectors_even_if_WMatrix_log_available_MDHisto`
def test_calculate_axes_angles_uses_W_if_available_MDEvent(self):
axes_angles = model.get_axes_angles(force_orthogonal=True)
self.assertAlmostEqual(axes_angles[1, 2], np.pi / 2, delta=1e-10)
+ def test_calculate_axes_angles_uses_basis_vectors_even_if_WMatrix_log_available_MDHisto(self):
#test MD histo
ws = _create_mock_workspace(IMDHistoWorkspace,
SpecialCoordinateSystem.HKL,
|
codereview_new_python_data_13064
|
def get_default_scale_norm(self):
scale = self.conf.get(SCALENORM, type=str)
if scale == 'Power' and self.conf.has(POWERSCALE):
- exponent = self.conf.get(POWERSCALE)
scale = (scale, exponent)
scale = "SymmetricLog10" if scale == 'Log' else scale
I think this line needs `type=str` as well
def get_default_scale_norm(self):
scale = self.conf.get(SCALENORM, type=str)
if scale == 'Power' and self.conf.has(POWERSCALE):
+ exponent = self.conf.get(POWERSCALE, type=str)
scale = (scale, exponent)
scale = "SymmetricLog10" if scale == 'Log' else scale
|
codereview_new_python_data_13065
|
def generate_ts_pdf(run_number, focus_file_path, sample_details, placzek_order,
raw_ws = mantid.Load(Filename='POLARIS'+str(run_number))
sample_geometry_json = sample_details.generate_sample_geometry()
- # import pydevd_pycharm
- # pydevd_pycharm.settrace('localhost', port=8080, stdoutToServer=True, stderrToServer=True)
sample_material_json = sample_details.generate_sample_material()
self_scattering_correction = mantid.TotScatCalculateSelfScattering(
These 2 debug lines need removing
def generate_ts_pdf(run_number, focus_file_path, sample_details, placzek_order,
raw_ws = mantid.Load(Filename='POLARIS'+str(run_number))
sample_geometry_json = sample_details.generate_sample_geometry()
sample_material_json = sample_details.generate_sample_material()
self_scattering_correction = mantid.TotScatCalculateSelfScattering(
|