Dataset columns:

| column | type |
|---|---|
| identifier | string (lengths 1-155) |
| parameters | string (lengths 2-6.09k) |
| docstring | string (lengths 11-63.4k) |
| docstring_summary | string (lengths 0-63.4k) |
| function | string (lengths 29-99.8k) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 class) |
| docstring_language | string (lengths 2-7) |
| docstring_language_predictions | string (lengths 18-23) |
| is_langid_reliable | string (2 classes) |
identifier: `MonopriceOptionsFlowHandler.__init__` | parameters: `(self, config_entry)` | docstring: Initialize.

```python
def __init__(self, config_entry):
    """Initialize."""
    self.config_entry = config_entry
```

start_point: [113, 4] | end_point: [115, 40] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'it'] | is_langid_reliable: False
identifier: `MonopriceOptionsFlowHandler.async_step_init` | parameters: `(self, user_input=None)` | docstring: Manage the options.

```python
async def async_step_init(self, user_input=None):
    """Manage the options."""
    if user_input is not None:
        return self.async_create_entry(
            title="", data={CONF_SOURCES: _sources_from_config(user_input)}
        )

    previous_sources = self._previous_sources()

    options = {
        _key_for_source(idx + 1, source, previous_sources): str
        for idx, source in enumerate(SOURCES)
    }

    return self.async_show_form(
        step_id="init",
        data_schema=vol.Schema(options),
    )
```

start_point: [126, 4] | end_point: [143, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `async_setup_platform` | parameters: `(hass, config, async_add_entities, discovery_info=None)` | docstring: Find and return LightWave lights.

```python
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Find and return LightWave lights."""
    if discovery_info is None:
        return

    entities = []
    lwlink = hass.data[LIGHTWAVE_LINK]

    for device_id, device_config in discovery_info.items():
        name = device_config[CONF_NAME]
        serial = device_config[CONF_SERIAL]
        entities.append(LightwaveTrv(name, device_id, lwlink, serial))

    async_add_entities(entities)
```

start_point: [15, 0] | end_point: [28, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.__init__` | parameters: `(self, name, device_id, lwlink, serial)` | docstring: Initialize LightwaveTrv entity.

```python
def __init__(self, name, device_id, lwlink, serial):
    """Initialize LightwaveTrv entity."""
    self._name = name
    self._device_id = device_id
    self._state = None
    self._current_temperature = None
    self._target_temperature = None
    self._hvac_action = None
    self._lwlink = lwlink
    self._serial = serial
    # inhibit is used to prevent race condition on update. If non zero, skip next update cycle.
    self._inhibit = 0
```

start_point: [34, 4] | end_point: [45, 25] | language: python | docstring_language: da | docstring_language_predictions: ['en', 'da', 'it'] | is_langid_reliable: False
identifier: `LightwaveTrv.supported_features` | parameters: `(self)` | docstring: Flag supported features.

```python
def supported_features(self):
    """Flag supported features."""
    return SUPPORT_TARGET_TEMPERATURE
```

start_point: [48, 4] | end_point: [50, 41] | language: python | docstring_language: en | docstring_language_predictions: ['da', 'en', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.update` | parameters: `(self)` | docstring: Communicate with a Lightwave RTF Proxy to get state.

```python
def update(self):
    """Communicate with a Lightwave RTF Proxy to get state."""
    (temp, targ, _, trv_output) = self._lwlink.read_trv_status(self._serial)
    if temp is not None:
        self._current_temperature = temp
    if targ is not None:
        if self._inhibit == 0:
            self._target_temperature = targ
            if targ == 0:
                # TRV off
                self._target_temperature = None
            if targ >= 40:
                # Call for heat mode, or TRV in a fixed position
                self._target_temperature = None
        else:
            # Done the job - use proxy next iteration
            self._inhibit = 0
    if trv_output is not None:
        if trv_output > 0:
            self._hvac_action = CURRENT_HVAC_HEAT
        else:
            self._hvac_action = CURRENT_HVAC_OFF
```

start_point: [52, 4] | end_point: [73, 52] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.name` | parameters: `(self)` | docstring: Lightwave trv name.

```python
def name(self):
    """Lightwave trv name."""
    return self._name
```

start_point: [76, 4] | end_point: [78, 25] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.current_temperature` | parameters: `(self)` | docstring: Property giving the current room temperature.

```python
def current_temperature(self):
    """Property giving the current room temperature."""
    return self._current_temperature
```

start_point: [81, 4] | end_point: [83, 40] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.target_temperature` | parameters: `(self)` | docstring: Target room temperature.

```python
def target_temperature(self):
    """Target room temperature."""
    if self._inhibit > 0:
        # If we get an update before the new temp has
        # propagated, the target temp is set back to the
        # old target on the next poll, showing a false
        # reading temporarily.
        self._target_temperature = self._inhibit
    return self._target_temperature
```

start_point: [86, 4] | end_point: [94, 39] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'la', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.hvac_modes` | parameters: `(self)` | docstring: HVAC modes.

```python
def hvac_modes(self):
    """HVAC modes."""
    return [HVAC_MODE_HEAT, HVAC_MODE_OFF]
```

start_point: [97, 4] | end_point: [99, 46] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'hi-Latn', 'en'] | is_langid_reliable: False
identifier: `LightwaveTrv.hvac_action` | parameters: `(self)` | docstring: HVAC action.

```python
def hvac_action(self):
    """HVAC action."""
    return self._hvac_action
```

start_point: [107, 4] | end_point: [109, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'ja', 'en'] | is_langid_reliable: False
identifier: `LightwaveTrv.temperature_unit` | parameters: `(self)` | docstring: Set temperature unit.

```python
def temperature_unit(self):
    """Set temperature unit."""
    return TEMP_CELSIUS
```

start_point: [122, 4] | end_point: [124, 27] | language: python | docstring_language: en | docstring_language_predictions: ['es', 'la', 'en'] | is_langid_reliable: False
identifier: `LightwaveTrv.target_temperature_step` | parameters: `(self)` | docstring: Set temperature step.

```python
def target_temperature_step(self):
    """Set temperature step."""
    return 0.5
```

start_point: [127, 4] | end_point: [129, 18] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'la', 'en'] | is_langid_reliable: True
identifier: `LightwaveTrv.set_temperature` | parameters: `(self, **kwargs)` | docstring: Set TRV target temperature.

```python
def set_temperature(self, **kwargs):
    """Set TRV target temperature."""
    if ATTR_TEMPERATURE in kwargs:
        self._target_temperature = kwargs[ATTR_TEMPERATURE]
        self._inhibit = self._target_temperature
    self._lwlink.set_temperature(
        self._device_id, self._target_temperature, self._name
    )
```

start_point: [131, 4] | end_point: [138, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'la', 'en'] | is_langid_reliable: True
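Taken together, `set_temperature`, `update`, and the `target_temperature` property implement a small anti-flicker handshake: the user's new target is remembered in `_inhibit`, exactly one stale poll result is discarded, and polling then resumes as the source of truth. A minimal, self-contained sketch of the same pattern (the class and values below are illustrative, not part of the integration):

```python
class InhibitDemo:
    """Illustrative only: mimics the one-poll inhibit handshake used above."""

    def __init__(self):
        self._target = 18.0
        self._inhibit = 0

    def set_temperature(self, value):
        self._target = value
        self._inhibit = value  # remember the user's value for one poll cycle

    def update(self, polled_target):
        if self._inhibit == 0:
            self._target = polled_target  # trust the proxy
        else:
            self._inhibit = 0  # drop one stale poll, then resume trusting the proxy

    @property
    def target_temperature(self):
        if self._inhibit > 0:
            return self._inhibit  # report the pending value, not the stale one
        return self._target


demo = InhibitDemo()
demo.set_temperature(21.0)
demo.update(18.0)  # stale reading from before the write propagated
print(demo.target_temperature)  # 21.0, not the stale 18.0
```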
identifier: `LightwaveTrv.async_set_hvac_mode` | parameters: `(self, hvac_mode)` | docstring: Set HVAC Mode for TRV.

```python
async def async_set_hvac_mode(self, hvac_mode):
    """Set HVAC Mode for TRV."""
```

start_point: [140, 4] | end_point: [141, 36] | language: python | docstring_language: da | docstring_language_predictions: ['da', 'pt', 'en'] | is_langid_reliable: False
identifier: `async_get_registry` | parameters: `(hass: HomeAssistantType)` | docstring: Return zha device storage instance.

```python
async def async_get_registry(hass: HomeAssistantType) -> ZhaStorage:
    """Return zha device storage instance."""
    task = hass.data.get(DATA_REGISTRY)

    if task is None:

        async def _load_reg() -> ZhaStorage:
            registry = ZhaStorage(hass)
            await registry.async_load()
            return registry

        task = hass.data[DATA_REGISTRY] = hass.async_create_task(_load_reg())

    return cast(ZhaStorage, await task)
```

start_point: [130, 0] | end_point: [143, 39] | language: python | docstring_language: br | docstring_language_predictions: ['br', 'pl', 'en'] | is_langid_reliable: False
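`async_get_registry` caches the *task* rather than the loaded object, so concurrent callers share one in-flight load instead of racing to create two registries. A stripped-down sketch of the same memoized-task pattern in plain `asyncio` (the cache dict and loader below are illustrative stand-ins for `hass.data` and `ZhaStorage.async_load`):

```python
import asyncio

_cache: dict = {}

async def _load_registry() -> dict:
    await asyncio.sleep(0.1)  # stand-in for reading storage from disk
    return {"devices": {}}

async def get_registry() -> dict:
    task = _cache.get("registry")
    if task is None:
        # Store the task immediately: a second caller arriving before the
        # load finishes awaits the same task instead of starting another load.
        task = _cache["registry"] = asyncio.ensure_future(_load_registry())
    return await task

async def main():
    a, b = await asyncio.gather(get_registry(), get_registry())
    print(a is b)  # True: both callers got the same loaded object

asyncio.run(main())
```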
identifier: `ZhaStorage.__init__` | parameters: `(self, hass: HomeAssistantType)` | docstring: Initialize the zha device storage.

```python
def __init__(self, hass: HomeAssistantType) -> None:
    """Initialize the zha device storage."""
    self.hass: HomeAssistantType = hass
    self.devices: MutableMapping[str, ZhaDeviceEntry] = {}
    self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
```

start_point: [35, 4] | end_point: [39, 78] | language: python | docstring_language: br | docstring_language_predictions: ['br', 'pl', 'en'] | is_langid_reliable: False
identifier: `ZhaStorage.async_create_device` | parameters: `(self, device: ZhaDeviceType)` | docstring: Create a new ZhaDeviceEntry.

```python
def async_create_device(self, device: ZhaDeviceType) -> ZhaDeviceEntry:
    """Create a new ZhaDeviceEntry."""
    device_entry: ZhaDeviceEntry = ZhaDeviceEntry(
        name=device.name, ieee=str(device.ieee), last_seen=device.last_seen
    )
    self.devices[device_entry.ieee] = device_entry
    self.async_schedule_save()
    return device_entry
```

start_point: [42, 4] | end_point: [49, 27] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage.async_get_or_create_device` | parameters: `(self, device: ZhaDeviceType)` | docstring: Create a new ZhaDeviceEntry.

```python
def async_get_or_create_device(self, device: ZhaDeviceType) -> ZhaDeviceEntry:
    """Create a new ZhaDeviceEntry."""
    ieee_str: str = str(device.ieee)
    if ieee_str in self.devices:
        return self.devices[ieee_str]
    return self.async_create_device(device)
```

start_point: [52, 4] | end_point: [57, 47] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage.async_create_or_update_device` | parameters: `(self, device: ZhaDeviceType)` | docstring: Create or update a ZhaDeviceEntry.

```python
def async_create_or_update_device(self, device: ZhaDeviceType) -> ZhaDeviceEntry:
    """Create or update a ZhaDeviceEntry."""
    if str(device.ieee) in self.devices:
        return self.async_update_device(device)
    return self.async_create_device(device)
```

start_point: [60, 4] | end_point: [64, 47] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage.async_delete_device` | parameters: `(self, device: ZhaDeviceType)` | docstring: Delete ZhaDeviceEntry.

```python
def async_delete_device(self, device: ZhaDeviceType) -> None:
    """Delete ZhaDeviceEntry."""
    ieee_str: str = str(device.ieee)
    if ieee_str in self.devices:
        del self.devices[ieee_str]
        self.async_schedule_save()
```

start_point: [67, 4] | end_point: [72, 38] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'sr', 'pt'] | is_langid_reliable: False
identifier: `ZhaStorage.async_update_device` | parameters: `(self, device: ZhaDeviceType)` | docstring: Update name of ZhaDeviceEntry.

```python
def async_update_device(self, device: ZhaDeviceType) -> ZhaDeviceEntry:
    """Update name of ZhaDeviceEntry."""
    ieee_str: str = str(device.ieee)
    old = self.devices[ieee_str]
    if old is not None and device.last_seen is None:
        return
    changes = {}
    changes["last_seen"] = device.last_seen
    new = self.devices[ieee_str] = attr.evolve(old, **changes)
    self.async_schedule_save()
    return new
```

start_point: [75, 4] | end_point: [88, 18] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage.async_load` | parameters: `(self)` | docstring: Load the registry of zha device entries.

```python
async def async_load(self) -> None:
    """Load the registry of zha device entries."""
    data = await self._store.async_load()

    devices: "OrderedDict[str, ZhaDeviceEntry]" = OrderedDict()

    if data is not None:
        for device in data["devices"]:
            devices[device["ieee"]] = ZhaDeviceEntry(
                name=device["name"],
                ieee=device["ieee"],
                last_seen=device.get("last_seen"),
            )

    self.devices = devices
```

start_point: [90, 4] | end_point: [104, 30] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage.async_schedule_save` | parameters: `(self)` | docstring: Schedule saving the registry of zha devices.

```python
def async_schedule_save(self) -> None:
    """Schedule saving the registry of zha devices."""
    self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
```

start_point: [107, 4] | end_point: [109, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage.async_save` | parameters: `(self)` | docstring: Save the registry of zha devices.

```python
async def async_save(self) -> None:
    """Save the registry of zha devices."""
    await self._store.async_save(self._data_to_save())
```

start_point: [111, 4] | end_point: [113, 58] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `ZhaStorage._data_to_save` | parameters: `(self)` | docstring: Return data for the registry of zha devices to store in a file.

```python
def _data_to_save(self) -> dict:
    """Return data for the registry of zha devices to store in a file."""
    data = {}

    data["devices"] = [
        {"name": entry.name, "ieee": entry.ieee, "last_seen": entry.last_seen}
        for entry in self.devices.values()
        if entry.last_seen and (time.time() - entry.last_seen) < TOMBSTONE_LIFETIME
    ]

    return data
```

start_point: [116, 4] | end_point: [126, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
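The list comprehension in `_data_to_save` doubles as garbage collection: entries that were never seen, or not seen within `TOMBSTONE_LIFETIME`, are silently dropped from the next save. A small worked example of the same filter (the lifetime value here is illustrative; the real constant is defined elsewhere in the component):

```python
import time

TOMBSTONE_LIFETIME = 60 * 60 * 24 * 60  # illustrative: 60 days in seconds

now = time.time()
entries = [
    {"ieee": "00:11", "last_seen": now - 3600},                    # seen an hour ago -> kept
    {"ieee": "00:22", "last_seen": now - 2 * TOMBSTONE_LIFETIME},  # stale -> dropped
    {"ieee": "00:33", "last_seen": None},                          # never seen -> dropped
]

kept = [e for e in entries if e["last_seen"] and (now - e["last_seen"]) < TOMBSTONE_LIFETIME]
print([e["ieee"] for e in kept])  # ['00:11']
```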
identifier: `_async_reproduce_state` | parameters: `(hass: HomeAssistantType, state: State, *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None)` | docstring: Reproduce a single state.

```python
async def _async_reproduce_state(
    hass: HomeAssistantType,
    state: State,
    *,
    context: Optional[Context] = None,
    reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
    """Reproduce a single state."""
    cur_state = hass.states.get(state.entity_id)

    if cur_state is None:
        _LOGGER.warning("Unable to find entity %s", state.entity_id)
        return

    if state.state not in VALID_STATES:
        _LOGGER.warning(
            "Invalid state specified for %s: %s", state.entity_id, state.state
        )
        return

    # Return if we are already at the right state.
    if cur_state.state == state.state:
        return

    service_data = {ATTR_ENTITY_ID: state.entity_id}

    if state.state == STATE_ON:
        service = SERVICE_TURN_ON
    elif state.state == STATE_OFF:
        service = SERVICE_TURN_OFF

    await hass.services.async_call(
        DOMAIN, service, service_data, context=context, blocking=True
    )
```

start_point: [22, 0] | end_point: [55, 5] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `async_reproduce_states` | parameters: `(hass: HomeAssistantType, states: Iterable[State], *, context: Optional[Context] = None, reproduce_options: Optional[Dict[str, Any]] = None)` | docstring: Reproduce Switch states.

```python
async def async_reproduce_states(
    hass: HomeAssistantType,
    states: Iterable[State],
    *,
    context: Optional[Context] = None,
    reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
    """Reproduce Switch states."""
    await asyncio.gather(
        *(
            _async_reproduce_state(
                hass, state, context=context, reproduce_options=reproduce_options
            )
            for state in states
        )
    )
```

start_point: [58, 0] | end_point: [73, 5] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
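`async_reproduce_states` unpacks a generator of coroutines into `asyncio.gather`, so every state is reproduced concurrently rather than one after another. A minimal sketch of the same fan-out (the names below are illustrative):

```python
import asyncio

async def reproduce_one(name):
    await asyncio.sleep(0)  # stand-in for the blocking service call
    print(f"reproduced {name}")

async def main():
    names = ["switch.a", "switch.b", "switch.c"]
    # Unpacking a generator into gather runs all reproductions concurrently,
    # mirroring async_reproduce_states above.
    await asyncio.gather(*(reproduce_one(n) for n in names))

asyncio.run(main())
```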
identifier: `test_air_con` | parameters: `(hass)` | docstring: Test creation of aircon climate.

```python
async def test_air_con(hass):
    """Test creation of aircon climate."""
    await async_init_integration(hass)

    state = hass.states.get("climate.air_conditioning")
    assert state.state == "cool"

    expected_attributes = {
        "current_humidity": 60.9,
        "current_temperature": 24.8,
        "fan_mode": "auto",
        "fan_modes": ["auto", "high", "medium", "low"],
        "friendly_name": "Air Conditioning",
        "hvac_action": "cooling",
        "hvac_modes": ["off", "auto", "heat", "cool", "heat_cool", "dry", "fan_only"],
        "max_temp": 31.0,
        "min_temp": 16.0,
        "preset_mode": "home",
        "preset_modes": ["away", "home"],
        "supported_features": 25,
        "target_temp_step": 1,
        "temperature": 17.8,
    }
    # Only test for a subset of attributes in case
    # HA changes the implementation and a new one appears
    assert all(item in state.attributes.items() for item in expected_attributes.items())
```

start_point: [5, 0] | end_point: [31, 88] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
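The closing assertion checks that `expected_attributes` is a *subset* of the actual attributes, which keeps the test stable if the integration later adds new attributes. The same idiom with plain dicts:

```python
expected = {"hvac_action": "cooling", "preset_mode": "home"}
actual = {"hvac_action": "cooling", "preset_mode": "home", "temperature": 17.8}

# Every expected key/value pair must appear in actual; extra keys are ignored.
assert all(item in actual.items() for item in expected.items())
```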
identifier: `test_heater` | parameters: `(hass)` | docstring: Test creation of heater climate.

```python
async def test_heater(hass):
    """Test creation of heater climate."""
    await async_init_integration(hass)

    state = hass.states.get("climate.baseboard_heater")
    assert state.state == "heat"

    expected_attributes = {
        "current_humidity": 45.2,
        "current_temperature": 20.6,
        "friendly_name": "Baseboard Heater",
        "hvac_action": "idle",
        "hvac_modes": ["off", "auto", "heat"],
        "max_temp": 31.0,
        "min_temp": 16.0,
        "preset_mode": "home",
        "preset_modes": ["away", "home"],
        "supported_features": 17,
        "target_temp_step": 1,
        "temperature": 20.5,
    }
    # Only test for a subset of attributes in case
    # HA changes the implementation and a new one appears
    assert all(item in state.attributes.items() for item in expected_attributes.items())
```

start_point: [34, 0] | end_point: [58, 88] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `test_smartac_with_swing` | parameters: `(hass)` | docstring: Test creation of smart ac with swing climate.

```python
async def test_smartac_with_swing(hass):
    """Test creation of smart ac with swing climate."""
    await async_init_integration(hass)

    state = hass.states.get("climate.air_conditioning_with_swing")
    assert state.state == "auto"

    expected_attributes = {
        "current_humidity": 42.3,
        "current_temperature": 20.9,
        "fan_mode": "auto",
        "fan_modes": ["auto", "high", "medium", "low"],
        "friendly_name": "Air Conditioning with swing",
        "hvac_action": "heating",
        "hvac_modes": ["off", "auto", "heat", "cool", "heat_cool", "dry", "fan_only"],
        "max_temp": 30.0,
        "min_temp": 16.0,
        "preset_mode": "home",
        "preset_modes": ["away", "home"],
        "swing_modes": ["ON", "OFF"],
        "supported_features": 57,
        "target_temp_step": 1.0,
        "temperature": 20.0,
    }
    # Only test for a subset of attributes in case
    # HA changes the implementation and a new one appears
    assert all(item in state.attributes.items() for item in expected_attributes.items())
```

start_point: [61, 0] | end_point: [88, 88] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `shift_tokens_right` | parameters: `(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int)` | docstring: Shift input ids one token to the right.

```python
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
```

start_point: [61, 0] | end_point: [73, 28] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
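A quick check of what the shift does to a labels tensor, assuming the function above is in scope (the token ids are arbitrary):

```python
import torch

labels = torch.tensor([[42, 17, -100, -100]])  # -100 marks ignored label positions
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
print(decoder_input_ids)  # tensor([[ 1, 42, 17,  0]])
```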
identifier: `_make_causal_mask` | parameters: `(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0)` | docstring: Make causal mask used for bi-directional self-attention.

```python
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), float("-inf"))
    mask_cond = torch.arange(mask.size(-1))
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
```

start_point: [77, 0] | end_point: [89, 91] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
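For a target length of 3 and no cached past, the result is a lower-triangular mask in additive form: zeros where attention is allowed, `-inf` above the diagonal (assuming the function above is in scope):

```python
import torch

mask = _make_causal_mask(torch.Size([1, 3]), torch.float32)
print(mask[0, 0])
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])
```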
identifier: `_expand_mask` | parameters: `(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)` | docstring: Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.

```python
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
```

start_point: [93, 0] | end_point: [104, 82] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
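Applied to a `[bsz, seq_len]` padding mask, the result is an additive mask where real tokens contribute 0 and padded positions contribute the most negative representable value (again assuming the function above is in scope):

```python
import torch

padding_mask = torch.tensor([[1, 1, 0]])  # 1 = real token, 0 = padding
additive = _expand_mask(padding_mask, torch.float32)
print(additive.shape)     # torch.Size([1, 1, 3, 3])
print(additive[0, 0, 0])  # tensor([ 0.0000e+00,  0.0000e+00, -3.4028e+38])
```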
identifier: `MarianSinusoidalPositionalEmbedding._init_weight` | parameters: `(out: nn.Parameter)` | docstring: Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:]

```python
def _init_weight(out: nn.Parameter):
    """
    Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
    the 2nd half of the vector. [dim // 2:]
    """
    n_pos, dim = out.shape
    position_enc = np.array(
        [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
    )
    out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
    sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
    out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
    out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
    out.detach_()
    return out
```

start_point: [115, 4] | end_point: [129, 18] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
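Because sin features fill the first half of each vector and cos features the second, position 0 comes out as zeros followed by ones. A small check, assuming `_init_weight` is exposed as a static method on the class shown above:

```python
import torch
from torch import nn

weight = MarianSinusoidalPositionalEmbedding._init_weight(nn.Parameter(torch.empty(4, 6)))
print(weight[0])  # tensor([0., 0., 0., 1., 1., 1.]) -> sin(0) half, then cos(0) half
```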
identifier: `MarianSinusoidalPositionalEmbedding.forward` | parameters: `(self, input_ids_shape: torch.Size, past_key_values_length: int = 0)` | docstring: `input_ids_shape` is expected to be [bsz x seqlen].

```python
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
    """`input_ids_shape` is expected to be [bsz x seqlen]."""
    bsz, seq_len = input_ids_shape[:2]
    positions = torch.arange(
        past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
    )
    return super().forward(positions)
```

start_point: [132, 4] | end_point: [138, 41] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True
identifier: `MarianAttention.forward` | parameters: `(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False)` | docstring: Input shape: Batch x Time x Channel

```python
def forward(
    self,
    hidden_states: torch.Tensor,
    key_value_states: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    attention_mask: Optional[torch.Tensor] = None,
    layer_head_mask: Optional[torch.Tensor] = None,
    output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Input shape: Batch x Time x Channel"""

    # if key_value_states are provided this layer is used as a cross-attention layer
    # for the decoder
    is_cross_attention = key_value_states is not None
    bsz, tgt_len, embed_dim = hidden_states.size()

    # get query proj
    query_states = self.q_proj(hidden_states) * self.scaling
    # get key, value proj
    if is_cross_attention and past_key_value is not None:
        # reuse k,v, cross_attentions
        key_states = past_key_value[0]
        value_states = past_key_value[1]
    elif is_cross_attention:
        # cross_attentions
        key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
        value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
    elif past_key_value is not None:
        # reuse k, v, self_attention
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)
    else:
        # self_attention
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

    if self.is_decoder:
        # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
        # Further calls to cross_attention layer can then reuse all cross-attention
        # key/value_states (first "if" case)
        # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
        # all previous decoder key/value_states. Further calls to uni-directional self-attention
        # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
        # if encoder bi-directional self-attention `past_key_value` is always `None`
        past_key_value = (key_states, value_states)

    proj_shape = (bsz * self.num_heads, -1, self.head_dim)
    query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
    key_states = key_states.view(*proj_shape)
    value_states = value_states.view(*proj_shape)

    src_len = key_states.size(1)
    attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

    assert attn_weights.size() == (
        bsz * self.num_heads,
        tgt_len,
        src_len,
    ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"

    if attention_mask is not None:
        assert attention_mask.size() == (
            bsz,
            1,
            tgt_len,
            src_len,
        ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
        attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
        attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

    attn_weights = F.softmax(attn_weights, dim=-1)

    if layer_head_mask is not None:
        assert layer_head_mask.size() == (
            self.num_heads,
        ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
        attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

    if output_attentions:
        # this operation is a bit awkward, but it's required to
        # make sure that attn_weights keeps its gradient.
        # In order to do so, attn_weights have to reshaped
        # twice and have to be reused in the following
        attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
    else:
        attn_weights_reshaped = None

    attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)

    attn_output = torch.bmm(attn_probs, value_states)

    assert attn_output.size() == (
        bsz * self.num_heads,
        tgt_len,
        self.head_dim,
    ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"

    attn_output = (
        attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        .transpose(1, 2)
        .reshape(bsz, tgt_len, embed_dim)
    )

    attn_output = self.out_proj(attn_output)

    return attn_output, attn_weights_reshaped, past_key_value
```

start_point: [172, 4] | end_point: [281, 65] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'pl', 'en'] | is_langid_reliable: True
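The `proj_shape` trick folds the batch and head dimensions together so a single batched `bmm` scores all heads at once. A shapes-only sketch with random tensors (the dimensions are arbitrary):

```python
import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 8

# After _shape(...), tensors are (bsz, num_heads, seq_len, head_dim); folding the
# first two dims lets one bmm handle every (batch, head) pair in parallel.
query = torch.randn(bsz, num_heads, tgt_len, head_dim).view(bsz * num_heads, tgt_len, head_dim)
key = torch.randn(bsz, num_heads, tgt_len, head_dim).view(bsz * num_heads, tgt_len, head_dim)

attn_weights = torch.bmm(query, key.transpose(1, 2))
print(attn_weights.shape)  # torch.Size([8, 5, 5]) == (bsz * num_heads, tgt_len, src_len)
```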
identifier: `MarianEncoderLayer.forward` | parameters: `(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False)`

```python
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: torch.Tensor,
    layer_head_mask: torch.Tensor,
    output_attentions: bool = False,
):
    """
    Args:
        hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
        attention_mask (:obj:`torch.FloatTensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
        layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
            `(config.encoder_attention_heads,)`.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
            returned tensors for more detail.
    """
    residual = hidden_states
    hidden_states, attn_weights, _ = self.self_attn(
        hidden_states=hidden_states,
        attention_mask=attention_mask,
        layer_head_mask=layer_head_mask,
        output_attentions=output_attentions,
    )
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)

    residual = hidden_states
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    hidden_states = self.final_layer_norm(hidden_states)

    if hidden_states.dtype == torch.float16 and (
        torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
    ):
        clamp_value = torch.finfo(hidden_states.dtype).max - 1000
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

    outputs = (hidden_states,)

    if output_attentions:
        outputs += (attn_weights,)

    return outputs
```

start_point: [302, 4] | end_point: [350, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
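The float16 guard at the end of the layer replaces overflowed activations with a large finite value instead of letting `inf` propagate through later layers. A quick demonstration of the clamp on its own:

```python
import torch

hidden_states = torch.tensor([65504.0, -65504.0, 1.0], dtype=torch.float16) * 2  # first two overflow
print(torch.isinf(hidden_states).any())  # tensor(True)

clamp_value = torch.finfo(torch.float16).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(torch.isinf(hidden_states).any())  # tensor(False): overflow replaced by a finite value
```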
identifier: `MarianDecoderLayer.forward` | parameters: `(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, encoder_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True)`

```python
def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    encoder_hidden_states: Optional[torch.Tensor] = None,
    encoder_attention_mask: Optional[torch.Tensor] = None,
    layer_head_mask: Optional[torch.Tensor] = None,
    encoder_layer_head_mask: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: Optional[bool] = False,
    use_cache: Optional[bool] = True,
):
    """
    Args:
        hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
        attention_mask (:obj:`torch.FloatTensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
```
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=encoder_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs | [
"def",
"forward",
"(",
"self",
",",
"hidden_states",
":",
"torch",
".",
"Tensor",
",",
"attention_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_hidden_states",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_attention_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"layer_head_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_layer_head_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"past_key_value",
":",
"Optional",
"[",
"Tuple",
"[",
"torch",
".",
"Tensor",
"]",
"]",
"=",
"None",
",",
"output_attentions",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
"use_cache",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
")",
":",
"residual",
"=",
"hidden_states",
"# Self Attention",
"# decoder uni-directional self-attention cached key/values tuple is at positions 1,2",
"self_attn_past_key_value",
"=",
"past_key_value",
"[",
":",
"2",
"]",
"if",
"past_key_value",
"is",
"not",
"None",
"else",
"None",
"# add present self-attn cache to positions 1,2 of present_key_value tuple",
"hidden_states",
",",
"self_attn_weights",
",",
"present_key_value",
"=",
"self",
".",
"self_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"past_key_value",
"=",
"self_attn_past_key_value",
",",
"attention_mask",
"=",
"attention_mask",
",",
"layer_head_mask",
"=",
"layer_head_mask",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"self_attn_layer_norm",
"(",
"hidden_states",
")",
"# Cross-Attention Block",
"cross_attn_present_key_value",
"=",
"None",
"cross_attn_weights",
"=",
"None",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
":",
"residual",
"=",
"hidden_states",
"# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple",
"cross_attn_past_key_value",
"=",
"past_key_value",
"[",
"-",
"2",
":",
"]",
"if",
"past_key_value",
"is",
"not",
"None",
"else",
"None",
"hidden_states",
",",
"cross_attn_weights",
",",
"cross_attn_present_key_value",
"=",
"self",
".",
"encoder_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"key_value_states",
"=",
"encoder_hidden_states",
",",
"attention_mask",
"=",
"encoder_attention_mask",
",",
"layer_head_mask",
"=",
"encoder_layer_head_mask",
",",
"past_key_value",
"=",
"cross_attn_past_key_value",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"encoder_attn_layer_norm",
"(",
"hidden_states",
")",
"# add cross-attn to positions 3,4 of present_key_value tuple",
"present_key_value",
"=",
"present_key_value",
"+",
"cross_attn_present_key_value",
"# Fully Connected",
"residual",
"=",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"activation_fn",
"(",
"self",
".",
"fc1",
"(",
"hidden_states",
")",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"activation_dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"self",
".",
"fc2",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"final_layer_norm",
"(",
"hidden_states",
")",
"outputs",
"=",
"(",
"hidden_states",
",",
")",
"if",
"output_attentions",
":",
"outputs",
"+=",
"(",
"self_attn_weights",
",",
"cross_attn_weights",
")",
"if",
"use_cache",
":",
"outputs",
"+=",
"(",
"present_key_value",
",",
")",
"return",
"outputs"
] | [
381,
4
] | [
467,
22
] | python | en | ['en', 'error', 'th'] | False |
MarianEncoder.forward | (
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) | r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MarianTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
| r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it. | def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MarianTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
) | [
"def",
"forward",
"(",
"self",
",",
"input_ids",
"=",
"None",
",",
"attention_mask",
"=",
"None",
",",
"head_mask",
"=",
"None",
",",
"inputs_embeds",
"=",
"None",
",",
"output_attentions",
"=",
"None",
",",
"output_hidden_states",
"=",
"None",
",",
"return_dict",
"=",
"None",
",",
")",
":",
"output_attentions",
"=",
"output_attentions",
"if",
"output_attentions",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_attentions",
"output_hidden_states",
"=",
"(",
"output_hidden_states",
"if",
"output_hidden_states",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_hidden_states",
")",
"return_dict",
"=",
"return_dict",
"if",
"return_dict",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_return_dict",
"# retrieve input_ids and inputs_embeds",
"if",
"input_ids",
"is",
"not",
"None",
"and",
"inputs_embeds",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify both input_ids and inputs_embeds at the same time\"",
")",
"elif",
"input_ids",
"is",
"not",
"None",
":",
"input_shape",
"=",
"input_ids",
".",
"size",
"(",
")",
"input_ids",
"=",
"input_ids",
".",
"view",
"(",
"-",
"1",
",",
"input_shape",
"[",
"-",
"1",
"]",
")",
"elif",
"inputs_embeds",
"is",
"not",
"None",
":",
"input_shape",
"=",
"inputs_embeds",
".",
"size",
"(",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"You have to specify either input_ids or inputs_embeds\"",
")",
"if",
"inputs_embeds",
"is",
"None",
":",
"inputs_embeds",
"=",
"self",
".",
"embed_tokens",
"(",
"input_ids",
")",
"*",
"self",
".",
"embed_scale",
"embed_pos",
"=",
"self",
".",
"embed_positions",
"(",
"input_shape",
")",
"hidden_states",
"=",
"inputs_embeds",
"+",
"embed_pos",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"# expand attention_mask",
"if",
"attention_mask",
"is",
"not",
"None",
":",
"# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]",
"attention_mask",
"=",
"_expand_mask",
"(",
"attention_mask",
",",
"inputs_embeds",
".",
"dtype",
")",
"encoder_states",
"=",
"(",
")",
"if",
"output_hidden_states",
"else",
"None",
"all_attentions",
"=",
"(",
")",
"if",
"output_attentions",
"else",
"None",
"# check if head_mask has a correct number of layers specified if desired",
"if",
"head_mask",
"is",
"not",
"None",
":",
"assert",
"head_mask",
".",
"size",
"(",
")",
"[",
"0",
"]",
"==",
"(",
"len",
"(",
"self",
".",
"layers",
")",
")",
",",
"f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"",
"for",
"idx",
",",
"encoder_layer",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
":",
"if",
"output_hidden_states",
":",
"encoder_states",
"=",
"encoder_states",
"+",
"(",
"hidden_states",
",",
")",
"# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)",
"dropout_probability",
"=",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"if",
"self",
".",
"training",
"and",
"(",
"dropout_probability",
"<",
"self",
".",
"layerdrop",
")",
":",
"# skip the layer",
"layer_outputs",
"=",
"(",
"None",
",",
"None",
")",
"else",
":",
"if",
"getattr",
"(",
"self",
".",
"config",
",",
"\"gradient_checkpointing\"",
",",
"False",
")",
"and",
"self",
".",
"training",
":",
"def",
"create_custom_forward",
"(",
"module",
")",
":",
"def",
"custom_forward",
"(",
"*",
"inputs",
")",
":",
"return",
"module",
"(",
"*",
"inputs",
",",
"output_attentions",
")",
"return",
"custom_forward",
"layer_outputs",
"=",
"torch",
".",
"utils",
".",
"checkpoint",
".",
"checkpoint",
"(",
"create_custom_forward",
"(",
"encoder_layer",
")",
",",
"hidden_states",
",",
"attention_mask",
",",
"(",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
")",
"else",
":",
"layer_outputs",
"=",
"encoder_layer",
"(",
"hidden_states",
",",
"attention_mask",
",",
"layer_head_mask",
"=",
"(",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"layer_outputs",
"[",
"0",
"]",
"if",
"output_attentions",
":",
"all_attentions",
"=",
"all_attentions",
"+",
"(",
"layer_outputs",
"[",
"1",
"]",
",",
")",
"if",
"output_hidden_states",
":",
"encoder_states",
"=",
"encoder_states",
"+",
"(",
"hidden_states",
",",
")",
"if",
"not",
"return_dict",
":",
"return",
"tuple",
"(",
"v",
"for",
"v",
"in",
"[",
"hidden_states",
",",
"encoder_states",
",",
"all_attentions",
"]",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"BaseModelOutput",
"(",
"last_hidden_state",
"=",
"hidden_states",
",",
"hidden_states",
"=",
"encoder_states",
",",
"attentions",
"=",
"all_attentions",
")"
] | [
656,
4
] | [
783,
9
] | python | cy | ['en', 'cy', 'hi'] | False |
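The encoder loop above implements LayerDrop (https://arxiv.org/abs/1909.11556): during training, each layer is skipped independently with probability self.layerdrop. A stripped-down sketch of just that mechanism, assuming layers that take and return a single tensor (the real layers also take attention and head masks):

import random
import torch.nn as nn

def run_with_layerdrop(layers: nn.ModuleList, hidden_states, layerdrop: float, training: bool):
    for layer in layers:
        # Each layer is dropped with probability `layerdrop`, at training time only.
        if training and random.uniform(0, 1) < layerdrop:
            continue
        hidden_states = layer(hidden_states)
    return hidden_states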
MarianDecoder.forward | (
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) | r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MarianTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
| r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it. | def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MarianTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
encoder_head_mask[idx] if encoder_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
) | [
"def",
"forward",
"(",
"self",
",",
"input_ids",
"=",
"None",
",",
"attention_mask",
"=",
"None",
",",
"encoder_hidden_states",
"=",
"None",
",",
"encoder_attention_mask",
"=",
"None",
",",
"head_mask",
"=",
"None",
",",
"encoder_head_mask",
"=",
"None",
",",
"past_key_values",
"=",
"None",
",",
"inputs_embeds",
"=",
"None",
",",
"use_cache",
"=",
"None",
",",
"output_attentions",
"=",
"None",
",",
"output_hidden_states",
"=",
"None",
",",
"return_dict",
"=",
"None",
",",
")",
":",
"output_attentions",
"=",
"output_attentions",
"if",
"output_attentions",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_attentions",
"output_hidden_states",
"=",
"(",
"output_hidden_states",
"if",
"output_hidden_states",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_hidden_states",
")",
"use_cache",
"=",
"use_cache",
"if",
"use_cache",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_cache",
"return_dict",
"=",
"return_dict",
"if",
"return_dict",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_return_dict",
"# retrieve input_ids and inputs_embeds",
"if",
"input_ids",
"is",
"not",
"None",
"and",
"inputs_embeds",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\"",
")",
"elif",
"input_ids",
"is",
"not",
"None",
":",
"input_shape",
"=",
"input_ids",
".",
"size",
"(",
")",
"input_ids",
"=",
"input_ids",
".",
"view",
"(",
"-",
"1",
",",
"input_shape",
"[",
"-",
"1",
"]",
")",
"elif",
"inputs_embeds",
"is",
"not",
"None",
":",
"input_shape",
"=",
"inputs_embeds",
".",
"size",
"(",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"You have to specify either decoder_input_ids or decoder_inputs_embeds\"",
")",
"# past_key_values_length",
"past_key_values_length",
"=",
"past_key_values",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"shape",
"[",
"2",
"]",
"if",
"past_key_values",
"is",
"not",
"None",
"else",
"0",
"if",
"inputs_embeds",
"is",
"None",
":",
"inputs_embeds",
"=",
"self",
".",
"embed_tokens",
"(",
"input_ids",
")",
"*",
"self",
".",
"embed_scale",
"attention_mask",
"=",
"self",
".",
"_prepare_decoder_attention_mask",
"(",
"attention_mask",
",",
"input_shape",
",",
"inputs_embeds",
",",
"past_key_values_length",
")",
"# expand encoder attention mask",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
"and",
"encoder_attention_mask",
"is",
"not",
"None",
":",
"# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]",
"encoder_attention_mask",
"=",
"_expand_mask",
"(",
"encoder_attention_mask",
",",
"inputs_embeds",
".",
"dtype",
",",
"tgt_len",
"=",
"input_shape",
"[",
"-",
"1",
"]",
")",
"# embed positions",
"positions",
"=",
"self",
".",
"embed_positions",
"(",
"input_shape",
",",
"past_key_values_length",
")",
"hidden_states",
"=",
"inputs_embeds",
"+",
"positions",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"# decoder layers",
"all_hidden_states",
"=",
"(",
")",
"if",
"output_hidden_states",
"else",
"None",
"all_self_attns",
"=",
"(",
")",
"if",
"output_attentions",
"else",
"None",
"all_cross_attentions",
"=",
"(",
")",
"if",
"(",
"output_attentions",
"and",
"encoder_hidden_states",
"is",
"not",
"None",
")",
"else",
"None",
"next_decoder_cache",
"=",
"(",
")",
"if",
"use_cache",
"else",
"None",
"# check if head_mask has a correct number of layers specified if desired",
"if",
"head_mask",
"is",
"not",
"None",
":",
"assert",
"head_mask",
".",
"size",
"(",
")",
"[",
"0",
"]",
"==",
"(",
"len",
"(",
"self",
".",
"layers",
")",
")",
",",
"f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"",
"for",
"idx",
",",
"decoder_layer",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
":",
"# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)",
"if",
"output_hidden_states",
":",
"all_hidden_states",
"+=",
"(",
"hidden_states",
",",
")",
"dropout_probability",
"=",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"if",
"self",
".",
"training",
"and",
"(",
"dropout_probability",
"<",
"self",
".",
"layerdrop",
")",
":",
"continue",
"past_key_value",
"=",
"past_key_values",
"[",
"idx",
"]",
"if",
"past_key_values",
"is",
"not",
"None",
"else",
"None",
"if",
"getattr",
"(",
"self",
".",
"config",
",",
"\"gradient_checkpointing\"",
",",
"False",
")",
"and",
"self",
".",
"training",
":",
"if",
"use_cache",
":",
"logger",
".",
"warn",
"(",
"\"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"",
"\"`use_cache=False`...\"",
")",
"use_cache",
"=",
"False",
"def",
"create_custom_forward",
"(",
"module",
")",
":",
"def",
"custom_forward",
"(",
"*",
"inputs",
")",
":",
"# None for past_key_value",
"return",
"module",
"(",
"*",
"inputs",
",",
"output_attentions",
",",
"use_cache",
")",
"return",
"custom_forward",
"layer_outputs",
"=",
"torch",
".",
"utils",
".",
"checkpoint",
".",
"checkpoint",
"(",
"create_custom_forward",
"(",
"decoder_layer",
")",
",",
"hidden_states",
",",
"attention_mask",
",",
"encoder_hidden_states",
",",
"encoder_attention_mask",
",",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
",",
"encoder_head_mask",
"[",
"idx",
"]",
"if",
"encoder_head_mask",
"is",
"not",
"None",
"else",
"None",
",",
"None",
",",
")",
"else",
":",
"layer_outputs",
"=",
"decoder_layer",
"(",
"hidden_states",
",",
"attention_mask",
"=",
"attention_mask",
",",
"encoder_hidden_states",
"=",
"encoder_hidden_states",
",",
"encoder_attention_mask",
"=",
"encoder_attention_mask",
",",
"layer_head_mask",
"=",
"(",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
"encoder_layer_head_mask",
"=",
"(",
"encoder_head_mask",
"[",
"idx",
"]",
"if",
"encoder_head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
"past_key_value",
"=",
"past_key_value",
",",
"output_attentions",
"=",
"output_attentions",
",",
"use_cache",
"=",
"use_cache",
",",
")",
"hidden_states",
"=",
"layer_outputs",
"[",
"0",
"]",
"if",
"use_cache",
":",
"next_decoder_cache",
"+=",
"(",
"layer_outputs",
"[",
"3",
"if",
"output_attentions",
"else",
"1",
"]",
",",
")",
"if",
"output_attentions",
":",
"all_self_attns",
"+=",
"(",
"layer_outputs",
"[",
"1",
"]",
",",
")",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
":",
"all_cross_attentions",
"+=",
"(",
"layer_outputs",
"[",
"2",
"]",
",",
")",
"# add hidden states from the last decoder layer",
"if",
"output_hidden_states",
":",
"all_hidden_states",
"+=",
"(",
"hidden_states",
",",
")",
"next_cache",
"=",
"next_decoder_cache",
"if",
"use_cache",
"else",
"None",
"if",
"not",
"return_dict",
":",
"return",
"tuple",
"(",
"v",
"for",
"v",
"in",
"[",
"hidden_states",
",",
"next_cache",
",",
"all_hidden_states",
",",
"all_self_attns",
",",
"all_cross_attentions",
"]",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"BaseModelOutputWithPastAndCrossAttentions",
"(",
"last_hidden_state",
"=",
"hidden_states",
",",
"past_key_values",
"=",
"next_cache",
",",
"hidden_states",
"=",
"all_hidden_states",
",",
"attentions",
"=",
"all_self_attns",
",",
"cross_attentions",
"=",
"all_cross_attentions",
",",
")"
] | [
841,
4
] | [
1047,
9
] | python | cy | ['en', 'cy', 'hi'] | False |
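The decoder above derives past_key_values_length from past_key_values[0][0].shape[2], which is what makes incremental decoding work: after the first step, callers pass only the newest token and the cache supplies the rest. A hedged sketch of that caching contract (greedy_step is a hypothetical helper; it assumes the decoder signature shown above with return_dict=True):

import torch

def greedy_step(decoder, input_ids, encoder_hidden_states, past_key_values=None):
    if past_key_values is not None:
        input_ids = input_ids[:, -1:]  # only the newest token; earlier positions are cached
    out = decoder(
        input_ids=input_ids,
        encoder_hidden_states=encoder_hidden_states,
        past_key_values=past_key_values,
        use_cache=True,
        return_dict=True,
    )
    return out.last_hidden_state, out.past_key_values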
MarianForCausalLM.forward | (
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) | r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MarianTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import MarianTokenizer, MarianForCausalLM
>>> tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-fr-en')
>>> model = MarianForCausalLM.from_pretrained('Helsinki-NLP/opus-mt-fr-en', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
| r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it. | def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.MarianTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import MarianTokenizer, MarianForCausalLM
>>> tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-fr-en')
>>> model = MarianForCausalLM.from_pretrained('Helsinki-NLP/opus-mt-fr-en', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
encoder_head_mask=encoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
) | [
"def",
"forward",
"(",
"self",
",",
"input_ids",
"=",
"None",
",",
"attention_mask",
"=",
"None",
",",
"encoder_hidden_states",
"=",
"None",
",",
"encoder_attention_mask",
"=",
"None",
",",
"head_mask",
"=",
"None",
",",
"encoder_head_mask",
"=",
"None",
",",
"past_key_values",
"=",
"None",
",",
"inputs_embeds",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"use_cache",
"=",
"None",
",",
"output_attentions",
"=",
"None",
",",
"output_hidden_states",
"=",
"None",
",",
"return_dict",
"=",
"None",
",",
")",
":",
"output_attentions",
"=",
"output_attentions",
"if",
"output_attentions",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_attentions",
"output_hidden_states",
"=",
"(",
"output_hidden_states",
"if",
"output_hidden_states",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_hidden_states",
")",
"return_dict",
"=",
"return_dict",
"if",
"return_dict",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_return_dict",
"# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)",
"outputs",
"=",
"self",
".",
"model",
".",
"decoder",
"(",
"input_ids",
"=",
"input_ids",
",",
"attention_mask",
"=",
"attention_mask",
",",
"encoder_hidden_states",
"=",
"encoder_hidden_states",
",",
"encoder_attention_mask",
"=",
"encoder_attention_mask",
",",
"head_mask",
"=",
"head_mask",
",",
"encoder_head_mask",
"=",
"encoder_head_mask",
",",
"past_key_values",
"=",
"past_key_values",
",",
"inputs_embeds",
"=",
"inputs_embeds",
",",
"use_cache",
"=",
"use_cache",
",",
"output_attentions",
"=",
"output_attentions",
",",
"output_hidden_states",
"=",
"output_hidden_states",
",",
"return_dict",
"=",
"return_dict",
",",
")",
"logits",
"=",
"self",
".",
"lm_head",
"(",
"outputs",
"[",
"0",
"]",
")",
"loss",
"=",
"None",
"if",
"labels",
"is",
"not",
"None",
":",
"loss_fct",
"=",
"CrossEntropyLoss",
"(",
")",
"loss",
"=",
"loss_fct",
"(",
"logits",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"config",
".",
"vocab_size",
")",
",",
"labels",
".",
"view",
"(",
"-",
"1",
")",
")",
"if",
"not",
"return_dict",
":",
"output",
"=",
"(",
"logits",
",",
")",
"+",
"outputs",
"[",
"1",
":",
"]",
"return",
"(",
"loss",
",",
")",
"+",
"output",
"if",
"loss",
"is",
"not",
"None",
"else",
"output",
"return",
"CausalLMOutputWithCrossAttentions",
"(",
"loss",
"=",
"loss",
",",
"logits",
"=",
"logits",
",",
"past_key_values",
"=",
"outputs",
".",
"past_key_values",
",",
"hidden_states",
"=",
"outputs",
".",
"hidden_states",
",",
"attentions",
"=",
"outputs",
".",
"attentions",
",",
"cross_attentions",
"=",
"outputs",
".",
"cross_attentions",
",",
")"
] | [
1390,
4
] | [
1525,
9
] | python | cy | ['en', 'cy', 'hi'] | False |
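The loss computation above flattens (batch, seq, vocab) logits against (batch, seq) labels; CrossEntropyLoss ignores positions labeled -100 by default, which is how padded or masked targets drop out of the loss. A tiny runnable illustration with made-up shapes:

import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
logits = torch.randn(2, 4, vocab_size)
labels = torch.tensor([[3, 5, -100, 1], [2, -100, -100, 7]])
loss = CrossEntropyLoss()(logits.view(-1, vocab_size), labels.view(-1))
print(loss.item())  # averaged over the five non-ignored positions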
XLNetTokenizer._tokenize | (self, text, sample=False) | Tokenize a string. | Tokenize a string. | def _tokenize(self, text, sample=False):
""" Tokenize a string. """
text = self.preprocess_text(text)
if not sample:
pieces = self.sp_model.EncodeAsPieces(text)
else:
pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces | [
"def",
"_tokenize",
"(",
"self",
",",
"text",
",",
"sample",
"=",
"False",
")",
":",
"text",
"=",
"self",
".",
"preprocess_text",
"(",
"text",
")",
"if",
"not",
"sample",
":",
"pieces",
"=",
"self",
".",
"sp_model",
".",
"EncodeAsPieces",
"(",
"text",
")",
"else",
":",
"pieces",
"=",
"self",
".",
"sp_model",
".",
"SampleEncodeAsPieces",
"(",
"text",
",",
"64",
",",
"0.1",
")",
"new_pieces",
"=",
"[",
"]",
"for",
"piece",
"in",
"pieces",
":",
"if",
"len",
"(",
"piece",
")",
">",
"1",
"and",
"piece",
"[",
"-",
"1",
"]",
"==",
"str",
"(",
"\",\"",
")",
"and",
"piece",
"[",
"-",
"2",
"]",
".",
"isdigit",
"(",
")",
":",
"cur_pieces",
"=",
"self",
".",
"sp_model",
".",
"EncodeAsPieces",
"(",
"piece",
"[",
":",
"-",
"1",
"]",
".",
"replace",
"(",
"SPIECE_UNDERLINE",
",",
"\"\"",
")",
")",
"if",
"piece",
"[",
"0",
"]",
"!=",
"SPIECE_UNDERLINE",
"and",
"cur_pieces",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"SPIECE_UNDERLINE",
":",
"if",
"len",
"(",
"cur_pieces",
"[",
"0",
"]",
")",
"==",
"1",
":",
"cur_pieces",
"=",
"cur_pieces",
"[",
"1",
":",
"]",
"else",
":",
"cur_pieces",
"[",
"0",
"]",
"=",
"cur_pieces",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"cur_pieces",
".",
"append",
"(",
"piece",
"[",
"-",
"1",
"]",
")",
"new_pieces",
".",
"extend",
"(",
"cur_pieces",
")",
"else",
":",
"new_pieces",
".",
"append",
"(",
"piece",
")",
"return",
"new_pieces"
] | [
190,
4
] | [
212,
25
] | python | en | ['en', 'gl', 'en'] | True |
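A hedged usage sketch of the digit/comma rule above (the checkpoint name and the exact pieces are assumptions; real output depends on the loaded sentencepiece model):

    from transformers import XLNetTokenizer

    tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    # A piece such as "1950," (digit followed by a trailing comma) is re-split
    # by _tokenize so the comma becomes its own piece.
    print(tok.tokenize("Born in 1950, she moved abroad"))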
XLNetTokenizer._convert_token_to_id | (self, token) | Converts a token (str) to an id using the vocab. | Converts a token (str) to an id using the vocab. | def _convert_token_to_id(self, token):
"""Converts a token (str) to an id using the vocab."""
return self.sp_model.PieceToId(token) | [
"def",
"_convert_token_to_id",
"(",
"self",
",",
"token",
")",
":",
"return",
"self",
".",
"sp_model",
".",
"PieceToId",
"(",
"token",
")"
] | [
214,
4
] | [
216,
45
] | python | en | ['en', 'en', 'en'] | True |
XLNetTokenizer._convert_id_to_token | (self, index) | Converts an index (integer) to a token (str) using the vocab. | Converts an index (integer) to a token (str) using the vocab. | def _convert_id_to_token(self, index):
"""Converts an index (integer) to a token (str) using the vocab."""
return self.sp_model.IdToPiece(index) | [
"def",
"_convert_id_to_token",
"(",
"self",
",",
"index",
")",
":",
"return",
"self",
".",
"sp_model",
".",
"IdToPiece",
"(",
"index",
")"
] | [
218,
4
] | [
220,
45
] | python | en | ['en', 'en', 'en'] | True |
XLNetTokenizer.convert_tokens_to_string | (self, tokens) | Converts a sequence of tokens (strings for sub-words) into a single string. | Converts a sequence of tokens (strings for sub-words) into a single string. | def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) into a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string | [
"def",
"convert_tokens_to_string",
"(",
"self",
",",
"tokens",
")",
":",
"out_string",
"=",
"\"\"",
".",
"join",
"(",
"tokens",
")",
".",
"replace",
"(",
"SPIECE_UNDERLINE",
",",
"\" \"",
")",
".",
"strip",
"(",
")",
"return",
"out_string"
] | [
222,
4
] | [
225,
25
] | python | en | ['en', 'en', 'en'] | True |
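Worked example of the reconstruction above, where SPIECE_UNDERLINE is the sentencepiece "▁" marker:

    pieces = ["▁Hello", ",", "▁world"]
    # "".join(pieces)     -> "▁Hello,▁world"
    # .replace("▁", " ")  -> " Hello, world"
    # .strip()            -> "Hello, world"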
XLNetTokenizer.build_inputs_with_special_tokens | (
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) |
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLNet sequence has the following format:
- single sequence: ``X <sep> <cls>``
- pair of sequences: ``A <sep> B <sep> <cls>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLNet sequence has the following format: | def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLNet sequence has the following format:
- single sequence: ``X <sep> <cls>``
- pair of sequences: ``A <sep> B <sep> <cls>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls | [
"def",
"build_inputs_with_special_tokens",
"(",
"self",
",",
"token_ids_0",
":",
"List",
"[",
"int",
"]",
",",
"token_ids_1",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"int",
"]",
":",
"sep",
"=",
"[",
"self",
".",
"sep_token_id",
"]",
"cls",
"=",
"[",
"self",
".",
"cls_token_id",
"]",
"if",
"token_ids_1",
"is",
"None",
":",
"return",
"token_ids_0",
"+",
"sep",
"+",
"cls",
"return",
"token_ids_0",
"+",
"sep",
"+",
"token_ids_1",
"+",
"sep",
"+",
"cls"
] | [
227,
4
] | [
250,
58
] | python | en | ['en', 'error', 'th'] | False |
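A minimal sketch of the two layouts produced above, using placeholder ids (the real sep/cls ids come from the tokenizer vocabulary):

    token_ids_0, token_ids_1 = [10, 11, 12], [20, 21]
    sep, cls = [4], [3]  # hypothetical special-token ids
    token_ids_0 + sep + cls                      # X <sep> <cls>
    token_ids_0 + sep + token_ids_1 + sep + cls  # A <sep> B <sep> <cls>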
XLNetTokenizer.get_special_tokens_mask | (
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) |
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method. | def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
return ([0] * len(token_ids_0)) + [1, 1] | [
"def",
"get_special_tokens_mask",
"(",
"self",
",",
"token_ids_0",
":",
"List",
"[",
"int",
"]",
",",
"token_ids_1",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
",",
"already_has_special_tokens",
":",
"bool",
"=",
"False",
")",
"->",
"List",
"[",
"int",
"]",
":",
"if",
"already_has_special_tokens",
":",
"if",
"token_ids_1",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You should not supply a second sequence if the provided sequence of \"",
"\"ids is already formatted with special tokens for the model.\"",
")",
"return",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"1",
"if",
"x",
"in",
"[",
"self",
".",
"sep_token_id",
",",
"self",
".",
"cls_token_id",
"]",
"else",
"0",
",",
"token_ids_0",
")",
")",
"if",
"token_ids_1",
"is",
"not",
"None",
":",
"return",
"(",
"[",
"0",
"]",
"*",
"len",
"(",
"token_ids_0",
")",
")",
"+",
"[",
"1",
"]",
"+",
"(",
"[",
"0",
"]",
"*",
"len",
"(",
"token_ids_1",
")",
")",
"+",
"[",
"1",
",",
"1",
"]",
"return",
"(",
"[",
"0",
"]",
"*",
"len",
"(",
"token_ids_0",
")",
")",
"+",
"[",
"1",
",",
"1",
"]"
] | [
252,
4
] | [
281,
48
] | python | en | ['en', 'error', 'th'] | False |
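For the same placeholder pair (3 + 2 ids), the mask marks only the appended special tokens:

    # ([0] * 3) + [1] + ([0] * 2) + [1, 1]
    # -> [0, 0, 0, 1, 0, 0, 1, 1]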
XLNetTokenizer.create_token_type_ids_from_sequences | (
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) |
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
sequence pair mask has the following format: | def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0] + cls_segment_id
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id | [
"def",
"create_token_type_ids_from_sequences",
"(",
"self",
",",
"token_ids_0",
":",
"List",
"[",
"int",
"]",
",",
"token_ids_1",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"int",
"]",
":",
"sep",
"=",
"[",
"self",
".",
"sep_token_id",
"]",
"cls_segment_id",
"=",
"[",
"2",
"]",
"if",
"token_ids_1",
"is",
"None",
":",
"return",
"len",
"(",
"token_ids_0",
"+",
"sep",
")",
"*",
"[",
"0",
"]",
"+",
"cls_segment_id",
"return",
"len",
"(",
"token_ids_0",
"+",
"sep",
")",
"*",
"[",
"0",
"]",
"+",
"len",
"(",
"token_ids_1",
"+",
"sep",
")",
"*",
"[",
"1",
"]",
"+",
"cls_segment_id"
] | [
283,
4
] | [
312,
91
] | python | en | ['en', 'error', 'th'] | False |
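Worked example for the same 3 + 2 pair; note XLNet puts the cls token in its own segment (id 2):

    # len(token_ids_0 + sep) * [0] -> [0, 0, 0, 0]
    # len(token_ids_1 + sep) * [1] -> [1, 1, 1]
    # + cls_segment_id             -> [0, 0, 0, 0, 1, 1, 1, 2]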
create_learning_rate_scheduler | (
factors="constant * linear_warmup * rsqrt_decay",
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000,
) | Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {"learning_rate": float}, the
step-dependent lr.
| Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {"learning_rate": float}, the
step-dependent lr.
| def create_learning_rate_scheduler(
factors="constant * linear_warmup * rsqrt_decay",
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000,
):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {"learning_rate": float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split("*")]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == "constant":
ret *= base_learning_rate
elif name == "linear_warmup":
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == "rsqrt_decay":
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == "rsqrt_normalized_decay":
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == "decay_every":
ret *= decay_factor ** (step // steps_per_decay)
elif name == "cosine_decay":
progress = jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError("Unknown factor %s." % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn | [
"def",
"create_learning_rate_scheduler",
"(",
"factors",
"=",
"\"constant * linear_warmup * rsqrt_decay\"",
",",
"base_learning_rate",
"=",
"0.5",
",",
"warmup_steps",
"=",
"1000",
",",
"decay_factor",
"=",
"0.5",
",",
"steps_per_decay",
"=",
"20000",
",",
"steps_per_cycle",
"=",
"100000",
",",
")",
":",
"factors",
"=",
"[",
"n",
".",
"strip",
"(",
")",
"for",
"n",
"in",
"factors",
".",
"split",
"(",
"\"*\"",
")",
"]",
"def",
"step_fn",
"(",
"step",
")",
":",
"\"\"\"Step to learning rate function.\"\"\"",
"ret",
"=",
"1.0",
"for",
"name",
"in",
"factors",
":",
"if",
"name",
"==",
"\"constant\"",
":",
"ret",
"*=",
"base_learning_rate",
"elif",
"name",
"==",
"\"linear_warmup\"",
":",
"ret",
"*=",
"jnp",
".",
"minimum",
"(",
"1.0",
",",
"step",
"/",
"warmup_steps",
")",
"elif",
"name",
"==",
"\"rsqrt_decay\"",
":",
"ret",
"/=",
"jnp",
".",
"sqrt",
"(",
"jnp",
".",
"maximum",
"(",
"step",
",",
"warmup_steps",
")",
")",
"elif",
"name",
"==",
"\"rsqrt_normalized_decay\"",
":",
"ret",
"*=",
"jnp",
".",
"sqrt",
"(",
"warmup_steps",
")",
"ret",
"/=",
"jnp",
".",
"sqrt",
"(",
"jnp",
".",
"maximum",
"(",
"step",
",",
"warmup_steps",
")",
")",
"elif",
"name",
"==",
"\"decay_every\"",
":",
"ret",
"*=",
"decay_factor",
"**",
"(",
"step",
"//",
"steps_per_decay",
")",
"elif",
"name",
"==",
"\"cosine_decay\"",
":",
"progress",
"=",
"jnp",
".",
"maximum",
"(",
"0.0",
",",
"(",
"step",
"-",
"warmup_steps",
")",
"/",
"float",
"(",
"steps_per_cycle",
")",
")",
"ret",
"*=",
"jnp",
".",
"maximum",
"(",
"0.0",
",",
"0.5",
"*",
"(",
"1.0",
"+",
"jnp",
".",
"cos",
"(",
"jnp",
".",
"pi",
"*",
"(",
"progress",
"%",
"1.0",
")",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown factor %s.\"",
"%",
"name",
")",
"return",
"jnp",
".",
"asarray",
"(",
"ret",
",",
"dtype",
"=",
"jnp",
".",
"float32",
")",
"return",
"step_fn"
] | [
276,
0
] | [
327,
18
] | python | en | ['en', 'en', 'en'] | True |
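Usage sketch (values are illustrative; jax must be installed, with jnp the usual jax.numpy alias assumed by the code above):

    lr_fn = create_learning_rate_scheduler(
        factors="constant * linear_warmup * rsqrt_decay",
        base_learning_rate=0.5,
        warmup_steps=1000,
    )
    lr_fn(10)    # warmup region: 0.5 * (10 / 1000) / sqrt(1000)
    lr_fn(4000)  # past warmup:   0.5 / sqrt(4000)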
compute_metrics | (logits, labels, weights, label_smoothing=0.0) | Compute summary metrics. | Compute summary metrics. | def compute_metrics(logits, labels, weights, label_smoothing=0.0):
"""Compute summary metrics."""
loss, normalizer = cross_entropy(logits, labels, weights, label_smoothing)
acc, _ = accuracy(logits, labels, weights)
metrics = {"loss": loss, "accuracy": acc, "normalizer": normalizer}
metrics = jax.lax.psum(metrics, axis_name="batch")
return metrics | [
"def",
"compute_metrics",
"(",
"logits",
",",
"labels",
",",
"weights",
",",
"label_smoothing",
"=",
"0.0",
")",
":",
"loss",
",",
"normalizer",
"=",
"cross_entropy",
"(",
"logits",
",",
"labels",
",",
"weights",
",",
"label_smoothing",
")",
"acc",
",",
"_",
"=",
"accuracy",
"(",
"logits",
",",
"labels",
",",
"weights",
")",
"metrics",
"=",
"{",
"\"loss\"",
":",
"loss",
",",
"\"accuracy\"",
":",
"acc",
",",
"\"normalizer\"",
":",
"normalizer",
"}",
"metrics",
"=",
"jax",
".",
"lax",
".",
"psum",
"(",
"metrics",
",",
"axis_name",
"=",
"\"batch\"",
")",
"return",
"metrics"
] | [
330,
0
] | [
336,
18
] | python | en | ['en', 'et', 'en'] | True |
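compute_metrics calls jax.lax.psum with axis_name="batch", so it only runs inside a collective context; a minimal sketch assuming the surrounding script pmaps its step functions:

    import jax

    # eval_step (defined below in this file) calls compute_metrics internally
    p_eval_step = jax.pmap(eval_step, axis_name="batch")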
accuracy | (logits, targets, weights=None) | Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
Returns:
Tuple of scalar loss and batch normalizing factor.
| Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
Returns:
Tuple of scalar loss and batch normalizing factor.
| def accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(
"Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape))
)
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
# The docstring allows weights=None, so guard against it (mirrors cross_entropy below)
if weights is not None:
    loss = loss * weights
    normalizing_factor = weights.sum()
else:
    normalizing_factor = np.prod(targets.shape)
return loss.sum(), normalizing_factor | [
"def",
"accuracy",
"(",
"logits",
",",
"targets",
",",
"weights",
"=",
"None",
")",
":",
"if",
"logits",
".",
"ndim",
"!=",
"targets",
".",
"ndim",
"+",
"1",
":",
"raise",
"ValueError",
"(",
"\"Incorrect shapes. Got shape %s logits and %s targets\"",
"%",
"(",
"str",
"(",
"logits",
".",
"shape",
")",
",",
"str",
"(",
"targets",
".",
"shape",
")",
")",
")",
"loss",
"=",
"jnp",
".",
"equal",
"(",
"jnp",
".",
"argmax",
"(",
"logits",
",",
"axis",
"=",
"-",
"1",
")",
",",
"targets",
")",
"loss",
"*=",
"weights",
"return",
"loss",
".",
"sum",
"(",
")",
",",
"weights",
".",
"sum",
"(",
")"
] | [
339,
0
] | [
356,
36
] | python | en | ['en', 'en', 'en'] | True |
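Small worked example of the weighted accuracy (shapes follow the docstring):

    import jax.numpy as jnp

    logits = jnp.array([[[0.1, 0.9], [0.8, 0.2]]])  # [batch=1, length=2, classes=2]
    targets = jnp.array([[1, 1]])                   # second prediction is wrong...
    weights = jnp.array([[1.0, 0.0]])               # ...but it is masked out
    correct, norm = accuracy(logits, targets, weights)
    # correct == 1.0, norm == 1.0 -> weighted accuracy of 100%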
cross_entropy | (logits, targets, weights=None, label_smoothing=0.0) | Compute cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
label_smoothing: label smoothing constant, used to determine the on and off values.
Returns:
Tuple of scalar loss and batch normalizing factor.
| Compute cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
label_smoothing: label smoothing constant, used to determine the on and off values.
Returns:
Tuple of scalar loss and batch normalizing factor.
| def cross_entropy(logits, targets, weights=None, label_smoothing=0.0):
"""Compute cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
label_smoothing: label smoothing constant, used to determine the on and off values.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(
"Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape))
)
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
)
soft_targets = common_utils.onehot(targets, vocab_size, on_value=confidence, off_value=low_confidence)
loss = -jnp.sum(soft_targets * log_softmax(logits), axis=-1)
loss = loss - normalizing_constant
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
else:
normalizing_factor = np.prod(targets.shape)
return loss.sum(), normalizing_factor | [
"def",
"cross_entropy",
"(",
"logits",
",",
"targets",
",",
"weights",
"=",
"None",
",",
"label_smoothing",
"=",
"0.0",
")",
":",
"if",
"logits",
".",
"ndim",
"!=",
"targets",
".",
"ndim",
"+",
"1",
":",
"raise",
"ValueError",
"(",
"\"Incorrect shapes. Got shape %s logits and %s targets\"",
"%",
"(",
"str",
"(",
"logits",
".",
"shape",
")",
",",
"str",
"(",
"targets",
".",
"shape",
")",
")",
")",
"vocab_size",
"=",
"logits",
".",
"shape",
"[",
"-",
"1",
"]",
"confidence",
"=",
"1.0",
"-",
"label_smoothing",
"low_confidence",
"=",
"(",
"1.0",
"-",
"confidence",
")",
"/",
"(",
"vocab_size",
"-",
"1",
")",
"normalizing_constant",
"=",
"-",
"(",
"confidence",
"*",
"jnp",
".",
"log",
"(",
"confidence",
")",
"+",
"(",
"vocab_size",
"-",
"1",
")",
"*",
"low_confidence",
"*",
"jnp",
".",
"log",
"(",
"low_confidence",
"+",
"1e-20",
")",
")",
"soft_targets",
"=",
"common_utils",
".",
"onehot",
"(",
"targets",
",",
"vocab_size",
",",
"on_value",
"=",
"confidence",
",",
"off_value",
"=",
"low_confidence",
")",
"loss",
"=",
"-",
"jnp",
".",
"sum",
"(",
"soft_targets",
"*",
"log_softmax",
"(",
"logits",
")",
",",
"axis",
"=",
"-",
"1",
")",
"loss",
"=",
"loss",
"-",
"normalizing_constant",
"if",
"weights",
"is",
"not",
"None",
":",
"loss",
"=",
"loss",
"*",
"weights",
"normalizing_factor",
"=",
"weights",
".",
"sum",
"(",
")",
"else",
":",
"normalizing_factor",
"=",
"np",
".",
"prod",
"(",
"targets",
".",
"shape",
")",
"return",
"loss",
".",
"sum",
"(",
")",
",",
"normalizing_factor"
] | [
359,
0
] | [
391,
41
] | python | en | ['en', 'en', 'en'] | True |
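Worked check of the label-smoothing construction above:

    # vocab_size = 4, label_smoothing = 0.1
    # confidence = 0.9, low_confidence = 0.1 / 3 ≈ 0.0333
    # target id 2 becomes the soft vector [0.0333, 0.0333, 0.9, 0.0333] (sums to 1)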
eval_step | (params, batch) |
Calculate evaluation metrics on a batch.
|
Calculate evaluation metrics on a batch.
| def eval_step(params, batch):
"""
Calculate evaluation metrics on a batch.
"""
targets = batch.pop("labels")
# Mask out tokens that don't participate in the optimization
token_mask = jnp.where(targets > 0, 1.0, 0.0)
logits = model(**batch, params=params, train=False)[0]
return compute_metrics(logits, targets, token_mask) | [
"def",
"eval_step",
"(",
"params",
",",
"batch",
")",
":",
"targets",
"=",
"batch",
".",
"pop",
"(",
"\"labels\"",
")",
"# Hide away tokens which doesn't participate in the optimization",
"token_mask",
"=",
"jnp",
".",
"where",
"(",
"targets",
">",
"0",
",",
"1.0",
",",
"0.0",
")",
"logits",
"=",
"model",
"(",
"*",
"*",
"batch",
",",
"params",
"=",
"params",
",",
"train",
"=",
"False",
")",
"[",
"0",
"]",
"return",
"compute_metrics",
"(",
"logits",
",",
"targets",
",",
"token_mask",
")"
] | [
417,
0
] | [
427,
55
] | python | en | ['en', 'error', 'th'] | False |
get_service | (hass, config, discovery_info=None) | Get the Mastodon notification service. | Get the Mastodon notification service. | def get_service(hass, config, discovery_info=None):
"""Get the Mastodon notification service."""
client_id = config.get(CONF_CLIENT_ID)
client_secret = config.get(CONF_CLIENT_SECRET)
access_token = config.get(CONF_ACCESS_TOKEN)
base_url = config.get(CONF_BASE_URL)
try:
mastodon = Mastodon(
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
api_base_url=base_url,
)
mastodon.account_verify_credentials()
except MastodonUnauthorizedError:
_LOGGER.warning("Authentication failed")
return None
return MastodonNotificationService(mastodon) | [
"def",
"get_service",
"(",
"hass",
",",
"config",
",",
"discovery_info",
"=",
"None",
")",
":",
"client_id",
"=",
"config",
".",
"get",
"(",
"CONF_CLIENT_ID",
")",
"client_secret",
"=",
"config",
".",
"get",
"(",
"CONF_CLIENT_SECRET",
")",
"access_token",
"=",
"config",
".",
"get",
"(",
"CONF_ACCESS_TOKEN",
")",
"base_url",
"=",
"config",
".",
"get",
"(",
"CONF_BASE_URL",
")",
"try",
":",
"mastodon",
"=",
"Mastodon",
"(",
"client_id",
"=",
"client_id",
",",
"client_secret",
"=",
"client_secret",
",",
"access_token",
"=",
"access_token",
",",
"api_base_url",
"=",
"base_url",
",",
")",
"mastodon",
".",
"account_verify_credentials",
"(",
")",
"except",
"MastodonUnauthorizedError",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Authentication failed\"",
")",
"return",
"None",
"return",
"MastodonNotificationService",
"(",
"mastodon",
")"
] | [
27,
0
] | [
46,
48
] | python | en | ['en', 'fi', 'en'] | True |
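The service is a thin wrapper over Mastodon.py; a hedged stand-alone sketch of the same calls (credentials and base URL are placeholders):

    from mastodon import Mastodon

    client = Mastodon(
        client_id="placeholder-id",
        client_secret="placeholder-secret",
        access_token="placeholder-token",
        api_base_url="https://mastodon.example",
    )
    client.account_verify_credentials()  # raises MastodonUnauthorizedError on bad credentials
    client.toot("Hello from Home Assistant")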
MastodonNotificationService.__init__ | (self, api) | Initialize the service. | Initialize the service. | def __init__(self, api):
"""Initialize the service."""
self._api = api | [
"def",
"__init__",
"(",
"self",
",",
"api",
")",
":",
"self",
".",
"_api",
"=",
"api"
] | [
52,
4
] | [
54,
23
] | python | en | ['en', 'en', 'en'] | True |
MastodonNotificationService.send_message | (self, message="", **kwargs) | Send a message to a user. | Send a message to a user. | def send_message(self, message="", **kwargs):
"""Send a message to a user."""
try:
self._api.toot(message)
except MastodonAPIError:
_LOGGER.error("Unable to send message") | [
"def",
"send_message",
"(",
"self",
",",
"message",
"=",
"\"\"",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"self",
".",
"_api",
".",
"toot",
"(",
"message",
")",
"except",
"MastodonAPIError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Unable to send message\"",
")"
] | [
56,
4
] | [
61,
51
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_setup_us | (self, mock_ht, mock_sc) | Test for the US setup. | Test for the US setup. | def test_setup_us(self, mock_ht, mock_sc):
"""Test for the US setup."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "us",
}
bad_pass_config = {CONF_USERNAME: "user", honeywell.CONF_REGION: "us"}
bad_region_config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "un",
}
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(None)
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA({})
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(bad_pass_config)
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(bad_region_config)
hass = mock.MagicMock()
add_entities = mock.MagicMock()
locations = [mock.MagicMock(), mock.MagicMock()]
devices_1 = [mock.MagicMock()]
devices_2 = [mock.MagicMock(), mock.MagicMock()]
mock_sc.return_value.locations_by_id.values.return_value = locations
locations[0].devices_by_id.values.return_value = devices_1
locations[1].devices_by_id.values.return_value = devices_2
result = honeywell.setup_platform(hass, config, add_entities)
assert result
assert mock_sc.call_count == 1
assert mock_sc.call_args == mock.call("user", "pass")
mock_ht.assert_has_calls(
[
mock.call(mock_sc.return_value, devices_1[0], 18, 28, "user", "pass"),
mock.call(mock_sc.return_value, devices_2[0], 18, 28, "user", "pass"),
mock.call(mock_sc.return_value, devices_2[1], 18, 28, "user", "pass"),
]
) | [
"def",
"test_setup_us",
"(",
"self",
",",
"mock_ht",
",",
"mock_sc",
")",
":",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"us\"",
",",
"}",
"bad_pass_config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"us\"",
"}",
"bad_region_config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"un\"",
",",
"}",
"with",
"pytest",
".",
"raises",
"(",
"vol",
".",
"Invalid",
")",
":",
"honeywell",
".",
"PLATFORM_SCHEMA",
"(",
"None",
")",
"with",
"pytest",
".",
"raises",
"(",
"vol",
".",
"Invalid",
")",
":",
"honeywell",
".",
"PLATFORM_SCHEMA",
"(",
"{",
"}",
")",
"with",
"pytest",
".",
"raises",
"(",
"vol",
".",
"Invalid",
")",
":",
"honeywell",
".",
"PLATFORM_SCHEMA",
"(",
"bad_pass_config",
")",
"with",
"pytest",
".",
"raises",
"(",
"vol",
".",
"Invalid",
")",
":",
"honeywell",
".",
"PLATFORM_SCHEMA",
"(",
"bad_region_config",
")",
"hass",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"add_entities",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"locations",
"=",
"[",
"mock",
".",
"MagicMock",
"(",
")",
",",
"mock",
".",
"MagicMock",
"(",
")",
"]",
"devices_1",
"=",
"[",
"mock",
".",
"MagicMock",
"(",
")",
"]",
"devices_2",
"=",
"[",
"mock",
".",
"MagicMock",
"(",
")",
",",
"mock",
".",
"MagicMock",
"]",
"mock_sc",
".",
"return_value",
".",
"locations_by_id",
".",
"values",
".",
"return_value",
"=",
"locations",
"locations",
"[",
"0",
"]",
".",
"devices_by_id",
".",
"values",
".",
"return_value",
"=",
"devices_1",
"locations",
"[",
"1",
"]",
".",
"devices_by_id",
".",
"values",
".",
"return_value",
"=",
"devices_2",
"result",
"=",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")",
"assert",
"result",
"assert",
"mock_sc",
".",
"call_count",
"==",
"1",
"assert",
"mock_sc",
".",
"call_args",
"==",
"mock",
".",
"call",
"(",
"\"user\"",
",",
"\"pass\"",
")",
"mock_ht",
".",
"assert_has_calls",
"(",
"[",
"mock",
".",
"call",
"(",
"mock_sc",
".",
"return_value",
",",
"devices_1",
"[",
"0",
"]",
",",
"18",
",",
"28",
",",
"\"user\"",
",",
"\"pass\"",
")",
",",
"mock",
".",
"call",
"(",
"mock_sc",
".",
"return_value",
",",
"devices_2",
"[",
"0",
"]",
",",
"18",
",",
"28",
",",
"\"user\"",
",",
"\"pass\"",
")",
",",
"mock",
".",
"call",
"(",
"mock_sc",
".",
"return_value",
",",
"devices_2",
"[",
"1",
"]",
",",
"18",
",",
"28",
",",
"\"user\"",
",",
"\"pass\"",
")",
",",
"]",
")"
] | [
30,
4
] | [
76,
9
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_setup_us_failures | (self, mock_sc) | Test the US setup. | Test the US setup. | def test_setup_us_failures(self, mock_sc):
"""Test the US setup."""
hass = mock.MagicMock()
add_entities = mock.MagicMock()
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "us",
}
mock_sc.side_effect = somecomfort.AuthError
result = honeywell.setup_platform(hass, config, add_entities)
assert not result
assert not add_entities.called
mock_sc.side_effect = somecomfort.SomeComfortError
result = honeywell.setup_platform(hass, config, add_entities)
assert not result
assert not add_entities.called | [
"def",
"test_setup_us_failures",
"(",
"self",
",",
"mock_sc",
")",
":",
"hass",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"add_entities",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"us\"",
",",
"}",
"mock_sc",
".",
"side_effect",
"=",
"somecomfort",
".",
"AuthError",
"result",
"=",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")",
"assert",
"not",
"result",
"assert",
"not",
"add_entities",
".",
"called",
"mock_sc",
".",
"side_effect",
"=",
"somecomfort",
".",
"SomeComfortError",
"result",
"=",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")",
"assert",
"not",
"result",
"assert",
"not",
"add_entities",
".",
"called"
] | [
79,
4
] | [
97,
38
] | python | en | ['en', 'haw', 'en'] | True |
TestHoneywell._test_us_filtered_devices | (self, mock_ht, mock_sc, loc=None, dev=None) | Test for US filtered thermostats. | Test for US filtered thermostats. | def _test_us_filtered_devices(self, mock_ht, mock_sc, loc=None, dev=None):
"""Test for US filtered thermostats."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "us",
"location": loc,
"thermostat": dev,
}
locations = {
1: mock.MagicMock(
locationid=mock.sentinel.loc1,
devices_by_id={
11: mock.MagicMock(deviceid=mock.sentinel.loc1dev1),
12: mock.MagicMock(deviceid=mock.sentinel.loc1dev2),
},
),
2: mock.MagicMock(
locationid=mock.sentinel.loc2,
devices_by_id={21: mock.MagicMock(deviceid=mock.sentinel.loc2dev1)},
),
3: mock.MagicMock(
locationid=mock.sentinel.loc3,
devices_by_id={31: mock.MagicMock(deviceid=mock.sentinel.loc3dev1)},
),
}
mock_sc.return_value = mock.MagicMock(locations_by_id=locations)
hass = mock.MagicMock()
add_entities = mock.MagicMock()
assert honeywell.setup_platform(hass, config, add_entities) is True
return mock_ht.call_args_list, mock_sc | [
"def",
"_test_us_filtered_devices",
"(",
"self",
",",
"mock_ht",
",",
"mock_sc",
",",
"loc",
"=",
"None",
",",
"dev",
"=",
"None",
")",
":",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"us\"",
",",
"\"location\"",
":",
"loc",
",",
"\"thermostat\"",
":",
"dev",
",",
"}",
"locations",
"=",
"{",
"1",
":",
"mock",
".",
"MagicMock",
"(",
"locationid",
"=",
"mock",
".",
"sentinel",
".",
"loc1",
",",
"devices_by_id",
"=",
"{",
"11",
":",
"mock",
".",
"MagicMock",
"(",
"deviceid",
"=",
"mock",
".",
"sentinel",
".",
"loc1dev1",
")",
",",
"12",
":",
"mock",
".",
"MagicMock",
"(",
"deviceid",
"=",
"mock",
".",
"sentinel",
".",
"loc1dev2",
")",
",",
"}",
",",
")",
",",
"2",
":",
"mock",
".",
"MagicMock",
"(",
"locationid",
"=",
"mock",
".",
"sentinel",
".",
"loc2",
",",
"devices_by_id",
"=",
"{",
"21",
":",
"mock",
".",
"MagicMock",
"(",
"deviceid",
"=",
"mock",
".",
"sentinel",
".",
"loc2dev1",
")",
"}",
",",
")",
",",
"3",
":",
"mock",
".",
"MagicMock",
"(",
"locationid",
"=",
"mock",
".",
"sentinel",
".",
"loc3",
",",
"devices_by_id",
"=",
"{",
"31",
":",
"mock",
".",
"MagicMock",
"(",
"deviceid",
"=",
"mock",
".",
"sentinel",
".",
"loc3dev1",
")",
"}",
",",
")",
",",
"}",
"mock_sc",
".",
"return_value",
"=",
"mock",
".",
"MagicMock",
"(",
"locations_by_id",
"=",
"locations",
")",
"hass",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"add_entities",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"assert",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")",
"is",
"True",
"return",
"mock_ht",
".",
"call_args_list",
",",
"mock_sc"
] | [
101,
4
] | [
132,
46
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_us_filtered_thermostat_1 | (self) | Test for US filtered thermostats. | Test for US filtered thermostats. | def test_us_filtered_thermostat_1(self):
"""Test for US filtered thermostats."""
result, client = self._test_us_filtered_devices(dev=mock.sentinel.loc1dev1)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc1dev1] == devices | [
"def",
"test_us_filtered_thermostat_1",
"(",
"self",
")",
":",
"result",
",",
"client",
"=",
"self",
".",
"_test_us_filtered_devices",
"(",
"dev",
"=",
"mock",
".",
"sentinel",
".",
"loc1dev1",
")",
"devices",
"=",
"[",
"x",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"deviceid",
"for",
"x",
"in",
"result",
"]",
"assert",
"[",
"mock",
".",
"sentinel",
".",
"loc1dev1",
"]",
"==",
"devices"
] | [
134,
4
] | [
138,
50
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_us_filtered_thermostat_2 | (self) | Test for US filtered location. | Test for US filtered location. | def test_us_filtered_thermostat_2(self):
"""Test for US filtered location."""
result, client = self._test_us_filtered_devices(dev=mock.sentinel.loc2dev1)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc2dev1] == devices | [
"def",
"test_us_filtered_thermostat_2",
"(",
"self",
")",
":",
"result",
",",
"client",
"=",
"self",
".",
"_test_us_filtered_devices",
"(",
"dev",
"=",
"mock",
".",
"sentinel",
".",
"loc2dev1",
")",
"devices",
"=",
"[",
"x",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"deviceid",
"for",
"x",
"in",
"result",
"]",
"assert",
"[",
"mock",
".",
"sentinel",
".",
"loc2dev1",
"]",
"==",
"devices"
] | [
140,
4
] | [
144,
50
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_us_filtered_location_1 | (self) | Test for US filtered locations. | Test for US filtered locations. | def test_us_filtered_location_1(self):
"""Test for US filtered locations."""
result, client = self._test_us_filtered_devices(loc=mock.sentinel.loc1)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc1dev1, mock.sentinel.loc1dev2] == devices | [
"def",
"test_us_filtered_location_1",
"(",
"self",
")",
":",
"result",
",",
"client",
"=",
"self",
".",
"_test_us_filtered_devices",
"(",
"loc",
"=",
"mock",
".",
"sentinel",
".",
"loc1",
")",
"devices",
"=",
"[",
"x",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"deviceid",
"for",
"x",
"in",
"result",
"]",
"assert",
"[",
"mock",
".",
"sentinel",
".",
"loc1dev1",
",",
"mock",
".",
"sentinel",
".",
"loc1dev2",
"]",
"==",
"devices"
] | [
146,
4
] | [
150,
74
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_us_filtered_location_2 | (self) | Test for US filtered locations. | Test for US filtered locations. | def test_us_filtered_location_2(self):
"""Test for US filtered locations."""
result, client = self._test_us_filtered_devices(loc=mock.sentinel.loc2)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc2dev1] == devices | [
"def",
"test_us_filtered_location_2",
"(",
"self",
")",
":",
"result",
",",
"client",
"=",
"self",
".",
"_test_us_filtered_devices",
"(",
"loc",
"=",
"mock",
".",
"sentinel",
".",
"loc2",
")",
"devices",
"=",
"[",
"x",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"deviceid",
"for",
"x",
"in",
"result",
"]",
"assert",
"[",
"mock",
".",
"sentinel",
".",
"loc2dev1",
"]",
"==",
"devices"
] | [
152,
4
] | [
156,
50
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_eu_setup_full_config | (self, mock_round, mock_evo) | Test the EU setup with complete configuration. | Test the EU setup with complete configuration. | def test_eu_setup_full_config(self, mock_round, mock_evo):
"""Test the EU setup with complete configuration."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
mock_evo.return_value.temperatures.return_value = [{"id": "foo"}, {"id": "bar"}]
hass = mock.MagicMock()
add_entities = mock.MagicMock()
assert honeywell.setup_platform(hass, config, add_entities)
assert mock_evo.call_count == 1
assert mock_evo.call_args == mock.call("user", "pass")
assert mock_evo.return_value.temperatures.call_count == 1
assert mock_evo.return_value.temperatures.call_args == mock.call(
force_refresh=True
)
mock_round.assert_has_calls(
[
mock.call(mock_evo.return_value, "foo", True, 20.0),
mock.call(mock_evo.return_value, "bar", False, 20.0),
]
)
assert 2 == add_entities.call_count | [
"def",
"test_eu_setup_full_config",
"(",
"self",
",",
"mock_round",
",",
"mock_evo",
")",
":",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"eu\"",
",",
"}",
"mock_evo",
".",
"return_value",
".",
"temperatures",
".",
"return_value",
"=",
"[",
"{",
"\"id\"",
":",
"\"foo\"",
"}",
",",
"{",
"\"id\"",
":",
"\"bar\"",
"}",
"]",
"hass",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"add_entities",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"assert",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")",
"assert",
"mock_evo",
".",
"call_count",
"==",
"1",
"assert",
"mock_evo",
".",
"call_args",
"==",
"mock",
".",
"call",
"(",
"\"user\"",
",",
"\"pass\"",
")",
"assert",
"mock_evo",
".",
"return_value",
".",
"temperatures",
".",
"call_count",
"==",
"1",
"assert",
"mock_evo",
".",
"return_value",
".",
"temperatures",
".",
"call_args",
"==",
"mock",
".",
"call",
"(",
"force_refresh",
"=",
"True",
")",
"mock_round",
".",
"assert_has_calls",
"(",
"[",
"mock",
".",
"call",
"(",
"mock_evo",
".",
"return_value",
",",
"\"foo\"",
",",
"True",
",",
"20.0",
")",
",",
"mock",
".",
"call",
"(",
"mock_evo",
".",
"return_value",
",",
"\"bar\"",
",",
"False",
",",
"20.0",
")",
",",
"]",
")",
"assert",
"2",
"==",
"add_entities",
".",
"call_count"
] | [
160,
4
] | [
183,
43
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywell.test_eu_setup_partial_config | (self, mock_round, mock_evo) | Test the EU setup with partial configuration. | Test the EU setup with partial configuration. | def test_eu_setup_partial_config(self, mock_round, mock_evo):
"""Test the EU setup with partial configuration."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
mock_evo.return_value.temperatures.return_value = [{"id": "foo"}, {"id": "bar"}]
hass = mock.MagicMock()
add_entities = mock.MagicMock()
assert honeywell.setup_platform(hass, config, add_entities)
mock_round.assert_has_calls(
[
mock.call(mock_evo.return_value, "foo", True, 16),
mock.call(mock_evo.return_value, "bar", False, 16),
]
) | [
"def",
"test_eu_setup_partial_config",
"(",
"self",
",",
"mock_round",
",",
"mock_evo",
")",
":",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"eu\"",
",",
"}",
"mock_evo",
".",
"return_value",
".",
"temperatures",
".",
"return_value",
"=",
"[",
"{",
"\"id\"",
":",
"\"foo\"",
"}",
",",
"{",
"\"id\"",
":",
"\"bar\"",
"}",
"]",
"hass",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"add_entities",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"assert",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")",
"mock_round",
".",
"assert_has_calls",
"(",
"[",
"mock",
".",
"call",
"(",
"mock_evo",
".",
"return_value",
",",
"\"foo\"",
",",
"True",
",",
"16",
")",
",",
"mock",
".",
"call",
"(",
"mock_evo",
".",
"return_value",
",",
"\"bar\"",
",",
"False",
",",
"16",
")",
",",
"]",
")"
] | [
187,
4
] | [
205,
9
] | python | en | ['en', 'pt', 'en'] | True |
TestHoneywell.test_eu_setup_bad_temp | (self, mock_round, mock_evo) | Test the EU setup with invalid temperature. | Test the EU setup with invalid temperature. | def test_eu_setup_bad_temp(self, mock_round, mock_evo):
"""Test the EU setup with invalid temperature."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(config) | [
"def",
"test_eu_setup_bad_temp",
"(",
"self",
",",
"mock_round",
",",
"mock_evo",
")",
":",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"eu\"",
",",
"}",
"with",
"pytest",
".",
"raises",
"(",
"vol",
".",
"Invalid",
")",
":",
"honeywell",
".",
"PLATFORM_SCHEMA",
"(",
"config",
")"
] | [
209,
4
] | [
218,
45
] | python | en | ['en', 'sm', 'en'] | True |
TestHoneywell.test_eu_setup_error | (self, mock_round, mock_evo) | Test the EU setup with errors. | Test the EU setup with errors. | def test_eu_setup_error(self, mock_round, mock_evo):
"""Test the EU setup with errors."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
mock_evo.return_value.temperatures.side_effect = (
requests.exceptions.RequestException
)
add_entities = mock.MagicMock()
hass = mock.MagicMock()
assert not honeywell.setup_platform(hass, config, add_entities) | [
"def",
"test_eu_setup_error",
"(",
"self",
",",
"mock_round",
",",
"mock_evo",
")",
":",
"config",
"=",
"{",
"CONF_USERNAME",
":",
"\"user\"",
",",
"CONF_PASSWORD",
":",
"\"pass\"",
",",
"honeywell",
".",
"CONF_REGION",
":",
"\"eu\"",
",",
"}",
"mock_evo",
".",
"return_value",
".",
"temperatures",
".",
"side_effect",
"=",
"(",
"requests",
".",
"exceptions",
".",
"RequestException",
")",
"add_entities",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"hass",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"assert",
"not",
"honeywell",
".",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
")"
] | [
222,
4
] | [
234,
71
] | python | en | ['en', 'haw', 'en'] | True |
TestHoneywellRound.setup_method | (self, method) | Test the setup method. | Test the setup method. | def setup_method(self, method):
"""Test the setup method."""
def fake_temperatures(force_refresh=None):
"""Create fake temperatures."""
temps = [
{
"id": "1",
"temp": 20,
"setpoint": 21,
"thermostat": "main",
"name": "House",
},
{
"id": "2",
"temp": 21,
"setpoint": 22,
"thermostat": "DOMESTIC_HOT_WATER",
},
]
return temps
self.device = mock.MagicMock()
self.device.temperatures.side_effect = fake_temperatures
self.round1 = honeywell.RoundThermostat(self.device, "1", True, 16)
self.round1.update()
self.round2 = honeywell.RoundThermostat(self.device, "2", False, 17)
self.round2.update() | [
"def",
"setup_method",
"(",
"self",
",",
"method",
")",
":",
"def",
"fake_temperatures",
"(",
"force_refresh",
"=",
"None",
")",
":",
"\"\"\"Create fake temperatures.\"\"\"",
"temps",
"=",
"[",
"{",
"\"id\"",
":",
"\"1\"",
",",
"\"temp\"",
":",
"20",
",",
"\"setpoint\"",
":",
"21",
",",
"\"thermostat\"",
":",
"\"main\"",
",",
"\"name\"",
":",
"\"House\"",
",",
"}",
",",
"{",
"\"id\"",
":",
"\"2\"",
",",
"\"temp\"",
":",
"21",
",",
"\"setpoint\"",
":",
"22",
",",
"\"thermostat\"",
":",
"\"DOMESTIC_HOT_WATER\"",
",",
"}",
",",
"]",
"return",
"temps",
"self",
".",
"device",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"self",
".",
"device",
".",
"temperatures",
".",
"side_effect",
"=",
"fake_temperatures",
"self",
".",
"round1",
"=",
"honeywell",
".",
"RoundThermostat",
"(",
"self",
".",
"device",
",",
"\"1\"",
",",
"True",
",",
"16",
")",
"self",
".",
"round1",
".",
"update",
"(",
")",
"self",
".",
"round2",
"=",
"honeywell",
".",
"RoundThermostat",
"(",
"self",
".",
"device",
",",
"\"2\"",
",",
"False",
",",
"17",
")",
"self",
".",
"round2",
".",
"update",
"(",
")"
] | [
240,
4
] | [
267,
28
] | python | en | ['en', 'et', 'en'] | True |
TestHoneywellRound.test_attributes | (self) | Test the attributes. | Test the attributes. | def test_attributes(self):
"""Test the attributes."""
assert "House" == self.round1.name
assert TEMP_CELSIUS == self.round1.temperature_unit
assert 20 == self.round1.current_temperature
assert 21 == self.round1.target_temperature
assert not self.round1.is_away_mode_on
assert "Hot Water" == self.round2.name
assert TEMP_CELSIUS == self.round2.temperature_unit
assert 21 == self.round2.current_temperature
assert self.round2.target_temperature is None
assert not self.round2.is_away_mode_on | [
"def",
"test_attributes",
"(",
"self",
")",
":",
"assert",
"\"House\"",
"==",
"self",
".",
"round1",
".",
"name",
"assert",
"TEMP_CELSIUS",
"==",
"self",
".",
"round1",
".",
"temperature_unit",
"assert",
"20",
"==",
"self",
".",
"round1",
".",
"current_temperature",
"assert",
"21",
"==",
"self",
".",
"round1",
".",
"target_temperature",
"assert",
"not",
"self",
".",
"round1",
".",
"is_away_mode_on",
"assert",
"\"Hot Water\"",
"==",
"self",
".",
"round2",
".",
"name",
"assert",
"TEMP_CELSIUS",
"==",
"self",
".",
"round2",
".",
"temperature_unit",
"assert",
"21",
"==",
"self",
".",
"round2",
".",
"current_temperature",
"assert",
"self",
".",
"round2",
".",
"target_temperature",
"is",
"None",
"assert",
"not",
"self",
".",
"round2",
".",
"is_away_mode_on"
] | [
269,
4
] | [
281,
46
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellRound.test_away_mode | (self) | Test setting the away mode. | Test setting the away mode. | def test_away_mode(self):
"""Test setting the away mode."""
assert not self.round1.is_away_mode_on
self.round1.turn_away_mode_on()
assert self.round1.is_away_mode_on
assert self.device.set_temperature.call_count == 1
assert self.device.set_temperature.call_args == mock.call("House", 16)
self.device.set_temperature.reset_mock()
self.round1.turn_away_mode_off()
assert not self.round1.is_away_mode_on
assert self.device.cancel_temp_override.call_count == 1
assert self.device.cancel_temp_override.call_args == mock.call("House") | [
"def",
"test_away_mode",
"(",
"self",
")",
":",
"assert",
"not",
"self",
".",
"round1",
".",
"is_away_mode_on",
"self",
".",
"round1",
".",
"turn_away_mode_on",
"(",
")",
"assert",
"self",
".",
"round1",
".",
"is_away_mode_on",
"assert",
"self",
".",
"device",
".",
"set_temperature",
".",
"call_count",
"==",
"1",
"assert",
"self",
".",
"device",
".",
"set_temperature",
".",
"call_args",
"==",
"mock",
".",
"call",
"(",
"\"House\"",
",",
"16",
")",
"self",
".",
"device",
".",
"set_temperature",
".",
"reset_mock",
"(",
")",
"self",
".",
"round1",
".",
"turn_away_mode_off",
"(",
")",
"assert",
"not",
"self",
".",
"round1",
".",
"is_away_mode_on",
"assert",
"self",
".",
"device",
".",
"cancel_temp_override",
".",
"call_count",
"==",
"1",
"assert",
"self",
".",
"device",
".",
"cancel_temp_override",
".",
"call_args",
"==",
"mock",
".",
"call",
"(",
"\"House\"",
")"
] | [
283,
4
] | [
295,
79
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellRound.test_set_temperature | (self) | Test setting the temperature. | Test setting the temperature. | def test_set_temperature(self):
"""Test setting the temperature."""
self.round1.set_temperature(temperature=25)
assert self.device.set_temperature.call_count == 1
assert self.device.set_temperature.call_args == mock.call("House", 25) | [
"def",
"test_set_temperature",
"(",
"self",
")",
":",
"self",
".",
"round1",
".",
"set_temperature",
"(",
"temperature",
"=",
"25",
")",
"assert",
"self",
".",
"device",
".",
"set_temperature",
".",
"call_count",
"==",
"1",
"assert",
"self",
".",
"device",
".",
"set_temperature",
".",
"call_args",
"==",
"mock",
".",
"call",
"(",
"\"House\"",
",",
"25",
")"
] | [
297,
4
] | [
301,
78
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellRound.test_set_hvac_mode | (self) | Test setting the system operation. | Test setting the system operation. | def test_set_hvac_mode(self) -> None:
"""Test setting the system operation."""
self.round1.set_hvac_mode("cool")
assert "cool" == self.round1.current_operation
assert "cool" == self.device.system_mode
self.round1.set_hvac_mode("heat")
assert "heat" == self.round1.current_operation
assert "heat" == self.device.system_mode | [
"def",
"test_set_hvac_mode",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"round1",
".",
"set_hvac_mode",
"(",
"\"cool\"",
")",
"assert",
"\"cool\"",
"==",
"self",
".",
"round1",
".",
"current_operation",
"assert",
"\"cool\"",
"==",
"self",
".",
"device",
".",
"system_mode",
"self",
".",
"round1",
".",
"set_hvac_mode",
"(",
"\"heat\"",
")",
"assert",
"\"heat\"",
"==",
"self",
".",
"round1",
".",
"current_operation",
"assert",
"\"heat\"",
"==",
"self",
".",
"device",
".",
"system_mode"
] | [
303,
4
] | [
311,
48
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellUS.setup_method | (self, method) | Test the setup method. | Test the setup method. | def setup_method(self, method):
"""Test the setup method."""
self.client = mock.MagicMock()
self.device = mock.MagicMock()
self.cool_away_temp = 18
self.heat_away_temp = 28
self.honeywell = honeywell.HoneywellUSThermostat(
self.client,
self.device,
self.cool_away_temp,
self.heat_away_temp,
"user",
"password",
)
self.device.fan_running = True
self.device.name = "test"
self.device.temperature_unit = "F"
self.device.current_temperature = 72
self.device.setpoint_cool = 78
self.device.setpoint_heat = 65
self.device.system_mode = "heat"
self.device.fan_mode = "auto" | [
"def",
"setup_method",
"(",
"self",
",",
"method",
")",
":",
"self",
".",
"client",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"self",
".",
"device",
"=",
"mock",
".",
"MagicMock",
"(",
")",
"self",
".",
"cool_away_temp",
"=",
"18",
"self",
".",
"heat_away_temp",
"=",
"28",
"self",
".",
"honeywell",
"=",
"honeywell",
".",
"HoneywellUSThermostat",
"(",
"self",
".",
"client",
",",
"self",
".",
"device",
",",
"self",
".",
"cool_away_temp",
",",
"self",
".",
"heat_away_temp",
",",
"\"user\"",
",",
"\"password\"",
",",
")",
"self",
".",
"device",
".",
"fan_running",
"=",
"True",
"self",
".",
"device",
".",
"name",
"=",
"\"test\"",
"self",
".",
"device",
".",
"temperature_unit",
"=",
"\"F\"",
"self",
".",
"device",
".",
"current_temperature",
"=",
"72",
"self",
".",
"device",
".",
"setpoint_cool",
"=",
"78",
"self",
".",
"device",
".",
"setpoint_heat",
"=",
"65",
"self",
".",
"device",
".",
"system_mode",
"=",
"\"heat\"",
"self",
".",
"device",
".",
"fan_mode",
"=",
"\"auto\""
] | [
317,
4
] | [
339,
37
] | python | en | ['en', 'et', 'en'] | True |
TestHoneywellUS.test_properties | (self) | Test the properties. | Test the properties. | def test_properties(self):
"""Test the properties."""
assert self.honeywell.is_fan_on
assert "test" == self.honeywell.name
assert 72 == self.honeywell.current_temperature | [
"def",
"test_properties",
"(",
"self",
")",
":",
"assert",
"self",
".",
"honeywell",
".",
"is_fan_on",
"assert",
"\"test\"",
"==",
"self",
".",
"honeywell",
".",
"name",
"assert",
"72",
"==",
"self",
".",
"honeywell",
".",
"current_temperature"
] | [
341,
4
] | [
345,
55
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellUS.test_unit_of_measurement | (self) | Test the unit of measurement. | Test the unit of measurement. | def test_unit_of_measurement(self):
"""Test the unit of measurement."""
assert TEMP_FAHRENHEIT == self.honeywell.temperature_unit
self.device.temperature_unit = "C"
assert TEMP_CELSIUS == self.honeywell.temperature_unit | [
"def",
"test_unit_of_measurement",
"(",
"self",
")",
":",
"assert",
"TEMP_FAHRENHEIT",
"==",
"self",
".",
"honeywell",
".",
"temperature_unit",
"self",
".",
"device",
".",
"temperature_unit",
"=",
"\"C\"",
"assert",
"TEMP_CELSIUS",
"==",
"self",
".",
"honeywell",
".",
"temperature_unit"
] | [
347,
4
] | [
351,
62
] | python | en | ['en', 'fr', 'en'] | True |
TestHoneywellUS.test_target_temp | (self) | Test the target temperature. | Test the target temperature. | def test_target_temp(self):
"""Test the target temperature."""
assert 65 == self.honeywell.target_temperature
self.device.system_mode = "cool"
assert 78 == self.honeywell.target_temperature | [
"def",
"test_target_temp",
"(",
"self",
")",
":",
"assert",
"65",
"==",
"self",
".",
"honeywell",
".",
"target_temperature",
"self",
".",
"device",
".",
"system_mode",
"=",
"\"cool\"",
"assert",
"78",
"==",
"self",
".",
"honeywell",
".",
"target_temperature"
] | [
353,
4
] | [
357,
54
] | python | en | ['en', 'la', 'en'] | True |
TestHoneywellUS.test_set_temp | (self) | Test setting the temperature. | Test setting the temperature. | def test_set_temp(self):
"""Test setting the temperature."""
self.honeywell.set_temperature(temperature=70)
assert 70 == self.device.setpoint_heat
assert 70 == self.honeywell.target_temperature
self.device.system_mode = "cool"
assert 78 == self.honeywell.target_temperature
self.honeywell.set_temperature(temperature=74)
assert 74 == self.device.setpoint_cool
assert 74 == self.honeywell.target_temperature | [
"def",
"test_set_temp",
"(",
"self",
")",
":",
"self",
".",
"honeywell",
".",
"set_temperature",
"(",
"temperature",
"=",
"70",
")",
"assert",
"70",
"==",
"self",
".",
"device",
".",
"setpoint_heat",
"assert",
"70",
"==",
"self",
".",
"honeywell",
".",
"target_temperature",
"self",
".",
"device",
".",
"system_mode",
"=",
"\"cool\"",
"assert",
"78",
"==",
"self",
".",
"honeywell",
".",
"target_temperature",
"self",
".",
"honeywell",
".",
"set_temperature",
"(",
"temperature",
"=",
"74",
")",
"assert",
"74",
"==",
"self",
".",
"device",
".",
"setpoint_cool",
"assert",
"74",
"==",
"self",
".",
"honeywell",
".",
"target_temperature"
] | [
359,
4
] | [
369,
54
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellUS.test_set_hvac_mode | (self) | Test setting the operation mode. | Test setting the operation mode. | def test_set_hvac_mode(self) -> None:
"""Test setting the operation mode."""
self.honeywell.set_hvac_mode("cool")
assert "cool" == self.device.system_mode
self.honeywell.set_hvac_mode("heat")
assert "heat" == self.device.system_mode | [
"def",
"test_set_hvac_mode",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"honeywell",
".",
"set_hvac_mode",
"(",
"\"cool\"",
")",
"assert",
"\"cool\"",
"==",
"self",
".",
"device",
".",
"system_mode",
"self",
".",
"honeywell",
".",
"set_hvac_mode",
"(",
"\"heat\"",
")",
"assert",
"\"heat\"",
"==",
"self",
".",
"device",
".",
"system_mode"
] | [
371,
4
] | [
377,
48
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellUS.test_set_temp_fail | (self) | Test if setting the temperature fails. | Test if setting the temperature fails. | def test_set_temp_fail(self):
"""Test if setting the temperature fails."""
self.device.setpoint_heat = mock.MagicMock(
side_effect=somecomfort.SomeComfortError
)
self.honeywell.set_temperature(temperature=123) | [
"def",
"test_set_temp_fail",
"(",
"self",
")",
":",
"self",
".",
"device",
".",
"setpoint_heat",
"=",
"mock",
".",
"MagicMock",
"(",
"side_effect",
"=",
"somecomfort",
".",
"SomeComfortError",
")",
"self",
".",
"honeywell",
".",
"set_temperature",
"(",
"temperature",
"=",
"123",
")"
] | [
379,
4
] | [
384,
55
] | python | en | ['en', 'en', 'en'] | True |
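test_set_temp_fail makes no assertion; it passes only because the entity traps the backend error instead of letting it propagate. A hedged variant of the sketch above adds that guard; the real component's log wording is unknown here and somecomfort.SomeComfortError is the error type the test patches in.

import logging
import somecomfort

# Replaces set_temperature in the _ThermostatSketch above (illustrative).
def set_temperature(self, **kwargs):
    """Write the setpoint, swallowing backend errors as the test requires."""
    temperature = kwargs.get("temperature")
    try:
        if self._device.system_mode == "cool":
            self._device.setpoint_cool = temperature
        else:
            self._device.setpoint_heat = temperature
    except somecomfort.SomeComfortError:
        logging.getLogger(__name__).error("Temperature %s out of range", temperature)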
TestHoneywellUS.test_attributes | (self) | Test the attributes. | Test the attributes. | def test_attributes(self):
"""Test the attributes."""
expected = {
honeywell.ATTR_FAN: "running",
ATTR_FAN_MODE: "auto",
ATTR_FAN_MODES: somecomfort.FAN_MODES,
ATTR_HVAC_MODES: somecomfort.SYSTEM_MODES,
}
assert expected == self.honeywell.device_state_attributes
expected["fan"] = "idle"
self.device.fan_running = False
assert expected == self.honeywell.device_state_attributes | [
"def",
"test_attributes",
"(",
"self",
")",
":",
"expected",
"=",
"{",
"honeywell",
".",
"ATTR_FAN",
":",
"\"running\"",
",",
"ATTR_FAN_MODE",
":",
"\"auto\"",
",",
"ATTR_FAN_MODES",
":",
"somecomfort",
".",
"FAN_MODES",
",",
"ATTR_HVAC_MODES",
":",
"somecomfort",
".",
"SYSTEM_MODES",
",",
"}",
"assert",
"expected",
"==",
"self",
".",
"honeywell",
".",
"device_state_attributes",
"expected",
"[",
"\"fan\"",
"]",
"=",
"\"idle\"",
"self",
".",
"device",
".",
"fan_running",
"=",
"False",
"assert",
"expected",
"==",
"self",
".",
"honeywell",
".",
"device_state_attributes"
] | [
386,
4
] | [
397,
65
] | python | en | ['en', 'en', 'en'] | True |
TestHoneywellUS.test_with_no_fan | (self) | Test if there is no fan. | Test if there is no fan. | def test_with_no_fan(self):
"""Test if there is no fan."""
self.device.fan_running = False
self.device.fan_mode = None
expected = {
honeywell.ATTR_FAN: "idle",
ATTR_FAN_MODE: None,
ATTR_FAN_MODES: somecomfort.FAN_MODES,
ATTR_HVAC_MODES: somecomfort.SYSTEM_MODES,
}
assert expected == self.honeywell.device_state_attributes | [
"def",
"test_with_no_fan",
"(",
"self",
")",
":",
"self",
".",
"device",
".",
"fan_running",
"=",
"False",
"self",
".",
"device",
".",
"fan_mode",
"=",
"None",
"expected",
"=",
"{",
"honeywell",
".",
"ATTR_FAN",
":",
"\"idle\"",
",",
"ATTR_FAN_MODE",
":",
"None",
",",
"ATTR_FAN_MODES",
":",
"somecomfort",
".",
"FAN_MODES",
",",
"ATTR_HVAC_MODES",
":",
"somecomfort",
".",
"SYSTEM_MODES",
",",
"}",
"assert",
"expected",
"==",
"self",
".",
"honeywell",
".",
"device_state_attributes"
] | [
399,
4
] | [
409,
65
] | python | en | ['en', 'en', 'en'] | True |
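Together, test_attributes and test_with_no_fan fix the attribute mapping: the fan attribute derives from the boolean fan_running, fan_mode passes through (possibly None), and the mode lists come straight from somecomfort constants. A sketch consistent with both expected dicts; the real property may expose more fields, and the "fan" key is inferred from expected["fan"] in the test.

import somecomfort

ATTR_FAN = "fan"  # key implied by expected["fan"] above

class _AttributesSketch:
    def __init__(self, device):
        self._device = device

    @property
    def device_state_attributes(self):
        """Attribute mapping implied by the two attribute tests (sketch)."""
        return {
            ATTR_FAN: "running" if self._device.fan_running else "idle",
            "fan_mode": self._device.fan_mode,
            "fan_modes": somecomfort.FAN_MODES,
            "hvac_modes": somecomfort.SYSTEM_MODES,
        }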
TestHoneywellUS.test_heat_away_mode | (self) | Test setting the heat away mode. | Test setting the heat away mode. | def test_heat_away_mode(self):
"""Test setting the heat away mode."""
self.honeywell.set_hvac_mode("heat")
assert not self.honeywell.is_away_mode_on
self.honeywell.turn_away_mode_on()
assert self.honeywell.is_away_mode_on
assert self.device.setpoint_heat == self.heat_away_temp
assert self.device.hold_heat is True
self.honeywell.turn_away_mode_off()
assert not self.honeywell.is_away_mode_on
assert self.device.hold_heat is False | [
"def",
"test_heat_away_mode",
"(",
"self",
")",
":",
"self",
".",
"honeywell",
".",
"set_hvac_mode",
"(",
"\"heat\"",
")",
"assert",
"not",
"self",
".",
"honeywell",
".",
"is_away_mode_on",
"self",
".",
"honeywell",
".",
"turn_away_mode_on",
"(",
")",
"assert",
"self",
".",
"honeywell",
".",
"is_away_mode_on",
"assert",
"self",
".",
"device",
".",
"setpoint_heat",
"==",
"self",
".",
"heat_away_temp",
"assert",
"self",
".",
"device",
".",
"hold_heat",
"is",
"True",
"self",
".",
"honeywell",
".",
"turn_away_mode_off",
"(",
")",
"assert",
"not",
"self",
".",
"honeywell",
".",
"is_away_mode_on",
"assert",
"self",
".",
"device",
".",
"hold_heat",
"is",
"False"
] | [
411,
4
] | [
422,
45
] | python | en | ['en', 'en', 'en'] | True |
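test_heat_away_mode encodes the away-mode contract: turning it on writes the configured heat_away_temp to setpoint_heat and latches hold_heat; turning it off releases the hold. Standalone method sketches consistent with those assertions follow; the real methods presumably add somecomfort error handling that the test does not exercise.

# Entity methods shown standalone, as a sketch of the tested contract.
def turn_away_mode_on(self):
    """Apply the configured away temperature and hold it."""
    self._away = True
    self._device.setpoint_heat = self._heat_away_temp  # heat_away_temp from config
    self._device.hold_heat = True

def turn_away_mode_off(self):
    """Release the hold and resume the schedule."""
    self._away = False
    self._device.hold_heat = False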
TestHoneywellUS.test_retry | (self, test_somecomfort) | Test retry connection. | Test retry connection. | def test_retry(self, test_somecomfort):
"""Test retry connection."""
old_device = self.honeywell._device
self.honeywell._retry()
assert self.honeywell._device == old_device | [
"def",
"test_retry",
"(",
"self",
",",
"test_somecomfort",
")",
":",
"old_device",
"=",
"self",
".",
"honeywell",
".",
"_device",
"self",
".",
"honeywell",
".",
"_retry",
"(",
")",
"assert",
"self",
".",
"honeywell",
".",
"_device",
"==",
"old_device"
] | [
425,
4
] | [
429,
51
] | python | en | ['en', 'en', 'en'] | True |
get_scanner | (hass, config) | Validate the configuration and return a Unifi direct scanner. | Validate the configuration and return a Unifi direct scanner. | def get_scanner(hass, config):
"""Validate the configuration and return a Unifi direct scanner."""
scanner = UnifiDeviceScanner(config[DOMAIN])
if not scanner.connected:
return False
return scanner | [
"def",
"get_scanner",
"(",
"hass",
",",
"config",
")",
":",
"scanner",
"=",
"UnifiDeviceScanner",
"(",
"config",
"[",
"DOMAIN",
"]",
")",
"if",
"not",
"scanner",
".",
"connected",
":",
"return",
"False",
"return",
"scanner"
] | [
32,
0
] | [
37,
18
] | python | en | ['en', 'en', 'en'] | True |
UnifiDeviceScanner.__init__ | (self, config) | Initialize the scanner. | Initialize the scanner. | def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.port = config[CONF_PORT]
self.ssh = None
self.connected = False
self.last_results = {}
self._connect() | [
"def",
"__init__",
"(",
"self",
",",
"config",
")",
":",
"self",
".",
"host",
"=",
"config",
"[",
"CONF_HOST",
"]",
"self",
".",
"username",
"=",
"config",
"[",
"CONF_USERNAME",
"]",
"self",
".",
"password",
"=",
"config",
"[",
"CONF_PASSWORD",
"]",
"self",
".",
"port",
"=",
"config",
"[",
"CONF_PORT",
"]",
"self",
".",
"ssh",
"=",
"None",
"self",
".",
"connected",
"=",
"False",
"self",
".",
"last_results",
"=",
"{",
"}",
"self",
".",
"_connect",
"(",
")"
] | [
43,
4
] | [
52,
23
] | python | en | ['en', 'en', 'en'] | True |
UnifiDeviceScanner.scan_devices | (self) | Scan for new devices and return a list with found device IDs. | Scan for new devices and return a list with found device IDs. | def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
result = _response_to_json(self._get_update())
if result:
self.last_results = result
return self.last_results.keys() | [
"def",
"scan_devices",
"(",
"self",
")",
":",
"result",
"=",
"_response_to_json",
"(",
"self",
".",
"_get_update",
"(",
")",
")",
"if",
"result",
":",
"self",
".",
"last_results",
"=",
"result",
"return",
"self",
".",
"last_results",
".",
"keys",
"(",
")"
] | [
54,
4
] | [
59,
39
] | python | en | ['en', 'en', 'en'] | True |
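scan_devices relies on two helpers not shown in this excerpt: _get_update, which runs a command on the AP, and _response_to_json, which turns the raw output into a dict keyed by client MAC (as the hostname lookup in the next record confirms). A hypothetical parser follows; the AP's real output layout, including the "sta_table" key assumed here, may differ from the actual unifi_direct helper.

import json

def _response_to_json(response):
    """Hypothetical: map raw AP output to {mac: station_info}."""
    if not response:
        return {}
    try:
        payload = json.loads(response)
    except ValueError:
        return {}
    # Assumed layout: a "sta_table" list of station dicts with "mac"/"hostname".
    return {entry["mac"]: entry for entry in payload.get("sta_table", [])}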
UnifiDeviceScanner.get_device_name | (self, device) | Return the name of the given device or None if we don't know. | Return the name of the given device or None if we don't know. | def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
hostname = next(
(
value.get("hostname")
for key, value in self.last_results.items()
if key.upper() == device.upper()
),
None,
)
if hostname is not None:
hostname = str(hostname)
return hostname | [
"def",
"get_device_name",
"(",
"self",
",",
"device",
")",
":",
"hostname",
"=",
"next",
"(",
"(",
"value",
".",
"get",
"(",
"\"hostname\"",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"last_results",
".",
"items",
"(",
")",
"if",
"key",
".",
"upper",
"(",
")",
"==",
"device",
".",
"upper",
"(",
")",
")",
",",
"None",
",",
")",
"if",
"hostname",
"is",
"not",
"None",
":",
"hostname",
"=",
"str",
"(",
"hostname",
")",
"return",
"hostname"
] | [
61,
4
] | [
73,
23
] | python | en | ['en', 'en', 'en'] | True |
UnifiDeviceScanner._connect | (self) | Connect to the Unifi AP SSH server. | Connect to the Unifi AP SSH server. | def _connect(self):
"""Connect to the Unifi AP SSH server."""
self.ssh = pxssh.pxssh()
try:
self.ssh.login(
self.host, self.username, password=self.password, port=self.port
)
self.connected = True
except exceptions.EOF:
_LOGGER.error("Connection refused. SSH enabled?")
self._disconnect() | [
"def",
"_connect",
"(",
"self",
")",
":",
"self",
".",
"ssh",
"=",
"pxssh",
".",
"pxssh",
"(",
")",
"try",
":",
"self",
".",
"ssh",
".",
"login",
"(",
"self",
".",
"host",
",",
"self",
".",
"username",
",",
"password",
"=",
"self",
".",
"password",
",",
"port",
"=",
"self",
".",
"port",
")",
"self",
".",
"connected",
"=",
"True",
"except",
"exceptions",
".",
"EOF",
":",
"_LOGGER",
".",
"error",
"(",
"\"Connection refused. SSH enabled?\"",
")",
"self",
".",
"_disconnect",
"(",
")"
] | [
75,
4
] | [
86,
30
] | python | en | ['en', 'en', 'en'] | True |
UnifiDeviceScanner._disconnect | (self) | Disconnect the current SSH connection. | Disconnect the current SSH connection. | def _disconnect(self):
"""Disconnect the current SSH connection."""
try:
self.ssh.logout()
except Exception: # pylint: disable=broad-except
pass
finally:
self.ssh = None
self.connected = False | [
"def",
"_disconnect",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"ssh",
".",
"logout",
"(",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"pass",
"finally",
":",
"self",
".",
"ssh",
"=",
"None",
"self",
".",
"connected",
"=",
"False"
] | [
88,
4
] | [
97,
30
] | python | en | ['en', 'en', 'en'] | True |
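The last missing piece of this scanner is _get_update, which must run the client-listing command over the pxssh session opened in _connect. A sketch built on the documented pexpect pxssh calls (sendline, prompt, before) follows; the command string itself is an assumption, and a dropped session triggers a reconnect mirroring _connect/_disconnect above.

# Sketch of the helper scan_devices calls; not the shipped implementation.
def _get_update(self):
    """Run a station-dump command on the AP and return its raw output."""
    try:
        self.ssh.sendline("mca-dump")  # assumed command; the real one may differ
        self.ssh.prompt()
        return self.ssh.before.decode("utf-8", errors="ignore")
    except (pxssh.ExceptionPxssh, exceptions.EOF):
        self._disconnect()
        self._connect()
        return None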
test_send_message_with_data | (hass) | Test sending a message to a notify group. | Test sending a message to a notify group. | async def test_send_message_with_data(hass):
"""Test sending a message to a notify group."""
service1 = demo.DemoNotificationService(hass)
service2 = demo.DemoNotificationService(hass)
service1.send_message = MagicMock(autospec=True)
service2.send_message = MagicMock(autospec=True)
def mock_get_service(hass, config, discovery_info=None):
if config["name"] == "demo1":
return service1
return service2
assert await async_setup_component(
hass,
"group",
{},
)
await hass.async_block_till_done()
with patch.object(demo, "get_service", mock_get_service):
await async_setup_component(
hass,
notify.DOMAIN,
{
"notify": [
{"name": "demo1", "platform": "demo"},
{"name": "demo2", "platform": "demo"},
]
},
)
await hass.async_block_till_done()
service = await group.async_get_service(
hass,
{
"services": [
{"service": "demo1"},
{
"service": "demo2",
"data": {
"target": "unnamed device",
"data": {"test": "message"},
},
},
]
},
)
"""Test sending a message with to a notify group."""
await service.async_send_message(
"Hello", title="Test notification", data={"hello": "world"}
)
await hass.async_block_till_done()
assert service1.send_message.mock_calls[0][1][0] == "Hello"
assert service1.send_message.mock_calls[0][2] == {
"title": "Test notification",
"data": {"hello": "world"},
}
assert service2.send_message.mock_calls[0][1][0] == "Hello"
assert service2.send_message.mock_calls[0][2] == {
"target": ["unnamed device"],
"title": "Test notification",
"data": {"hello": "world", "test": "message"},
} | [
"async",
"def",
"test_send_message_with_data",
"(",
"hass",
")",
":",
"service1",
"=",
"demo",
".",
"DemoNotificationService",
"(",
"hass",
")",
"service2",
"=",
"demo",
".",
"DemoNotificationService",
"(",
"hass",
")",
"service1",
".",
"send_message",
"=",
"MagicMock",
"(",
"autospec",
"=",
"True",
")",
"service2",
".",
"send_message",
"=",
"MagicMock",
"(",
"autospec",
"=",
"True",
")",
"def",
"mock_get_service",
"(",
"hass",
",",
"config",
",",
"discovery_info",
"=",
"None",
")",
":",
"if",
"config",
"[",
"\"name\"",
"]",
"==",
"\"demo1\"",
":",
"return",
"service1",
"return",
"service2",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"\"group\"",
",",
"{",
"}",
",",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"with",
"patch",
".",
"object",
"(",
"demo",
",",
"\"get_service\"",
",",
"mock_get_service",
")",
":",
"await",
"async_setup_component",
"(",
"hass",
",",
"notify",
".",
"DOMAIN",
",",
"{",
"\"notify\"",
":",
"[",
"{",
"\"name\"",
":",
"\"demo1\"",
",",
"\"platform\"",
":",
"\"demo\"",
"}",
",",
"{",
"\"name\"",
":",
"\"demo2\"",
",",
"\"platform\"",
":",
"\"demo\"",
"}",
",",
"]",
"}",
",",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"service",
"=",
"await",
"group",
".",
"async_get_service",
"(",
"hass",
",",
"{",
"\"services\"",
":",
"[",
"{",
"\"service\"",
":",
"\"demo1\"",
"}",
",",
"{",
"\"service\"",
":",
"\"demo2\"",
",",
"\"data\"",
":",
"{",
"\"target\"",
":",
"\"unnamed device\"",
",",
"\"data\"",
":",
"{",
"\"test\"",
":",
"\"message\"",
"}",
",",
"}",
",",
"}",
",",
"]",
"}",
",",
")",
"\"\"\"Test sending a message with to a notify group.\"\"\"",
"await",
"service",
".",
"async_send_message",
"(",
"\"Hello\"",
",",
"title",
"=",
"\"Test notification\"",
",",
"data",
"=",
"{",
"\"hello\"",
":",
"\"world\"",
"}",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"assert",
"service1",
".",
"send_message",
".",
"mock_calls",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"\"Hello\"",
"assert",
"service1",
".",
"send_message",
".",
"mock_calls",
"[",
"0",
"]",
"[",
"2",
"]",
"==",
"{",
"\"title\"",
":",
"\"Test notification\"",
",",
"\"data\"",
":",
"{",
"\"hello\"",
":",
"\"world\"",
"}",
",",
"}",
"assert",
"service2",
".",
"send_message",
".",
"mock_calls",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"\"Hello\"",
"assert",
"service2",
".",
"send_message",
".",
"mock_calls",
"[",
"0",
"]",
"[",
"2",
"]",
"==",
"{",
"\"target\"",
":",
"[",
"\"unnamed device\"",
"]",
",",
"\"title\"",
":",
"\"Test notification\"",
",",
"\"data\"",
":",
"{",
"\"hello\"",
":",
"\"world\"",
",",
"\"test\"",
":",
"\"message\"",
"}",
",",
"}"
] | [
13,
0
] | [
79,
5
] | python | en | ['en', 'en', 'en'] | True |
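The closing assertions encode the group platform's merge rules: per-service config data is folded into the caller's kwargs, a scalar "target" arrives as a list, and nested data dicts are combined key by key. The test never creates a key clash, so the precedence chosen in this sketch (config value wins) is an illustrative assumption, as is the helper's name.

def _merge_payload(call_kwargs, service_data):
    """Hypothetical merge matching the expected calls asserted above."""
    merged = dict(call_kwargs)
    for key, value in service_data.items():
        if key == "target" and not isinstance(value, list):
            merged.setdefault("target", []).append(value)  # scalar -> list
        elif isinstance(value, dict):
            merged[key] = {**merged.get(key, {}), **value}  # combine nested dicts
        else:
            merged.setdefault(key, value)
    return merged

# With the test's inputs this reproduces service2's expected call:
# _merge_payload({"title": "Test notification", "data": {"hello": "world"}},
#                {"target": "unnamed device", "data": {"test": "message"}})
# -> {"title": "Test notification",
#     "data": {"hello": "world", "test": "message"},
#     "target": ["unnamed device"]}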
test_reload_notify | (hass) | Verify we can reload the notify service. | Verify we can reload the notify service. | async def test_reload_notify(hass):
"""Verify we can reload the notify service."""
assert await async_setup_component(
hass,
"group",
{},
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
notify.DOMAIN,
{
notify.DOMAIN: [
{"name": "demo1", "platform": "demo"},
{"name": "demo2", "platform": "demo"},
{
"name": "group_notify",
"platform": "group",
"services": [{"service": "demo1"}],
},
]
},
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, "demo1")
assert hass.services.has_service(notify.DOMAIN, "demo2")
assert hass.services.has_service(notify.DOMAIN, "group_notify")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"group/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
"group",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, "demo1")
assert hass.services.has_service(notify.DOMAIN, "demo2")
assert not hass.services.has_service(notify.DOMAIN, "group_notify")
assert hass.services.has_service(notify.DOMAIN, "new_group_notify") | [
"async",
"def",
"test_reload_notify",
"(",
"hass",
")",
":",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"\"group\"",
",",
"{",
"}",
",",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"assert",
"await",
"async_setup_component",
"(",
"hass",
",",
"notify",
".",
"DOMAIN",
",",
"{",
"notify",
".",
"DOMAIN",
":",
"[",
"{",
"\"name\"",
":",
"\"demo1\"",
",",
"\"platform\"",
":",
"\"demo\"",
"}",
",",
"{",
"\"name\"",
":",
"\"demo2\"",
",",
"\"platform\"",
":",
"\"demo\"",
"}",
",",
"{",
"\"name\"",
":",
"\"group_notify\"",
",",
"\"platform\"",
":",
"\"group\"",
",",
"\"services\"",
":",
"[",
"{",
"\"service\"",
":",
"\"demo1\"",
"}",
"]",
",",
"}",
",",
"]",
"}",
",",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"assert",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"demo1\"",
")",
"assert",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"demo2\"",
")",
"assert",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"group_notify\"",
")",
"yaml_path",
"=",
"path",
".",
"join",
"(",
"_get_fixtures_base_path",
"(",
")",
",",
"\"fixtures\"",
",",
"\"group/configuration.yaml\"",
",",
")",
"with",
"patch",
".",
"object",
"(",
"hass_config",
",",
"\"YAML_CONFIG_FILE\"",
",",
"yaml_path",
")",
":",
"await",
"hass",
".",
"services",
".",
"async_call",
"(",
"\"group\"",
",",
"SERVICE_RELOAD",
",",
"{",
"}",
",",
"blocking",
"=",
"True",
",",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"assert",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"demo1\"",
")",
"assert",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"demo2\"",
")",
"assert",
"not",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"group_notify\"",
")",
"assert",
"hass",
".",
"services",
".",
"has_service",
"(",
"notify",
".",
"DOMAIN",
",",
"\"new_group_notify\"",
")"
] | [
82,
0
] | [
130,
71
] | python | en | ['en', 'en', 'en'] | True |
setup_platform | (hass, config, add_entities, discovery_info=None) | Set up the pocketcasts platform for sensors. | Set up the pocketcasts platform for sensors. | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the pocketcasts platform for sensors."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
api = pocketcasts.Api(username, password)
_LOGGER.debug("Found %d podcasts", len(api.my_podcasts()))
add_entities([PocketCastsSensor(api)], True)
except OSError as err:
_LOGGER.error("Connection to server failed: %s", err)
return False | [
"def",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"username",
"=",
"config",
".",
"get",
"(",
"CONF_USERNAME",
")",
"password",
"=",
"config",
".",
"get",
"(",
"CONF_PASSWORD",
")",
"try",
":",
"api",
"=",
"pocketcasts",
".",
"Api",
"(",
"username",
",",
"password",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Found %d podcasts\"",
",",
"len",
"(",
"api",
".",
"my_podcasts",
"(",
")",
")",
")",
"add_entities",
"(",
"[",
"PocketCastsSensor",
"(",
"api",
")",
"]",
",",
"True",
")",
"except",
"OSError",
"as",
"err",
":",
"_LOGGER",
".",
"error",
"(",
"\"Connection to server failed: %s\"",
",",
"err",
")",
"return",
"False"
] | [
25,
0
] | [
36,
20
] | python | en | ['en', 'pt', 'en'] | True |
PocketCastsSensor.__init__ | (self, api) | Initialize the sensor. | Initialize the sensor. | def __init__(self, api):
"""Initialize the sensor."""
self._api = api
self._state = None | [
"def",
"__init__",
"(",
"self",
",",
"api",
")",
":",
"self",
".",
"_api",
"=",
"api",
"self",
".",
"_state",
"=",
"None"
] | [
42,
4
] | [
45,
26
] | python | en | ['en', 'en', 'en'] | True |
PocketCastsSensor.name | (self) | Return the name of the sensor. | Return the name of the sensor. | def name(self):
"""Return the name of the sensor."""
return SENSOR_NAME | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"SENSOR_NAME"
] | [
48,
4
] | [
50,
26
] | python | en | ['en', 'mi', 'en'] | True |
PocketCastsSensor.state | (self) | Return the sensor state. | Return the sensor state. | def state(self):
"""Return the sensor state."""
return self._state | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_state"
] | [
53,
4
] | [
55,
26
] | python | en | ['en', 'bs', 'en'] | True |
PocketCastsSensor.icon | (self) | Return the icon for the sensor. | Return the icon for the sensor. | def icon(self):
"""Return the icon for the sensor."""
return ICON | [
"def",
"icon",
"(",
"self",
")",
":",
"return",
"ICON"
] | [
58,
4
] | [
60,
19
] | python | en | ['en', 'en', 'en'] | True |
PocketCastsSensor.update | (self) | Update sensor values. | Update sensor values. | def update(self):
"""Update sensor values."""
try:
self._state = len(self._api.new_episodes_released())
_LOGGER.debug("Found %d new episodes", self._state)
except OSError as err:
_LOGGER.warning("Failed to contact server: %s", err) | [
"def",
"update",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_state",
"=",
"len",
"(",
"self",
".",
"_api",
".",
"new_episodes_released",
"(",
")",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Found %d new episodes\"",
",",
"self",
".",
"_state",
")",
"except",
"OSError",
"as",
"err",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Failed to contact server: %s\"",
",",
"err",
")"
] | [
62,
4
] | [
68,
64
] | python | en | ['en', 'nl', 'en'] | True |
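Between setup_platform and update, this integration touches exactly two calls on the pocketcasts client: my_podcasts and new_episodes_released. A short usage sketch follows; the credentials are placeholders and both calls hit the live service.

import pocketcasts

api = pocketcasts.Api("user@example.com", "app-password")  # placeholder credentials
print("subscribed podcasts:", len(api.my_podcasts()))
print("new episodes released:", len(api.new_episodes_released()))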
async_setup_platform | (hass, config, async_add_entities, discovery_info=None) | Set up the IP Webcam Sensor. | Set up the IP Webcam Sensor. | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the IP Webcam Sensor."""
if discovery_info is None:
return
host = discovery_info[CONF_HOST]
name = discovery_info[CONF_NAME]
sensors = discovery_info[CONF_SENSORS]
ipcam = hass.data[DATA_IP_WEBCAM][host]
all_sensors = []
for sensor in sensors:
all_sensors.append(IPWebcamSensor(name, host, ipcam, sensor))
async_add_entities(all_sensors, True) | [
"async",
"def",
"async_setup_platform",
"(",
"hass",
",",
"config",
",",
"async_add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"if",
"discovery_info",
"is",
"None",
":",
"return",
"host",
"=",
"discovery_info",
"[",
"CONF_HOST",
"]",
"name",
"=",
"discovery_info",
"[",
"CONF_NAME",
"]",
"sensors",
"=",
"discovery_info",
"[",
"CONF_SENSORS",
"]",
"ipcam",
"=",
"hass",
".",
"data",
"[",
"DATA_IP_WEBCAM",
"]",
"[",
"host",
"]",
"all_sensors",
"=",
"[",
"]",
"for",
"sensor",
"in",
"sensors",
":",
"all_sensors",
".",
"append",
"(",
"IPWebcamSensor",
"(",
"name",
",",
"host",
",",
"ipcam",
",",
"sensor",
")",
")",
"async_add_entities",
"(",
"all_sensors",
",",
"True",
")"
] | [
14,
0
] | [
29,
41
] | python | en | ['en', 'da', 'en'] | True |
IPWebcamSensor.__init__ | (self, name, host, ipcam, sensor) | Initialize the sensor. | Initialize the sensor. | def __init__(self, name, host, ipcam, sensor):
"""Initialize the sensor."""
super().__init__(host, ipcam)
self._sensor = sensor
self._mapped_name = KEY_MAP.get(self._sensor, self._sensor)
self._name = f"{name} {self._mapped_name}"
self._state = None
self._unit = None | [
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"host",
",",
"ipcam",
",",
"sensor",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"host",
",",
"ipcam",
")",
"self",
".",
"_sensor",
"=",
"sensor",
"self",
".",
"_mapped_name",
"=",
"KEY_MAP",
".",
"get",
"(",
"self",
".",
"_sensor",
",",
"self",
".",
"_sensor",
")",
"self",
".",
"_name",
"=",
"f\"{name} {self._mapped_name}\"",
"self",
".",
"_state",
"=",
"None",
"self",
".",
"_unit",
"=",
"None"
] | [
35,
4
] | [
43,
25
] | python | en | ['en', 'en', 'en'] | True |
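KEY_MAP is imported from elsewhere in the ip_webcam component and is not shown here; the constructor uses it to translate raw sensor keys into display names, falling back to the raw key. A hypothetical illustration of that lookup follows; the real table defines its own entries, which may differ.

# Hypothetical entries; the real KEY_MAP in the ip_webcam component differs.
KEY_MAP = {
    "battery_level": "Battery Level",
    "light": "Light Level",
}

def mapped_name(sensor_key):
    """Return the display name for a raw sensor key, as __init__ does above."""
    return KEY_MAP.get(sensor_key, sensor_key)

assert mapped_name("battery_level") == "Battery Level"
assert mapped_name("unknown_key") == "unknown_key"  # falls back to the raw key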