Dataset schema (column name: type, observed min/max length):

repository_name: string (length 7 to 55)
func_path_in_repository: string (length 4 to 223)
func_name: string (length 1 to 134)
whole_func_string: string (length 75 to 104k)
language: string (1 class)
func_code_string: string (length 75 to 104k)
func_code_tokens: sequence (length 19 to 28.4k)
func_documentation_string: string (length 1 to 46.9k)
func_documentation_tokens: sequence (length 1 to 1.97k)
split_name: string (1 class)
func_code_url: string (length 87 to 315)
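As a minimal sketch of how rows with this schema might be inspected, the snippet below loads the data with the Hugging Face datasets library and prints a few fields of the first record. The dataset identifier used here is a placeholder, not the real Hub name, and hosting via the datasets library is itself an assumption about how the corpus is distributed.

# Minimal sketch: inspect one row of the corpus described above.
# NOTE: "someuser/code-docstring-corpus" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

# The schema above lists a single split ("train"), so load just that split.
dataset = load_dataset("someuser/code-docstring-corpus", split="train")

row = dataset[0]
print(row["repository_name"])            # e.g. "mezz64/pyEight"
print(row["func_name"])                  # e.g. "EightUser.last_room_temp"
print(row["func_documentation_string"])  # the docstring paired with the function
print(row["func_code_url"])              # permalink to the source lines on GitHub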
mezz64/pyEight
pyeight/user.py
EightUser.last_room_temp
def last_room_temp(self):
    """Return avg room temperature for last session."""
    try:
        rmtemps = self.intervals[1]['timeseries']['tempRoomC']
    except KeyError:
        return None

    tmp = 0
    num_temps = len(rmtemps)
    if num_temps == 0:
        return None

    for temp in rmtemps:
        tmp += temp[1]
    rmtemp = tmp/num_temps
    return rmtemp
python
Return avg room temperature for last session.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L374-L389
mezz64/pyEight
pyeight/user.py
EightUser.last_heart_rate
def last_heart_rate(self):
    """Return avg heart rate for last session."""
    try:
        rates = self.intervals[1]['timeseries']['heartRate']
    except KeyError:
        return None

    tmp = 0
    num_rates = len(rates)
    if num_rates == 0:
        return None

    for rate in rates:
        tmp += rate[1]
    rateavg = tmp/num_rates
    return rateavg
python
Return avg heart rate for last session.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L419-L434
mezz64/pyEight
pyeight/user.py
EightUser.last_values
def last_values(self):
    """Return a dict of all the 'last' parameters."""
    last_dict = {
        'date': self.last_session_date,
        'score': self.last_sleep_score,
        'breakdown': self.last_sleep_breakdown,
        'tnt': self.last_tnt,
        'bed_temp': self.last_bed_temp,
        'room_temp': self.last_room_temp,
        'resp_rate': self.last_resp_rate,
        'heart_rate': self.last_heart_rate,
        'processing': self.last_session_processing,
    }
    return last_dict
python
Return a dict of all the 'last' parameters.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L437-L450
mezz64/pyEight
pyeight/user.py
EightUser.heating_stats
def heating_stats(self):
    """Calculate some heating data stats."""
    local_5 = []
    local_10 = []

    for i in range(0, 10):
        level = self.past_heating_level(i)
        if level == 0:
            _LOGGER.debug('Cant calculate stats yet...')
            return
        if i < 5:
            local_5.append(level)
        local_10.append(level)

    _LOGGER.debug('%s Heating History: %s', self.side, local_10)

    try:
        # Average of 5min on the history dict.
        fiveminavg = statistics.mean(local_5)
        tenminavg = statistics.mean(local_10)
        _LOGGER.debug('%s Heating 5 min avg: %s', self.side, fiveminavg)
        _LOGGER.debug('%s Heating 10 min avg: %s', self.side, tenminavg)

        # Standard deviation
        fivestdev = statistics.stdev(local_5)
        tenstdev = statistics.stdev(local_10)
        _LOGGER.debug('%s Heating 5 min stdev: %s', self.side, fivestdev)
        _LOGGER.debug('%s Heating 10 min stdev: %s', self.side, tenstdev)

        # Variance
        fivevar = statistics.variance(local_5)
        tenvar = statistics.variance(local_10)
        _LOGGER.debug('%s Heating 5 min variance: %s', self.side, fivevar)
        _LOGGER.debug('%s Heating 10 min variance: %s', self.side, tenvar)
    except:
        _LOGGER.debug('Cant calculate stats yet...')
python
Calculate some heating data stats.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L452-L487
mezz64/pyEight
pyeight/user.py
EightUser.dynamic_presence
def dynamic_presence(self):
    """
    Determine presence based on bed heating level and end presence
    time reported by the api.

    Idea originated from Alex Lee Yuk Cheung SmartThings Code.
    """
    # self.heating_stats()

    if not self.presence:
        if self.heating_level > 50:
            # Can likely make this better
            if not self.now_heating:
                self.presence = True
            elif self.heating_level - self.target_heating_level >= 8:
                self.presence = True
        elif self.heating_level > 25:
            # Catch rising edge
            if self.past_heating_level(0) - self.past_heating_level(1) >= 2 \
                    and self.past_heating_level(1) - self.past_heating_level(2) >= 2 \
                    and self.past_heating_level(2) - self.past_heating_level(3) >= 2:
                # Values are increasing so we are likely in bed
                if not self.now_heating:
                    self.presence = True
                elif self.heating_level - self.target_heating_level >= 8:
                    self.presence = True

    elif self.presence:
        if self.heating_level <= 15:
            # Failsafe, very slow
            self.presence = False
        elif self.heating_level < 50:
            if self.past_heating_level(0) - self.past_heating_level(1) < 0 \
                    and self.past_heating_level(1) - self.past_heating_level(2) < 0 \
                    and self.past_heating_level(2) - self.past_heating_level(3) < 0:
                # Values are decreasing so we are likely out of bed
                self.presence = False

    # Last seen can lag real-time by up to 35min so this is
    # mostly a backup to using the heat values.
    # seen_delta = datetime.fromtimestamp(time.time()) \
    #     - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')
    # _LOGGER.debug('%s Last seen time delta: %s', self.side,
    #               seen_delta.total_seconds())
    # if self.presence and seen_delta.total_seconds() > 2100:
    #     self.presence = False

    _LOGGER.debug('%s Presence Results: %s', self.side, self.presence)
python
Determine presence based on bed heating level and end presence time reported by the api. Idea originated from Alex Lee Yuk Cheung SmartThings Code.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L494-L542
mezz64/pyEight
pyeight/user.py
EightUser.set_heating_level
async def set_heating_level(self, level, duration=0):
    """Update heating data json."""
    url = '{}/devices/{}'.format(API_URL, self.device.deviceid)

    # Catch bad inputs
    level = 10 if level < 10 else level
    level = 100 if level > 100 else level

    if self.side == 'left':
        data = {
            'leftHeatingDuration': duration,
            'leftTargetHeatingLevel': level
        }
    elif self.side == 'right':
        data = {
            'rightHeatingDuration': duration,
            'rightTargetHeatingLevel': level
        }

    set_heat = await self.device.api_put(url, data)
    if set_heat is None:
        _LOGGER.error('Unable to set eight heating level.')
    else:
        # Standard device json is returned after setting
        self.device.handle_device_json(set_heat['device'])
python
Update heating data json.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L555-L579
mezz64/pyEight
pyeight/user.py
EightUser.update_trend_data
async def update_trend_data(self, startdate, enddate):
    """Update trends data json for specified time period."""
    url = '{}/users/{}/trends'.format(API_URL, self.userid)
    params = {
        'tz': self.device.tzone,
        'from': startdate,
        'to': enddate
    }

    trends = await self.device.api_get(url, params)
    if trends is None:
        _LOGGER.error('Unable to fetch eight trend data.')
    else:
        self.trends = trends['days']
python
Update trends data json for specified time period.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L581-L594
mezz64/pyEight
pyeight/user.py
EightUser.update_intervals_data
async def update_intervals_data(self):
    """Update intervals data json for specified time period."""
    url = '{}/users/{}/intervals'.format(API_URL, self.userid)

    intervals = await self.device.api_get(url)
    if intervals is None:
        _LOGGER.error('Unable to fetch eight intervals data.')
    else:
        self.intervals = intervals['intervals']
python
Update intervals data json for specified time period.
train
https://github.com/mezz64/pyEight/blob/e557e4e6876f490d0964298e9475d68b64222d4f/pyeight/user.py#L596-L604
erikvw/django-collect-offline
django_collect_offline/transaction/transaction_deserializer.py
save
def save(obj=None, m2m_data=None):
    """Saves a deserialized model object.

    Uses save_base to avoid running code in model.save() and
    to avoid triggering signals (if raw=True).
    """
    m2m_data = {} if m2m_data is None else m2m_data
    obj.save_base(raw=True)
    for attr, values in m2m_data.items():
        for value in values:
            getattr(obj, attr).add(value)
python
Saves a deserialized model object. Uses save_base to avoid running code in model.save() and to avoid triggering signals (if raw=True).
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L16-L26
erikvw/django-collect-offline
django_collect_offline/transaction/transaction_deserializer.py
TransactionDeserializer.deserialize_transactions
def deserialize_transactions(self, transactions=None, deserialize_only=None):
    """Deserializes the encrypted serialized model instances, tx,
    in a queryset of transactions.

    Note: each transaction instance contains encrypted JSON text
    that represents just ONE model instance.
    """
    if (
        not self.allow_self
        and transactions.filter(producer=socket.gethostname()).exists()
    ):
        raise TransactionDeserializerError(
            f"Not deserializing own transactions. Got "
            f"allow_self=False, hostname={socket.gethostname()}"
        )
    for transaction in transactions:
        json_text = self.aes_decrypt(cipher_text=transaction.tx)
        json_text = self.custom_parser(json_text)
        deserialized = next(self.deserialize(json_text=json_text))
        if not deserialize_only:
            if transaction.action == DELETE:
                deserialized.object.delete()
            else:
                self.save(obj=deserialized.object, m2m_data=deserialized.m2m_data)
            transaction.is_consumed = True
            transaction.save()
python
Deserializes the encrypted serialized model instances, tx, in a queryset of transactions. Note: each transaction instance contains encrypted JSON text that represents just ONE model instance.
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L49-L75
erikvw/django-collect-offline
django_collect_offline/transaction/transaction_deserializer.py
TransactionDeserializer.custom_parser
def custom_parser(self, json_text=None):
    """Runs json_text thru custom parsers.
    """
    app_config = django_apps.get_app_config("django_collect_offline")
    for json_parser in app_config.custom_json_parsers:
        json_text = json_parser(json_text)
    return json_text
python
Runs json_text thru custom parsers.
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/transaction/transaction_deserializer.py#L77-L83
RedHatQE/python-stitches
stitches/connection.py
Connection.cli
def cli(self):
    """ cli lazy property """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=self.private_hostname,
                   username=self.username,
                   key_filename=self.key_filename,
                   timeout=self.timeout,
                   look_for_keys=self.look_for_keys)
    # set keepalive
    transport = client.get_transport()
    transport.set_keepalive(3)
    return client
python
cli lazy property
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L111-L124
RedHatQE/python-stitches
stitches/connection.py
Connection.channel
def channel(self):
    """ channel lazy property """
    # start shell, non-blocking channel
    chan = self.cli.invoke_shell(width=360, height=80)
    chan.setblocking(0)
    # set channel timeout
    chan.settimeout(10)
    # now waiting for shell prompt ('username@')
    result = ""
    count = 0
    while count < 10:
        try:
            recv_part = chan.recv(16384).decode()
            result += recv_part
        except socket.timeout:
            # socket.timeout here means 'no more data'
            pass
        if result.find('%s@' % self.username) != -1:
            return chan
        time.sleep(1)
        count += 1
    # failed to get shell prompt on channel :-(
    raise StitchesConnectionException("Failed to get shell prompt")
python
channel lazy property
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L127-L150
RedHatQE/python-stitches
stitches/connection.py
Connection.pbm
def pbm(self):
    """ Plumbum lazy property """
    if not self.disable_rpyc:
        from plumbum import SshMachine
        return SshMachine(host=self.private_hostname, user=self.username,
                          keyfile=self.key_filename,
                          ssh_opts=["-o", "UserKnownHostsFile=/dev/null",
                                    "-o", "StrictHostKeyChecking=no"])
    else:
        return None
python
Plumbum lazy property
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L158-L167
RedHatQE/python-stitches
stitches/connection.py
Connection.rpyc
def rpyc(self):
    """ RPyC lazy property """
    if not self.disable_rpyc:
        try:
            import rpyc
            devnull_fd = open("/dev/null", "w")
            rpyc_dirname = os.path.dirname(rpyc.__file__)
            rnd_id = ''.join(random.choice(string.ascii_lowercase) for x in range(10))
            pid_filename = "/tmp/%s.pid" % rnd_id
            pid_dest_filename = "/tmp/%s%s.pid" % (rnd_id, rnd_id)
            rnd_filename = "/tmp/" + rnd_id + ".tar.gz"
            rnd_dest_filename = "/tmp/" + rnd_id + rnd_id + ".tar.gz"
            subprocess.check_call(["tar", "-cz", "--exclude", "*.pyc",
                                   "--exclude", "*.pyo", "--transform",
                                   "s,%s,%s," % (rpyc_dirname[1:][:-5], rnd_id),
                                   rpyc_dirname, "-f", rnd_filename],
                                  stdout=devnull_fd, stderr=devnull_fd)
            devnull_fd.close()
            self.sftp.put(rnd_filename, rnd_dest_filename)
            os.remove(rnd_filename)
            self.recv_exit_status("tar -zxvf %s -C /tmp" % rnd_dest_filename, 10)
            server_script = r"""
import os
print os.environ
from rpyc.utils.server import ThreadedServer
from rpyc import SlaveService
import sys
t = ThreadedServer(SlaveService, hostname = 'localhost', port = 0, reuse_addr = True)
fd = open('""" + pid_filename + r"""', 'w')
fd.write(str(t.port))
fd.close()
t.start()
"""
            command = "echo \"%s\" | PYTHONPATH=\"/tmp/%s\" python " % (server_script, rnd_id)
            self.stdin_rpyc, self.stdout_rpyc, self.stderr_rpyc = self.exec_command(command, get_pty=True)
            self.recv_exit_status("while [ ! -f %s ]; do sleep 1; done" % (pid_filename), 10)
            self.sftp.get(pid_filename, pid_dest_filename)
            pid_fd = open(pid_dest_filename, 'r')
            port = int(pid_fd.read())
            pid_fd.close()
            os.remove(pid_dest_filename)
            return rpyc.classic.ssh_connect(self.pbm, port)
        except Exception as err:
            self.logger.debug("Failed to setup rpyc: %s" % err)
            return None
    else:
        return None
python
RPyC lazy property
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L170-L219
RedHatQE/python-stitches
stitches/connection.py
Connection.disconnect
def disconnect(self):
    """ Close the connection """
    if hasattr(self, '_lazy_sftp'):
        if self.sftp is not None:
            self.sftp.close()
        delattr(self, '_lazy_sftp')
    if hasattr(self, '_lazy_channel'):
        if self.channel is not None:
            self.channel.close()
        delattr(self, '_lazy_channel')
    if hasattr(self, '_lazy_cli'):
        if self.cli is not None:
            self.cli.close()
        delattr(self, '_lazy_cli')
    if hasattr(self, '_lazy_pbm'):
        if self.pbm is not None:
            self.pbm.close()
        delattr(self, '_lazy_pbm')
    if hasattr(self, '_lazy_rpyc'):
        if self.rpyc is not None:
            self.rpyc.close()
        delattr(self, '_lazy_rpyc')
python
Close the connection
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L227-L250
RedHatQE/python-stitches
stitches/connection.py
Connection.exec_command
def exec_command(self, command, bufsize=-1, get_pty=False):
    """ Execute a command in the connection

    @param command: command to execute
    @type command: str

    @param bufsize: buffer size
    @type bufsize: int

    @param get_pty: get pty
    @type get_pty: bool

    @return: the stdin, stdout, and stderr of the executing command
    @rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile},
            L{paramiko.ChannelFile})

    @raise SSHException: if the server fails to execute the command
    """
    self.last_command = command
    return self.cli.exec_command(command, bufsize, get_pty=get_pty)
python
Execute a command in the connection @param command: command to execute @type command: str @param bufsize: buffer size @type bufsize: int @param get_pty: get pty @type get_pty: bool @return: the stdin, stdout, and stderr of the executing command @rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile}, L{paramiko.ChannelFile}) @raise SSHException: if the server fails to execute the command
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L252-L272
RedHatQE/python-stitches
stitches/connection.py
Connection.recv_exit_status
def recv_exit_status(self, command, timeout=10, get_pty=False):
    """ Execute a command and get its return value

    @param command: command to execute
    @type command: str

    @param timeout: command execution timeout
    @type timeout: int

    @param get_pty: get pty
    @type get_pty: bool

    @return: the exit code of the process or None in case of timeout
    @rtype: int or None
    """
    status = None
    self.last_command = command
    stdin, stdout, stderr = self.cli.exec_command(command, get_pty=get_pty)
    if stdout and stderr and stdin:
        for _ in range(timeout):
            if stdout.channel.exit_status_ready():
                status = stdout.channel.recv_exit_status()
                break
            time.sleep(1)
        self.last_stdout = stdout.read()
        self.last_stderr = stderr.read()
        stdin.close()
        stdout.close()
        stderr.close()
    return status
python
Execute a command and get its return value @param command: command to execute @type command: str @param timeout: command execution timeout @type timeout: int @param get_pty: get pty @type get_pty: bool @return: the exit code of the process or None in case of timeout @rtype: int or None
train
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/connection.py#L274-L306
gwww/elkm1
elkm1_lib/counters.py
Counter.set
def set(self, value):
    """(Helper) Set counter to value"""
    self._elk.send(cx_encode(self._index, value))
python
(Helper) Set counter to value
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/counters.py#L13-L15
MonashBI/arcana
arcana/pipeline/provenance.py
Record.save
def save(self, path):
    """
    Saves the provenance object to a JSON file, optionally including
    checksums for inputs and outputs (which are initially produced mid-
    run) to insert during the write

    Parameters
    ----------
    path : str
        Path to save the generated JSON file
    inputs : dict[str, str | list[str] | list[list[str]]] | None
        Checksums of all pipeline inputs used by the pipeline. For
        inputs of matching frequency to the output derivative associated
        with the provenance object, the values of the dictionary will be
        single checksums. If the output is of lower frequency they will
        be lists of checksums or in the case of 'per_session' inputs to
        'per_study' outputs, lists of lists of checksum. They need to be
        provided here if the provenance object was initialised without
        checksums
    outputs : dict[str, str] | None
        Checksums of all pipeline outputs. They need to be provided here
        if the provenance object was initialised without checksums
    """
    with open(path, 'w') as f:
        try:
            json.dump(self.prov, f, indent=2)
        except TypeError:
            raise ArcanaError(
                "Could not serialise provenance record dictionary:\n{}"
                .format(pformat(self.prov)))
python
Saves the provenance object to a JSON file, optionally including checksums for inputs and outputs (which are initially produced mid- run) to insert during the write Parameters ---------- path : str Path to save the generated JSON file inputs : dict[str, str | list[str] | list[list[str]]] | None Checksums of all pipeline inputs used by the pipeline. For inputs of matching frequency to the output derivative associated with the provenance object, the values of the dictionary will be single checksums. If the output is of lower frequency they will be lists of checksums or in the case of 'per_session' inputs to 'per_study' outputs, lists of lists of checksum. They need to be provided here if the provenance object was initialised without checksums outputs : dict[str, str] | None Checksums of all pipeline outputs. They need to be provided here if the provenance object was initialised without checksums
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/provenance.py#L112-L140
MonashBI/arcana
arcana/pipeline/provenance.py
Record.load
def load(cls, pipeline_name, frequency, subject_id, visit_id, from_study,
         path):
    """
    Loads a saved provenance object from a JSON file

    Parameters
    ----------
    path : str
        Path to the provenance file
    frequency : str
        The frequency of the record
    subject_id : str | None
        The subject ID of the provenance record
    visit_id : str | None
        The visit ID of the provenance record
    from_study : str
        Name of the study the derivatives were created for

    Returns
    -------
    record : Record
        The loaded provenance record
    """
    with open(path) as f:
        prov = json.load(f)
    return Record(pipeline_name, frequency, subject_id, visit_id,
                  from_study, prov)
python
Loads a saved provenance object from a JSON file Parameters ---------- path : str Path to the provenance file frequency : str The frequency of the record subject_id : str | None The subject ID of the provenance record visit_id : str | None The visit ID of the provenance record from_study : str Name of the study the derivatives were created for Returns ------- record : Record The loaded provenance record
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/provenance.py#L143-L169
MonashBI/arcana
arcana/pipeline/provenance.py
Record.mismatches
def mismatches(self, other, include=None, exclude=None):
    """
    Compares information stored within provenance objects with the
    exception of version information to see if they match. Matches are
    constrained to the paths passed to the 'include' kwarg, with the
    exception of sub-paths passed to the 'exclude' kwarg

    Parameters
    ----------
    other : Provenance
        The provenance object to compare against
    include : list[list[str]] | None
        Paths in the provenance to include in the match. If None all
        are incluced
    exclude : list[list[str]] | None
        Paths in the provenance to exclude from the match. In None all
        are excluded
    """
    if include is not None:
        include_res = [self._gen_prov_path_regex(p) for p in include]
    if exclude is not None:
        exclude_res = [self._gen_prov_path_regex(p) for p in exclude]
    diff = DeepDiff(self._prov, other._prov, ignore_order=True)
    # Create regular expresssions for the include and exclude paths in
    # the format that deepdiff uses for nested dictionary/lists
    def include_change(change):
        if include is None:
            included = True
        else:
            included = any(rx.match(change) for rx in include_res)
        if included and exclude is not None:
            included = not any(rx.match(change) for rx in exclude_res)
        return included
    filtered_diff = {}
    for change_type, changes in diff.items():
        if isinstance(changes, dict):
            filtered = dict((k, v) for k, v in changes.items()
                            if include_change(k))
        else:
            filtered = [c for c in changes if include_change(c)]
        if filtered:
            filtered_diff[change_type] = filtered
    return filtered_diff
python
Compares information stored within provenance objects with the exception of version information to see if they match. Matches are constrained to the paths passed to the 'include' kwarg, with the exception of sub-paths passed to the 'exclude' kwarg Parameters ---------- other : Provenance The provenance object to compare against include : list[list[str]] | None Paths in the provenance to include in the match. If None all are incluced exclude : list[list[str]] | None Paths in the provenance to exclude from the match. In None all are excluded
[ "Compares", "information", "stored", "within", "provenance", "objects", "with", "the", "exception", "of", "version", "information", "to", "see", "if", "they", "match", ".", "Matches", "are", "constrained", "to", "the", "paths", "passed", "to", "the", "include", "kwarg", "with", "the", "exception", "of", "sub", "-", "paths", "passed", "to", "the", "exclude", "kwarg" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/provenance.py#L171-L215
MonashBI/arcana
arcana/study/multi.py
MultiStudy.translate
def translate(cls, substudy_name, pipeline_getter, auto_added=False):
        """
        A method for translating pipeline constructors from a sub-study to
        the namespace of a multi-study. Returns a new method that calls the
        sub-study pipeline constructor with appropriate keyword arguments

        Parameters
        ----------
        substudy_name : str
            Name of the sub-study
        pipeline_getter : str
            Name of method used to construct the pipeline in the sub-study
        auto_added : bool
            Signify that a method was automatically added by the
            MultiStudyMetaClass. Used in checks when pickling Study
            objects
        """
        assert isinstance(substudy_name, basestring)
        assert isinstance(pipeline_getter, basestring)

        def translated_getter(self, **name_maps):
            substudy_spec = self.substudy_spec(substudy_name)
            # Combine mapping of names of sub-study specs with
            return getattr(self.substudy(substudy_name), pipeline_getter)(
                prefix=substudy_name + '_',
                input_map=substudy_spec.name_map,
                output_map=substudy_spec.name_map,
                study=self,
                name_maps=name_maps)
        # Add reduce method to allow it to be pickled
        translated_getter.auto_added = auto_added
        return translated_getter
python
def translate(cls, substudy_name, pipeline_getter, auto_added=False):
        """
        A method for translating pipeline constructors from a sub-study to
        the namespace of a multi-study. Returns a new method that calls the
        sub-study pipeline constructor with appropriate keyword arguments

        Parameters
        ----------
        substudy_name : str
            Name of the sub-study
        pipeline_getter : str
            Name of method used to construct the pipeline in the sub-study
        auto_added : bool
            Signify that a method was automatically added by the
            MultiStudyMetaClass. Used in checks when pickling Study
            objects
        """
        assert isinstance(substudy_name, basestring)
        assert isinstance(pipeline_getter, basestring)

        def translated_getter(self, **name_maps):
            substudy_spec = self.substudy_spec(substudy_name)
            # Combine mapping of names of sub-study specs with
            return getattr(self.substudy(substudy_name), pipeline_getter)(
                prefix=substudy_name + '_',
                input_map=substudy_spec.name_map,
                output_map=substudy_spec.name_map,
                study=self,
                name_maps=name_maps)
        # Add reduce method to allow it to be pickled
        translated_getter.auto_added = auto_added
        return translated_getter
[ "def", "translate", "(", "cls", ",", "substudy_name", ",", "pipeline_getter", ",", "auto_added", "=", "False", ")", ":", "assert", "isinstance", "(", "substudy_name", ",", "basestring", ")", "assert", "isinstance", "(", "pipeline_getter", ",", "basestring", ")", "def", "translated_getter", "(", "self", ",", "*", "*", "name_maps", ")", ":", "substudy_spec", "=", "self", ".", "substudy_spec", "(", "substudy_name", ")", "# Combine mapping of names of sub-study specs with", "return", "getattr", "(", "self", ".", "substudy", "(", "substudy_name", ")", ",", "pipeline_getter", ")", "(", "prefix", "=", "substudy_name", "+", "'_'", ",", "input_map", "=", "substudy_spec", ".", "name_map", ",", "output_map", "=", "substudy_spec", ".", "name_map", ",", "study", "=", "self", ",", "name_maps", "=", "name_maps", ")", "# Add reduce method to allow it to be pickled", "translated_getter", ".", "auto_added", "=", "auto_added", "return", "translated_getter" ]
A method for translating pipeline constructors from a sub-study to the namespace of a multi-study. Returns a new method that calls the sub-study pipeline constructor with appropriate keyword arguments Parameters ---------- substudy_name : str Name of the sub-study pipeline_getter : str Name of method used to construct the pipeline in the sub-study auto_added : bool Signify that a method was automatically added by the MultiStudyMetaClass. Used in checks when pickling Study objects
[ "A", "method", "for", "translating", "pipeline", "constructors", "from", "a", "sub", "-", "study", "to", "the", "namespace", "of", "a", "multi", "-", "study", ".", "Returns", "a", "new", "method", "that", "calls", "the", "sub", "-", "study", "pipeline", "constructor", "with", "appropriate", "keyword", "arguments" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/multi.py#L169-L199
MonashBI/arcana
arcana/study/multi.py
SubStudySpec.auto_data_specs
def auto_data_specs(self):
        """
        Data specs in the sub-study class that are not explicitly provided
        in the name map
        """
        for spec in self.study_class.data_specs():
            if spec.name not in self._name_map:
                yield spec
python
def auto_data_specs(self):
        """
        Data specs in the sub-study class that are not explicitly provided
        in the name map
        """
        for spec in self.study_class.data_specs():
            if spec.name not in self._name_map:
                yield spec
[ "def", "auto_data_specs", "(", "self", ")", ":", "for", "spec", "in", "self", ".", "study_class", ".", "data_specs", "(", ")", ":", "if", "spec", ".", "name", "not", "in", "self", ".", "_name_map", ":", "yield", "spec" ]
Data specs in the sub-study class that are not explicitly provided in the name map
[ "Data", "specs", "in", "the", "sub", "-", "study", "class", "that", "are", "not", "explicitly", "provided", "in", "the", "name", "map" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/multi.py#L264-L271
MonashBI/arcana
arcana/study/multi.py
SubStudySpec.auto_param_specs
def auto_param_specs(self):
        """
        Parameter pecs in the sub-study class that are not explicitly
        provided in the name map
        """
        for spec in self.study_class.parameter_specs():
            if spec.name not in self._name_map:
                yield spec
python
def auto_param_specs(self):
        """
        Parameter pecs in the sub-study class that are not explicitly
        provided in the name map
        """
        for spec in self.study_class.parameter_specs():
            if spec.name not in self._name_map:
                yield spec
[ "def", "auto_param_specs", "(", "self", ")", ":", "for", "spec", "in", "self", ".", "study_class", ".", "parameter_specs", "(", ")", ":", "if", "spec", ".", "name", "not", "in", "self", ".", "_name_map", ":", "yield", "spec" ]
Parameter pecs in the sub-study class that are not explicitly provided in the name map
[ "Parameter", "pecs", "in", "the", "sub", "-", "study", "class", "that", "are", "not", "explicitly", "provided", "in", "the", "name", "map" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/study/multi.py#L274-L281
MonashBI/arcana
arcana/environment/base.py
MapNode._make_nodes
def _make_nodes(self, cwd=None):
        """
        Cast generated nodes to be Arcana nodes
        """
        for i, node in NipypeMapNode._make_nodes(self, cwd=cwd):
            # "Cast" NiPype node to a Arcana Node and set Arcana Node
            # parameters
            node.__class__ = self.node_cls
            node._environment = self._environment
            node._versions = self._versions
            node._wall_time = self._wall_time
            node._annotations = self._annotations
            yield i, node
python
def _make_nodes(self, cwd=None):
        """
        Cast generated nodes to be Arcana nodes
        """
        for i, node in NipypeMapNode._make_nodes(self, cwd=cwd):
            # "Cast" NiPype node to a Arcana Node and set Arcana Node
            # parameters
            node.__class__ = self.node_cls
            node._environment = self._environment
            node._versions = self._versions
            node._wall_time = self._wall_time
            node._annotations = self._annotations
            yield i, node
[ "def", "_make_nodes", "(", "self", ",", "cwd", "=", "None", ")", ":", "for", "i", ",", "node", "in", "NipypeMapNode", ".", "_make_nodes", "(", "self", ",", "cwd", "=", "cwd", ")", ":", "# \"Cast\" NiPype node to a Arcana Node and set Arcana Node", "# parameters", "node", ".", "__class__", "=", "self", ".", "node_cls", "node", ".", "_environment", "=", "self", ".", "_environment", "node", ".", "_versions", "=", "self", ".", "_versions", "node", ".", "_wall_time", "=", "self", ".", "_wall_time", "node", ".", "_annotations", "=", "self", ".", "_annotations", "yield", "i", ",", "node" ]
Cast generated nodes to be Arcana nodes
[ "Cast", "generated", "nodes", "to", "be", "Arcana", "nodes" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/base.py#L107-L119
tym-xqo/nerium
nerium/query.py
get_query
def get_query(query_name):
    """Find file matching query_name, read and return query object
    """
    query_file_match = list(
        filter(lambda i: query_name == i.stem, FLAT_QUERIES))
    if not query_file_match:
        return None
    # TODO: Log warning if more than one match
    query_file = query_file_match[0]
    with open(query_file) as f:
        metadata, query_body = frontmatter.parse(f.read())
    result_mod = query_file.suffix.strip('.')
    query_obj = SimpleNamespace(
        name=query_name,
        metadata=metadata,
        path=query_file,
        result_mod=result_mod,
        body=query_body,
        error=False,
        executed=datetime.utcnow().isoformat())
    return query_obj
python
def get_query(query_name):
    """Find file matching query_name, read and return query object
    """
    query_file_match = list(
        filter(lambda i: query_name == i.stem, FLAT_QUERIES))
    if not query_file_match:
        return None
    # TODO: Log warning if more than one match
    query_file = query_file_match[0]
    with open(query_file) as f:
        metadata, query_body = frontmatter.parse(f.read())
    result_mod = query_file.suffix.strip('.')
    query_obj = SimpleNamespace(
        name=query_name,
        metadata=metadata,
        path=query_file,
        result_mod=result_mod,
        body=query_body,
        error=False,
        executed=datetime.utcnow().isoformat())
    return query_obj
[ "def", "get_query", "(", "query_name", ")", ":", "query_file_match", "=", "list", "(", "filter", "(", "lambda", "i", ":", "query_name", "==", "i", ".", "stem", ",", "FLAT_QUERIES", ")", ")", "if", "not", "query_file_match", ":", "return", "None", "# TODO: Log warning if more than one match", "query_file", "=", "query_file_match", "[", "0", "]", "with", "open", "(", "query_file", ")", "as", "f", ":", "metadata", ",", "query_body", "=", "frontmatter", ".", "parse", "(", "f", ".", "read", "(", ")", ")", "result_mod", "=", "query_file", ".", "suffix", ".", "strip", "(", "'.'", ")", "query_obj", "=", "SimpleNamespace", "(", "name", "=", "query_name", ",", "metadata", "=", "metadata", ",", "path", "=", "query_file", ",", "result_mod", "=", "result_mod", ",", "body", "=", "query_body", ",", "error", "=", "False", ",", "executed", "=", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", ")", "return", "query_obj" ]
Find file matching query_name, read and return query object
[ "Find", "file", "matching", "query_name", "read", "and", "return", "query", "object" ]
train
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/query.py#L19-L39
tym-xqo/nerium
nerium/query.py
get_result_set
def get_result_set(query_name, **kwargs):
    """ Call get_query, then submit query from file to resultset module
    """
    query = get_query(query_name)
    if not query:
        query = SimpleNamespace()
        query.error = f"No query found matching '{query_name}'"
        return query
    try:
        result_mod = import_module(
            f'nerium.contrib.resultset.{query.result_mod}')
    except ModuleNotFoundError:
        result_mod = import_module('nerium.resultset.sql')
    query.params = {**kwargs}
    query.body = process_template(sql=query.body, **query.params)
    result = result_mod.result(query, **query.params)
    # Dumping and reloading via json here gets us datetime and decimal
    # serialization handling courtesy of `tablib`
    query.result = json.loads(
        json.dumps(result, default=serialize_objects_handler))
    try:
        if 'error' in query.result[0].keys():
            query.error = query.result[0]['error']
    except IndexError:
        pass
    return query
python
def get_result_set(query_name, **kwargs):
    """ Call get_query, then submit query from file to resultset module
    """
    query = get_query(query_name)
    if not query:
        query = SimpleNamespace()
        query.error = f"No query found matching '{query_name}'"
        return query
    try:
        result_mod = import_module(
            f'nerium.contrib.resultset.{query.result_mod}')
    except ModuleNotFoundError:
        result_mod = import_module('nerium.resultset.sql')
    query.params = {**kwargs}
    query.body = process_template(sql=query.body, **query.params)
    result = result_mod.result(query, **query.params)
    # Dumping and reloading via json here gets us datetime and decimal
    # serialization handling courtesy of `tablib`
    query.result = json.loads(
        json.dumps(result, default=serialize_objects_handler))
    try:
        if 'error' in query.result[0].keys():
            query.error = query.result[0]['error']
    except IndexError:
        pass
    return query
[ "def", "get_result_set", "(", "query_name", ",", "*", "*", "kwargs", ")", ":", "query", "=", "get_query", "(", "query_name", ")", "if", "not", "query", ":", "query", "=", "SimpleNamespace", "(", ")", "query", ".", "error", "=", "f\"No query found matching '{query_name}'\"", "return", "query", "try", ":", "result_mod", "=", "import_module", "(", "f'nerium.contrib.resultset.{query.result_mod}'", ")", "except", "ModuleNotFoundError", ":", "result_mod", "=", "import_module", "(", "'nerium.resultset.sql'", ")", "query", ".", "params", "=", "{", "*", "*", "kwargs", "}", "query", ".", "body", "=", "process_template", "(", "sql", "=", "query", ".", "body", ",", "*", "*", "query", ".", "params", ")", "result", "=", "result_mod", ".", "result", "(", "query", ",", "*", "*", "query", ".", "params", ")", "# Dumping and reloading via json here gets us datetime and decimal", "# serialization handling courtesy of `tablib`", "query", ".", "result", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "result", ",", "default", "=", "serialize_objects_handler", ")", ")", "try", ":", "if", "'error'", "in", "query", ".", "result", "[", "0", "]", ".", "keys", "(", ")", ":", "query", ".", "error", "=", "query", ".", "result", "[", "0", "]", "[", "'error'", "]", "except", "IndexError", ":", "pass", "return", "query" ]
Call get_query, then submit query from file to resultset module
[ "Call", "get_query", "then", "submit", "query", "from", "file", "to", "resultset", "module" ]
train
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/query.py#L49-L73
tym-xqo/nerium
nerium/query.py
results_to_csv
def results_to_csv(query_name, **kwargs):
    """ Generate CSV from result data
    """
    query = get_result_set(query_name, **kwargs)
    result = query.result
    columns = list(result[0].keys())
    data = [tuple(row.values()) for row in result]
    frame = tablib.Dataset()
    frame.headers = columns
    for row in data:
        frame.append(row)
    csvs = frame.export('csv')
    return csvs
python
def results_to_csv(query_name, **kwargs):
    """ Generate CSV from result data
    """
    query = get_result_set(query_name, **kwargs)
    result = query.result
    columns = list(result[0].keys())
    data = [tuple(row.values()) for row in result]
    frame = tablib.Dataset()
    frame.headers = columns
    for row in data:
        frame.append(row)
    csvs = frame.export('csv')
    return csvs
[ "def", "results_to_csv", "(", "query_name", ",", "*", "*", "kwargs", ")", ":", "query", "=", "get_result_set", "(", "query_name", ",", "*", "*", "kwargs", ")", "result", "=", "query", ".", "result", "columns", "=", "list", "(", "result", "[", "0", "]", ".", "keys", "(", ")", ")", "data", "=", "[", "tuple", "(", "row", ".", "values", "(", ")", ")", "for", "row", "in", "result", "]", "frame", "=", "tablib", ".", "Dataset", "(", ")", "frame", ".", "headers", "=", "columns", "for", "row", "in", "data", ":", "frame", ".", "append", "(", "row", ")", "csvs", "=", "frame", ".", "export", "(", "'csv'", ")", "return", "csvs" ]
Generate CSV from result data
[ "Generate", "CSV", "from", "result", "data" ]
train
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/query.py#L76-L88
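The three nerium records above (get_query, get_result_set, results_to_csv) describe a small pipeline: locate the query file, execute it, then optionally export the rows as CSV. The sketch below is illustrative only and is not part of the dataset; the query name 'monthly_signups' and the 'start_date' parameter are invented, while the module path nerium.query and the attributes used (result, error) come from the records themselves.

from nerium.query import get_result_set, results_to_csv

# Hypothetical query name and template parameter
query = get_result_set('monthly_signups', start_date='2019-01-01')
if query.error:
    print(query.error)   # error string set by get_result_set
else:
    print(query.result)  # list of dicts with JSON-safe values
    print(results_to_csv('monthly_signups', start_date='2019-01-01'))  # same rows as CSV text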
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.register
def register(self, models=None, wrapper_cls=None):
        """Registers with app_label.modelname, wrapper_cls.
        """
        self.loaded = True
        for model in models:
            model = model.lower()
            if model not in self.registry:
                self.registry.update({model: wrapper_cls or self.wrapper_cls})
                if self.register_historical:
                    historical_model = ".historical".join(model.split("."))
                    self.registry.update(
                        {historical_model: wrapper_cls or self.wrapper_cls}
                    )
            else:
                raise AlreadyRegistered(f"Model is already registered. Got {model}.")
python
def register(self, models=None, wrapper_cls=None):
        """Registers with app_label.modelname, wrapper_cls.
        """
        self.loaded = True
        for model in models:
            model = model.lower()
            if model not in self.registry:
                self.registry.update({model: wrapper_cls or self.wrapper_cls})
                if self.register_historical:
                    historical_model = ".historical".join(model.split("."))
                    self.registry.update(
                        {historical_model: wrapper_cls or self.wrapper_cls}
                    )
            else:
                raise AlreadyRegistered(f"Model is already registered. Got {model}.")
[ "def", "register", "(", "self", ",", "models", "=", "None", ",", "wrapper_cls", "=", "None", ")", ":", "self", ".", "loaded", "=", "True", "for", "model", "in", "models", ":", "model", "=", "model", ".", "lower", "(", ")", "if", "model", "not", "in", "self", ".", "registry", ":", "self", ".", "registry", ".", "update", "(", "{", "model", ":", "wrapper_cls", "or", "self", ".", "wrapper_cls", "}", ")", "if", "self", ".", "register_historical", ":", "historical_model", "=", "\".historical\"", ".", "join", "(", "model", ".", "split", "(", "\".\"", ")", ")", "self", ".", "registry", ".", "update", "(", "{", "historical_model", ":", "wrapper_cls", "or", "self", ".", "wrapper_cls", "}", ")", "else", ":", "raise", "AlreadyRegistered", "(", "f\"Model is already registered. Got {model}.\"", ")" ]
Registers with app_label.modelname, wrapper_cls.
[ "Registers", "with", "app_label", ".", "modelname", "wrapper_cls", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L28-L42
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.register_for_app
def register_for_app(
        self, app_label=None, exclude_models=None, exclude_model_classes=None
    ):
        """Registers all models for this app_label.
        """
        models = []
        exclude_models = exclude_models or []
        app_config = django_apps.get_app_config(app_label)
        for model in app_config.get_models():
            if model._meta.label_lower in exclude_models:
                pass
            elif exclude_model_classes and issubclass(model, exclude_model_classes):
                pass
            else:
                models.append(model._meta.label_lower)
        self.register(models)
python
def register_for_app(
        self, app_label=None, exclude_models=None, exclude_model_classes=None
    ):
        """Registers all models for this app_label.
        """
        models = []
        exclude_models = exclude_models or []
        app_config = django_apps.get_app_config(app_label)
        for model in app_config.get_models():
            if model._meta.label_lower in exclude_models:
                pass
            elif exclude_model_classes and issubclass(model, exclude_model_classes):
                pass
            else:
                models.append(model._meta.label_lower)
        self.register(models)
[ "def", "register_for_app", "(", "self", ",", "app_label", "=", "None", ",", "exclude_models", "=", "None", ",", "exclude_model_classes", "=", "None", ")", ":", "models", "=", "[", "]", "exclude_models", "=", "exclude_models", "or", "[", "]", "app_config", "=", "django_apps", ".", "get_app_config", "(", "app_label", ")", "for", "model", "in", "app_config", ".", "get_models", "(", ")", ":", "if", "model", ".", "_meta", ".", "label_lower", "in", "exclude_models", ":", "pass", "elif", "exclude_model_classes", "and", "issubclass", "(", "model", ",", "exclude_model_classes", ")", ":", "pass", "else", ":", "models", ".", "append", "(", "model", ".", "_meta", ".", "label_lower", ")", "self", ".", "register", "(", "models", ")" ]
Registers all models for this app_label.
[ "Registers", "all", "models", "for", "this", "app_label", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L44-L59
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.get_wrapped_instance
def get_wrapped_instance(self, instance=None):
        """Returns a wrapped model instance.
        """
        if instance._meta.label_lower not in self.registry:
            raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
        wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls
        if wrapper_cls:
            return wrapper_cls(instance)
        return instance
python
def get_wrapped_instance(self, instance=None):
        """Returns a wrapped model instance.
        """
        if instance._meta.label_lower not in self.registry:
            raise ModelNotRegistered(f"{repr(instance)} is not registered with {self}.")
        wrapper_cls = self.registry.get(instance._meta.label_lower) or self.wrapper_cls
        if wrapper_cls:
            return wrapper_cls(instance)
        return instance
[ "def", "get_wrapped_instance", "(", "self", ",", "instance", "=", "None", ")", ":", "if", "instance", ".", "_meta", ".", "label_lower", "not", "in", "self", ".", "registry", ":", "raise", "ModelNotRegistered", "(", "f\"{repr(instance)} is not registered with {self}.\"", ")", "wrapper_cls", "=", "self", ".", "registry", ".", "get", "(", "instance", ".", "_meta", ".", "label_lower", ")", "or", "self", ".", "wrapper_cls", "if", "wrapper_cls", ":", "return", "wrapper_cls", "(", "instance", ")", "return", "instance" ]
Returns a wrapped model instance.
[ "Returns", "a", "wrapped", "model", "instance", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L61-L69
erikvw/django-collect-offline
django_collect_offline/site_offline_models.py
SiteOfflineModels.site_models
def site_models(self, app_label=None):
        """Returns a dictionary of registered models.
        """
        site_models = {}
        app_configs = (
            django_apps.get_app_configs()
            if app_label is None
            else [django_apps.get_app_config(app_label)]
        )
        for app_config in app_configs:
            model_list = [
                model
                for model in app_config.get_models()
                if model._meta.label_lower in self.registry
            ]
            if model_list:
                model_list.sort(key=lambda m: m._meta.verbose_name)
                site_models.update({app_config.name: model_list})
        return site_models
python
def site_models(self, app_label=None):
        """Returns a dictionary of registered models.
        """
        site_models = {}
        app_configs = (
            django_apps.get_app_configs()
            if app_label is None
            else [django_apps.get_app_config(app_label)]
        )
        for app_config in app_configs:
            model_list = [
                model
                for model in app_config.get_models()
                if model._meta.label_lower in self.registry
            ]
            if model_list:
                model_list.sort(key=lambda m: m._meta.verbose_name)
                site_models.update({app_config.name: model_list})
        return site_models
[ "def", "site_models", "(", "self", ",", "app_label", "=", "None", ")", ":", "site_models", "=", "{", "}", "app_configs", "=", "(", "django_apps", ".", "get_app_configs", "(", ")", "if", "app_label", "is", "None", "else", "[", "django_apps", ".", "get_app_config", "(", "app_label", ")", "]", ")", "for", "app_config", "in", "app_configs", ":", "model_list", "=", "[", "model", "for", "model", "in", "app_config", ".", "get_models", "(", ")", "if", "model", ".", "_meta", ".", "label_lower", "in", "self", ".", "registry", "]", "if", "model_list", ":", "model_list", ".", "sort", "(", "key", "=", "lambda", "m", ":", "m", ".", "_meta", ".", "verbose_name", ")", "site_models", ".", "update", "(", "{", "app_config", ".", "name", ":", "model_list", "}", ")", "return", "site_models" ]
Returns a dictionary of registered models.
[ "Returns", "a", "dictionary", "of", "registered", "models", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/site_offline_models.py#L71-L89
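The four SiteOfflineModels records above describe a register-then-look-up workflow. The sketch below is a loose illustration rather than documented usage: the site_offline_models singleton name is an assumption (only the SiteOfflineModels class appears in the records), and the app and model labels are invented; it would also have to run inside a configured Django project.

from django_collect_offline.site_offline_models import site_offline_models  # assumed module-level instance

site_offline_models.register(models=["myapp.subjectconsent"])  # hypothetical model label
site_offline_models.register_for_app("myapp")                  # hypothetical app label
print(site_offline_models.site_models("myapp"))                # dict of app name -> registered model classes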
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.get_fileset
def get_fileset(self, fileset):
        """
        Set the path of the fileset from the repository
        """
        # Don't need to cache fileset as it is already local as long
        # as the path is set
        if fileset._path is None:
            primary_path = self.fileset_path(fileset)
            aux_files = fileset.format.default_aux_file_paths(primary_path)
            if not op.exists(primary_path):
                raise ArcanaMissingDataException(
                    "{} does not exist in {}"
                    .format(fileset, self))
            for aux_name, aux_path in aux_files.items():
                if not op.exists(aux_path):
                    raise ArcanaMissingDataException(
                        "{} is missing '{}' side car in {}"
                        .format(fileset, aux_name, self))
        else:
            primary_path = fileset.path
            aux_files = fileset.aux_files
        return primary_path, aux_files
python
def get_fileset(self, fileset):
        """
        Set the path of the fileset from the repository
        """
        # Don't need to cache fileset as it is already local as long
        # as the path is set
        if fileset._path is None:
            primary_path = self.fileset_path(fileset)
            aux_files = fileset.format.default_aux_file_paths(primary_path)
            if not op.exists(primary_path):
                raise ArcanaMissingDataException(
                    "{} does not exist in {}"
                    .format(fileset, self))
            for aux_name, aux_path in aux_files.items():
                if not op.exists(aux_path):
                    raise ArcanaMissingDataException(
                        "{} is missing '{}' side car in {}"
                        .format(fileset, aux_name, self))
        else:
            primary_path = fileset.path
            aux_files = fileset.aux_files
        return primary_path, aux_files
[ "def", "get_fileset", "(", "self", ",", "fileset", ")", ":", "# Don't need to cache fileset as it is already local as long", "# as the path is set", "if", "fileset", ".", "_path", "is", "None", ":", "primary_path", "=", "self", ".", "fileset_path", "(", "fileset", ")", "aux_files", "=", "fileset", ".", "format", ".", "default_aux_file_paths", "(", "primary_path", ")", "if", "not", "op", ".", "exists", "(", "primary_path", ")", ":", "raise", "ArcanaMissingDataException", "(", "\"{} does not exist in {}\"", ".", "format", "(", "fileset", ",", "self", ")", ")", "for", "aux_name", ",", "aux_path", "in", "aux_files", ".", "items", "(", ")", ":", "if", "not", "op", ".", "exists", "(", "aux_path", ")", ":", "raise", "ArcanaMissingDataException", "(", "\"{} is missing '{}' side car in {}\"", ".", "format", "(", "fileset", ",", "aux_name", ",", "self", ")", ")", "else", ":", "primary_path", "=", "fileset", ".", "path", "aux_files", "=", "fileset", ".", "aux_files", "return", "primary_path", ",", "aux_files" ]
Set the path of the fileset from the repository
[ "Set", "the", "path", "of", "the", "fileset", "from", "the", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L94-L115
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.get_field
def get_field(self, field):
        """
        Update the value of the field from the repository
        """
        # Load fields JSON, locking to prevent read/write conflicts
        # Would be better if only checked if locked to allow
        # concurrent reads but not possible with multi-process
        # locks (in my understanding at least).
        fpath = self.fields_json_path(field)
        try:
            with InterProcessLock(fpath + self.LOCK_SUFFIX,
                                  logger=logger), open(fpath, 'r') as f:
                dct = json.load(f)
            val = dct[field.name]
            if field.array:
                val = [field.dtype(v) for v in val]
            else:
                val = field.dtype(val)
        except (KeyError, IOError) as e:
            try:
                # Check to see if the IOError wasn't just because of a
                # missing file
                if e.errno != errno.ENOENT:
                    raise
            except AttributeError:
                pass
            raise ArcanaMissingDataException(
                "{} does not exist in the local repository {}"
                .format(field.name, self))
        return val
python
def get_field(self, field):
        """
        Update the value of the field from the repository
        """
        # Load fields JSON, locking to prevent read/write conflicts
        # Would be better if only checked if locked to allow
        # concurrent reads but not possible with multi-process
        # locks (in my understanding at least).
        fpath = self.fields_json_path(field)
        try:
            with InterProcessLock(fpath + self.LOCK_SUFFIX,
                                  logger=logger), open(fpath, 'r') as f:
                dct = json.load(f)
            val = dct[field.name]
            if field.array:
                val = [field.dtype(v) for v in val]
            else:
                val = field.dtype(val)
        except (KeyError, IOError) as e:
            try:
                # Check to see if the IOError wasn't just because of a
                # missing file
                if e.errno != errno.ENOENT:
                    raise
            except AttributeError:
                pass
            raise ArcanaMissingDataException(
                "{} does not exist in the local repository {}"
                .format(field.name, self))
        return val
[ "def", "get_field", "(", "self", ",", "field", ")", ":", "# Load fields JSON, locking to prevent read/write conflicts", "# Would be better if only checked if locked to allow", "# concurrent reads but not possible with multi-process", "# locks (in my understanding at least).", "fpath", "=", "self", ".", "fields_json_path", "(", "field", ")", "try", ":", "with", "InterProcessLock", "(", "fpath", "+", "self", ".", "LOCK_SUFFIX", ",", "logger", "=", "logger", ")", ",", "open", "(", "fpath", ",", "'r'", ")", "as", "f", ":", "dct", "=", "json", ".", "load", "(", "f", ")", "val", "=", "dct", "[", "field", ".", "name", "]", "if", "field", ".", "array", ":", "val", "=", "[", "field", ".", "dtype", "(", "v", ")", "for", "v", "in", "val", "]", "else", ":", "val", "=", "field", ".", "dtype", "(", "val", ")", "except", "(", "KeyError", ",", "IOError", ")", "as", "e", ":", "try", ":", "# Check to see if the IOError wasn't just because of a", "# missing file", "if", "e", ".", "errno", "!=", "errno", ".", "ENOENT", ":", "raise", "except", "AttributeError", ":", "pass", "raise", "ArcanaMissingDataException", "(", "\"{} does not exist in the local repository {}\"", ".", "format", "(", "field", ".", "name", ",", "self", ")", ")", "return", "val" ]
Update the value of the field from the repository
[ "Update", "the", "value", "of", "the", "field", "from", "the", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L117-L146
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.put_fileset
def put_fileset(self, fileset):
        """
        Inserts or updates a fileset in the repository
        """
        target_path = self.fileset_path(fileset)
        if op.isfile(fileset.path):
            shutil.copyfile(fileset.path, target_path)
            # Copy side car files into repository
            for aux_name, aux_path in fileset.format.default_aux_file_paths(
                    target_path).items():
                shutil.copyfile(self.aux_file[aux_name], aux_path)
        elif op.isdir(fileset.path):
            if op.exists(target_path):
                shutil.rmtree(target_path)
            shutil.copytree(fileset.path, target_path)
        else:
            assert False
python
def put_fileset(self, fileset):
        """
        Inserts or updates a fileset in the repository
        """
        target_path = self.fileset_path(fileset)
        if op.isfile(fileset.path):
            shutil.copyfile(fileset.path, target_path)
            # Copy side car files into repository
            for aux_name, aux_path in fileset.format.default_aux_file_paths(
                    target_path).items():
                shutil.copyfile(self.aux_file[aux_name], aux_path)
        elif op.isdir(fileset.path):
            if op.exists(target_path):
                shutil.rmtree(target_path)
            shutil.copytree(fileset.path, target_path)
        else:
            assert False
[ "def", "put_fileset", "(", "self", ",", "fileset", ")", ":", "target_path", "=", "self", ".", "fileset_path", "(", "fileset", ")", "if", "op", ".", "isfile", "(", "fileset", ".", "path", ")", ":", "shutil", ".", "copyfile", "(", "fileset", ".", "path", ",", "target_path", ")", "# Copy side car files into repository", "for", "aux_name", ",", "aux_path", "in", "fileset", ".", "format", ".", "default_aux_file_paths", "(", "target_path", ")", ".", "items", "(", ")", ":", "shutil", ".", "copyfile", "(", "self", ".", "aux_file", "[", "aux_name", "]", ",", "aux_path", ")", "elif", "op", ".", "isdir", "(", "fileset", ".", "path", ")", ":", "if", "op", ".", "exists", "(", "target_path", ")", ":", "shutil", ".", "rmtree", "(", "target_path", ")", "shutil", ".", "copytree", "(", "fileset", ".", "path", ",", "target_path", ")", "else", ":", "assert", "False" ]
Inserts or updates a fileset in the repository
[ "Inserts", "or", "updates", "a", "fileset", "in", "the", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L148-L164
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.put_field
def put_field(self, field):
        """
        Inserts or updates a field in the repository
        """
        fpath = self.fields_json_path(field)
        # Open fields JSON, locking to prevent other processes
        # reading or writing
        with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger):
            try:
                with open(fpath, 'r') as f:
                    dct = json.load(f)
            except IOError as e:
                if e.errno == errno.ENOENT:
                    dct = {}
                else:
                    raise
            if field.array:
                dct[field.name] = list(field.value)
            else:
                dct[field.name] = field.value
            with open(fpath, 'w') as f:
                json.dump(dct, f, indent=2)
python
def put_field(self, field):
        """
        Inserts or updates a field in the repository
        """
        fpath = self.fields_json_path(field)
        # Open fields JSON, locking to prevent other processes
        # reading or writing
        with InterProcessLock(fpath + self.LOCK_SUFFIX, logger=logger):
            try:
                with open(fpath, 'r') as f:
                    dct = json.load(f)
            except IOError as e:
                if e.errno == errno.ENOENT:
                    dct = {}
                else:
                    raise
            if field.array:
                dct[field.name] = list(field.value)
            else:
                dct[field.name] = field.value
            with open(fpath, 'w') as f:
                json.dump(dct, f, indent=2)
[ "def", "put_field", "(", "self", ",", "field", ")", ":", "fpath", "=", "self", ".", "fields_json_path", "(", "field", ")", "# Open fields JSON, locking to prevent other processes", "# reading or writing", "with", "InterProcessLock", "(", "fpath", "+", "self", ".", "LOCK_SUFFIX", ",", "logger", "=", "logger", ")", ":", "try", ":", "with", "open", "(", "fpath", ",", "'r'", ")", "as", "f", ":", "dct", "=", "json", ".", "load", "(", "f", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "dct", "=", "{", "}", "else", ":", "raise", "if", "field", ".", "array", ":", "dct", "[", "field", ".", "name", "]", "=", "list", "(", "field", ".", "value", ")", "else", ":", "dct", "[", "field", ".", "name", "]", "=", "field", ".", "value", "with", "open", "(", "fpath", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "dct", ",", "f", ",", "indent", "=", "2", ")" ]
Inserts or updates a field in the repository
[ "Inserts", "or", "updates", "a", "field", "in", "the", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L166-L187
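Read together, get_field and put_field above treat the per-node fields file as a flat JSON mapping from field names to values, with array fields stored as lists and written with indent=2. The snippet below only illustrates that layout; the field names and values are invented for the example.

import json

# Illustrative shape of the fields JSON that put_field writes and get_field reads back
example_fields = {"qc_passed": True, "mean_fd": 0.21, "echo_times": [0.007, 0.014]}
print(json.dumps(example_fields, indent=2))  # same indent=2 formatting as put_field uses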
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.find_data
def find_data(self, subject_ids=None, visit_ids=None, **kwargs):
        """
        Find all data within a repository, registering filesets, fields and
        provenance with the found_fileset, found_field and found_provenance
        methods, respectively

        Parameters
        ----------
        subject_ids : list(str)
            List of subject IDs with which to filter the tree with. If None
            all are returned
        visit_ids : list(str)
            List of visit IDs with which to filter the tree with. If None all
            are returned

        Returns
        -------
        filesets : list[Fileset]
            All the filesets found in the repository
        fields : list[Field]
            All the fields found in the repository
        records : list[Record]
            The provenance records found in the repository
        """
        all_filesets = []
        all_fields = []
        all_records = []
        for session_path, dirs, files in os.walk(self.root_dir):
            relpath = op.relpath(session_path, self.root_dir)
            path_parts = relpath.split(op.sep) if relpath != '.' else []
            ids = self._extract_ids_from_path(path_parts, dirs, files)
            if ids is None:
                continue
            subj_id, visit_id, from_study = ids
            # Check for summaries and filtered IDs
            if subj_id == self.SUMMARY_NAME:
                subj_id = None
            elif subject_ids is not None and subj_id not in subject_ids:
                continue
            if visit_id == self.SUMMARY_NAME:
                visit_id = None
            elif visit_ids is not None and visit_id not in visit_ids:
                continue
            # Map IDs into ID space of study
            subj_id = self.map_subject_id(subj_id)
            visit_id = self.map_visit_id(visit_id)
            # Determine frequency of session|summary
            if (subj_id, visit_id) == (None, None):
                frequency = 'per_study'
            elif subj_id is None:
                frequency = 'per_visit'
            elif visit_id is None:
                frequency = 'per_subject'
            else:
                frequency = 'per_session'
            filtered_files = self._filter_files(files, session_path)
            for fname in filtered_files:
                basename = split_extension(fname)[0]
                all_filesets.append(
                    Fileset.from_path(
                        op.join(session_path, fname),
                        frequency=frequency,
                        subject_id=subj_id,
                        visit_id=visit_id,
                        repository=self,
                        from_study=from_study,
                        potential_aux_files=[
                            f for f in filtered_files
                            if (split_extension(f)[0] == basename
                                and f != fname)],
                        **kwargs))
            for fname in self._filter_dirs(dirs, session_path):
                all_filesets.append(
                    Fileset.from_path(
                        op.join(session_path, fname),
                        frequency=frequency,
                        subject_id=subj_id,
                        visit_id=visit_id,
                        repository=self,
                        from_study=from_study,
                        **kwargs))
            if self.FIELDS_FNAME in files:
                with open(op.join(session_path, self.FIELDS_FNAME), 'r') as f:
                    dct = json.load(f)
                all_fields.extend(
                    Field(name=k, value=v, frequency=frequency,
                          subject_id=subj_id, visit_id=visit_id,
                          repository=self, from_study=from_study,
                          **kwargs)
                    for k, v in list(dct.items()))
            if self.PROV_DIR in dirs:
                if from_study is None:
                    raise ArcanaRepositoryError(
                        "Found provenance directory in session directory (i.e."
                        " not in study-specific sub-directory)")
                base_prov_dir = op.join(session_path, self.PROV_DIR)
                for fname in os.listdir(base_prov_dir):
                    all_records.append(Record.load(
                        split_extension(fname)[0],
                        frequency, subj_id, visit_id, from_study,
                        op.join(base_prov_dir, fname)))
        return all_filesets, all_fields, all_records
python
def find_data(self, subject_ids=None, visit_ids=None, **kwargs):
        """
        Find all data within a repository, registering filesets, fields and
        provenance with the found_fileset, found_field and found_provenance
        methods, respectively

        Parameters
        ----------
        subject_ids : list(str)
            List of subject IDs with which to filter the tree with. If None
            all are returned
        visit_ids : list(str)
            List of visit IDs with which to filter the tree with. If None all
            are returned

        Returns
        -------
        filesets : list[Fileset]
            All the filesets found in the repository
        fields : list[Field]
            All the fields found in the repository
        records : list[Record]
            The provenance records found in the repository
        """
        all_filesets = []
        all_fields = []
        all_records = []
        for session_path, dirs, files in os.walk(self.root_dir):
            relpath = op.relpath(session_path, self.root_dir)
            path_parts = relpath.split(op.sep) if relpath != '.' else []
            ids = self._extract_ids_from_path(path_parts, dirs, files)
            if ids is None:
                continue
            subj_id, visit_id, from_study = ids
            # Check for summaries and filtered IDs
            if subj_id == self.SUMMARY_NAME:
                subj_id = None
            elif subject_ids is not None and subj_id not in subject_ids:
                continue
            if visit_id == self.SUMMARY_NAME:
                visit_id = None
            elif visit_ids is not None and visit_id not in visit_ids:
                continue
            # Map IDs into ID space of study
            subj_id = self.map_subject_id(subj_id)
            visit_id = self.map_visit_id(visit_id)
            # Determine frequency of session|summary
            if (subj_id, visit_id) == (None, None):
                frequency = 'per_study'
            elif subj_id is None:
                frequency = 'per_visit'
            elif visit_id is None:
                frequency = 'per_subject'
            else:
                frequency = 'per_session'
            filtered_files = self._filter_files(files, session_path)
            for fname in filtered_files:
                basename = split_extension(fname)[0]
                all_filesets.append(
                    Fileset.from_path(
                        op.join(session_path, fname),
                        frequency=frequency,
                        subject_id=subj_id,
                        visit_id=visit_id,
                        repository=self,
                        from_study=from_study,
                        potential_aux_files=[
                            f for f in filtered_files
                            if (split_extension(f)[0] == basename
                                and f != fname)],
                        **kwargs))
            for fname in self._filter_dirs(dirs, session_path):
                all_filesets.append(
                    Fileset.from_path(
                        op.join(session_path, fname),
                        frequency=frequency,
                        subject_id=subj_id,
                        visit_id=visit_id,
                        repository=self,
                        from_study=from_study,
                        **kwargs))
            if self.FIELDS_FNAME in files:
                with open(op.join(session_path, self.FIELDS_FNAME), 'r') as f:
                    dct = json.load(f)
                all_fields.extend(
                    Field(name=k, value=v, frequency=frequency,
                          subject_id=subj_id, visit_id=visit_id,
                          repository=self, from_study=from_study,
                          **kwargs)
                    for k, v in list(dct.items()))
            if self.PROV_DIR in dirs:
                if from_study is None:
                    raise ArcanaRepositoryError(
                        "Found provenance directory in session directory (i.e."
                        " not in study-specific sub-directory)")
                base_prov_dir = op.join(session_path, self.PROV_DIR)
                for fname in os.listdir(base_prov_dir):
                    all_records.append(Record.load(
                        split_extension(fname)[0],
                        frequency, subj_id, visit_id, from_study,
                        op.join(base_prov_dir, fname)))
        return all_filesets, all_fields, all_records
[ "def", "find_data", "(", "self", ",", "subject_ids", "=", "None", ",", "visit_ids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "all_filesets", "=", "[", "]", "all_fields", "=", "[", "]", "all_records", "=", "[", "]", "for", "session_path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "self", ".", "root_dir", ")", ":", "relpath", "=", "op", ".", "relpath", "(", "session_path", ",", "self", ".", "root_dir", ")", "path_parts", "=", "relpath", ".", "split", "(", "op", ".", "sep", ")", "if", "relpath", "!=", "'.'", "else", "[", "]", "ids", "=", "self", ".", "_extract_ids_from_path", "(", "path_parts", ",", "dirs", ",", "files", ")", "if", "ids", "is", "None", ":", "continue", "subj_id", ",", "visit_id", ",", "from_study", "=", "ids", "# Check for summaries and filtered IDs", "if", "subj_id", "==", "self", ".", "SUMMARY_NAME", ":", "subj_id", "=", "None", "elif", "subject_ids", "is", "not", "None", "and", "subj_id", "not", "in", "subject_ids", ":", "continue", "if", "visit_id", "==", "self", ".", "SUMMARY_NAME", ":", "visit_id", "=", "None", "elif", "visit_ids", "is", "not", "None", "and", "visit_id", "not", "in", "visit_ids", ":", "continue", "# Map IDs into ID space of study", "subj_id", "=", "self", ".", "map_subject_id", "(", "subj_id", ")", "visit_id", "=", "self", ".", "map_visit_id", "(", "visit_id", ")", "# Determine frequency of session|summary", "if", "(", "subj_id", ",", "visit_id", ")", "==", "(", "None", ",", "None", ")", ":", "frequency", "=", "'per_study'", "elif", "subj_id", "is", "None", ":", "frequency", "=", "'per_visit'", "elif", "visit_id", "is", "None", ":", "frequency", "=", "'per_subject'", "else", ":", "frequency", "=", "'per_session'", "filtered_files", "=", "self", ".", "_filter_files", "(", "files", ",", "session_path", ")", "for", "fname", "in", "filtered_files", ":", "basename", "=", "split_extension", "(", "fname", ")", "[", "0", "]", "all_filesets", ".", "append", "(", "Fileset", ".", "from_path", "(", "op", ".", "join", "(", "session_path", ",", "fname", ")", ",", "frequency", "=", "frequency", ",", "subject_id", "=", "subj_id", ",", "visit_id", "=", "visit_id", ",", "repository", "=", "self", ",", "from_study", "=", "from_study", ",", "potential_aux_files", "=", "[", "f", "for", "f", "in", "filtered_files", "if", "(", "split_extension", "(", "f", ")", "[", "0", "]", "==", "basename", "and", "f", "!=", "fname", ")", "]", ",", "*", "*", "kwargs", ")", ")", "for", "fname", "in", "self", ".", "_filter_dirs", "(", "dirs", ",", "session_path", ")", ":", "all_filesets", ".", "append", "(", "Fileset", ".", "from_path", "(", "op", ".", "join", "(", "session_path", ",", "fname", ")", ",", "frequency", "=", "frequency", ",", "subject_id", "=", "subj_id", ",", "visit_id", "=", "visit_id", ",", "repository", "=", "self", ",", "from_study", "=", "from_study", ",", "*", "*", "kwargs", ")", ")", "if", "self", ".", "FIELDS_FNAME", "in", "files", ":", "with", "open", "(", "op", ".", "join", "(", "session_path", ",", "self", ".", "FIELDS_FNAME", ")", ",", "'r'", ")", "as", "f", ":", "dct", "=", "json", ".", "load", "(", "f", ")", "all_fields", ".", "extend", "(", "Field", "(", "name", "=", "k", ",", "value", "=", "v", ",", "frequency", "=", "frequency", ",", "subject_id", "=", "subj_id", ",", "visit_id", "=", "visit_id", ",", "repository", "=", "self", ",", "from_study", "=", "from_study", ",", "*", "*", "kwargs", ")", "for", "k", ",", "v", "in", "list", "(", "dct", ".", "items", "(", ")", ")", ")", "if", "self", ".", "PROV_DIR", "in", "dirs", ":", "if", 
"from_study", "is", "None", ":", "raise", "ArcanaRepositoryError", "(", "\"Found provenance directory in session directory (i.e.\"", "\" not in study-specific sub-directory)\"", ")", "base_prov_dir", "=", "op", ".", "join", "(", "session_path", ",", "self", ".", "PROV_DIR", ")", "for", "fname", "in", "os", ".", "listdir", "(", "base_prov_dir", ")", ":", "all_records", ".", "append", "(", "Record", ".", "load", "(", "split_extension", "(", "fname", ")", "[", "0", "]", ",", "frequency", ",", "subj_id", ",", "visit_id", ",", "from_study", ",", "op", ".", "join", "(", "base_prov_dir", ",", "fname", ")", ")", ")", "return", "all_filesets", ",", "all_fields", ",", "all_records" ]
Find all data within a repository, registering filesets, fields and provenance with the found_fileset, found_field and found_provenance methods, respectively Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned Returns ------- filesets : list[Fileset] All the filesets found in the repository fields : list[Field] All the fields found in the repository records : list[Record] The provenance records found in the repository
[ "Find", "all", "data", "within", "a", "repository", "registering", "filesets", "fields", "and", "provenance", "with", "the", "found_fileset", "found_field", "and", "found_provenance", "methods", "respectively" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L195-L295
MonashBI/arcana
arcana/repository/basic.py
BasicRepo.guess_depth
def guess_depth(self, root_dir):
        """
        Try to guess the depth of a directory repository (i.e. whether it has
        sub-folders for multiple subjects or visits, depending on where files
        and/or derived label files are found in the hierarchy of
        sub-directories under the root dir.

        Parameters
        ----------
        root_dir : str
            Path to the root directory of the repository
        """
        deepest = -1
        for path, dirs, files in os.walk(root_dir):
            depth = self.path_depth(path)
            filtered_files = self._filter_files(files, path)
            if filtered_files:
                logger.info("Guessing depth of directory repository at '{}' is"
                            " {} due to unfiltered files ('{}') in '{}'"
                            .format(root_dir, depth,
                                    "', '".join(filtered_files), path))
                return depth
            if self.PROV_DIR in dirs:
                depth_to_return = max(depth - 1, 0)
                logger.info("Guessing depth of directory repository at '{}' is"
                            "{} due to \"Derived label file\" in '{}'"
                            .format(root_dir, depth_to_return, path))
                return depth_to_return
            if depth >= self.MAX_DEPTH:
                logger.info("Guessing depth of directory repository at '{}' is"
                            " {} as '{}' is already at maximum depth"
                            .format(root_dir, self.MAX_DEPTH, path))
                return self.MAX_DEPTH
            try:
                for fpath in chain(filtered_files,
                                   self._filter_dirs(dirs, path)):
                    Fileset.from_path(fpath)
            except ArcanaError:
                pass
            else:
                if depth > deepest:
                    deepest = depth
        if deepest == -1:
            raise ArcanaRepositoryError(
                "Could not guess depth of '{}' repository as did not find "
                "a valid session directory within sub-directories."
                .format(root_dir))
        return deepest
python
def guess_depth(self, root_dir):
        """
        Try to guess the depth of a directory repository (i.e. whether it has
        sub-folders for multiple subjects or visits, depending on where files
        and/or derived label files are found in the hierarchy of
        sub-directories under the root dir.

        Parameters
        ----------
        root_dir : str
            Path to the root directory of the repository
        """
        deepest = -1
        for path, dirs, files in os.walk(root_dir):
            depth = self.path_depth(path)
            filtered_files = self._filter_files(files, path)
            if filtered_files:
                logger.info("Guessing depth of directory repository at '{}' is"
                            " {} due to unfiltered files ('{}') in '{}'"
                            .format(root_dir, depth,
                                    "', '".join(filtered_files), path))
                return depth
            if self.PROV_DIR in dirs:
                depth_to_return = max(depth - 1, 0)
                logger.info("Guessing depth of directory repository at '{}' is"
                            "{} due to \"Derived label file\" in '{}'"
                            .format(root_dir, depth_to_return, path))
                return depth_to_return
            if depth >= self.MAX_DEPTH:
                logger.info("Guessing depth of directory repository at '{}' is"
                            " {} as '{}' is already at maximum depth"
                            .format(root_dir, self.MAX_DEPTH, path))
                return self.MAX_DEPTH
            try:
                for fpath in chain(filtered_files,
                                   self._filter_dirs(dirs, path)):
                    Fileset.from_path(fpath)
            except ArcanaError:
                pass
            else:
                if depth > deepest:
                    deepest = depth
        if deepest == -1:
            raise ArcanaRepositoryError(
                "Could not guess depth of '{}' repository as did not find "
                "a valid session directory within sub-directories."
                .format(root_dir))
        return deepest
[ "def", "guess_depth", "(", "self", ",", "root_dir", ")", ":", "deepest", "=", "-", "1", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "root_dir", ")", ":", "depth", "=", "self", ".", "path_depth", "(", "path", ")", "filtered_files", "=", "self", ".", "_filter_files", "(", "files", ",", "path", ")", "if", "filtered_files", ":", "logger", ".", "info", "(", "\"Guessing depth of directory repository at '{}' is\"", "\" {} due to unfiltered files ('{}') in '{}'\"", ".", "format", "(", "root_dir", ",", "depth", ",", "\"', '\"", ".", "join", "(", "filtered_files", ")", ",", "path", ")", ")", "return", "depth", "if", "self", ".", "PROV_DIR", "in", "dirs", ":", "depth_to_return", "=", "max", "(", "depth", "-", "1", ",", "0", ")", "logger", ".", "info", "(", "\"Guessing depth of directory repository at '{}' is\"", "\"{} due to \\\"Derived label file\\\" in '{}'\"", ".", "format", "(", "root_dir", ",", "depth_to_return", ",", "path", ")", ")", "return", "depth_to_return", "if", "depth", ">=", "self", ".", "MAX_DEPTH", ":", "logger", ".", "info", "(", "\"Guessing depth of directory repository at '{}' is\"", "\" {} as '{}' is already at maximum depth\"", ".", "format", "(", "root_dir", ",", "self", ".", "MAX_DEPTH", ",", "path", ")", ")", "return", "self", ".", "MAX_DEPTH", "try", ":", "for", "fpath", "in", "chain", "(", "filtered_files", ",", "self", ".", "_filter_dirs", "(", "dirs", ",", "path", ")", ")", ":", "Fileset", ".", "from_path", "(", "fpath", ")", "except", "ArcanaError", ":", "pass", "else", ":", "if", "depth", ">", "deepest", ":", "deepest", "=", "depth", "if", "deepest", "==", "-", "1", ":", "raise", "ArcanaRepositoryError", "(", "\"Could not guess depth of '{}' repository as did not find \"", "\"a valid session directory within sub-directories.\"", ".", "format", "(", "root_dir", ")", ")", "return", "deepest" ]
Try to guess the depth of a directory repository (i.e. whether it has sub-folders for multiple subjects or visits, depending on where files and/or derived label files are found in the hierarchy of sub-directories under the root dir. Parameters ---------- root_dir : str Path to the root directory of the repository
[ "Try", "to", "guess", "the", "depth", "of", "a", "directory", "repository", "(", "i", ".", "e", ".", "whether", "it", "has", "sub", "-", "folders", "for", "multiple", "subjects", "or", "visits", "depending", "on", "where", "files", "and", "/", "or", "derived", "label", "files", "are", "found", "in", "the", "hierarchy", "of", "sub", "-", "directories", "under", "the", "root", "dir", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/basic.py#L376-L423
tym-xqo/nerium
nerium/data_source.py
get_data_source
def get_data_source(query):
    """ Get data source connection metadata based on config.
        Prefer, in order:
        - Query file frontmatter
        - `db.yaml` file in query subdirectory
        - DATABASE_URL in environment

    To allow for possible non-SQL sources to be contributed,
    we only return a config value here, and leave connecting
    to concrete resultset module, as required values and connection
    method might change. (This is also why we return a dict, despite
    having a single value in the SQL case.)
    """
    # *** GET CONNECTION PARAMS: ***
    # from frontmatter
    try:
        return query.metadata['data_source']
    except KeyError:
        try:
            return dict(url=query.metadata['database_url'])
        except KeyError:
            pass
    # from file in directory if present
    db_file = query.path.parent / 'db.yaml'
    if db_file.is_file():
        with open(db_file, 'r') as dbf:
            db_meta = yaml.safe_load(dbf.read())
        try:
            return db_meta['data_source']
        except KeyError:
            try:
                return dict(url=db_meta['database_url'])
            except KeyError:
                pass
    # Use env['DATABASE_URL']/sqlite if nothing else is configured
    return dict(
        url=os.getenv('DATABASE_URL', 'sqlite:///'))
python
def get_data_source(query):
    """ Get data source connection metadata based on config.
        Prefer, in order:
        - Query file frontmatter
        - `db.yaml` file in query subdirectory
        - DATABASE_URL in environment

    To allow for possible non-SQL sources to be contributed,
    we only return a config value here, and leave connecting
    to concrete resultset module, as required values and connection
    method might change. (This is also why we return a dict, despite
    having a single value in the SQL case.)
    """
    # *** GET CONNECTION PARAMS: ***
    # from frontmatter
    try:
        return query.metadata['data_source']
    except KeyError:
        try:
            return dict(url=query.metadata['database_url'])
        except KeyError:
            pass
    # from file in directory if present
    db_file = query.path.parent / 'db.yaml'
    if db_file.is_file():
        with open(db_file, 'r') as dbf:
            db_meta = yaml.safe_load(dbf.read())
        try:
            return db_meta['data_source']
        except KeyError:
            try:
                return dict(url=db_meta['database_url'])
            except KeyError:
                pass
    # Use env['DATABASE_URL']/sqlite if nothing else is configured
    return dict(
        url=os.getenv('DATABASE_URL', 'sqlite:///'))
[ "def", "get_data_source", "(", "query", ")", ":", "# *** GET CONNECTION PARAMS: ***", "# from frontmatter", "try", ":", "return", "query", ".", "metadata", "[", "'data_source'", "]", "except", "KeyError", ":", "try", ":", "return", "dict", "(", "url", "=", "query", ".", "metadata", "[", "'database_url'", "]", ")", "except", "KeyError", ":", "pass", "# from file in directory if present", "db_file", "=", "query", ".", "path", ".", "parent", "/", "'db.yaml'", "if", "db_file", ".", "is_file", "(", ")", ":", "with", "open", "(", "db_file", ",", "'r'", ")", "as", "dbf", ":", "db_meta", "=", "yaml", ".", "safe_load", "(", "dbf", ".", "read", "(", ")", ")", "try", ":", "return", "db_meta", "[", "'data_source'", "]", "except", "KeyError", ":", "try", ":", "return", "dict", "(", "url", "=", "db_meta", "[", "'database_url'", "]", ")", "except", "KeyError", ":", "pass", "# Use env['DATABASE_URL']/sqlite if nothing else is configured", "return", "dict", "(", "url", "=", "os", ".", "getenv", "(", "'DATABASE_URL'", ",", "'sqlite:///'", ")", ")" ]
Get data source connection metadata based on config. Prefer, in order: - Query file frontmatter - `db.yaml` file in query subdirectory - DATABASE_URL in environment To allow for possible non-SQL sources to be contributed, we only return a config value here, and leave connecting to concrete resultset module, as required values and connection method might change. (This is also why we return a dict, despite having a single value in the SQL case.)
[ "Get", "data", "source", "connection", "metadata", "based", "on", "config", ".", "Prefer", "in", "order", ":", "-", "Query", "file", "frontmatter", "-", "db", ".", "yaml", "file", "in", "query", "subdirectory", "-", "DATABASE_URL", "in", "environment" ]
train
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/data_source.py#L6-L43
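get_data_source above resolves connection settings with a simple precedence chain: query frontmatter, then a db.yaml beside the query, then the environment. The sketch below only illustrates the final fallback; the URL value is invented for the example.

import os

# With no frontmatter 'data_source'/'database_url' and no db.yaml present,
# the function falls back to DATABASE_URL (or 'sqlite:///' if that is unset).
os.environ['DATABASE_URL'] = 'postgresql://localhost/analytics'  # illustrative value
# get_data_source(query) would then return {'url': 'postgresql://localhost/analytics'}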
MonashBI/arcana
arcana/data/collection.py
BaseCollection.item
def item(self, subject_id=None, visit_id=None):
        """
        Returns a particular fileset|field in the collection corresponding to
        the given subject and visit_ids. subject_id and visit_id must be
        provided for relevant frequencies. Note that subject_id/visit_id can
        also be provided for non-relevant frequencies, they will just be
        ignored.

        Parameter
        ---------
        subject_id : str
            The subject id of the item to return
        visit_id : str
            The visit id of the item to return
        """
        if self.frequency == 'per_session':
            if subject_id is None or visit_id is None:
                raise ArcanaError(
                    "The 'subject_id' ({}) and 'visit_id' ({}) must be "
                    "provided to get an item from {}".format(
                        subject_id, visit_id, self))
            try:
                subj_dct = self._collection[subject_id]
            except KeyError:
                raise ArcanaIndexError(
                    subject_id,
                    "{} not a subject ID in '{}' collection ({})"
                    .format(subject_id, self.name,
                            ', '.join(self._collection.keys())))
            try:
                fileset = subj_dct[visit_id]
            except KeyError:
                raise ArcanaIndexError(
                    visit_id,
                    "{} not a visit ID in subject {} of '{}' "
                    "collection ({})"
                    .format(visit_id, subject_id, self.name,
                            ', '.join(subj_dct.keys())))
        elif self.frequency == 'per_subject':
            if subject_id is None:
                raise ArcanaError(
                    "The 'subject_id' arg must be provided to get "
                    "the match from {}"
                    .format(self))
            try:
                fileset = self._collection[subject_id]
            except KeyError:
                raise ArcanaIndexError(
                    subject_id,
                    "{} not a subject ID in '{}' collection ({})"
                    .format(subject_id, self.name,
                            ', '.join(self._collection.keys())))
        elif self.frequency == 'per_visit':
            if visit_id is None:
                raise ArcanaError(
                    "The 'visit_id' arg must be provided to get "
                    "the match from {}"
                    .format(self))
            try:
                fileset = self._collection[visit_id]
            except KeyError:
                raise ArcanaIndexError(
                    visit_id,
                    "{} not a visit ID in '{}' collection ({})"
                    .format(visit_id, self.name,
                            ', '.join(self._collection.keys())))
        elif self.frequency == 'per_study':
            try:
                fileset = self._collection[0]
            except IndexError:
                raise ArcanaIndexError(
                    "'{}' Collection is empty so doesn't have a "
                    "per_study node".format(self.name))
        return fileset
python
def item(self, subject_id=None, visit_id=None): """ Returns a particular fileset|field in the collection corresponding to the given subject and visit_ids. subject_id and visit_id must be provided for relevant frequencies. Note that subject_id/visit_id can also be provided for non-relevant frequencies, they will just be ignored. Parameter --------- subject_id : str The subject id of the item to return visit_id : str The visit id of the item to return """ if self.frequency == 'per_session': if subject_id is None or visit_id is None: raise ArcanaError( "The 'subject_id' ({}) and 'visit_id' ({}) must be " "provided to get an item from {}".format( subject_id, visit_id, self)) try: subj_dct = self._collection[subject_id] except KeyError: raise ArcanaIndexError( subject_id, "{} not a subject ID in '{}' collection ({})" .format(subject_id, self.name, ', '.join(self._collection.keys()))) try: fileset = subj_dct[visit_id] except KeyError: raise ArcanaIndexError( visit_id, "{} not a visit ID in subject {} of '{}' " "collection ({})" .format(visit_id, subject_id, self.name, ', '.join(subj_dct.keys()))) elif self.frequency == 'per_subject': if subject_id is None: raise ArcanaError( "The 'subject_id' arg must be provided to get " "the match from {}" .format(self)) try: fileset = self._collection[subject_id] except KeyError: raise ArcanaIndexError( subject_id, "{} not a subject ID in '{}' collection ({})" .format(subject_id, self.name, ', '.join(self._collection.keys()))) elif self.frequency == 'per_visit': if visit_id is None: raise ArcanaError( "The 'visit_id' arg must be provided to get " "the match from {}" .format(self)) try: fileset = self._collection[visit_id] except KeyError: raise ArcanaIndexError( visit_id, "{} not a visit ID in '{}' collection ({})" .format(visit_id, self.name, ', '.join(self._collection.keys()))) elif self.frequency == 'per_study': try: fileset = self._collection[0] except IndexError: raise ArcanaIndexError( "'{}' Collection is empty so doesn't have a " "per_study node".format(self.name)) return fileset
[ "def", "item", "(", "self", ",", "subject_id", "=", "None", ",", "visit_id", "=", "None", ")", ":", "if", "self", ".", "frequency", "==", "'per_session'", ":", "if", "subject_id", "is", "None", "or", "visit_id", "is", "None", ":", "raise", "ArcanaError", "(", "\"The 'subject_id' ({}) and 'visit_id' ({}) must be \"", "\"provided to get an item from {}\"", ".", "format", "(", "subject_id", ",", "visit_id", ",", "self", ")", ")", "try", ":", "subj_dct", "=", "self", ".", "_collection", "[", "subject_id", "]", "except", "KeyError", ":", "raise", "ArcanaIndexError", "(", "subject_id", ",", "\"{} not a subject ID in '{}' collection ({})\"", ".", "format", "(", "subject_id", ",", "self", ".", "name", ",", "', '", ".", "join", "(", "self", ".", "_collection", ".", "keys", "(", ")", ")", ")", ")", "try", ":", "fileset", "=", "subj_dct", "[", "visit_id", "]", "except", "KeyError", ":", "raise", "ArcanaIndexError", "(", "visit_id", ",", "\"{} not a visit ID in subject {} of '{}' \"", "\"collection ({})\"", ".", "format", "(", "visit_id", ",", "subject_id", ",", "self", ".", "name", ",", "', '", ".", "join", "(", "subj_dct", ".", "keys", "(", ")", ")", ")", ")", "elif", "self", ".", "frequency", "==", "'per_subject'", ":", "if", "subject_id", "is", "None", ":", "raise", "ArcanaError", "(", "\"The 'subject_id' arg must be provided to get \"", "\"the match from {}\"", ".", "format", "(", "self", ")", ")", "try", ":", "fileset", "=", "self", ".", "_collection", "[", "subject_id", "]", "except", "KeyError", ":", "raise", "ArcanaIndexError", "(", "subject_id", ",", "\"{} not a subject ID in '{}' collection ({})\"", ".", "format", "(", "subject_id", ",", "self", ".", "name", ",", "', '", ".", "join", "(", "self", ".", "_collection", ".", "keys", "(", ")", ")", ")", ")", "elif", "self", ".", "frequency", "==", "'per_visit'", ":", "if", "visit_id", "is", "None", ":", "raise", "ArcanaError", "(", "\"The 'visit_id' arg must be provided to get \"", "\"the match from {}\"", ".", "format", "(", "self", ")", ")", "try", ":", "fileset", "=", "self", ".", "_collection", "[", "visit_id", "]", "except", "KeyError", ":", "raise", "ArcanaIndexError", "(", "visit_id", ",", "\"{} not a visit ID in '{}' collection ({})\"", ".", "format", "(", "visit_id", ",", "self", ".", "name", ",", "', '", ".", "join", "(", "self", ".", "_collection", ".", "keys", "(", ")", ")", ")", ")", "elif", "self", ".", "frequency", "==", "'per_study'", ":", "try", ":", "fileset", "=", "self", ".", "_collection", "[", "0", "]", "except", "IndexError", ":", "raise", "ArcanaIndexError", "(", "\"'{}' Collection is empty so doesn't have a \"", "\"per_study node\"", ".", "format", "(", "self", ".", "name", ")", ")", "return", "fileset" ]
Returns a particular fileset|field in the collection corresponding to the given subject and visit_ids. subject_id and visit_id must be provided for relevant frequencies. Note that subject_id/visit_id can also be provided for non-relevant frequencies, they will just be ignored. Parameter --------- subject_id : str The subject id of the item to return visit_id : str The visit id of the item to return
[ "Returns", "a", "particular", "fileset|field", "in", "the", "collection", "corresponding", "to", "the", "given", "subject", "and", "visit_ids", ".", "subject_id", "and", "visit_id", "must", "be", "provided", "for", "relevant", "frequencies", ".", "Note", "that", "subject_id", "/", "visit_id", "can", "also", "be", "provided", "for", "non", "-", "relevant", "frequencies", "they", "will", "just", "be", "ignored", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/collection.py#L85-L159
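BaseCollection.item above is a frequency dispatch: per_session needs both IDs and walks a two-level mapping, per_subject and per_visit index a flat mapping by one ID, and per_study ignores the IDs and returns the single study-wide entry. A self-contained sketch of that dispatch over plain dictionaries (plain ValueError/KeyError stand in for the Arcana exception types):

def get_item(collection, frequency, subject_id=None, visit_id=None):
    # Collection layout mirrors the method above:
    #   per_session -> {subject_id: {visit_id: item}}
    #   per_subject -> {subject_id: item}
    #   per_visit   -> {visit_id: item}
    #   per_study   -> [item]
    if frequency == 'per_session':
        if subject_id is None or visit_id is None:
            raise ValueError('per_session lookups need subject_id and visit_id')
        return collection[subject_id][visit_id]
    if frequency == 'per_subject':
        return collection[subject_id]
    if frequency == 'per_visit':
        return collection[visit_id]
    return collection[0]  # per_study: single study-wide item

sessions = {'sub1': {'visit1': 'T1w.nii.gz'}}
print(get_item(sessions, 'per_session', 'sub1', 'visit1'))  # -> T1w.nii.gz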
MonashBI/arcana
arcana/data/collection.py
BaseCollection.bind
def bind(self, study, **kwargs): # @UnusedVariable """ Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study. """ if self.frequency == 'per_subject': tree_subject_ids = list(study.tree.subject_ids) subject_ids = list(self._collection.keys()) if tree_subject_ids != subject_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(subject_ids), "', '".join(tree_subject_ids))) elif self.frequency == 'per_visit': tree_visit_ids = list(study.tree.visit_ids) visit_ids = list(self._collection.keys()) if tree_visit_ids != visit_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(visit_ids), "', '".join(tree_visit_ids))) elif self.frequency == 'per_session': for subject in study.tree.subjects: if subject.id not in self._collection: raise ArcanaUsageError( "Study subject ID '{}' was not found in colleciton " "provided to '{}' (found '{}')".format( subject.id, self.name, "', '".join(self._collection.keys()))) for session in subject.sessions: if session.visit_id not in self._collection[subject.id]: raise ArcanaUsageError( "Study visit ID '{}' for subject '{}' was not " "found in colleciton provided to '{}' (found '{}')" .format(subject.id, self.name, "', '".join( self._collection[subject.id].keys())))
python
def bind(self, study, **kwargs): # @UnusedVariable """ Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study. """ if self.frequency == 'per_subject': tree_subject_ids = list(study.tree.subject_ids) subject_ids = list(self._collection.keys()) if tree_subject_ids != subject_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(subject_ids), "', '".join(tree_subject_ids))) elif self.frequency == 'per_visit': tree_visit_ids = list(study.tree.visit_ids) visit_ids = list(self._collection.keys()) if tree_visit_ids != visit_ids: raise ArcanaUsageError( "Subject IDs in collection provided to '{}' ('{}') " "do not match Study tree ('{}')".format( self.name, "', '".join(visit_ids), "', '".join(tree_visit_ids))) elif self.frequency == 'per_session': for subject in study.tree.subjects: if subject.id not in self._collection: raise ArcanaUsageError( "Study subject ID '{}' was not found in colleciton " "provided to '{}' (found '{}')".format( subject.id, self.name, "', '".join(self._collection.keys()))) for session in subject.sessions: if session.visit_id not in self._collection[subject.id]: raise ArcanaUsageError( "Study visit ID '{}' for subject '{}' was not " "found in colleciton provided to '{}' (found '{}')" .format(subject.id, self.name, "', '".join( self._collection[subject.id].keys())))
[ "def", "bind", "(", "self", ",", "study", ",", "*", "*", "kwargs", ")", ":", "# @UnusedVariable", "if", "self", ".", "frequency", "==", "'per_subject'", ":", "tree_subject_ids", "=", "list", "(", "study", ".", "tree", ".", "subject_ids", ")", "subject_ids", "=", "list", "(", "self", ".", "_collection", ".", "keys", "(", ")", ")", "if", "tree_subject_ids", "!=", "subject_ids", ":", "raise", "ArcanaUsageError", "(", "\"Subject IDs in collection provided to '{}' ('{}') \"", "\"do not match Study tree ('{}')\"", ".", "format", "(", "self", ".", "name", ",", "\"', '\"", ".", "join", "(", "subject_ids", ")", ",", "\"', '\"", ".", "join", "(", "tree_subject_ids", ")", ")", ")", "elif", "self", ".", "frequency", "==", "'per_visit'", ":", "tree_visit_ids", "=", "list", "(", "study", ".", "tree", ".", "visit_ids", ")", "visit_ids", "=", "list", "(", "self", ".", "_collection", ".", "keys", "(", ")", ")", "if", "tree_visit_ids", "!=", "visit_ids", ":", "raise", "ArcanaUsageError", "(", "\"Subject IDs in collection provided to '{}' ('{}') \"", "\"do not match Study tree ('{}')\"", ".", "format", "(", "self", ".", "name", ",", "\"', '\"", ".", "join", "(", "visit_ids", ")", ",", "\"', '\"", ".", "join", "(", "tree_visit_ids", ")", ")", ")", "elif", "self", ".", "frequency", "==", "'per_session'", ":", "for", "subject", "in", "study", ".", "tree", ".", "subjects", ":", "if", "subject", ".", "id", "not", "in", "self", ".", "_collection", ":", "raise", "ArcanaUsageError", "(", "\"Study subject ID '{}' was not found in colleciton \"", "\"provided to '{}' (found '{}')\"", ".", "format", "(", "subject", ".", "id", ",", "self", ".", "name", ",", "\"', '\"", ".", "join", "(", "self", ".", "_collection", ".", "keys", "(", ")", ")", ")", ")", "for", "session", "in", "subject", ".", "sessions", ":", "if", "session", ".", "visit_id", "not", "in", "self", ".", "_collection", "[", "subject", ".", "id", "]", ":", "raise", "ArcanaUsageError", "(", "\"Study visit ID '{}' for subject '{}' was not \"", "\"found in colleciton provided to '{}' (found '{}')\"", ".", "format", "(", "subject", ".", "id", ",", "self", ".", "name", ",", "\"', '\"", ".", "join", "(", "self", ".", "_collection", "[", "subject", ".", "id", "]", ".", "keys", "(", ")", ")", ")", ")" ]
Used for duck typing Collection objects with Spec and Match in source and sink initiation. Checks IDs match sessions in study.
[ "Used", "for", "duck", "typing", "Collection", "objects", "with", "Spec", "and", "Match", "in", "source", "and", "sink", "initiation", ".", "Checks", "IDs", "match", "sessions", "in", "study", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/collection.py#L167-L205
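bind above only cross-checks the IDs keyed in the collection against the study tree for the collection's frequency, raising when they disagree. A looser, order-insensitive sketch of the per_subject case (the strict version above compares the two ID lists directly, so ordering matters there):

def check_subject_ids(tree_subject_ids, collection):
    # collection is any mapping keyed by subject ID
    missing = sorted(set(tree_subject_ids) - set(collection))
    extra = sorted(set(collection) - set(tree_subject_ids))
    if missing or extra:
        raise ValueError('collection does not match study tree: '
                         'missing={}, extra={}'.format(missing, extra))

check_subject_ids(['sub1', 'sub2'], {'sub1': 1, 'sub2': 2})  # passes silently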
MonashBI/arcana
arcana/repository/tree.py
TreeNode.fileset
def fileset(self, name, from_study=None, format=None): # @ReservedAssignment @IgnorePep8 """ Gets the fileset named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | FilesetSpec The name of the fileset or a spec matching the given name from_study : str | None Name of the study that produced the fileset if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. format : FileFormat | str | None Either the format of the fileset to return or the name of the format. If None and only a single fileset is found for the given name and study then that is returned otherwise an exception is raised """ if isinstance(name, BaseFileset): if from_study is None and name.derived: from_study = name.study.name name = name.name try: format_dct = self._filesets[(name, from_study)] except KeyError: available = [ ('{}(format={})'.format(f.name, f._resource_name) if f._resource_name is not None else f.name) for f in self.filesets if f.from_study == from_study] other_studies = [ (f.from_study if f.from_study is not None else '<root>') for f in self.filesets if f.name == name] if other_studies: msg = (". NB: matching fileset(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a fileset named '{}'{} " "(available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg))) else: if format is None: all_formats = list(format_dct.values()) if len(all_formats) > 1: raise ArcanaNameError( "Multiple filesets found for '{}'{} in {} with formats" " {}. Need to specify a format" .format(name, ("in '{}'".format(from_study) if from_study is not None else ''), self, "', '".join(format_dct.keys()))) fileset = all_formats[0] else: try: if isinstance(format, str): fileset = format_dct[format] else: try: fileset = format_dct[format.ext] except KeyError: fileset = None for rname, rfileset in format_dct.items(): if rname in format.resource_names( self.tree.repository.type): fileset = rfileset break if fileset is None: raise except KeyError: raise ArcanaNameError( format, ("{} doesn't have a fileset named '{}'{} with " "format '{}' (available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), format, "', '".join(format_dct.keys()), msg))) return fileset
python
def fileset(self, name, from_study=None, format=None): # @ReservedAssignment @IgnorePep8 """ Gets the fileset named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | FilesetSpec The name of the fileset or a spec matching the given name from_study : str | None Name of the study that produced the fileset if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. format : FileFormat | str | None Either the format of the fileset to return or the name of the format. If None and only a single fileset is found for the given name and study then that is returned otherwise an exception is raised """ if isinstance(name, BaseFileset): if from_study is None and name.derived: from_study = name.study.name name = name.name try: format_dct = self._filesets[(name, from_study)] except KeyError: available = [ ('{}(format={})'.format(f.name, f._resource_name) if f._resource_name is not None else f.name) for f in self.filesets if f.from_study == from_study] other_studies = [ (f.from_study if f.from_study is not None else '<root>') for f in self.filesets if f.name == name] if other_studies: msg = (". NB: matching fileset(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a fileset named '{}'{} " "(available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg))) else: if format is None: all_formats = list(format_dct.values()) if len(all_formats) > 1: raise ArcanaNameError( "Multiple filesets found for '{}'{} in {} with formats" " {}. Need to specify a format" .format(name, ("in '{}'".format(from_study) if from_study is not None else ''), self, "', '".join(format_dct.keys()))) fileset = all_formats[0] else: try: if isinstance(format, str): fileset = format_dct[format] else: try: fileset = format_dct[format.ext] except KeyError: fileset = None for rname, rfileset in format_dct.items(): if rname in format.resource_names( self.tree.repository.type): fileset = rfileset break if fileset is None: raise except KeyError: raise ArcanaNameError( format, ("{} doesn't have a fileset named '{}'{} with " "format '{}' (available '{}'){}" .format(self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), format, "', '".join(format_dct.keys()), msg))) return fileset
[ "def", "fileset", "(", "self", ",", "name", ",", "from_study", "=", "None", ",", "format", "=", "None", ")", ":", "# @ReservedAssignment @IgnorePep8", "if", "isinstance", "(", "name", ",", "BaseFileset", ")", ":", "if", "from_study", "is", "None", "and", "name", ".", "derived", ":", "from_study", "=", "name", ".", "study", ".", "name", "name", "=", "name", ".", "name", "try", ":", "format_dct", "=", "self", ".", "_filesets", "[", "(", "name", ",", "from_study", ")", "]", "except", "KeyError", ":", "available", "=", "[", "(", "'{}(format={})'", ".", "format", "(", "f", ".", "name", ",", "f", ".", "_resource_name", ")", "if", "f", ".", "_resource_name", "is", "not", "None", "else", "f", ".", "name", ")", "for", "f", "in", "self", ".", "filesets", "if", "f", ".", "from_study", "==", "from_study", "]", "other_studies", "=", "[", "(", "f", ".", "from_study", "if", "f", ".", "from_study", "is", "not", "None", "else", "'<root>'", ")", "for", "f", "in", "self", ".", "filesets", "if", "f", ".", "name", "==", "name", "]", "if", "other_studies", ":", "msg", "=", "(", "\". NB: matching fileset(s) found for '{}' study(ies) \"", "\"('{}')\"", ".", "format", "(", "name", ",", "\"', '\"", ".", "join", "(", "other_studies", ")", ")", ")", "else", ":", "msg", "=", "''", "raise", "ArcanaNameError", "(", "name", ",", "(", "\"{} doesn't have a fileset named '{}'{} \"", "\"(available '{}'){}\"", ".", "format", "(", "self", ",", "name", ",", "(", "\" from study '{}'\"", ".", "format", "(", "from_study", ")", "if", "from_study", "is", "not", "None", "else", "''", ")", ",", "\"', '\"", ".", "join", "(", "available", ")", ",", "msg", ")", ")", ")", "else", ":", "if", "format", "is", "None", ":", "all_formats", "=", "list", "(", "format_dct", ".", "values", "(", ")", ")", "if", "len", "(", "all_formats", ")", ">", "1", ":", "raise", "ArcanaNameError", "(", "\"Multiple filesets found for '{}'{} in {} with formats\"", "\" {}. Need to specify a format\"", ".", "format", "(", "name", ",", "(", "\"in '{}'\"", ".", "format", "(", "from_study", ")", "if", "from_study", "is", "not", "None", "else", "''", ")", ",", "self", ",", "\"', '\"", ".", "join", "(", "format_dct", ".", "keys", "(", ")", ")", ")", ")", "fileset", "=", "all_formats", "[", "0", "]", "else", ":", "try", ":", "if", "isinstance", "(", "format", ",", "str", ")", ":", "fileset", "=", "format_dct", "[", "format", "]", "else", ":", "try", ":", "fileset", "=", "format_dct", "[", "format", ".", "ext", "]", "except", "KeyError", ":", "fileset", "=", "None", "for", "rname", ",", "rfileset", "in", "format_dct", ".", "items", "(", ")", ":", "if", "rname", "in", "format", ".", "resource_names", "(", "self", ".", "tree", ".", "repository", ".", "type", ")", ":", "fileset", "=", "rfileset", "break", "if", "fileset", "is", "None", ":", "raise", "except", "KeyError", ":", "raise", "ArcanaNameError", "(", "format", ",", "(", "\"{} doesn't have a fileset named '{}'{} with \"", "\"format '{}' (available '{}'){}\"", ".", "format", "(", "self", ",", "name", ",", "(", "\" from study '{}'\"", ".", "format", "(", "from_study", ")", "if", "from_study", "is", "not", "None", "else", "''", ")", ",", "format", ",", "\"', '\"", ".", "join", "(", "format_dct", ".", "keys", "(", ")", ")", ",", "msg", ")", ")", ")", "return", "fileset" ]
Gets the fileset named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | FilesetSpec The name of the fileset or a spec matching the given name from_study : str | None Name of the study that produced the fileset if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. format : FileFormat | str | None Either the format of the fileset to return or the name of the format. If None and only a single fileset is found for the given name and study then that is returned otherwise an exception is raised
[ "Gets", "the", "fileset", "named", "name", "produced", "by", "the", "Study", "named", "study", "if", "provided", ".", "If", "a", "spec", "is", "passed", "instead", "of", "a", "str", "to", "the", "name", "argument", "then", "the", "study", "will", "be", "set", "from", "the", "spec", "iff", "it", "is", "derived" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L114-L199
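TreeNode.fileset above does its lookup in two stages: (name, from_study) selects a dict of candidate filesets keyed by format label, then the format argument (None, a string, or a format object) selects one candidate — the sole entry, a direct key, or the object's extension followed by repository resource names. The second stage in isolation, under simplified assumptions (pick_format and the plain strings below are illustrative, not Arcana API):

def pick_format(format_dct, format=None, resource_names=()):
    # format_dct maps a format label (extension or resource name) to a fileset
    if format is None:
        if len(format_dct) > 1:
            raise KeyError('multiple formats available; one must be specified')
        return next(iter(format_dct.values()))
    if isinstance(format, str):
        return format_dct[format]
    # Format object: try its extension, then any matching resource name
    try:
        return format_dct[format.ext]
    except KeyError:
        for rname in resource_names:
            if rname in format_dct:
                return format_dct[rname]
        raise

candidates = {'.nii.gz': 'fileset-A', 'NIFTI_GZ': 'fileset-A (as XNAT resource)'}
print(pick_format(candidates, 'NIFTI_GZ'))  # -> fileset-A (as XNAT resource)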
MonashBI/arcana
arcana/repository/tree.py
TreeNode.field
def field(self, name, from_study=None): """ Gets the field named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | BaseField The name of the field or a spec matching the given name study : str | None Name of the study that produced the field if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. """ if isinstance(name, BaseField): if from_study is None and name.derived: from_study = name.study.name name = name.name try: return self._fields[(name, from_study)] except KeyError: available = [d.name for d in self.fields if d.from_study == from_study] other_studies = [(d.from_study if d.from_study is not None else '<root>') for d in self.fields if d.name == name] if other_studies: msg = (". NB: matching field(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a field named '{}'{} " "(available '{}')" .format( self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg)))
python
def field(self, name, from_study=None): """ Gets the field named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | BaseField The name of the field or a spec matching the given name study : str | None Name of the study that produced the field if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead. """ if isinstance(name, BaseField): if from_study is None and name.derived: from_study = name.study.name name = name.name try: return self._fields[(name, from_study)] except KeyError: available = [d.name for d in self.fields if d.from_study == from_study] other_studies = [(d.from_study if d.from_study is not None else '<root>') for d in self.fields if d.name == name] if other_studies: msg = (". NB: matching field(s) found for '{}' study(ies) " "('{}')".format(name, "', '".join(other_studies))) else: msg = '' raise ArcanaNameError( name, ("{} doesn't have a field named '{}'{} " "(available '{}')" .format( self, name, (" from study '{}'".format(from_study) if from_study is not None else ''), "', '".join(available), msg)))
[ "def", "field", "(", "self", ",", "name", ",", "from_study", "=", "None", ")", ":", "if", "isinstance", "(", "name", ",", "BaseField", ")", ":", "if", "from_study", "is", "None", "and", "name", ".", "derived", ":", "from_study", "=", "name", ".", "study", ".", "name", "name", "=", "name", ".", "name", "try", ":", "return", "self", ".", "_fields", "[", "(", "name", ",", "from_study", ")", "]", "except", "KeyError", ":", "available", "=", "[", "d", ".", "name", "for", "d", "in", "self", ".", "fields", "if", "d", ".", "from_study", "==", "from_study", "]", "other_studies", "=", "[", "(", "d", ".", "from_study", "if", "d", ".", "from_study", "is", "not", "None", "else", "'<root>'", ")", "for", "d", "in", "self", ".", "fields", "if", "d", ".", "name", "==", "name", "]", "if", "other_studies", ":", "msg", "=", "(", "\". NB: matching field(s) found for '{}' study(ies) \"", "\"('{}')\"", ".", "format", "(", "name", ",", "\"', '\"", ".", "join", "(", "other_studies", ")", ")", ")", "else", ":", "msg", "=", "''", "raise", "ArcanaNameError", "(", "name", ",", "(", "\"{} doesn't have a field named '{}'{} \"", "\"(available '{}')\"", ".", "format", "(", "self", ",", "name", ",", "(", "\" from study '{}'\"", ".", "format", "(", "from_study", ")", "if", "from_study", "is", "not", "None", "else", "''", ")", ",", "\"', '\"", ".", "join", "(", "available", ")", ",", "msg", ")", ")", ")" ]
Gets the field named 'name' produced by the Study named 'study' if provided. If a spec is passed instead of a str to the name argument, then the study will be set from the spec iff it is derived Parameters ---------- name : str | BaseField The name of the field or a spec matching the given name study : str | None Name of the study that produced the field if derived. If None and a spec is passed instaed of string to the name argument then the study name will be taken from the spec instead.
[ "Gets", "the", "field", "named", "name", "produced", "by", "the", "Study", "named", "study", "if", "provided", ".", "If", "a", "spec", "is", "passed", "instead", "of", "a", "str", "to", "the", "name", "argument", "then", "the", "study", "will", "be", "set", "from", "the", "spec", "iff", "it", "is", "derived" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L201-L241
MonashBI/arcana
arcana/repository/tree.py
TreeNode.record
def record(self, pipeline_name, from_study): """ Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline """ try: return self._records[(pipeline_name, from_study)] except KeyError: found = [] for sname, pnames in groupby(sorted(self._records, key=itemgetter(1)), key=itemgetter(1)): found.append( "'{}' for '{}'".format("', '".join(p for p, _ in pnames), sname)) raise ArcanaNameError( (pipeline_name, from_study), ("{} doesn't have a provenance record for pipeline '{}' " "for '{}' study (found {})".format( self, pipeline_name, from_study, '; '.join(found))))
python
def record(self, pipeline_name, from_study): """ Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline """ try: return self._records[(pipeline_name, from_study)] except KeyError: found = [] for sname, pnames in groupby(sorted(self._records, key=itemgetter(1)), key=itemgetter(1)): found.append( "'{}' for '{}'".format("', '".join(p for p, _ in pnames), sname)) raise ArcanaNameError( (pipeline_name, from_study), ("{} doesn't have a provenance record for pipeline '{}' " "for '{}' study (found {})".format( self, pipeline_name, from_study, '; '.join(found))))
[ "def", "record", "(", "self", ",", "pipeline_name", ",", "from_study", ")", ":", "try", ":", "return", "self", ".", "_records", "[", "(", "pipeline_name", ",", "from_study", ")", "]", "except", "KeyError", ":", "found", "=", "[", "]", "for", "sname", ",", "pnames", "in", "groupby", "(", "sorted", "(", "self", ".", "_records", ",", "key", "=", "itemgetter", "(", "1", ")", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")", ":", "found", ".", "append", "(", "\"'{}' for '{}'\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "p", "for", "p", ",", "_", "in", "pnames", ")", ",", "sname", ")", ")", "raise", "ArcanaNameError", "(", "(", "pipeline_name", ",", "from_study", ")", ",", "(", "\"{} doesn't have a provenance record for pipeline '{}' \"", "\"for '{}' study (found {})\"", ".", "format", "(", "self", ",", "pipeline_name", ",", "from_study", ",", "'; '", ".", "join", "(", "found", ")", ")", ")", ")" ]
Returns the provenance record for a given pipeline Parameters ---------- pipeline_name : str The name of the pipeline that generated the record from_study : str The name of the study that the pipeline was generated from Returns ------- record : arcana.provenance.Record The provenance record generated by the specified pipeline
[ "Returns", "the", "provenance", "record", "for", "a", "given", "pipeline" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L243-L274
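The except branch in TreeNode.record above assembles its error message by grouping the available (pipeline_name, from_study) keys by study. That grouping reproduced on its own — note that itertools.groupby only groups adjacent items, hence the sort on the same key first:

from itertools import groupby
from operator import itemgetter

records = {('t1_preproc', 'mri'): 'rec1',   # (pipeline_name, from_study) keys
           ('fmri_moco', 'mri'): 'rec2',
           ('qc_report', 'qa'): 'rec3'}

found = []
for study, pairs in groupby(sorted(records, key=itemgetter(1)), key=itemgetter(1)):
    found.append("'{}' for '{}'".format("', '".join(p for p, _ in pairs), study))
print('; '.join(found))
# -> 't1_preproc', 'fmri_moco' for 'mri'; 'qc_report' for 'qa'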
MonashBI/arcana
arcana/repository/tree.py
TreeNode.find_mismatch
def find_mismatch(self, other, indent=''): """ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string """ if self != other: mismatch = "\n{}{}".format(indent, type(self).__name__) else: mismatch = '' sub_indent = indent + ' ' if len(list(self.filesets)) != len(list(other.filesets)): mismatch += ('\n{indent}mismatching summary fileset lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.filesets)), len(list(other.filesets)), list(self.filesets), list(other.filesets), indent=sub_indent)) else: for s, o in zip(self.filesets, other.filesets): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.fields)) != len(list(other.fields)): mismatch += ('\n{indent}mismatching summary field lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.fields)), len(list(other.fields)), list(self.fields), list(other.fields), indent=sub_indent)) else: for s, o in zip(self.fields, other.fields): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
python
def find_mismatch(self, other, indent=''): """ Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string """ if self != other: mismatch = "\n{}{}".format(indent, type(self).__name__) else: mismatch = '' sub_indent = indent + ' ' if len(list(self.filesets)) != len(list(other.filesets)): mismatch += ('\n{indent}mismatching summary fileset lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.filesets)), len(list(other.filesets)), list(self.filesets), list(other.filesets), indent=sub_indent)) else: for s, o in zip(self.filesets, other.filesets): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.fields)) != len(list(other.fields)): mismatch += ('\n{indent}mismatching summary field lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.fields)), len(list(other.fields)), list(self.fields), list(other.fields), indent=sub_indent)) else: for s, o in zip(self.fields, other.fields): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
[ "def", "find_mismatch", "(", "self", ",", "other", ",", "indent", "=", "''", ")", ":", "if", "self", "!=", "other", ":", "mismatch", "=", "\"\\n{}{}\"", ".", "format", "(", "indent", ",", "type", "(", "self", ")", ".", "__name__", ")", "else", ":", "mismatch", "=", "''", "sub_indent", "=", "indent", "+", "' '", "if", "len", "(", "list", "(", "self", ".", "filesets", ")", ")", "!=", "len", "(", "list", "(", "other", ".", "filesets", ")", ")", ":", "mismatch", "+=", "(", "'\\n{indent}mismatching summary fileset lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", ".", "format", "(", "len", "(", "list", "(", "self", ".", "filesets", ")", ")", ",", "len", "(", "list", "(", "other", ".", "filesets", ")", ")", ",", "list", "(", "self", ".", "filesets", ")", ",", "list", "(", "other", ".", "filesets", ")", ",", "indent", "=", "sub_indent", ")", ")", "else", ":", "for", "s", ",", "o", "in", "zip", "(", "self", ".", "filesets", ",", "other", ".", "filesets", ")", ":", "mismatch", "+=", "s", ".", "find_mismatch", "(", "o", ",", "indent", "=", "sub_indent", ")", "if", "len", "(", "list", "(", "self", ".", "fields", ")", ")", "!=", "len", "(", "list", "(", "other", ".", "fields", ")", ")", ":", "mismatch", "+=", "(", "'\\n{indent}mismatching summary field lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", ".", "format", "(", "len", "(", "list", "(", "self", ".", "fields", ")", ")", ",", "len", "(", "list", "(", "other", ".", "fields", ")", ")", ",", "list", "(", "self", ".", "fields", ")", ",", "list", "(", "other", ".", "fields", ")", ",", "indent", "=", "sub_indent", ")", ")", "else", ":", "for", "s", ",", "o", "in", "zip", "(", "self", ".", "fields", ",", "other", ".", "fields", ")", ":", "mismatch", "+=", "s", ".", "find_mismatch", "(", "o", ",", "indent", "=", "sub_indent", ")", "return", "mismatch" ]
Highlights where two nodes differ in a human-readable form Parameters ---------- other : TreeNode The node to compare indent : str The white-space with which to indent output string Returns ------- mismatch : str The human-readable mismatch string
[ "Highlights", "where", "two", "nodes", "differ", "in", "a", "human", "-", "readable", "form" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L283-L328
MonashBI/arcana
arcana/repository/tree.py
Tree.nodes
def nodes(self, frequency=None): """ Returns an iterator over all nodes in the tree for the specified frequency. If no frequency is specified then all nodes are returned Parameters ---------- frequency : str | None The frequency of the nodes to iterate over. If None all frequencies are returned Returns ------- nodes : iterable[TreeNode] """ if frequency is None: nodes = chain(*(self._nodes(f) for f in ('per_study', 'per_subject', 'per_visit', 'per_session'))) else: nodes = self._nodes(frequency=frequency) return nodes
python
def nodes(self, frequency=None): """ Returns an iterator over all nodes in the tree for the specified frequency. If no frequency is specified then all nodes are returned Parameters ---------- frequency : str | None The frequency of the nodes to iterate over. If None all frequencies are returned Returns ------- nodes : iterable[TreeNode] """ if frequency is None: nodes = chain(*(self._nodes(f) for f in ('per_study', 'per_subject', 'per_visit', 'per_session'))) else: nodes = self._nodes(frequency=frequency) return nodes
[ "def", "nodes", "(", "self", ",", "frequency", "=", "None", ")", ":", "if", "frequency", "is", "None", ":", "nodes", "=", "chain", "(", "*", "(", "self", ".", "_nodes", "(", "f", ")", "for", "f", "in", "(", "'per_study'", ",", "'per_subject'", ",", "'per_visit'", ",", "'per_session'", ")", ")", ")", "else", ":", "nodes", "=", "self", ".", "_nodes", "(", "frequency", "=", "frequency", ")", "return", "nodes" ]
Returns an iterator over all nodes in the tree for the specified frequency. If no frequency is specified then all nodes are returned Parameters ---------- frequency : str | None The frequency of the nodes to iterate over. If None all frequencies are returned Returns ------- nodes : iterable[TreeNode]
[ "Returns", "an", "iterator", "over", "all", "nodes", "in", "the", "tree", "for", "the", "specified", "frequency", ".", "If", "no", "frequency", "is", "specified", "then", "all", "nodes", "are", "returned" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L488-L509
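When no frequency is given, Tree.nodes above simply chains the four per-frequency generators into one iterator. The same pattern in isolation (nodes_by_frequency is a stand-in for the private Tree._nodes helper):

from itertools import chain

def nodes_by_frequency(freq):
    # Stand-in: yield the node labels belonging to one frequency
    return iter(['{}-node'.format(freq)])

frequencies = ('per_study', 'per_subject', 'per_visit', 'per_session')
all_nodes = chain(*(nodes_by_frequency(f) for f in frequencies))
print(list(all_nodes))
# -> ['per_study-node', 'per_subject-node', 'per_visit-node', 'per_session-node']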
MonashBI/arcana
arcana/repository/tree.py
Tree.find_mismatch
def find_mismatch(self, other, indent=''): """ Used in debugging unittests """ mismatch = super(Tree, self).find_mismatch(other, indent) sub_indent = indent + ' ' if len(list(self.subjects)) != len(list(other.subjects)): mismatch += ('\n{indent}mismatching subject lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.subjects)), len(list(other.subjects)), list(self.subjects), list(other.subjects), indent=sub_indent)) else: for s, o in zip(self.subjects, other.subjects): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.visits)) != len(list(other.visits)): mismatch += ('\n{indent}mismatching visit lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.visits)), len(list(other.visits)), list(self.visits), list(other.visits), indent=sub_indent)) else: for s, o in zip(self.visits, other.visits): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
python
def find_mismatch(self, other, indent=''): """ Used in debugging unittests """ mismatch = super(Tree, self).find_mismatch(other, indent) sub_indent = indent + ' ' if len(list(self.subjects)) != len(list(other.subjects)): mismatch += ('\n{indent}mismatching subject lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.subjects)), len(list(other.subjects)), list(self.subjects), list(other.subjects), indent=sub_indent)) else: for s, o in zip(self.subjects, other.subjects): mismatch += s.find_mismatch(o, indent=sub_indent) if len(list(self.visits)) != len(list(other.visits)): mismatch += ('\n{indent}mismatching visit lengths ' '(self={} vs other={}): ' '\n{indent} self={}\n{indent} other={}' .format(len(list(self.visits)), len(list(other.visits)), list(self.visits), list(other.visits), indent=sub_indent)) else: for s, o in zip(self.visits, other.visits): mismatch += s.find_mismatch(o, indent=sub_indent) return mismatch
[ "def", "find_mismatch", "(", "self", ",", "other", ",", "indent", "=", "''", ")", ":", "mismatch", "=", "super", "(", "Tree", ",", "self", ")", ".", "find_mismatch", "(", "other", ",", "indent", ")", "sub_indent", "=", "indent", "+", "' '", "if", "len", "(", "list", "(", "self", ".", "subjects", ")", ")", "!=", "len", "(", "list", "(", "other", ".", "subjects", ")", ")", ":", "mismatch", "+=", "(", "'\\n{indent}mismatching subject lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", ".", "format", "(", "len", "(", "list", "(", "self", ".", "subjects", ")", ")", ",", "len", "(", "list", "(", "other", ".", "subjects", ")", ")", ",", "list", "(", "self", ".", "subjects", ")", ",", "list", "(", "other", ".", "subjects", ")", ",", "indent", "=", "sub_indent", ")", ")", "else", ":", "for", "s", ",", "o", "in", "zip", "(", "self", ".", "subjects", ",", "other", ".", "subjects", ")", ":", "mismatch", "+=", "s", ".", "find_mismatch", "(", "o", ",", "indent", "=", "sub_indent", ")", "if", "len", "(", "list", "(", "self", ".", "visits", ")", ")", "!=", "len", "(", "list", "(", "other", ".", "visits", ")", ")", ":", "mismatch", "+=", "(", "'\\n{indent}mismatching visit lengths '", "'(self={} vs other={}): '", "'\\n{indent} self={}\\n{indent} other={}'", ".", "format", "(", "len", "(", "list", "(", "self", ".", "visits", ")", ")", ",", "len", "(", "list", "(", "other", ".", "visits", ")", ")", ",", "list", "(", "self", ".", "visits", ")", ",", "list", "(", "other", ".", "visits", ")", ",", "indent", "=", "sub_indent", ")", ")", "else", ":", "for", "s", ",", "o", "in", "zip", "(", "self", ".", "visits", ",", "other", ".", "visits", ")", ":", "mismatch", "+=", "s", ".", "find_mismatch", "(", "o", ",", "indent", "=", "sub_indent", ")", "return", "mismatch" ]
Used in debugging unittests
[ "Used", "in", "debugging", "unittests" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L524-L554
MonashBI/arcana
arcana/repository/tree.py
Tree._fill_empty_sessions
def _fill_empty_sessions(self, fill_subjects, fill_visits): """ Fill in tree with additional empty subjects and/or visits to allow the study to pull its inputs from external repositories """ if fill_subjects is None: fill_subjects = [s.id for s in self.subjects] if fill_visits is None: fill_visits = [v.id for v in self.complete_visits] for subject_id in fill_subjects: try: subject = self.subject(subject_id) except ArcanaNameError: subject = self._subjects[subject_id] = Subject( subject_id, [], [], []) for visit_id in fill_visits: try: subject.session(visit_id) except ArcanaNameError: session = Session(subject_id, visit_id, [], []) subject._sessions[visit_id] = session try: visit = self.visit(visit_id) except ArcanaNameError: visit = self._visits[visit_id] = Visit( visit_id, [], [], []) visit._sessions[subject_id] = session
python
def _fill_empty_sessions(self, fill_subjects, fill_visits): """ Fill in tree with additional empty subjects and/or visits to allow the study to pull its inputs from external repositories """ if fill_subjects is None: fill_subjects = [s.id for s in self.subjects] if fill_visits is None: fill_visits = [v.id for v in self.complete_visits] for subject_id in fill_subjects: try: subject = self.subject(subject_id) except ArcanaNameError: subject = self._subjects[subject_id] = Subject( subject_id, [], [], []) for visit_id in fill_visits: try: subject.session(visit_id) except ArcanaNameError: session = Session(subject_id, visit_id, [], []) subject._sessions[visit_id] = session try: visit = self.visit(visit_id) except ArcanaNameError: visit = self._visits[visit_id] = Visit( visit_id, [], [], []) visit._sessions[subject_id] = session
[ "def", "_fill_empty_sessions", "(", "self", ",", "fill_subjects", ",", "fill_visits", ")", ":", "if", "fill_subjects", "is", "None", ":", "fill_subjects", "=", "[", "s", ".", "id", "for", "s", "in", "self", ".", "subjects", "]", "if", "fill_visits", "is", "None", ":", "fill_visits", "=", "[", "v", ".", "id", "for", "v", "in", "self", ".", "complete_visits", "]", "for", "subject_id", "in", "fill_subjects", ":", "try", ":", "subject", "=", "self", ".", "subject", "(", "subject_id", ")", "except", "ArcanaNameError", ":", "subject", "=", "self", ".", "_subjects", "[", "subject_id", "]", "=", "Subject", "(", "subject_id", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "for", "visit_id", "in", "fill_visits", ":", "try", ":", "subject", ".", "session", "(", "visit_id", ")", "except", "ArcanaNameError", ":", "session", "=", "Session", "(", "subject_id", ",", "visit_id", ",", "[", "]", ",", "[", "]", ")", "subject", ".", "_sessions", "[", "visit_id", "]", "=", "session", "try", ":", "visit", "=", "self", ".", "visit", "(", "visit_id", ")", "except", "ArcanaNameError", ":", "visit", "=", "self", ".", "_visits", "[", "visit_id", "]", "=", "Visit", "(", "visit_id", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "visit", ".", "_sessions", "[", "subject_id", "]", "=", "session" ]
Fill in tree with additional empty subjects and/or visits to allow the study to pull its inputs from external repositories
[ "Fill", "in", "tree", "with", "additional", "empty", "subjects", "and", "/", "or", "visits", "to", "allow", "the", "study", "to", "pull", "its", "inputs", "from", "external", "repositories" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L563-L589
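_fill_empty_sessions above pads the tree with empty Subject/Visit/Session placeholders so that every requested subject/visit combination exists before inputs are pulled from external repositories. A dictionary-only sketch of that fill step (None stands in for an empty session):

def fill_empty_sessions(tree, fill_subjects, fill_visits):
    # tree is {subject_id: {visit_id: session}}
    for subject_id in fill_subjects:
        subject = tree.setdefault(subject_id, {})
        for visit_id in fill_visits:
            subject.setdefault(visit_id, None)  # placeholder for an empty session
    return tree

tree = {'sub1': {'visit1': 'session-1-1'}}
fill_empty_sessions(tree, ['sub1', 'sub2'], ['visit1', 'visit2'])
print(tree['sub2'])   # -> {'visit1': None, 'visit2': None}
print(tree['sub1'])   # -> {'visit1': 'session-1-1', 'visit2': None}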
MonashBI/arcana
arcana/repository/tree.py
Tree.construct
def construct(cls, repository, filesets=(), fields=(), records=(), file_formats=(), **kwargs): """ Return the hierarchical tree of the filesets and fields stored in a repository Parameters ---------- respository : Repository The repository that the tree comes from filesets : list[Fileset] List of all filesets in the tree fields : list[Field] List of all fields in the tree records : list[Record] List of all records in the tree Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository """ # Sort the data by subject and visit ID filesets_dict = defaultdict(list) for fset in filesets: if file_formats: fset.set_format(file_formats) filesets_dict[(fset.subject_id, fset.visit_id)].append(fset) fields_dict = defaultdict(list) for field in fields: fields_dict[(field.subject_id, field.visit_id)].append(field) records_dict = defaultdict(list) for record in records: records_dict[(record.subject_id, record.visit_id)].append(record) # Create all sessions subj_sessions = defaultdict(list) visit_sessions = defaultdict(list) for sess_id in set(chain(filesets_dict, fields_dict, records_dict)): if None in sess_id: continue # Save summaries for later subj_id, visit_id = sess_id session = Session( subject_id=subj_id, visit_id=visit_id, filesets=filesets_dict[sess_id], fields=fields_dict[sess_id], records=records_dict[sess_id]) subj_sessions[subj_id].append(session) visit_sessions[visit_id].append(session) subjects = [] for subj_id in subj_sessions: subjects.append(Subject( subj_id, sorted(subj_sessions[subj_id]), filesets_dict[(subj_id, None)], fields_dict[(subj_id, None)], records_dict[(subj_id, None)])) visits = [] for visit_id in visit_sessions: visits.append(Visit( visit_id, sorted(visit_sessions[visit_id]), filesets_dict[(None, visit_id)], fields_dict[(None, visit_id)], records_dict[(None, visit_id)])) return Tree(sorted(subjects), sorted(visits), repository, filesets_dict[(None, None)], fields_dict[(None, None)], records_dict[(None, None)], **kwargs)
python
def construct(cls, repository, filesets=(), fields=(), records=(), file_formats=(), **kwargs): """ Return the hierarchical tree of the filesets and fields stored in a repository Parameters ---------- respository : Repository The repository that the tree comes from filesets : list[Fileset] List of all filesets in the tree fields : list[Field] List of all fields in the tree records : list[Record] List of all records in the tree Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository """ # Sort the data by subject and visit ID filesets_dict = defaultdict(list) for fset in filesets: if file_formats: fset.set_format(file_formats) filesets_dict[(fset.subject_id, fset.visit_id)].append(fset) fields_dict = defaultdict(list) for field in fields: fields_dict[(field.subject_id, field.visit_id)].append(field) records_dict = defaultdict(list) for record in records: records_dict[(record.subject_id, record.visit_id)].append(record) # Create all sessions subj_sessions = defaultdict(list) visit_sessions = defaultdict(list) for sess_id in set(chain(filesets_dict, fields_dict, records_dict)): if None in sess_id: continue # Save summaries for later subj_id, visit_id = sess_id session = Session( subject_id=subj_id, visit_id=visit_id, filesets=filesets_dict[sess_id], fields=fields_dict[sess_id], records=records_dict[sess_id]) subj_sessions[subj_id].append(session) visit_sessions[visit_id].append(session) subjects = [] for subj_id in subj_sessions: subjects.append(Subject( subj_id, sorted(subj_sessions[subj_id]), filesets_dict[(subj_id, None)], fields_dict[(subj_id, None)], records_dict[(subj_id, None)])) visits = [] for visit_id in visit_sessions: visits.append(Visit( visit_id, sorted(visit_sessions[visit_id]), filesets_dict[(None, visit_id)], fields_dict[(None, visit_id)], records_dict[(None, visit_id)])) return Tree(sorted(subjects), sorted(visits), repository, filesets_dict[(None, None)], fields_dict[(None, None)], records_dict[(None, None)], **kwargs)
[ "def", "construct", "(", "cls", ",", "repository", ",", "filesets", "=", "(", ")", ",", "fields", "=", "(", ")", ",", "records", "=", "(", ")", ",", "file_formats", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "# Sort the data by subject and visit ID", "filesets_dict", "=", "defaultdict", "(", "list", ")", "for", "fset", "in", "filesets", ":", "if", "file_formats", ":", "fset", ".", "set_format", "(", "file_formats", ")", "filesets_dict", "[", "(", "fset", ".", "subject_id", ",", "fset", ".", "visit_id", ")", "]", ".", "append", "(", "fset", ")", "fields_dict", "=", "defaultdict", "(", "list", ")", "for", "field", "in", "fields", ":", "fields_dict", "[", "(", "field", ".", "subject_id", ",", "field", ".", "visit_id", ")", "]", ".", "append", "(", "field", ")", "records_dict", "=", "defaultdict", "(", "list", ")", "for", "record", "in", "records", ":", "records_dict", "[", "(", "record", ".", "subject_id", ",", "record", ".", "visit_id", ")", "]", ".", "append", "(", "record", ")", "# Create all sessions", "subj_sessions", "=", "defaultdict", "(", "list", ")", "visit_sessions", "=", "defaultdict", "(", "list", ")", "for", "sess_id", "in", "set", "(", "chain", "(", "filesets_dict", ",", "fields_dict", ",", "records_dict", ")", ")", ":", "if", "None", "in", "sess_id", ":", "continue", "# Save summaries for later", "subj_id", ",", "visit_id", "=", "sess_id", "session", "=", "Session", "(", "subject_id", "=", "subj_id", ",", "visit_id", "=", "visit_id", ",", "filesets", "=", "filesets_dict", "[", "sess_id", "]", ",", "fields", "=", "fields_dict", "[", "sess_id", "]", ",", "records", "=", "records_dict", "[", "sess_id", "]", ")", "subj_sessions", "[", "subj_id", "]", ".", "append", "(", "session", ")", "visit_sessions", "[", "visit_id", "]", ".", "append", "(", "session", ")", "subjects", "=", "[", "]", "for", "subj_id", "in", "subj_sessions", ":", "subjects", ".", "append", "(", "Subject", "(", "subj_id", ",", "sorted", "(", "subj_sessions", "[", "subj_id", "]", ")", ",", "filesets_dict", "[", "(", "subj_id", ",", "None", ")", "]", ",", "fields_dict", "[", "(", "subj_id", ",", "None", ")", "]", ",", "records_dict", "[", "(", "subj_id", ",", "None", ")", "]", ")", ")", "visits", "=", "[", "]", "for", "visit_id", "in", "visit_sessions", ":", "visits", ".", "append", "(", "Visit", "(", "visit_id", ",", "sorted", "(", "visit_sessions", "[", "visit_id", "]", ")", ",", "filesets_dict", "[", "(", "None", ",", "visit_id", ")", "]", ",", "fields_dict", "[", "(", "None", ",", "visit_id", ")", "]", ",", "records_dict", "[", "(", "None", ",", "visit_id", ")", "]", ")", ")", "return", "Tree", "(", "sorted", "(", "subjects", ")", ",", "sorted", "(", "visits", ")", ",", "repository", ",", "filesets_dict", "[", "(", "None", ",", "None", ")", "]", ",", "fields_dict", "[", "(", "None", ",", "None", ")", "]", ",", "records_dict", "[", "(", "None", ",", "None", ")", "]", ",", "*", "*", "kwargs", ")" ]
Return the hierarchical tree of the filesets and fields stored in a repository Parameters ---------- respository : Repository The repository that the tree comes from filesets : list[Fileset] List of all filesets in the tree fields : list[Field] List of all fields in the tree records : list[Record] List of all records in the tree Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository
[ "Return", "the", "hierarchical", "tree", "of", "the", "filesets", "and", "fields", "stored", "in", "a", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L592-L664
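Tree.construct above first buckets the flat fileset/field/record lists by (subject_id, visit_id) — with None in either slot marking a per-subject, per-visit or per-study summary — and then builds Session, Subject and Visit objects from those buckets. The bucketing step on its own, with tuples standing in for the data objects:

from collections import defaultdict

filesets = [('sub1', 'visit1', 'T1w'),       # per-session item
            ('sub1', None,     'mean_b0'),   # per-subject summary
            (None,   'visit1', 'template'),  # per-visit summary
            (None,   None,     'atlas')]     # per-study summary

buckets = defaultdict(list)
for subject_id, visit_id, name in filesets:
    buckets[(subject_id, visit_id)].append(name)

session_ids = [key for key in buckets if None not in key]
print(session_ids)              # -> [('sub1', 'visit1')]
print(buckets[('sub1', None)])  # -> ['mean_b0']  (per-subject summary)
print(buckets[(None, None)])    # -> ['atlas']    (per-study summary)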
MonashBI/arcana
arcana/repository/tree.py
Session.nodes
def nodes(self, frequency=None): """ Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency """ if frequency is None: [] elif frequency == 'per_session': return [self] elif frequency in ('per_visit', 'per_subject'): return [self.parent] elif frequency == 'per_study': return [self.parent.parent]
python
def nodes(self, frequency=None): """ Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency """ if frequency is None: [] elif frequency == 'per_session': return [self] elif frequency in ('per_visit', 'per_subject'): return [self.parent] elif frequency == 'per_study': return [self.parent.parent]
[ "def", "nodes", "(", "self", ",", "frequency", "=", "None", ")", ":", "if", "frequency", "is", "None", ":", "[", "]", "elif", "frequency", "==", "'per_session'", ":", "return", "[", "self", "]", "elif", "frequency", "in", "(", "'per_visit'", ",", "'per_subject'", ")", ":", "return", "[", "self", ".", "parent", "]", "elif", "frequency", "==", "'per_study'", ":", "return", "[", "self", ".", "parent", ".", "parent", "]" ]
Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency
[ "Returns", "all", "nodes", "of", "the", "specified", "frequency", "that", "are", "related", "to", "the", "given", "Session" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L1015-L1037
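Session.nodes above maps a requested frequency onto the related tree nodes: the session itself, its parent Subject/Visit, or the grandparent Tree. A standalone sketch of the same dispatch, using dict-based stand-ins for the parent links and returning an explicit empty list when no frequency is requested:

def related_nodes(session, frequency=None):
    if frequency is None:
        return []                                 # nothing requested
    if frequency == 'per_session':
        return [session]
    if frequency in ('per_visit', 'per_subject'):
        return [session['parent']]                # the owning Subject/Visit
    if frequency == 'per_study':
        return [session['parent']['parent']]      # the Tree root
    raise ValueError('unknown frequency: {}'.format(frequency))

tree = {'parent': None}
subject = {'parent': tree}
session = {'parent': subject}
print(related_nodes(session, 'per_study') == [tree])  # -> True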
gwww/elkm1
elkm1_lib/proto.py
Connection.write_data
def write_data(self, data, response_required=None, timeout=5.0, raw=False): """Write data on the asyncio Protocol""" if self._transport is None: return if self._paused: return if self._waiting_for_response: LOG.debug("queueing write %s", data) self._queued_writes.append((data, response_required, timeout)) return if response_required: self._waiting_for_response = response_required if timeout > 0: self._timeout_task = self.loop.call_later( timeout, self._response_required_timeout) if not raw: cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256 data = data + '{:02X}'.format(cksum) if int(data[0:2], 16) != len(data)-2: LOG.debug("message length wrong: %s", data) LOG.debug("write_data '%s'", data) self._transport.write((data + '\r\n').encode())
python
def write_data(self, data, response_required=None, timeout=5.0, raw=False): """Write data on the asyncio Protocol""" if self._transport is None: return if self._paused: return if self._waiting_for_response: LOG.debug("queueing write %s", data) self._queued_writes.append((data, response_required, timeout)) return if response_required: self._waiting_for_response = response_required if timeout > 0: self._timeout_task = self.loop.call_later( timeout, self._response_required_timeout) if not raw: cksum = 256 - reduce(lambda x, y: x+y, map(ord, data)) % 256 data = data + '{:02X}'.format(cksum) if int(data[0:2], 16) != len(data)-2: LOG.debug("message length wrong: %s", data) LOG.debug("write_data '%s'", data) self._transport.write((data + '\r\n').encode())
[ "def", "write_data", "(", "self", ",", "data", ",", "response_required", "=", "None", ",", "timeout", "=", "5.0", ",", "raw", "=", "False", ")", ":", "if", "self", ".", "_transport", "is", "None", ":", "return", "if", "self", ".", "_paused", ":", "return", "if", "self", ".", "_waiting_for_response", ":", "LOG", ".", "debug", "(", "\"queueing write %s\"", ",", "data", ")", "self", ".", "_queued_writes", ".", "append", "(", "(", "data", ",", "response_required", ",", "timeout", ")", ")", "return", "if", "response_required", ":", "self", ".", "_waiting_for_response", "=", "response_required", "if", "timeout", ">", "0", ":", "self", ".", "_timeout_task", "=", "self", ".", "loop", ".", "call_later", "(", "timeout", ",", "self", ".", "_response_required_timeout", ")", "if", "not", "raw", ":", "cksum", "=", "256", "-", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ",", "map", "(", "ord", ",", "data", ")", ")", "%", "256", "data", "=", "data", "+", "'{:02X}'", ".", "format", "(", "cksum", ")", "if", "int", "(", "data", "[", "0", ":", "2", "]", ",", "16", ")", "!=", "len", "(", "data", ")", "-", "2", ":", "LOG", ".", "debug", "(", "\"message length wrong: %s\"", ",", "data", ")", "LOG", ".", "debug", "(", "\"write_data '%s'\"", ",", "data", ")", "self", ".", "_transport", ".", "write", "(", "(", "data", "+", "'\\r\\n'", ")", ".", "encode", "(", ")", ")" ]
Write data on the asyncio Protocol
[ "Write", "data", "on", "the", "asyncio", "Protocol" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/proto.py#L82-L108
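Most of write_data above is queueing and response bookkeeping; the protocol-specific part is the ASCII framing: a checksum computed as 256 minus the character-code sum modulo 256 is appended as two uppercase hex digits, and the two leading hex digits are expected to equal the number of characters that follow them (payload plus checksum). That arithmetic in isolation — '08cs0010' is an arbitrary example string chosen to satisfy the length rule, not a documented Elk command:

from functools import reduce

def frame(data):
    # Same checksum expression as write_data: 256 minus (sum of codes mod 256)
    cksum = 256 - reduce(lambda x, y: x + y, map(ord, data)) % 256
    framed = data + '{:02X}'.format(cksum)
    # Length rule checked in write_data (which only logs on mismatch); asserted here
    assert int(framed[0:2], 16) == len(framed) - 2, 'length prefix mismatch'
    return framed + '\r\n'

print(repr(frame('08cs0010')))  # -> '08cs001001\r\n'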
tym-xqo/nerium
nerium/utils.py
unwrap_querystring_lists
def unwrap_querystring_lists(obj): """Convert responder querystring params, pulling values out of list if there's only one. """ new_dict = { key: (obj[key][0] if len(obj[key]) == 1 else obj[key]) for key in obj.keys() } return new_dict
python
def unwrap_querystring_lists(obj): """Convert responder querystring params, pulling values out of list if there's only one. """ new_dict = { key: (obj[key][0] if len(obj[key]) == 1 else obj[key]) for key in obj.keys() } return new_dict
[ "def", "unwrap_querystring_lists", "(", "obj", ")", ":", "new_dict", "=", "{", "key", ":", "(", "obj", "[", "key", "]", "[", "0", "]", "if", "len", "(", "obj", "[", "key", "]", ")", "==", "1", "else", "obj", "[", "key", "]", ")", "for", "key", "in", "obj", ".", "keys", "(", ")", "}", "return", "new_dict" ]
Convert responder querystring params, pulling values out of list if there's only one.
[ "Convert", "responder", "querystring", "params", "pulling", "values", "out", "of", "list", "if", "there", "s", "only", "one", "." ]
train
https://github.com/tym-xqo/nerium/blob/b234847d95f37c3a49dff15a189205fe5bbbc05f/nerium/utils.py#L1-L10
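A quick, self-contained check of the behaviour documented above; the function body is restated from the record and the sample querystring dict is made up for illustration.

def unwrap_querystring_lists(obj):
    # Single-element lists are unwrapped to their sole value; longer lists
    # are passed through unchanged.
    return {
        key: (obj[key][0] if len(obj[key]) == 1 else obj[key])
        for key in obj.keys()
    }

params = {'limit': ['10'], 'tag': ['a', 'b']}   # invented responder-style params
print(unwrap_querystring_lists(params))
# -> {'limit': '10', 'tag': ['a', 'b']}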
MonashBI/arcana
arcana/data/input.py
BaseInput.pipeline_getter
def pipeline_getter(self): "For duck-typing with *Spec types" if not self.derivable: raise ArcanaUsageError( "There is no pipeline getter for {} because it doesn't " "fallback to a derived spec".format(self)) return self._fallback.pipeline_getter
python
def pipeline_getter(self): "For duck-typing with *Spec types" if not self.derivable: raise ArcanaUsageError( "There is no pipeline getter for {} because it doesn't " "fallback to a derived spec".format(self)) return self._fallback.pipeline_getter
[ "def", "pipeline_getter", "(", "self", ")", ":", "if", "not", "self", ".", "derivable", ":", "raise", "ArcanaUsageError", "(", "\"There is no pipeline getter for {} because it doesn't \"", "\"fallback to a derived spec\"", ".", "format", "(", "self", ")", ")", "return", "self", ".", "_fallback", ".", "pipeline_getter" ]
For duck-typing with *Spec types
[ "For", "duck", "-", "typing", "with", "*", "Spec", "types" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/input.py#L135-L141
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.prerequisites
def prerequisites(self): """ Iterates through the inputs of the pipelinen and determines the all prerequisite pipelines """ # Loop through the inputs to the pipeline and add the instancemethods # for the pipelines to generate each of the processed inputs prereqs = defaultdict(set) for input in self.inputs: # @ReservedAssignment spec = self._study.spec(input) # Could be an input to the study or optional acquired spec if spec.is_spec and spec.derived: prereqs[spec.pipeline_getter].add(input.name) return prereqs
python
def prerequisites(self): """ Iterates through the inputs of the pipelinen and determines the all prerequisite pipelines """ # Loop through the inputs to the pipeline and add the instancemethods # for the pipelines to generate each of the processed inputs prereqs = defaultdict(set) for input in self.inputs: # @ReservedAssignment spec = self._study.spec(input) # Could be an input to the study or optional acquired spec if spec.is_spec and spec.derived: prereqs[spec.pipeline_getter].add(input.name) return prereqs
[ "def", "prerequisites", "(", "self", ")", ":", "# Loop through the inputs to the pipeline and add the instancemethods", "# for the pipelines to generate each of the processed inputs", "prereqs", "=", "defaultdict", "(", "set", ")", "for", "input", "in", "self", ".", "inputs", ":", "# @ReservedAssignment", "spec", "=", "self", ".", "_study", ".", "spec", "(", "input", ")", "# Could be an input to the study or optional acquired spec", "if", "spec", ".", "is_spec", "and", "spec", ".", "derived", ":", "prereqs", "[", "spec", ".", "pipeline_getter", "]", ".", "add", "(", "input", ".", "name", ")", "return", "prereqs" ]
Iterates through the inputs of the pipeline and determines all the prerequisite pipelines
[ "Iterates", "through", "the", "inputs", "of", "the", "pipeline", "and", "determines", "all", "the", "prerequisite", "pipelines" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L147-L160
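The grouping performed by prerequisites above, mapping each pipeline getter to the set of derived inputs that require it, can be seen with plain data. The Spec namedtuple below merely fakes the attributes the method reads, and the spec names are invented:

from collections import defaultdict, namedtuple

# Fake stand-in for arcana data specs with just the attributes the method uses.
Spec = namedtuple('Spec', 'name is_spec derived pipeline_getter')

inputs = [
    Spec('magnitude', True, False, None),               # acquired input: no prerequisite
    Spec('coreg_brain', True, True, 'coreg_pipeline'),  # derived: needs coreg_pipeline
    Spec('brain_mask', True, True, 'coreg_pipeline'),
]

prereqs = defaultdict(set)
for spec in inputs:
    if spec.is_spec and spec.derived:
        prereqs[spec.pipeline_getter].add(spec.name)

print(dict(prereqs))  # {'coreg_pipeline': {'coreg_brain', 'brain_mask'}}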
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.study_inputs
def study_inputs(self): """ Returns all inputs of the study used by the pipeline, including inputs of prerequisites (and their prerequisites recursively) """ return set(chain( (i for i in self.inputs if not i.derived), *(self.study.pipeline(p, required_outputs=r).study_inputs for p, r in self.prerequisites.items())))
python
def study_inputs(self): """ Returns all inputs of the study used by the pipeline, including inputs of prerequisites (and their prerequisites recursively) """ return set(chain( (i for i in self.inputs if not i.derived), *(self.study.pipeline(p, required_outputs=r).study_inputs for p, r in self.prerequisites.items())))
[ "def", "study_inputs", "(", "self", ")", ":", "return", "set", "(", "chain", "(", "(", "i", "for", "i", "in", "self", ".", "inputs", "if", "not", "i", ".", "derived", ")", ",", "*", "(", "self", ".", "study", ".", "pipeline", "(", "p", ",", "required_outputs", "=", "r", ")", ".", "study_inputs", "for", "p", ",", "r", "in", "self", ".", "prerequisites", ".", "items", "(", ")", ")", ")", ")" ]
Returns all inputs of the study used by the pipeline, including inputs of prerequisites (and their prerequisites recursively)
[ "Returns", "all", "inputs", "of", "the", "study", "used", "by", "the", "pipeline", "including", "inputs", "of", "prerequisites", "(", "and", "their", "prerequisites", "recursively", ")" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L163-L171
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.add
def add(self, name, interface, inputs=None, outputs=None, requirements=None, wall_time=None, annotations=None, **kwargs): """ Adds a processing Node to the pipeline Parameters ---------- name : str Name for the node interface : nipype.Interface The interface to use for the node inputs : dict[str, (str, FileFormat) | (Node, str)] Connections from inputs of the pipeline and outputs of other nodes to inputs of node. The keys of the dictionary are the field names and the values are 2-tuple containing either the name of the data spec and the data format it is expected in for pipeline inputs or the sending Node and the the name of an output of the sending Node. Note that pipeline inputs can be specified outside this method using the 'connect_input' method and connections between nodes with the the 'connect' method. outputs : dict[str, (str, FileFormat)] Connections to outputs of the pipeline from fields of the interface. The keys of the dictionary are the names of the data specs that will be written to and the values are the interface field name and the data format it is produced in. Note that output connections can also be specified using the 'connect_output' method. requirements : list(Requirement) List of required packages need for the node to run (default: []) wall_time : float Time required to execute the node in minutes (default: 1) mem_gb : int Required memory for the node in GB n_procs : int Preferred number of threads to run the node on (default: 1) annotations : dict[str, *] Additional annotations to add to the node, which may be used by the Processor node to optimise execution (e.g. 'gpu': True) iterfield : str Name of field to be passed an iterable to iterator over. If present, a MapNode will be created instead of a regular node joinsource : str Name of iterator field to join. Typically one of the implicit iterators (i.e. 
Study.SUBJECT_ID or Study.VISIT_ID) to join over the subjects and/or visits joinfield : str Name of field to pass the joined list when creating a JoinNode Returns ------- node : Node The Node object that has been added to the pipeline """ if annotations is None: annotations = {} if requirements is None: requirements = [] if wall_time is None: wall_time = self.study.processor.default_wall_time if 'mem_gb' not in kwargs or kwargs['mem_gb'] is None: kwargs['mem_gb'] = self.study.processor.default_mem_gb if 'iterfield' in kwargs: if 'joinfield' in kwargs or 'joinsource' in kwargs: raise ArcanaDesignError( "Cannot provide both joinsource and iterfield to when " "attempting to add '{}' node to {}" .foramt(name, self._error_msg_loc)) node_cls = self.study.environment.node_types['map'] elif 'joinsource' in kwargs or 'joinfield' in kwargs: if not ('joinfield' in kwargs and 'joinsource' in kwargs): raise ArcanaDesignError( "Both joinsource and joinfield kwargs are required to " "create a JoinNode (see {})".format(name, self._error_msg_loc)) joinsource = kwargs['joinsource'] if joinsource in self.study.ITERFIELDS: self._iterator_joins.add(joinsource) node_cls = self.study.environment.node_types['join'] # Prepend name of pipeline of joinsource to match name of nodes kwargs['joinsource'] = '{}_{}'.format(self.name, joinsource) else: node_cls = self.study.environment.node_types['base'] # Create node node = node_cls(self.study.environment, interface, name="{}_{}".format(self._name, name), requirements=requirements, wall_time=wall_time, annotations=annotations, **kwargs) # Ensure node is added to workflow self._workflow.add_nodes([node]) # Connect inputs, outputs and internal connections if inputs is not None: assert isinstance(inputs, dict) for node_input, connect_from in inputs.items(): if isinstance(connect_from[0], basestring): input_spec, input_format = connect_from self.connect_input(input_spec, node, node_input, input_format) else: conn_node, conn_field = connect_from self.connect(conn_node, conn_field, node, node_input) if outputs is not None: assert isinstance(outputs, dict) for output_spec, (node_output, output_format) in outputs.items(): self.connect_output(output_spec, node, node_output, output_format) return node
python
def add(self, name, interface, inputs=None, outputs=None, requirements=None, wall_time=None, annotations=None, **kwargs): """ Adds a processing Node to the pipeline Parameters ---------- name : str Name for the node interface : nipype.Interface The interface to use for the node inputs : dict[str, (str, FileFormat) | (Node, str)] Connections from inputs of the pipeline and outputs of other nodes to inputs of node. The keys of the dictionary are the field names and the values are 2-tuple containing either the name of the data spec and the data format it is expected in for pipeline inputs or the sending Node and the the name of an output of the sending Node. Note that pipeline inputs can be specified outside this method using the 'connect_input' method and connections between nodes with the the 'connect' method. outputs : dict[str, (str, FileFormat)] Connections to outputs of the pipeline from fields of the interface. The keys of the dictionary are the names of the data specs that will be written to and the values are the interface field name and the data format it is produced in. Note that output connections can also be specified using the 'connect_output' method. requirements : list(Requirement) List of required packages need for the node to run (default: []) wall_time : float Time required to execute the node in minutes (default: 1) mem_gb : int Required memory for the node in GB n_procs : int Preferred number of threads to run the node on (default: 1) annotations : dict[str, *] Additional annotations to add to the node, which may be used by the Processor node to optimise execution (e.g. 'gpu': True) iterfield : str Name of field to be passed an iterable to iterator over. If present, a MapNode will be created instead of a regular node joinsource : str Name of iterator field to join. Typically one of the implicit iterators (i.e. 
Study.SUBJECT_ID or Study.VISIT_ID) to join over the subjects and/or visits joinfield : str Name of field to pass the joined list when creating a JoinNode Returns ------- node : Node The Node object that has been added to the pipeline """ if annotations is None: annotations = {} if requirements is None: requirements = [] if wall_time is None: wall_time = self.study.processor.default_wall_time if 'mem_gb' not in kwargs or kwargs['mem_gb'] is None: kwargs['mem_gb'] = self.study.processor.default_mem_gb if 'iterfield' in kwargs: if 'joinfield' in kwargs or 'joinsource' in kwargs: raise ArcanaDesignError( "Cannot provide both joinsource and iterfield to when " "attempting to add '{}' node to {}" .foramt(name, self._error_msg_loc)) node_cls = self.study.environment.node_types['map'] elif 'joinsource' in kwargs or 'joinfield' in kwargs: if not ('joinfield' in kwargs and 'joinsource' in kwargs): raise ArcanaDesignError( "Both joinsource and joinfield kwargs are required to " "create a JoinNode (see {})".format(name, self._error_msg_loc)) joinsource = kwargs['joinsource'] if joinsource in self.study.ITERFIELDS: self._iterator_joins.add(joinsource) node_cls = self.study.environment.node_types['join'] # Prepend name of pipeline of joinsource to match name of nodes kwargs['joinsource'] = '{}_{}'.format(self.name, joinsource) else: node_cls = self.study.environment.node_types['base'] # Create node node = node_cls(self.study.environment, interface, name="{}_{}".format(self._name, name), requirements=requirements, wall_time=wall_time, annotations=annotations, **kwargs) # Ensure node is added to workflow self._workflow.add_nodes([node]) # Connect inputs, outputs and internal connections if inputs is not None: assert isinstance(inputs, dict) for node_input, connect_from in inputs.items(): if isinstance(connect_from[0], basestring): input_spec, input_format = connect_from self.connect_input(input_spec, node, node_input, input_format) else: conn_node, conn_field = connect_from self.connect(conn_node, conn_field, node, node_input) if outputs is not None: assert isinstance(outputs, dict) for output_spec, (node_output, output_format) in outputs.items(): self.connect_output(output_spec, node, node_output, output_format) return node
[ "def", "add", "(", "self", ",", "name", ",", "interface", ",", "inputs", "=", "None", ",", "outputs", "=", "None", ",", "requirements", "=", "None", ",", "wall_time", "=", "None", ",", "annotations", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "annotations", "is", "None", ":", "annotations", "=", "{", "}", "if", "requirements", "is", "None", ":", "requirements", "=", "[", "]", "if", "wall_time", "is", "None", ":", "wall_time", "=", "self", ".", "study", ".", "processor", ".", "default_wall_time", "if", "'mem_gb'", "not", "in", "kwargs", "or", "kwargs", "[", "'mem_gb'", "]", "is", "None", ":", "kwargs", "[", "'mem_gb'", "]", "=", "self", ".", "study", ".", "processor", ".", "default_mem_gb", "if", "'iterfield'", "in", "kwargs", ":", "if", "'joinfield'", "in", "kwargs", "or", "'joinsource'", "in", "kwargs", ":", "raise", "ArcanaDesignError", "(", "\"Cannot provide both joinsource and iterfield to when \"", "\"attempting to add '{}' node to {}\"", ".", "foramt", "(", "name", ",", "self", ".", "_error_msg_loc", ")", ")", "node_cls", "=", "self", ".", "study", ".", "environment", ".", "node_types", "[", "'map'", "]", "elif", "'joinsource'", "in", "kwargs", "or", "'joinfield'", "in", "kwargs", ":", "if", "not", "(", "'joinfield'", "in", "kwargs", "and", "'joinsource'", "in", "kwargs", ")", ":", "raise", "ArcanaDesignError", "(", "\"Both joinsource and joinfield kwargs are required to \"", "\"create a JoinNode (see {})\"", ".", "format", "(", "name", ",", "self", ".", "_error_msg_loc", ")", ")", "joinsource", "=", "kwargs", "[", "'joinsource'", "]", "if", "joinsource", "in", "self", ".", "study", ".", "ITERFIELDS", ":", "self", ".", "_iterator_joins", ".", "add", "(", "joinsource", ")", "node_cls", "=", "self", ".", "study", ".", "environment", ".", "node_types", "[", "'join'", "]", "# Prepend name of pipeline of joinsource to match name of nodes", "kwargs", "[", "'joinsource'", "]", "=", "'{}_{}'", ".", "format", "(", "self", ".", "name", ",", "joinsource", ")", "else", ":", "node_cls", "=", "self", ".", "study", ".", "environment", ".", "node_types", "[", "'base'", "]", "# Create node", "node", "=", "node_cls", "(", "self", ".", "study", ".", "environment", ",", "interface", ",", "name", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "_name", ",", "name", ")", ",", "requirements", "=", "requirements", ",", "wall_time", "=", "wall_time", ",", "annotations", "=", "annotations", ",", "*", "*", "kwargs", ")", "# Ensure node is added to workflow", "self", ".", "_workflow", ".", "add_nodes", "(", "[", "node", "]", ")", "# Connect inputs, outputs and internal connections", "if", "inputs", "is", "not", "None", ":", "assert", "isinstance", "(", "inputs", ",", "dict", ")", "for", "node_input", ",", "connect_from", "in", "inputs", ".", "items", "(", ")", ":", "if", "isinstance", "(", "connect_from", "[", "0", "]", ",", "basestring", ")", ":", "input_spec", ",", "input_format", "=", "connect_from", "self", ".", "connect_input", "(", "input_spec", ",", "node", ",", "node_input", ",", "input_format", ")", "else", ":", "conn_node", ",", "conn_field", "=", "connect_from", "self", ".", "connect", "(", "conn_node", ",", "conn_field", ",", "node", ",", "node_input", ")", "if", "outputs", "is", "not", "None", ":", "assert", "isinstance", "(", "outputs", ",", "dict", ")", "for", "output_spec", ",", "(", "node_output", ",", "output_format", ")", "in", "outputs", ".", "items", "(", ")", ":", "self", ".", "connect_output", "(", "output_spec", ",", "node", ",", "node_output", ",", "output_format", ")", 
"return", "node" ]
Adds a processing Node to the pipeline Parameters ---------- name : str Name for the node interface : nipype.Interface The interface to use for the node inputs : dict[str, (str, FileFormat) | (Node, str)] Connections from inputs of the pipeline and outputs of other nodes to inputs of node. The keys of the dictionary are the field names and the values are 2-tuple containing either the name of the data spec and the data format it is expected in for pipeline inputs or the sending Node and the the name of an output of the sending Node. Note that pipeline inputs can be specified outside this method using the 'connect_input' method and connections between nodes with the the 'connect' method. outputs : dict[str, (str, FileFormat)] Connections to outputs of the pipeline from fields of the interface. The keys of the dictionary are the names of the data specs that will be written to and the values are the interface field name and the data format it is produced in. Note that output connections can also be specified using the 'connect_output' method. requirements : list(Requirement) List of required packages need for the node to run (default: []) wall_time : float Time required to execute the node in minutes (default: 1) mem_gb : int Required memory for the node in GB n_procs : int Preferred number of threads to run the node on (default: 1) annotations : dict[str, *] Additional annotations to add to the node, which may be used by the Processor node to optimise execution (e.g. 'gpu': True) iterfield : str Name of field to be passed an iterable to iterator over. If present, a MapNode will be created instead of a regular node joinsource : str Name of iterator field to join. Typically one of the implicit iterators (i.e. Study.SUBJECT_ID or Study.VISIT_ID) to join over the subjects and/or visits joinfield : str Name of field to pass the joined list when creating a JoinNode Returns ------- node : Node The Node object that has been added to the pipeline
[ "Adds", "a", "processing", "Node", "to", "the", "pipeline" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L173-L281
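The node-class selection buried in add above (regular node vs MapNode vs JoinNode, driven by the iterfield/joinsource/joinfield kwargs) can be distilled into a standalone function. This is a restatement of that branching for illustration only, with a plain ValueError standing in for ArcanaDesignError:

def select_node_type(kwargs):
    # Mirrors the branching in Pipeline.add that picks which node class to use.
    if 'iterfield' in kwargs:
        if 'joinfield' in kwargs or 'joinsource' in kwargs:
            raise ValueError("cannot combine iterfield with joinsource/joinfield")
        return 'map'    # MapNode: iterate the interface over a list input
    if 'joinsource' in kwargs or 'joinfield' in kwargs:
        if not ('joinfield' in kwargs and 'joinsource' in kwargs):
            raise ValueError("joinsource and joinfield must be given together")
        return 'join'   # JoinNode: gather results over an iterator
    return 'base'       # plain node

print(select_node_type({}))                                                      # base
print(select_node_type({'iterfield': 'in_file'}))                                # map
print(select_node_type({'joinsource': 'subject_id', 'joinfield': 'in_files'}))   # join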
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.connect_input
def connect_input(self, spec_name, node, node_input, format=None, **kwargs): # @ReservedAssignment @IgnorePep8 """ Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed """ if spec_name in self.study.ITERFIELDS: self._iterator_conns[spec_name].append((node, node_input, format)) else: name = self._map_name(spec_name, self._input_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed input '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) self._input_conns[name].append((node, node_input, format, kwargs))
python
def connect_input(self, spec_name, node, node_input, format=None, **kwargs): # @ReservedAssignment @IgnorePep8 """ Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed """ if spec_name in self.study.ITERFIELDS: self._iterator_conns[spec_name].append((node, node_input, format)) else: name = self._map_name(spec_name, self._input_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed input '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) self._input_conns[name].append((node, node_input, format, kwargs))
[ "def", "connect_input", "(", "self", ",", "spec_name", ",", "node", ",", "node_input", ",", "format", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# @ReservedAssignment @IgnorePep8", "if", "spec_name", "in", "self", ".", "study", ".", "ITERFIELDS", ":", "self", ".", "_iterator_conns", "[", "spec_name", "]", ".", "append", "(", "(", "node", ",", "node_input", ",", "format", ")", ")", "else", ":", "name", "=", "self", ".", "_map_name", "(", "spec_name", ",", "self", ".", "_input_map", ")", "if", "name", "not", "in", "self", ".", "study", ".", "data_spec_names", "(", ")", ":", "raise", "ArcanaDesignError", "(", "\"Proposed input '{}' to {} is not a valid spec name ('{}')\"", ".", "format", "(", "name", ",", "self", ".", "_error_msg_loc", ",", "\"', '\"", ".", "join", "(", "self", ".", "study", ".", "data_spec_names", "(", ")", ")", ")", ")", "self", ".", "_input_conns", "[", "name", "]", ".", "append", "(", "(", "node", ",", "node_input", ",", "format", ",", "kwargs", ")", ")" ]
Connects a study fileset_spec as an input to the provided node Parameters ---------- spec_name : str Name of the study data spec (or one of the IDs from the iterator nodes, 'subject_id' or 'visit_id') to connect to the node node : arcana.Node The node to connect the input to node_input : str Name of the input on the node to connect the fileset spec to format : FileFormat | None The file format the input is expected in. If it differs from the format in data spec or of study input then an implicit conversion is performed. If None the file format in the data spec is assumed
[ "Connects", "a", "study", "fileset_spec", "as", "an", "input", "to", "the", "provided", "node" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L283-L311
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.connect_output
def connect_output(self, spec_name, node, node_output, format=None, # @ReservedAssignment @IgnorePep8 **kwargs): """ Connects an output to a study fileset spec Parameters ---------- spec_name : str Name of the study fileset spec to connect to node : arcana.Node The node to connect the output from node_output : str Name of the output on the node to connect to the fileset format : FileFormat | None The file format the output is returned in. If it differs from the format in data spec then an implicit conversion is performed. If None the it is assumed to be returned in the file format of the entry the data spec """ name = self._map_name(spec_name, self._output_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed output '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) if name in self._output_conns: prev_node, prev_node_output, _, _ = self._output_conns[name] logger.info( "Reassigning '{}' output from {}:{} to {}:{} in {}" .format(name, prev_node.name, prev_node_output, node.name, node_output, self._error_msg_loc)) self._output_conns[name] = (node, node_output, format, kwargs)
python
def connect_output(self, spec_name, node, node_output, format=None, # @ReservedAssignment @IgnorePep8 **kwargs): """ Connects an output to a study fileset spec Parameters ---------- spec_name : str Name of the study fileset spec to connect to node : arcana.Node The node to connect the output from node_output : str Name of the output on the node to connect to the fileset format : FileFormat | None The file format the output is returned in. If it differs from the format in data spec then an implicit conversion is performed. If None the it is assumed to be returned in the file format of the entry the data spec """ name = self._map_name(spec_name, self._output_map) if name not in self.study.data_spec_names(): raise ArcanaDesignError( "Proposed output '{}' to {} is not a valid spec name ('{}')" .format(name, self._error_msg_loc, "', '".join(self.study.data_spec_names()))) if name in self._output_conns: prev_node, prev_node_output, _, _ = self._output_conns[name] logger.info( "Reassigning '{}' output from {}:{} to {}:{} in {}" .format(name, prev_node.name, prev_node_output, node.name, node_output, self._error_msg_loc)) self._output_conns[name] = (node, node_output, format, kwargs)
[ "def", "connect_output", "(", "self", ",", "spec_name", ",", "node", ",", "node_output", ",", "format", "=", "None", ",", "# @ReservedAssignment @IgnorePep8", "*", "*", "kwargs", ")", ":", "name", "=", "self", ".", "_map_name", "(", "spec_name", ",", "self", ".", "_output_map", ")", "if", "name", "not", "in", "self", ".", "study", ".", "data_spec_names", "(", ")", ":", "raise", "ArcanaDesignError", "(", "\"Proposed output '{}' to {} is not a valid spec name ('{}')\"", ".", "format", "(", "name", ",", "self", ".", "_error_msg_loc", ",", "\"', '\"", ".", "join", "(", "self", ".", "study", ".", "data_spec_names", "(", ")", ")", ")", ")", "if", "name", "in", "self", ".", "_output_conns", ":", "prev_node", ",", "prev_node_output", ",", "_", ",", "_", "=", "self", ".", "_output_conns", "[", "name", "]", "logger", ".", "info", "(", "\"Reassigning '{}' output from {}:{} to {}:{} in {}\"", ".", "format", "(", "name", ",", "prev_node", ".", "name", ",", "prev_node_output", ",", "node", ".", "name", ",", "node_output", ",", "self", ".", "_error_msg_loc", ")", ")", "self", ".", "_output_conns", "[", "name", "]", "=", "(", "node", ",", "node_output", ",", "format", ",", "kwargs", ")" ]
Connects an output to a study fileset spec Parameters ---------- spec_name : str Name of the study fileset spec to connect to node : arcana.Node The node to connect the output from node_output : str Name of the output on the node to connect to the fileset format : FileFormat | None The file format the output is returned in. If it differs from the format in the data spec then an implicit conversion is performed. If None, it is assumed to be returned in the file format of the entry in the data spec
[ "Connects", "an", "output", "to", "a", "study", "fileset", "spec" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L313-L344
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._map_name
def _map_name(self, name, mapper): """ Maps a spec name to a new value based on the provided mapper """ if mapper is not None: if isinstance(mapper, basestring): name = mapper + name try: name = mapper[name] except KeyError: pass return name
python
def _map_name(self, name, mapper): """ Maps a spec name to a new value based on the provided mapper """ if mapper is not None: if isinstance(mapper, basestring): name = mapper + name try: name = mapper[name] except KeyError: pass return name
[ "def", "_map_name", "(", "self", ",", "name", ",", "mapper", ")", ":", "if", "mapper", "is", "not", "None", ":", "if", "isinstance", "(", "mapper", ",", "basestring", ")", ":", "name", "=", "mapper", "+", "name", "try", ":", "name", "=", "mapper", "[", "name", "]", "except", "KeyError", ":", "pass", "return", "name" ]
Maps a spec name to a new value based on the provided mapper
[ "Maps", "a", "spec", "name", "to", "a", "new", "value", "based", "on", "the", "provided", "mapper" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L346-L357
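The two mapper conventions documented around _map_name and the name_maps machinery (a string acts as a prefix, a dict renames matching spec names and leaves the rest untouched) can be restated as a small standalone helper. This is an illustration of the intent, not a copy of _map_name itself:

def apply_name_map(name, mapper):
    # A string mapper is treated as a prefix; a dict mapper renames matching
    # entries and passes unmapped names through unchanged.
    if mapper is None:
        return name
    if isinstance(mapper, str):
        return mapper + name
    return mapper.get(name, name)

print(apply_name_map('t1', 'coreg_'))           # coreg_t1
print(apply_name_map('t1', {'t1': 'mprage'}))   # mprage
print(apply_name_map('t2', {'t1': 'mprage'}))   # t2 (no mapping, passes through)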
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.requires_conversion
def requires_conversion(cls, fileset, file_format): """Checks whether the fileset matches the requested file format""" if file_format is None: return False try: filset_format = fileset.format except AttributeError: return False # Field input else: return (file_format != filset_format)
python
def requires_conversion(cls, fileset, file_format): """Checks whether the fileset matches the requested file format""" if file_format is None: return False try: filset_format = fileset.format except AttributeError: return False # Field input else: return (file_format != filset_format)
[ "def", "requires_conversion", "(", "cls", ",", "fileset", ",", "file_format", ")", ":", "if", "file_format", "is", "None", ":", "return", "False", "try", ":", "filset_format", "=", "fileset", ".", "format", "except", "AttributeError", ":", "return", "False", "# Field input", "else", ":", "return", "(", "file_format", "!=", "filset_format", ")" ]
Checks whether the fileset matches the requested file format
[ "Checks", "whether", "the", "fileset", "matches", "the", "requested", "file", "format" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L449-L458
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.save_graph
def save_graph(self, fname, style='flat', format='png', **kwargs): # @ReservedAssignment @IgnorePep8 """ Saves a graph of the pipeline to file Parameters ---------- fname : str The filename for the saved graph style : str The style of the graph, can be one of can be one of 'orig', 'flat', 'exec', 'hierarchical' plot : bool Whether to load and plot the graph after it has been written """ fname = os.path.expanduser(fname) if not fname.endswith('.png'): fname += '.png' orig_dir = os.getcwd() tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) workflow = self._workflow workflow.write_graph(graph2use=style, format=format, **kwargs) os.chdir(orig_dir) try: shutil.move(os.path.join(tmpdir, 'graph_detailed.{}' .format(format)), fname) except IOError as e: if e.errno == errno.ENOENT: shutil.move(os.path.join(tmpdir, 'graph.{}'.format(format)), fname) else: raise shutil.rmtree(tmpdir)
python
def save_graph(self, fname, style='flat', format='png', **kwargs): # @ReservedAssignment @IgnorePep8 """ Saves a graph of the pipeline to file Parameters ---------- fname : str The filename for the saved graph style : str The style of the graph, can be one of can be one of 'orig', 'flat', 'exec', 'hierarchical' plot : bool Whether to load and plot the graph after it has been written """ fname = os.path.expanduser(fname) if not fname.endswith('.png'): fname += '.png' orig_dir = os.getcwd() tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) workflow = self._workflow workflow.write_graph(graph2use=style, format=format, **kwargs) os.chdir(orig_dir) try: shutil.move(os.path.join(tmpdir, 'graph_detailed.{}' .format(format)), fname) except IOError as e: if e.errno == errno.ENOENT: shutil.move(os.path.join(tmpdir, 'graph.{}'.format(format)), fname) else: raise shutil.rmtree(tmpdir)
[ "def", "save_graph", "(", "self", ",", "fname", ",", "style", "=", "'flat'", ",", "format", "=", "'png'", ",", "*", "*", "kwargs", ")", ":", "# @ReservedAssignment @IgnorePep8", "fname", "=", "os", ".", "path", ".", "expanduser", "(", "fname", ")", "if", "not", "fname", ".", "endswith", "(", "'.png'", ")", ":", "fname", "+=", "'.png'", "orig_dir", "=", "os", ".", "getcwd", "(", ")", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "os", ".", "chdir", "(", "tmpdir", ")", "workflow", "=", "self", ".", "_workflow", "workflow", ".", "write_graph", "(", "graph2use", "=", "style", ",", "format", "=", "format", ",", "*", "*", "kwargs", ")", "os", ".", "chdir", "(", "orig_dir", ")", "try", ":", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "'graph_detailed.{}'", ".", "format", "(", "format", ")", ")", ",", "fname", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "'graph.{}'", ".", "format", "(", "format", ")", ")", ",", "fname", ")", "else", ":", "raise", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
Saves a graph of the pipeline to file Parameters ---------- fname : str The filename for the saved graph style : str The style of the graph, can be one of 'orig', 'flat', 'exec', 'hierarchical' plot : bool Whether to load and plot the graph after it has been written
[ "Saves", "a", "graph", "of", "the", "pipeline", "to", "file" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L473-L505
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.iterators
def iterators(self, frequency=None): """ Returns the iterators (i.e. subject_id, visit_id) that the pipeline iterates over Parameters ---------- frequency : str | None A selected data frequency to use to determine which iterators are required. If None, all input frequencies of the pipeline are assumed """ iterators = set() if frequency is None: input_freqs = list(self.input_frequencies) else: input_freqs = [frequency] for freq in input_freqs: iterators.update(self.study.FREQUENCIES[freq]) return iterators
python
def iterators(self, frequency=None): """ Returns the iterators (i.e. subject_id, visit_id) that the pipeline iterates over Parameters ---------- frequency : str | None A selected data frequency to use to determine which iterators are required. If None, all input frequencies of the pipeline are assumed """ iterators = set() if frequency is None: input_freqs = list(self.input_frequencies) else: input_freqs = [frequency] for freq in input_freqs: iterators.update(self.study.FREQUENCIES[freq]) return iterators
[ "def", "iterators", "(", "self", ",", "frequency", "=", "None", ")", ":", "iterators", "=", "set", "(", ")", "if", "frequency", "is", "None", ":", "input_freqs", "=", "list", "(", "self", ".", "input_frequencies", ")", "else", ":", "input_freqs", "=", "[", "frequency", "]", "for", "freq", "in", "input_freqs", ":", "iterators", ".", "update", "(", "self", ".", "study", ".", "FREQUENCIES", "[", "freq", "]", ")", "return", "iterators" ]
Returns the iterators (i.e. subject_id, visit_id) that the pipeline iterates over Parameters ---------- frequency : str | None A selected data frequency to use to determine which iterators are required. If None, all input frequencies of the pipeline are assumed
[ "Returns", "the", "iterators", "(", "i", ".", "e", ".", "subject_id", "visit_id", ")", "that", "the", "pipeline", "iterates", "over" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L507-L526
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._unwrap_maps
def _unwrap_maps(self, name_maps, name, study=None, **inner_maps): """ Unwraps potentially nested name-mapping dictionaries to get values for name, input_map, output_map and study. Unsed in __init__. Parameters ---------- name_maps : dict A dictionary containing the name_maps to apply to the values name : str Name passed from inner pipeline constructor study : Study The study to bind the pipeline to. Will be overridden by any values in the mods dict inner_maps : dict[str, dict[str,str]] input and output maps from inner pipeline constructors Returns ------- name : str Potentially modified name of the pipeline study : Study Potentially modified study maps : dict[str, dict[str,str]] Potentially modifed input and output maps """ # Set values of name and study name = name_maps.get('name', name) name = name_maps.get('prefix', '') + name study = name_maps.get('study', study) # Flatten input and output maps, combining maps from inner nests with # those in the "mods" dictionary maps = {} for mtype in ('input_map', 'output_map'): try: inner_map = inner_maps[mtype] except KeyError: try: maps[mtype] = name_maps[mtype] # Only outer map except KeyError: pass # No maps else: try: outer_map = name_maps[mtype] except KeyError: maps[mtype] = inner_map # Only inner map else: # Work through different combinations of inner and outer # map types (i.e. str & str, str & dict, dict & str, and # dict & dict) and combine into a single map if isinstance(outer_map, basestring): if isinstance(inner_map, basestring): # Concatenate prefixes maps[mtype] = outer_map + inner_map elif isinstance(inner_map, dict): # Add outer_map prefix to all values in inner map # dictionary maps[mtype] = {k: outer_map + v for k, v in inner_map.items()} else: raise ArcanaDesignError( "Unrecognised type for name map in '{}' " "pipeline can be str or dict[str,str]: {}" .format(name, inner_map)) elif isinstance(outer_map, dict): if isinstance(inner_map, basestring): # Strip inner map prefix from outer dictionary # (which should have prefix included). This should # be an unlikely case I imagine maps[mtype] = {k[len(inner_map):]: v for k, v in outer_map.items()} elif isinstance(inner_map, dict): # Chain outer_map dictionary to inner map # dictionary maps[mtype] = deepcopy(outer_map) maps[mtype].update( {k: outer_map.get(v, v) for k, v in inner_map.items()}) else: raise ArcanaDesignError( "Unrecognised type for name map in '{}' " "pipeline can be str or dict[str,str]: {}" .format(name, inner_map)) else: raise ArcanaDesignError( "Unrecognised type for name map in '{}' " "pipeline can be str or dict[str,str]: {}" .format(name, outer_map)) try: outer_maps = name_maps['name_maps'] except KeyError: pass else: name, study, maps = self._unwrap_maps( outer_maps, name=name, study=study, **maps) return name, study, maps
python
def _unwrap_maps(self, name_maps, name, study=None, **inner_maps): """ Unwraps potentially nested name-mapping dictionaries to get values for name, input_map, output_map and study. Unsed in __init__. Parameters ---------- name_maps : dict A dictionary containing the name_maps to apply to the values name : str Name passed from inner pipeline constructor study : Study The study to bind the pipeline to. Will be overridden by any values in the mods dict inner_maps : dict[str, dict[str,str]] input and output maps from inner pipeline constructors Returns ------- name : str Potentially modified name of the pipeline study : Study Potentially modified study maps : dict[str, dict[str,str]] Potentially modifed input and output maps """ # Set values of name and study name = name_maps.get('name', name) name = name_maps.get('prefix', '') + name study = name_maps.get('study', study) # Flatten input and output maps, combining maps from inner nests with # those in the "mods" dictionary maps = {} for mtype in ('input_map', 'output_map'): try: inner_map = inner_maps[mtype] except KeyError: try: maps[mtype] = name_maps[mtype] # Only outer map except KeyError: pass # No maps else: try: outer_map = name_maps[mtype] except KeyError: maps[mtype] = inner_map # Only inner map else: # Work through different combinations of inner and outer # map types (i.e. str & str, str & dict, dict & str, and # dict & dict) and combine into a single map if isinstance(outer_map, basestring): if isinstance(inner_map, basestring): # Concatenate prefixes maps[mtype] = outer_map + inner_map elif isinstance(inner_map, dict): # Add outer_map prefix to all values in inner map # dictionary maps[mtype] = {k: outer_map + v for k, v in inner_map.items()} else: raise ArcanaDesignError( "Unrecognised type for name map in '{}' " "pipeline can be str or dict[str,str]: {}" .format(name, inner_map)) elif isinstance(outer_map, dict): if isinstance(inner_map, basestring): # Strip inner map prefix from outer dictionary # (which should have prefix included). This should # be an unlikely case I imagine maps[mtype] = {k[len(inner_map):]: v for k, v in outer_map.items()} elif isinstance(inner_map, dict): # Chain outer_map dictionary to inner map # dictionary maps[mtype] = deepcopy(outer_map) maps[mtype].update( {k: outer_map.get(v, v) for k, v in inner_map.items()}) else: raise ArcanaDesignError( "Unrecognised type for name map in '{}' " "pipeline can be str or dict[str,str]: {}" .format(name, inner_map)) else: raise ArcanaDesignError( "Unrecognised type for name map in '{}' " "pipeline can be str or dict[str,str]: {}" .format(name, outer_map)) try: outer_maps = name_maps['name_maps'] except KeyError: pass else: name, study, maps = self._unwrap_maps( outer_maps, name=name, study=study, **maps) return name, study, maps
[ "def", "_unwrap_maps", "(", "self", ",", "name_maps", ",", "name", ",", "study", "=", "None", ",", "*", "*", "inner_maps", ")", ":", "# Set values of name and study", "name", "=", "name_maps", ".", "get", "(", "'name'", ",", "name", ")", "name", "=", "name_maps", ".", "get", "(", "'prefix'", ",", "''", ")", "+", "name", "study", "=", "name_maps", ".", "get", "(", "'study'", ",", "study", ")", "# Flatten input and output maps, combining maps from inner nests with", "# those in the \"mods\" dictionary", "maps", "=", "{", "}", "for", "mtype", "in", "(", "'input_map'", ",", "'output_map'", ")", ":", "try", ":", "inner_map", "=", "inner_maps", "[", "mtype", "]", "except", "KeyError", ":", "try", ":", "maps", "[", "mtype", "]", "=", "name_maps", "[", "mtype", "]", "# Only outer map", "except", "KeyError", ":", "pass", "# No maps", "else", ":", "try", ":", "outer_map", "=", "name_maps", "[", "mtype", "]", "except", "KeyError", ":", "maps", "[", "mtype", "]", "=", "inner_map", "# Only inner map", "else", ":", "# Work through different combinations of inner and outer", "# map types (i.e. str & str, str & dict, dict & str, and", "# dict & dict) and combine into a single map", "if", "isinstance", "(", "outer_map", ",", "basestring", ")", ":", "if", "isinstance", "(", "inner_map", ",", "basestring", ")", ":", "# Concatenate prefixes", "maps", "[", "mtype", "]", "=", "outer_map", "+", "inner_map", "elif", "isinstance", "(", "inner_map", ",", "dict", ")", ":", "# Add outer_map prefix to all values in inner map", "# dictionary", "maps", "[", "mtype", "]", "=", "{", "k", ":", "outer_map", "+", "v", "for", "k", ",", "v", "in", "inner_map", ".", "items", "(", ")", "}", "else", ":", "raise", "ArcanaDesignError", "(", "\"Unrecognised type for name map in '{}' \"", "\"pipeline can be str or dict[str,str]: {}\"", ".", "format", "(", "name", ",", "inner_map", ")", ")", "elif", "isinstance", "(", "outer_map", ",", "dict", ")", ":", "if", "isinstance", "(", "inner_map", ",", "basestring", ")", ":", "# Strip inner map prefix from outer dictionary", "# (which should have prefix included). This should", "# be an unlikely case I imagine", "maps", "[", "mtype", "]", "=", "{", "k", "[", "len", "(", "inner_map", ")", ":", "]", ":", "v", "for", "k", ",", "v", "in", "outer_map", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "inner_map", ",", "dict", ")", ":", "# Chain outer_map dictionary to inner map", "# dictionary", "maps", "[", "mtype", "]", "=", "deepcopy", "(", "outer_map", ")", "maps", "[", "mtype", "]", ".", "update", "(", "{", "k", ":", "outer_map", ".", "get", "(", "v", ",", "v", ")", "for", "k", ",", "v", "in", "inner_map", ".", "items", "(", ")", "}", ")", "else", ":", "raise", "ArcanaDesignError", "(", "\"Unrecognised type for name map in '{}' \"", "\"pipeline can be str or dict[str,str]: {}\"", ".", "format", "(", "name", ",", "inner_map", ")", ")", "else", ":", "raise", "ArcanaDesignError", "(", "\"Unrecognised type for name map in '{}' \"", "\"pipeline can be str or dict[str,str]: {}\"", ".", "format", "(", "name", ",", "outer_map", ")", ")", "try", ":", "outer_maps", "=", "name_maps", "[", "'name_maps'", "]", "except", "KeyError", ":", "pass", "else", ":", "name", ",", "study", ",", "maps", "=", "self", ".", "_unwrap_maps", "(", "outer_maps", ",", "name", "=", "name", ",", "study", "=", "study", ",", "*", "*", "maps", ")", "return", "name", ",", "study", ",", "maps" ]
Unwraps potentially nested name-mapping dictionaries to get values for name, input_map, output_map and study. Used in __init__. Parameters ---------- name_maps : dict A dictionary containing the name_maps to apply to the values name : str Name passed from inner pipeline constructor study : Study The study to bind the pipeline to. Will be overridden by any values in the mods dict inner_maps : dict[str, dict[str,str]] input and output maps from inner pipeline constructors Returns ------- name : str Potentially modified name of the pipeline study : Study Potentially modified study maps : dict[str, dict[str,str]] Potentially modified input and output maps
[ "Unwraps", "potentially", "nested", "name", "-", "mapping", "dictionaries", "to", "get", "values", "for", "name", "input_map", "output_map", "and", "study", ".", "Used", "in", "__init__", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L542-L637
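The four outer/inner combinations handled by _unwrap_maps above reduce to the rules sketched below. The helper name and the example map values are invented for illustration; only the combination arithmetic is taken from the method:

from copy import deepcopy

def combine_maps(outer, inner):
    # str + str: concatenate the prefixes (outer prefix applied outermost).
    if isinstance(outer, str) and isinstance(inner, str):
        return outer + inner
    # str + dict: prepend the outer prefix to every value of the inner map.
    if isinstance(outer, str):
        return {k: outer + v for k, v in inner.items()}
    # dict + str: strip the inner prefix from the outer map's keys.
    if isinstance(inner, str):
        return {k[len(inner):]: v for k, v in outer.items()}
    # dict + dict: chain the inner map through the outer one.
    combined = deepcopy(outer)
    combined.update({k: outer.get(v, v) for k, v in inner.items()})
    return combined

print(combine_maps('outer_', 'inner_'))                     # outer_inner_
print(combine_maps('pre_', {'t1': 'mprage'}))               # {'t1': 'pre_mprage'}
print(combine_maps({'mprage': 'final'}, {'t1': 'mprage'}))  # {'mprage': 'final', 't1': 'final'}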
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.cap
def cap(self): """ "Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance. """ to_cap = (self._inputnodes, self._outputnodes, self._prov) if to_cap == (None, None, None): self._inputnodes = { f: self._make_inputnode(f) for f in self.input_frequencies} self._outputnodes = { f: self._make_outputnode(f) for f in self.output_frequencies} self._prov = self._gen_prov() elif None in to_cap: raise ArcanaError( "If one of _inputnodes, _outputnodes or _prov is not None then" " they all should be in {}".format(self))
python
def cap(self): """ "Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance. """ to_cap = (self._inputnodes, self._outputnodes, self._prov) if to_cap == (None, None, None): self._inputnodes = { f: self._make_inputnode(f) for f in self.input_frequencies} self._outputnodes = { f: self._make_outputnode(f) for f in self.output_frequencies} self._prov = self._gen_prov() elif None in to_cap: raise ArcanaError( "If one of _inputnodes, _outputnodes or _prov is not None then" " they all should be in {}".format(self))
[ "def", "cap", "(", "self", ")", ":", "to_cap", "=", "(", "self", ".", "_inputnodes", ",", "self", ".", "_outputnodes", ",", "self", ".", "_prov", ")", "if", "to_cap", "==", "(", "None", ",", "None", ",", "None", ")", ":", "self", ".", "_inputnodes", "=", "{", "f", ":", "self", ".", "_make_inputnode", "(", "f", ")", "for", "f", "in", "self", ".", "input_frequencies", "}", "self", ".", "_outputnodes", "=", "{", "f", ":", "self", ".", "_make_outputnode", "(", "f", ")", "for", "f", "in", "self", ".", "output_frequencies", "}", "self", ".", "_prov", "=", "self", ".", "_gen_prov", "(", ")", "elif", "None", "in", "to_cap", ":", "raise", "ArcanaError", "(", "\"If one of _inputnodes, _outputnodes or _prov is not None then\"", "\" they all should be in {}\"", ".", "format", "(", "self", ")", ")" ]
"Caps" the construction of the pipeline, signifying that no more inputs and outputs are expected to be added and therefore the input and output nodes can be created along with the provenance.
[ "Caps", "the", "construction", "of", "the", "pipeline", "signifying", "that", "no", "more", "inputs", "and", "outputs", "are", "expected", "to", "be", "added", "and", "therefore", "the", "input", "and", "output", "nodes", "can", "be", "created", "along", "with", "the", "provenance", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L694-L710
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._make_inputnode
def _make_inputnode(self, frequency): """ Generates an input node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the input node to retrieve """ # Check to see whether there are any outputs for the given frequency inputs = list(self.frequency_inputs(frequency)) # Get list of input names for the requested frequency, addding fields # to hold iterator IDs input_names = [i.name for i in inputs] input_names.extend(self.study.FREQUENCIES[frequency]) if not input_names: raise ArcanaError( "No inputs to '{}' pipeline for requested freqency '{}'" .format(self.name, frequency)) # Generate input node and connect it to appropriate nodes inputnode = self.add('{}_inputnode'.format(frequency), IdentityInterface(fields=input_names)) # Loop through list of nodes connected to study data specs and # connect them to the newly created input node for input in inputs: # @ReservedAssignment # Keep track of previous conversion nodes to avoid replicating the # conversion for inputs that are used in multiple places prev_conv_nodes = {} for (node, node_in, format, # @ReservedAssignment @IgnorePep8 conv_kwargs) in self._input_conns[input.name]: # If fileset formats differ between study and pipeline # inputs create converter node (if one hasn't been already) # and connect input to that before connecting to inputnode if self.requires_conversion(input, format): try: conv = format.converter_from(input.format, **conv_kwargs) except ArcanaNoConverterError as e: e.msg += ( "which is required to convert '{}' from {} to {} " "for '{}' input of '{}' node".format( input.name, input.format, format, node_in, node.name)) raise e try: in_node = prev_conv_nodes[format.name] except KeyError: in_node = prev_conv_nodes[format.name] = self.add( 'conv_{}_to_{}_format'.format(input.name, format.name), conv.interface, inputs={conv.input: (inputnode, input.name)}, requirements=conv.requirements, mem_gb=conv.mem_gb, wall_time=conv.wall_time) in_node_out = conv.output else: in_node = inputnode in_node_out = input.name self.connect(in_node, in_node_out, node, node_in) # Connect iterator inputs for iterator, conns in self._iterator_conns.items(): # Check to see if this is the right frequency for the iterator # input, i.e. if it is the only iterator for this frequency if self.study.FREQUENCIES[frequency] == (iterator,): for (node, node_in, format) in conns: # @ReservedAssignment self.connect(inputnode, iterator, node, node_in) return inputnode
python
def _make_inputnode(self, frequency): """ Generates an input node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the input node to retrieve """ # Check to see whether there are any outputs for the given frequency inputs = list(self.frequency_inputs(frequency)) # Get list of input names for the requested frequency, addding fields # to hold iterator IDs input_names = [i.name for i in inputs] input_names.extend(self.study.FREQUENCIES[frequency]) if not input_names: raise ArcanaError( "No inputs to '{}' pipeline for requested freqency '{}'" .format(self.name, frequency)) # Generate input node and connect it to appropriate nodes inputnode = self.add('{}_inputnode'.format(frequency), IdentityInterface(fields=input_names)) # Loop through list of nodes connected to study data specs and # connect them to the newly created input node for input in inputs: # @ReservedAssignment # Keep track of previous conversion nodes to avoid replicating the # conversion for inputs that are used in multiple places prev_conv_nodes = {} for (node, node_in, format, # @ReservedAssignment @IgnorePep8 conv_kwargs) in self._input_conns[input.name]: # If fileset formats differ between study and pipeline # inputs create converter node (if one hasn't been already) # and connect input to that before connecting to inputnode if self.requires_conversion(input, format): try: conv = format.converter_from(input.format, **conv_kwargs) except ArcanaNoConverterError as e: e.msg += ( "which is required to convert '{}' from {} to {} " "for '{}' input of '{}' node".format( input.name, input.format, format, node_in, node.name)) raise e try: in_node = prev_conv_nodes[format.name] except KeyError: in_node = prev_conv_nodes[format.name] = self.add( 'conv_{}_to_{}_format'.format(input.name, format.name), conv.interface, inputs={conv.input: (inputnode, input.name)}, requirements=conv.requirements, mem_gb=conv.mem_gb, wall_time=conv.wall_time) in_node_out = conv.output else: in_node = inputnode in_node_out = input.name self.connect(in_node, in_node_out, node, node_in) # Connect iterator inputs for iterator, conns in self._iterator_conns.items(): # Check to see if this is the right frequency for the iterator # input, i.e. if it is the only iterator for this frequency if self.study.FREQUENCIES[frequency] == (iterator,): for (node, node_in, format) in conns: # @ReservedAssignment self.connect(inputnode, iterator, node, node_in) return inputnode
[ "def", "_make_inputnode", "(", "self", ",", "frequency", ")", ":", "# Check to see whether there are any outputs for the given frequency", "inputs", "=", "list", "(", "self", ".", "frequency_inputs", "(", "frequency", ")", ")", "# Get list of input names for the requested frequency, addding fields", "# to hold iterator IDs", "input_names", "=", "[", "i", ".", "name", "for", "i", "in", "inputs", "]", "input_names", ".", "extend", "(", "self", ".", "study", ".", "FREQUENCIES", "[", "frequency", "]", ")", "if", "not", "input_names", ":", "raise", "ArcanaError", "(", "\"No inputs to '{}' pipeline for requested freqency '{}'\"", ".", "format", "(", "self", ".", "name", ",", "frequency", ")", ")", "# Generate input node and connect it to appropriate nodes", "inputnode", "=", "self", ".", "add", "(", "'{}_inputnode'", ".", "format", "(", "frequency", ")", ",", "IdentityInterface", "(", "fields", "=", "input_names", ")", ")", "# Loop through list of nodes connected to study data specs and", "# connect them to the newly created input node", "for", "input", "in", "inputs", ":", "# @ReservedAssignment", "# Keep track of previous conversion nodes to avoid replicating the", "# conversion for inputs that are used in multiple places", "prev_conv_nodes", "=", "{", "}", "for", "(", "node", ",", "node_in", ",", "format", ",", "# @ReservedAssignment @IgnorePep8", "conv_kwargs", ")", "in", "self", ".", "_input_conns", "[", "input", ".", "name", "]", ":", "# If fileset formats differ between study and pipeline", "# inputs create converter node (if one hasn't been already)", "# and connect input to that before connecting to inputnode", "if", "self", ".", "requires_conversion", "(", "input", ",", "format", ")", ":", "try", ":", "conv", "=", "format", ".", "converter_from", "(", "input", ".", "format", ",", "*", "*", "conv_kwargs", ")", "except", "ArcanaNoConverterError", "as", "e", ":", "e", ".", "msg", "+=", "(", "\"which is required to convert '{}' from {} to {} \"", "\"for '{}' input of '{}' node\"", ".", "format", "(", "input", ".", "name", ",", "input", ".", "format", ",", "format", ",", "node_in", ",", "node", ".", "name", ")", ")", "raise", "e", "try", ":", "in_node", "=", "prev_conv_nodes", "[", "format", ".", "name", "]", "except", "KeyError", ":", "in_node", "=", "prev_conv_nodes", "[", "format", ".", "name", "]", "=", "self", ".", "add", "(", "'conv_{}_to_{}_format'", ".", "format", "(", "input", ".", "name", ",", "format", ".", "name", ")", ",", "conv", ".", "interface", ",", "inputs", "=", "{", "conv", ".", "input", ":", "(", "inputnode", ",", "input", ".", "name", ")", "}", ",", "requirements", "=", "conv", ".", "requirements", ",", "mem_gb", "=", "conv", ".", "mem_gb", ",", "wall_time", "=", "conv", ".", "wall_time", ")", "in_node_out", "=", "conv", ".", "output", "else", ":", "in_node", "=", "inputnode", "in_node_out", "=", "input", ".", "name", "self", ".", "connect", "(", "in_node", ",", "in_node_out", ",", "node", ",", "node_in", ")", "# Connect iterator inputs", "for", "iterator", ",", "conns", "in", "self", ".", "_iterator_conns", ".", "items", "(", ")", ":", "# Check to see if this is the right frequency for the iterator", "# input, i.e. 
if it is the only iterator for this frequency", "if", "self", ".", "study", ".", "FREQUENCIES", "[", "frequency", "]", "==", "(", "iterator", ",", ")", ":", "for", "(", "node", ",", "node_in", ",", "format", ")", "in", "conns", ":", "# @ReservedAssignment", "self", ".", "connect", "(", "inputnode", ",", "iterator", ",", "node", ",", "node_in", ")", "return", "inputnode" ]
Generates an input node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the input node to retrieve
[ "Generates", "an", "input", "node", "for", "the", "given", "frequency", ".", "It", "also", "adds", "implicit", "file", "format", "conversion", "nodes", "to", "the", "pipeline", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L712-L781
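A note on the record above: `_make_inputnode` caches converter nodes per target format (`prev_conv_nodes`) so an input consumed in several places is only converted once. The following is a minimal, self-contained sketch of that get-or-create pattern using plain dictionaries rather than Arcana's pipeline API; the `make_converter` helper and the format names are hypothetical stand-ins for illustration only.

```python
# Sketch of the "create the converter once, reuse it for every consumer"
# pattern used when wiring pipeline inputs. Nothing here calls the Arcana API.

def make_converter(src_format, tgt_format):
    # Hypothetical factory; in the real pipeline this would come from
    # a format.converter_from(...) call that yields a conversion node.
    return "convert_{}_to_{}".format(src_format, tgt_format)

def wire_input(consumers, src_format):
    """consumers: list of (node_name, required_format) pairs."""
    conv_cache = {}          # target format -> converter node (created lazily)
    connections = []
    for node_name, required in consumers:
        if required == src_format:
            source = "inputnode"              # no conversion needed
        else:
            try:
                source = conv_cache[required]
            except KeyError:                  # create it only the first time
                source = conv_cache[required] = make_converter(src_format, required)
        connections.append((source, node_name))
    return connections

print(wire_input([("bet", "nifti_gz"), ("fast", "nifti_gz"), ("qc", "dicom")],
                 src_format="dicom"))
# [('convert_dicom_to_nifti_gz', 'bet'), ('convert_dicom_to_nifti_gz', 'fast'),
#  ('inputnode', 'qc')]
```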
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._make_outputnode
def _make_outputnode(self, frequency): """ Generates an output node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the output node to retrieve """ # Check to see whether there are any outputs for the given frequency outputs = list(self.frequency_outputs(frequency)) if not outputs: raise ArcanaError( "No outputs to '{}' pipeline for requested freqency '{}'" .format(self.name, frequency)) # Get list of output names for the requested frequency, addding fields # to hold iterator IDs output_names = [o.name for o in outputs] # Generate output node and connect it to appropriate nodes outputnode = self.add('{}_outputnode'.format(frequency), IdentityInterface(fields=output_names)) # Loop through list of nodes connected to study data specs and # connect them to the newly created output node for output in outputs: # @ReservedAssignment (node, node_out, format, # @ReservedAssignment @IgnorePep8 conv_kwargs) = self._output_conns[output.name] # If fileset formats differ between study and pipeline # outputs create converter node (if one hasn't been already) # and connect output to that before connecting to outputnode if self.requires_conversion(output, format): conv = output.format.converter_from(format, **conv_kwargs) node = self.add( 'conv_{}_from_{}_format'.format(output.name, format.name), conv.interface, inputs={conv.input: (node, node_out)}, requirements=conv.requirements, mem_gb=conv.mem_gb, wall_time=conv.wall_time) node_out = conv.output self.connect(node, node_out, outputnode, output.name) return outputnode
python
def _make_outputnode(self, frequency): """ Generates an output node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the output node to retrieve """ # Check to see whether there are any outputs for the given frequency outputs = list(self.frequency_outputs(frequency)) if not outputs: raise ArcanaError( "No outputs to '{}' pipeline for requested freqency '{}'" .format(self.name, frequency)) # Get list of output names for the requested frequency, addding fields # to hold iterator IDs output_names = [o.name for o in outputs] # Generate output node and connect it to appropriate nodes outputnode = self.add('{}_outputnode'.format(frequency), IdentityInterface(fields=output_names)) # Loop through list of nodes connected to study data specs and # connect them to the newly created output node for output in outputs: # @ReservedAssignment (node, node_out, format, # @ReservedAssignment @IgnorePep8 conv_kwargs) = self._output_conns[output.name] # If fileset formats differ between study and pipeline # outputs create converter node (if one hasn't been already) # and connect output to that before connecting to outputnode if self.requires_conversion(output, format): conv = output.format.converter_from(format, **conv_kwargs) node = self.add( 'conv_{}_from_{}_format'.format(output.name, format.name), conv.interface, inputs={conv.input: (node, node_out)}, requirements=conv.requirements, mem_gb=conv.mem_gb, wall_time=conv.wall_time) node_out = conv.output self.connect(node, node_out, outputnode, output.name) return outputnode
[ "def", "_make_outputnode", "(", "self", ",", "frequency", ")", ":", "# Check to see whether there are any outputs for the given frequency", "outputs", "=", "list", "(", "self", ".", "frequency_outputs", "(", "frequency", ")", ")", "if", "not", "outputs", ":", "raise", "ArcanaError", "(", "\"No outputs to '{}' pipeline for requested freqency '{}'\"", ".", "format", "(", "self", ".", "name", ",", "frequency", ")", ")", "# Get list of output names for the requested frequency, addding fields", "# to hold iterator IDs", "output_names", "=", "[", "o", ".", "name", "for", "o", "in", "outputs", "]", "# Generate output node and connect it to appropriate nodes", "outputnode", "=", "self", ".", "add", "(", "'{}_outputnode'", ".", "format", "(", "frequency", ")", ",", "IdentityInterface", "(", "fields", "=", "output_names", ")", ")", "# Loop through list of nodes connected to study data specs and", "# connect them to the newly created output node", "for", "output", "in", "outputs", ":", "# @ReservedAssignment", "(", "node", ",", "node_out", ",", "format", ",", "# @ReservedAssignment @IgnorePep8", "conv_kwargs", ")", "=", "self", ".", "_output_conns", "[", "output", ".", "name", "]", "# If fileset formats differ between study and pipeline", "# outputs create converter node (if one hasn't been already)", "# and connect output to that before connecting to outputnode", "if", "self", ".", "requires_conversion", "(", "output", ",", "format", ")", ":", "conv", "=", "output", ".", "format", ".", "converter_from", "(", "format", ",", "*", "*", "conv_kwargs", ")", "node", "=", "self", ".", "add", "(", "'conv_{}_from_{}_format'", ".", "format", "(", "output", ".", "name", ",", "format", ".", "name", ")", ",", "conv", ".", "interface", ",", "inputs", "=", "{", "conv", ".", "input", ":", "(", "node", ",", "node_out", ")", "}", ",", "requirements", "=", "conv", ".", "requirements", ",", "mem_gb", "=", "conv", ".", "mem_gb", ",", "wall_time", "=", "conv", ".", "wall_time", ")", "node_out", "=", "conv", ".", "output", "self", ".", "connect", "(", "node", ",", "node_out", ",", "outputnode", ",", "output", ".", "name", ")", "return", "outputnode" ]
Generates an output node for the given frequency. It also adds implicit file format conversion nodes to the pipeline. Parameters ---------- frequency : str The frequency (i.e. 'per_session', 'per_visit', 'per_subject' or 'per_study') of the output node to retrieve
[ "Generates", "an", "output", "node", "for", "the", "given", "frequency", ".", "It", "also", "adds", "implicit", "file", "format", "conversion", "nodes", "to", "the", "pipeline", "." ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L783-L825
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._gen_prov
def _gen_prov(self): """ Extracts provenance information from the pipeline into a PipelineProv object Returns ------- prov : dict[str, *] A dictionary containing the provenance information to record for the pipeline """ # Export worfklow graph to node-link data format wf_dict = nx_json.node_link_data(self.workflow._graph) # Replace references to Node objects with the node's provenance # information and convert to a dict organised by node name to allow it # to be compared more easily. Also change link node-references from # node index to node ID so it is not dependent on the order the nodes # are written to the dictionary (which for Python < 3.7 is guaranteed # to be the same between identical runs) for link in wf_dict['links']: if int(networkx_version.split('.')[0]) < 2: # @UndefinedVariable link['source'] = wf_dict['nodes'][link['source']]['id'].name link['target'] = wf_dict['nodes'][link['target']]['id'].name else: link['source'] = link['source'].name link['target'] = link['target'].name wf_dict['nodes'] = {n['id'].name: n['id'].prov for n in wf_dict['nodes']} # Roundtrip to JSON to convert any tuples into lists so dictionaries # can be compared directly wf_dict = json.loads(json.dumps(wf_dict)) dependency_versions = {d: extract_package_version(d) for d in ARCANA_DEPENDENCIES} pkg_versions = {'arcana': __version__} pkg_versions.update((k, v) for k, v in dependency_versions.items() if v is not None) prov = { '__prov_version__': PROVENANCE_VERSION, 'name': self.name, 'workflow': wf_dict, 'study': self.study.prov, 'pkg_versions': pkg_versions, 'python_version': sys.version, 'joined_ids': self._joined_ids()} return prov
python
def _gen_prov(self): """ Extracts provenance information from the pipeline into a PipelineProv object Returns ------- prov : dict[str, *] A dictionary containing the provenance information to record for the pipeline """ # Export worfklow graph to node-link data format wf_dict = nx_json.node_link_data(self.workflow._graph) # Replace references to Node objects with the node's provenance # information and convert to a dict organised by node name to allow it # to be compared more easily. Also change link node-references from # node index to node ID so it is not dependent on the order the nodes # are written to the dictionary (which for Python < 3.7 is guaranteed # to be the same between identical runs) for link in wf_dict['links']: if int(networkx_version.split('.')[0]) < 2: # @UndefinedVariable link['source'] = wf_dict['nodes'][link['source']]['id'].name link['target'] = wf_dict['nodes'][link['target']]['id'].name else: link['source'] = link['source'].name link['target'] = link['target'].name wf_dict['nodes'] = {n['id'].name: n['id'].prov for n in wf_dict['nodes']} # Roundtrip to JSON to convert any tuples into lists so dictionaries # can be compared directly wf_dict = json.loads(json.dumps(wf_dict)) dependency_versions = {d: extract_package_version(d) for d in ARCANA_DEPENDENCIES} pkg_versions = {'arcana': __version__} pkg_versions.update((k, v) for k, v in dependency_versions.items() if v is not None) prov = { '__prov_version__': PROVENANCE_VERSION, 'name': self.name, 'workflow': wf_dict, 'study': self.study.prov, 'pkg_versions': pkg_versions, 'python_version': sys.version, 'joined_ids': self._joined_ids()} return prov
[ "def", "_gen_prov", "(", "self", ")", ":", "# Export worfklow graph to node-link data format", "wf_dict", "=", "nx_json", ".", "node_link_data", "(", "self", ".", "workflow", ".", "_graph", ")", "# Replace references to Node objects with the node's provenance", "# information and convert to a dict organised by node name to allow it", "# to be compared more easily. Also change link node-references from", "# node index to node ID so it is not dependent on the order the nodes", "# are written to the dictionary (which for Python < 3.7 is guaranteed", "# to be the same between identical runs)", "for", "link", "in", "wf_dict", "[", "'links'", "]", ":", "if", "int", "(", "networkx_version", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "<", "2", ":", "# @UndefinedVariable", "link", "[", "'source'", "]", "=", "wf_dict", "[", "'nodes'", "]", "[", "link", "[", "'source'", "]", "]", "[", "'id'", "]", ".", "name", "link", "[", "'target'", "]", "=", "wf_dict", "[", "'nodes'", "]", "[", "link", "[", "'target'", "]", "]", "[", "'id'", "]", ".", "name", "else", ":", "link", "[", "'source'", "]", "=", "link", "[", "'source'", "]", ".", "name", "link", "[", "'target'", "]", "=", "link", "[", "'target'", "]", ".", "name", "wf_dict", "[", "'nodes'", "]", "=", "{", "n", "[", "'id'", "]", ".", "name", ":", "n", "[", "'id'", "]", ".", "prov", "for", "n", "in", "wf_dict", "[", "'nodes'", "]", "}", "# Roundtrip to JSON to convert any tuples into lists so dictionaries", "# can be compared directly", "wf_dict", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "wf_dict", ")", ")", "dependency_versions", "=", "{", "d", ":", "extract_package_version", "(", "d", ")", "for", "d", "in", "ARCANA_DEPENDENCIES", "}", "pkg_versions", "=", "{", "'arcana'", ":", "__version__", "}", "pkg_versions", ".", "update", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "dependency_versions", ".", "items", "(", ")", "if", "v", "is", "not", "None", ")", "prov", "=", "{", "'__prov_version__'", ":", "PROVENANCE_VERSION", ",", "'name'", ":", "self", ".", "name", ",", "'workflow'", ":", "wf_dict", ",", "'study'", ":", "self", ".", "study", ".", "prov", ",", "'pkg_versions'", ":", "pkg_versions", ",", "'python_version'", ":", "sys", ".", "version", ",", "'joined_ids'", ":", "self", ".", "_joined_ids", "(", ")", "}", "return", "prov" ]
Extracts provenance information from the pipeline into a PipelineProv object Returns ------- prov : dict[str, *] A dictionary containing the provenance information to record for the pipeline
[ "Extracts", "provenance", "information", "from", "the", "pipeline", "into", "a", "PipelineProv", "object" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L827-L871
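The `_gen_prov` record above exports the Nipype workflow graph with networkx's node-link format and round-trips it through JSON so tuples become lists and two provenance dicts can be compared with a plain equality test. A small stand-alone sketch of just that normalisation step follows; the toy graph content is made up, and only `networkx` and the standard `json` module are assumed.

```python
import json
import networkx as nx
from networkx.readwrite import json_graph

# Toy directed graph standing in for a Nipype workflow graph.
g = nx.DiGraph()
g.add_edge("inputnode", "smooth")
g.add_edge("smooth", "outputnode")

wf_dict = json_graph.node_link_data(g)   # {'nodes': [...], 'links': [...], ...}

# Keep links as name-to-name references so the record does not depend on
# node ordering (newer networkx already emits node ids rather than indices).
for link in wf_dict["links"]:
    link["source"] = str(link["source"])
    link["target"] = str(link["target"])

# Round-trip through JSON: tuples become lists and keys become strings,
# so the resulting dicts compare cleanly against stored provenance.
normalised = json.loads(json.dumps(wf_dict))
print(sorted(n["id"] for n in normalised["nodes"]))
# ['inputnode', 'outputnode', 'smooth']
```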
MonashBI/arcana
arcana/pipeline/base.py
Pipeline.expected_record
def expected_record(self, node): """ Constructs the provenance record that would be saved in the given node if the pipeline was run on the current state of the repository Parameters ---------- node : arcana.repository.tree.TreeNode A node of the Tree representation of the study data stored in the repository (i.e. a Session, Visit, Subject or Tree node) Returns ------- expected_record : arcana.provenance.Record The record that would be produced if the pipeline is run over the study tree. """ exp_inputs = {} # Get checksums/values of all inputs that would have been used in # previous runs of an equivalent pipeline to compare with that saved # in provenance to see if any have been updated. for inpt in self.inputs: # @ReservedAssignment # Get iterators present in the input that aren't in this node # and need to be joined iterators_to_join = (self.iterators(inpt.frequency) - self.iterators(node.frequency)) if not iterators_to_join: # No iterators to join so we can just extract the checksums # of the corresponding input exp_inputs[inpt.name] = inpt.collection.item( node.subject_id, node.visit_id).checksums elif len(iterators_to_join) == 1: # Get list of checksums dicts for each node of the input # frequency that relates to the current node exp_inputs[inpt.name] = [ inpt.collection.item(n.subject_id, n.visit_id).checksums for n in node.nodes(inpt.frequency)] else: # In the case where the node is the whole treee and the input # is per_seession, we need to create a list of lists to match # how the checksums are joined in the processor exp_inputs[inpt.name] = [] for subj in node.subjects: exp_inputs[inpt.name].append([ inpt.collection.item(s.subject_id, s.visit_id).checksums for s in subj.sessions]) # Get checksums/value for all outputs of the pipeline. We are assuming # that they exist here (otherwise they will be None) exp_outputs = { o.name: o.collection.item(node.subject_id, node.visit_id).checksums for o in self.outputs} exp_prov = copy(self.prov) if PY2: # Need to convert to unicode strings for Python 2 exp_inputs = json.loads(json.dumps(exp_inputs)) exp_outputs = json.loads(json.dumps(exp_outputs)) exp_prov['inputs'] = exp_inputs exp_prov['outputs'] = exp_outputs exp_prov['joined_ids'] = self._joined_ids() return Record( self.name, node.frequency, node.subject_id, node.visit_id, self.study.name, exp_prov)
python
def expected_record(self, node): """ Constructs the provenance record that would be saved in the given node if the pipeline was run on the current state of the repository Parameters ---------- node : arcana.repository.tree.TreeNode A node of the Tree representation of the study data stored in the repository (i.e. a Session, Visit, Subject or Tree node) Returns ------- expected_record : arcana.provenance.Record The record that would be produced if the pipeline is run over the study tree. """ exp_inputs = {} # Get checksums/values of all inputs that would have been used in # previous runs of an equivalent pipeline to compare with that saved # in provenance to see if any have been updated. for inpt in self.inputs: # @ReservedAssignment # Get iterators present in the input that aren't in this node # and need to be joined iterators_to_join = (self.iterators(inpt.frequency) - self.iterators(node.frequency)) if not iterators_to_join: # No iterators to join so we can just extract the checksums # of the corresponding input exp_inputs[inpt.name] = inpt.collection.item( node.subject_id, node.visit_id).checksums elif len(iterators_to_join) == 1: # Get list of checksums dicts for each node of the input # frequency that relates to the current node exp_inputs[inpt.name] = [ inpt.collection.item(n.subject_id, n.visit_id).checksums for n in node.nodes(inpt.frequency)] else: # In the case where the node is the whole treee and the input # is per_seession, we need to create a list of lists to match # how the checksums are joined in the processor exp_inputs[inpt.name] = [] for subj in node.subjects: exp_inputs[inpt.name].append([ inpt.collection.item(s.subject_id, s.visit_id).checksums for s in subj.sessions]) # Get checksums/value for all outputs of the pipeline. We are assuming # that they exist here (otherwise they will be None) exp_outputs = { o.name: o.collection.item(node.subject_id, node.visit_id).checksums for o in self.outputs} exp_prov = copy(self.prov) if PY2: # Need to convert to unicode strings for Python 2 exp_inputs = json.loads(json.dumps(exp_inputs)) exp_outputs = json.loads(json.dumps(exp_outputs)) exp_prov['inputs'] = exp_inputs exp_prov['outputs'] = exp_outputs exp_prov['joined_ids'] = self._joined_ids() return Record( self.name, node.frequency, node.subject_id, node.visit_id, self.study.name, exp_prov)
[ "def", "expected_record", "(", "self", ",", "node", ")", ":", "exp_inputs", "=", "{", "}", "# Get checksums/values of all inputs that would have been used in", "# previous runs of an equivalent pipeline to compare with that saved", "# in provenance to see if any have been updated.", "for", "inpt", "in", "self", ".", "inputs", ":", "# @ReservedAssignment", "# Get iterators present in the input that aren't in this node", "# and need to be joined", "iterators_to_join", "=", "(", "self", ".", "iterators", "(", "inpt", ".", "frequency", ")", "-", "self", ".", "iterators", "(", "node", ".", "frequency", ")", ")", "if", "not", "iterators_to_join", ":", "# No iterators to join so we can just extract the checksums", "# of the corresponding input", "exp_inputs", "[", "inpt", ".", "name", "]", "=", "inpt", ".", "collection", ".", "item", "(", "node", ".", "subject_id", ",", "node", ".", "visit_id", ")", ".", "checksums", "elif", "len", "(", "iterators_to_join", ")", "==", "1", ":", "# Get list of checksums dicts for each node of the input", "# frequency that relates to the current node", "exp_inputs", "[", "inpt", ".", "name", "]", "=", "[", "inpt", ".", "collection", ".", "item", "(", "n", ".", "subject_id", ",", "n", ".", "visit_id", ")", ".", "checksums", "for", "n", "in", "node", ".", "nodes", "(", "inpt", ".", "frequency", ")", "]", "else", ":", "# In the case where the node is the whole treee and the input", "# is per_seession, we need to create a list of lists to match", "# how the checksums are joined in the processor", "exp_inputs", "[", "inpt", ".", "name", "]", "=", "[", "]", "for", "subj", "in", "node", ".", "subjects", ":", "exp_inputs", "[", "inpt", ".", "name", "]", ".", "append", "(", "[", "inpt", ".", "collection", ".", "item", "(", "s", ".", "subject_id", ",", "s", ".", "visit_id", ")", ".", "checksums", "for", "s", "in", "subj", ".", "sessions", "]", ")", "# Get checksums/value for all outputs of the pipeline. We are assuming", "# that they exist here (otherwise they will be None)", "exp_outputs", "=", "{", "o", ".", "name", ":", "o", ".", "collection", ".", "item", "(", "node", ".", "subject_id", ",", "node", ".", "visit_id", ")", ".", "checksums", "for", "o", "in", "self", ".", "outputs", "}", "exp_prov", "=", "copy", "(", "self", ".", "prov", ")", "if", "PY2", ":", "# Need to convert to unicode strings for Python 2", "exp_inputs", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "exp_inputs", ")", ")", "exp_outputs", "=", "json", ".", "loads", "(", "json", ".", "dumps", "(", "exp_outputs", ")", ")", "exp_prov", "[", "'inputs'", "]", "=", "exp_inputs", "exp_prov", "[", "'outputs'", "]", "=", "exp_outputs", "exp_prov", "[", "'joined_ids'", "]", "=", "self", ".", "_joined_ids", "(", ")", "return", "Record", "(", "self", ".", "name", ",", "node", ".", "frequency", ",", "node", ".", "subject_id", ",", "node", ".", "visit_id", ",", "self", ".", "study", ".", "name", ",", "exp_prov", ")" ]
Constructs the provenance record that would be saved in the given node if the pipeline was run on the current state of the repository Parameters ---------- node : arcana.repository.tree.TreeNode A node of the Tree representation of the study data stored in the repository (i.e. a Session, Visit, Subject or Tree node) Returns ------- expected_record : arcana.provenance.Record The record that would be produced if the pipeline is run over the study tree.
[ "Constructs", "the", "provenance", "record", "that", "would", "be", "saved", "in", "the", "given", "node", "if", "the", "pipeline", "was", "run", "on", "the", "current", "state", "of", "the", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L873-L935
MonashBI/arcana
arcana/pipeline/base.py
Pipeline._joined_ids
def _joined_ids(self): """ Adds the subjects/visits used to generate the derivatives iff there are any joins over them in the pipeline """ joined_prov = {} if self.joins_subjects: joined_prov['subject_ids'] = list(self.study.subject_ids) if self.joins_visits: joined_prov['visit_ids'] = list(self.study.visit_ids) return joined_prov
python
def _joined_ids(self): """ Adds the subjects/visits used to generate the derivatives iff there are any joins over them in the pipeline """ joined_prov = {} if self.joins_subjects: joined_prov['subject_ids'] = list(self.study.subject_ids) if self.joins_visits: joined_prov['visit_ids'] = list(self.study.visit_ids) return joined_prov
[ "def", "_joined_ids", "(", "self", ")", ":", "joined_prov", "=", "{", "}", "if", "self", ".", "joins_subjects", ":", "joined_prov", "[", "'subject_ids'", "]", "=", "list", "(", "self", ".", "study", ".", "subject_ids", ")", "if", "self", ".", "joins_visits", ":", "joined_prov", "[", "'visit_ids'", "]", "=", "list", "(", "self", ".", "study", ".", "visit_ids", ")", "return", "joined_prov" ]
Adds the subjects/visits used to generate the derivatives iff there are any joins over them in the pipeline
[ "Adds", "the", "subjects", "/", "visits", "used", "to", "generate", "the", "derivatives", "iff", "there", "are", "any", "joins", "over", "them", "in", "the", "pipeline" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/pipeline/base.py#L937-L947
MonashBI/arcana
arcana/repository/base.py
Repository.tree
def tree(self, subject_ids=None, visit_ids=None, **kwargs): """ Return the tree of subject and sessions information within a project in the XNAT repository Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository """ # Find all data present in the repository (filtered by the passed IDs) return Tree.construct( self, *self.find_data(subject_ids=subject_ids, visit_ids=visit_ids), **kwargs)
python
def tree(self, subject_ids=None, visit_ids=None, **kwargs): """ Return the tree of subject and sessions information within a project in the XNAT repository Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository """ # Find all data present in the repository (filtered by the passed IDs) return Tree.construct( self, *self.find_data(subject_ids=subject_ids, visit_ids=visit_ids), **kwargs)
[ "def", "tree", "(", "self", ",", "subject_ids", "=", "None", ",", "visit_ids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Find all data present in the repository (filtered by the passed IDs)", "return", "Tree", ".", "construct", "(", "self", ",", "*", "self", ".", "find_data", "(", "subject_ids", "=", "subject_ids", ",", "visit_ids", "=", "visit_ids", ")", ",", "*", "*", "kwargs", ")" ]
Return the tree of subject and sessions information within a project in the XNAT repository Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, session and fileset information for the repository
[ "Return", "the", "tree", "of", "subject", "and", "sessions", "information", "within", "a", "project", "in", "the", "XNAT", "repository" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/base.py#L167-L190
MonashBI/arcana
arcana/repository/base.py
Repository.cached_tree
def cached_tree(self, subject_ids=None, visit_ids=None, fill=False): """ Access the repository tree and caches it for subsequent accesses Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned fill : bool Create empty sessions for any that are missing in the subject_id x visit_id block. Typically only used if all the inputs to the study are coming from different repositories to the one that the derived products are stored in Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, vist, session information and that of the filesets and fields they contain """ if subject_ids is not None: subject_ids = frozenset(subject_ids) if visit_ids is not None: visit_ids = frozenset(visit_ids) try: tree = self._cache[subject_ids][visit_ids] except KeyError: if fill: fill_subjects = subject_ids fill_visits = visit_ids else: fill_subjects = fill_visits = None tree = self.tree( subject_ids=subject_ids, visit_ids=visit_ids, fill_visits=fill_visits, fill_subjects=fill_subjects) # Save the tree within the cache under the given subject/ # visit ID filters and the IDs that were actually returned self._cache[subject_ids][visit_ids] = self._cache[ frozenset(tree.subject_ids)][frozenset(tree.visit_ids)] = tree return tree
python
def cached_tree(self, subject_ids=None, visit_ids=None, fill=False): """ Access the repository tree and caches it for subsequent accesses Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned fill : bool Create empty sessions for any that are missing in the subject_id x visit_id block. Typically only used if all the inputs to the study are coming from different repositories to the one that the derived products are stored in Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, vist, session information and that of the filesets and fields they contain """ if subject_ids is not None: subject_ids = frozenset(subject_ids) if visit_ids is not None: visit_ids = frozenset(visit_ids) try: tree = self._cache[subject_ids][visit_ids] except KeyError: if fill: fill_subjects = subject_ids fill_visits = visit_ids else: fill_subjects = fill_visits = None tree = self.tree( subject_ids=subject_ids, visit_ids=visit_ids, fill_visits=fill_visits, fill_subjects=fill_subjects) # Save the tree within the cache under the given subject/ # visit ID filters and the IDs that were actually returned self._cache[subject_ids][visit_ids] = self._cache[ frozenset(tree.subject_ids)][frozenset(tree.visit_ids)] = tree return tree
[ "def", "cached_tree", "(", "self", ",", "subject_ids", "=", "None", ",", "visit_ids", "=", "None", ",", "fill", "=", "False", ")", ":", "if", "subject_ids", "is", "not", "None", ":", "subject_ids", "=", "frozenset", "(", "subject_ids", ")", "if", "visit_ids", "is", "not", "None", ":", "visit_ids", "=", "frozenset", "(", "visit_ids", ")", "try", ":", "tree", "=", "self", ".", "_cache", "[", "subject_ids", "]", "[", "visit_ids", "]", "except", "KeyError", ":", "if", "fill", ":", "fill_subjects", "=", "subject_ids", "fill_visits", "=", "visit_ids", "else", ":", "fill_subjects", "=", "fill_visits", "=", "None", "tree", "=", "self", ".", "tree", "(", "subject_ids", "=", "subject_ids", ",", "visit_ids", "=", "visit_ids", ",", "fill_visits", "=", "fill_visits", ",", "fill_subjects", "=", "fill_subjects", ")", "# Save the tree within the cache under the given subject/", "# visit ID filters and the IDs that were actually returned", "self", ".", "_cache", "[", "subject_ids", "]", "[", "visit_ids", "]", "=", "self", ".", "_cache", "[", "frozenset", "(", "tree", ".", "subject_ids", ")", "]", "[", "frozenset", "(", "tree", ".", "visit_ids", ")", "]", "=", "tree", "return", "tree" ]
Access the repository tree and caches it for subsequent accesses Parameters ---------- subject_ids : list(str) List of subject IDs with which to filter the tree with. If None all are returned visit_ids : list(str) List of visit IDs with which to filter the tree with. If None all are returned fill : bool Create empty sessions for any that are missing in the subject_id x visit_id block. Typically only used if all the inputs to the study are coming from different repositories to the one that the derived products are stored in Returns ------- tree : arcana.repository.Tree A hierarchical tree of subject, vist, session information and that of the filesets and fields they contain
[ "Access", "the", "repository", "tree", "and", "caches", "it", "for", "subsequent", "accesses" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/base.py#L192-L236
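`cached_tree` above memoises trees in a two-level dict keyed by `frozenset`s of the requested IDs (with `None` meaning "all"), and additionally stores the result under the IDs that actually came back so later, more specific queries also hit the cache. A minimal sketch of that keying scheme, with a build counter standing in for the expensive `tree()` construction:

```python
from collections import defaultdict

class ToyRepo:
    def __init__(self):
        self._cache = defaultdict(dict)   # subject-key -> {visit-key: tree}
        self.builds = 0

    def _build_tree(self, subject_ids, visit_ids):
        self.builds += 1
        return {"subjects": subject_ids or ("S1", "S2"),
                "visits": visit_ids or ("V1",)}

    def cached_tree(self, subject_ids=None, visit_ids=None):
        skey = frozenset(subject_ids) if subject_ids is not None else None
        vkey = frozenset(visit_ids) if visit_ids is not None else None
        try:
            return self._cache[skey][vkey]
        except KeyError:
            tree = self._build_tree(subject_ids, visit_ids)
            # Store under both the requested filter and the IDs returned.
            self._cache[skey][vkey] = tree
            self._cache[frozenset(tree["subjects"])][frozenset(tree["visits"])] = tree
            return tree

repo = ToyRepo()
repo.cached_tree()                            # builds the tree
repo.cached_tree(["S1", "S2"], ["V1"])        # served from the cache
print(repo.builds)                            # 1
```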
MonashBI/arcana
arcana/data/file_format.py
FileFormat.resource_names
def resource_names(self, repo_type): """ Names of resources used to store the format on a given repository type. Defaults to the name of the name of the format """ try: names = self._resource_names[repo_type] except KeyError: names = [self.name, self.name.upper()] return names
python
def resource_names(self, repo_type): """ Names of resources used to store the format on a given repository type. Defaults to the name of the name of the format """ try: names = self._resource_names[repo_type] except KeyError: names = [self.name, self.name.upper()] return names
[ "def", "resource_names", "(", "self", ",", "repo_type", ")", ":", "try", ":", "names", "=", "self", ".", "_resource_names", "[", "repo_type", "]", "except", "KeyError", ":", "names", "=", "[", "self", ".", "name", ",", "self", ".", "name", ".", "upper", "(", ")", "]", "return", "names" ]
Names of resources used to store the format on a given repository type. Defaults to the name of the name of the format
[ "Names", "of", "resources", "used", "to", "store", "the", "format", "on", "a", "given", "repository", "type", ".", "Defaults", "to", "the", "name", "of", "the", "name", "of", "the", "format" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/file_format.py#L150-L159
MonashBI/arcana
arcana/data/file_format.py
FileFormat.default_aux_file_paths
def default_aux_file_paths(self, primary_path): """ Get the default paths for auxiliary files relative to the path of the primary file, i.e. the same name as the primary path with a different extension Parameters ---------- primary_path : str Path to the primary file in the fileset Returns ------- aux_paths : dict[str, str] A dictionary of auxiliary file names and default paths """ return dict((n, primary_path[:-len(self.ext)] + ext) for n, ext in self.aux_files.items())
python
def default_aux_file_paths(self, primary_path): """ Get the default paths for auxiliary files relative to the path of the primary file, i.e. the same name as the primary path with a different extension Parameters ---------- primary_path : str Path to the primary file in the fileset Returns ------- aux_paths : dict[str, str] A dictionary of auxiliary file names and default paths """ return dict((n, primary_path[:-len(self.ext)] + ext) for n, ext in self.aux_files.items())
[ "def", "default_aux_file_paths", "(", "self", ",", "primary_path", ")", ":", "return", "dict", "(", "(", "n", ",", "primary_path", "[", ":", "-", "len", "(", "self", ".", "ext", ")", "]", "+", "ext", ")", "for", "n", ",", "ext", "in", "self", ".", "aux_files", ".", "items", "(", ")", ")" ]
Get the default paths for auxiliary files relative to the path of the primary file, i.e. the same name as the primary path with a different extension Parameters ---------- primary_path : str Path to the primary file in the fileset Returns ------- aux_paths : dict[str, str] A dictionary of auxiliary file names and default paths
[ "Get", "the", "default", "paths", "for", "auxiliary", "files", "relative", "to", "the", "path", "of", "the", "primary", "file", "i", ".", "e", ".", "the", "same", "name", "as", "the", "primary", "path", "with", "a", "different", "extension" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/file_format.py#L161-L178
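`default_aux_file_paths` above derives auxiliary paths by swapping the primary extension for each auxiliary extension, which is plain string slicing. A quick stand-alone check of the same arithmetic, written as a free function; the `.nii`/`.json` pairing is only an illustrative assumption, not a statement about any particular registered format:

```python
def default_aux_file_paths(primary_path, primary_ext, aux_exts):
    # Same expression as the method above, with the format attributes
    # passed in explicitly.
    return {name: primary_path[:-len(primary_ext)] + ext
            for name, ext in aux_exts.items()}

print(default_aux_file_paths("sub-01_T1w.nii", ".nii", {"sidecar": ".json"}))
# {'sidecar': 'sub-01_T1w.json'}
```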
MonashBI/arcana
arcana/data/file_format.py
FileFormat.assort_files
def assort_files(self, candidates): """ Assorts candidate files into primary and auxiliary (and ignored) files corresponding to the format by their file extensions. Can be overridden in specialised subclasses to assort files based on other characteristics Parameters ---------- candidates : list[str] The list of filenames to assort Returns ------- primary_file : str Path to the selected primary file aux_files : dict[str, str] A dictionary mapping the auxiliary file name to the selected path """ by_ext = defaultdict(list) for path in candidates: by_ext[split_extension(path)[1].lower()].append(path) try: primary_file = by_ext[self.ext] except KeyError: raise ArcanaFileFormatError( "No files match primary file extension of {} out of " "potential candidates of {}" .format(self, "', '".join(candidates))) if not primary_file: raise ArcanaFileFormatError( "No potential files for primary file of {}".format(self)) elif len(primary_file) > 1: raise ArcanaFileFormatError( "Multiple potential files for '{}' primary file of {}" .format("', '".join(primary_file), self)) else: primary_file = primary_file[0] aux_files = {} for aux_name, aux_ext in self.aux_files.items(): try: aux = by_ext[aux_ext] except KeyError: raise ArcanaFileFormatError( "No files match auxiliary file extension '{}' of {} out of" " potential candidates of {}" .format(aux_ext, self, "', '".join(candidates))) if len(aux) > 1: raise ArcanaFileFormatError( "Multiple potential files for '{}' auxiliary file ext. " "({}) of {}".format("', '".join(aux), self)) aux_files[aux_name] = aux[0] return primary_file, aux_files
python
def assort_files(self, candidates): """ Assorts candidate files into primary and auxiliary (and ignored) files corresponding to the format by their file extensions. Can be overridden in specialised subclasses to assort files based on other characteristics Parameters ---------- candidates : list[str] The list of filenames to assort Returns ------- primary_file : str Path to the selected primary file aux_files : dict[str, str] A dictionary mapping the auxiliary file name to the selected path """ by_ext = defaultdict(list) for path in candidates: by_ext[split_extension(path)[1].lower()].append(path) try: primary_file = by_ext[self.ext] except KeyError: raise ArcanaFileFormatError( "No files match primary file extension of {} out of " "potential candidates of {}" .format(self, "', '".join(candidates))) if not primary_file: raise ArcanaFileFormatError( "No potential files for primary file of {}".format(self)) elif len(primary_file) > 1: raise ArcanaFileFormatError( "Multiple potential files for '{}' primary file of {}" .format("', '".join(primary_file), self)) else: primary_file = primary_file[0] aux_files = {} for aux_name, aux_ext in self.aux_files.items(): try: aux = by_ext[aux_ext] except KeyError: raise ArcanaFileFormatError( "No files match auxiliary file extension '{}' of {} out of" " potential candidates of {}" .format(aux_ext, self, "', '".join(candidates))) if len(aux) > 1: raise ArcanaFileFormatError( "Multiple potential files for '{}' auxiliary file ext. " "({}) of {}".format("', '".join(aux), self)) aux_files[aux_name] = aux[0] return primary_file, aux_files
[ "def", "assort_files", "(", "self", ",", "candidates", ")", ":", "by_ext", "=", "defaultdict", "(", "list", ")", "for", "path", "in", "candidates", ":", "by_ext", "[", "split_extension", "(", "path", ")", "[", "1", "]", ".", "lower", "(", ")", "]", ".", "append", "(", "path", ")", "try", ":", "primary_file", "=", "by_ext", "[", "self", ".", "ext", "]", "except", "KeyError", ":", "raise", "ArcanaFileFormatError", "(", "\"No files match primary file extension of {} out of \"", "\"potential candidates of {}\"", ".", "format", "(", "self", ",", "\"', '\"", ".", "join", "(", "candidates", ")", ")", ")", "if", "not", "primary_file", ":", "raise", "ArcanaFileFormatError", "(", "\"No potential files for primary file of {}\"", ".", "format", "(", "self", ")", ")", "elif", "len", "(", "primary_file", ")", ">", "1", ":", "raise", "ArcanaFileFormatError", "(", "\"Multiple potential files for '{}' primary file of {}\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "primary_file", ")", ",", "self", ")", ")", "else", ":", "primary_file", "=", "primary_file", "[", "0", "]", "aux_files", "=", "{", "}", "for", "aux_name", ",", "aux_ext", "in", "self", ".", "aux_files", ".", "items", "(", ")", ":", "try", ":", "aux", "=", "by_ext", "[", "aux_ext", "]", "except", "KeyError", ":", "raise", "ArcanaFileFormatError", "(", "\"No files match auxiliary file extension '{}' of {} out of\"", "\" potential candidates of {}\"", ".", "format", "(", "aux_ext", ",", "self", ",", "\"', '\"", ".", "join", "(", "candidates", ")", ")", ")", "if", "len", "(", "aux", ")", ">", "1", ":", "raise", "ArcanaFileFormatError", "(", "\"Multiple potential files for '{}' auxiliary file ext. \"", "\"({}) of {}\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "aux", ")", ",", "self", ")", ")", "aux_files", "[", "aux_name", "]", "=", "aux", "[", "0", "]", "return", "primary_file", ",", "aux_files" ]
Assorts candidate files into primary and auxiliary (and ignored) files corresponding to the format by their file extensions. Can be overridden in specialised subclasses to assort files based on other characteristics Parameters ---------- candidates : list[str] The list of filenames to assort Returns ------- primary_file : str Path to the selected primary file aux_files : dict[str, str] A dictionary mapping the auxiliary file name to the selected path
[ "Assorts", "candidate", "files", "into", "primary", "and", "auxiliary", "(", "and", "ignored", ")", "files", "corresponding", "to", "the", "format", "by", "their", "file", "extensions", ".", "Can", "be", "overridden", "in", "specialised", "subclasses", "to", "assort", "files", "based", "on", "other", "characteristics" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/file_format.py#L214-L267
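The heart of `assort_files` above is grouping candidate paths by lower-cased extension and then demanding exactly one match for the primary extension and for each auxiliary extension. The sketch below reproduces that grouping step in isolation, with `os.path.splitext` instead of Arcana's `split_extension` (so double extensions such as `.nii.gz` are not handled here) and a plain `ValueError` standing in for `ArcanaFileFormatError`.

```python
import os.path as op
from collections import defaultdict

def assort_by_ext(candidates, primary_ext, aux_exts):
    by_ext = defaultdict(list)
    for path in candidates:
        by_ext[op.splitext(path)[1].lower()].append(path)

    def exactly_one(ext, role):
        matches = by_ext.get(ext, [])
        if len(matches) != 1:
            raise ValueError("expected exactly one {} file ({}), got {}"
                             .format(role, ext, matches))
        return matches[0]

    primary = exactly_one(primary_ext, "primary")
    aux = {name: exactly_one(ext, name) for name, ext in aux_exts.items()}
    return primary, aux

print(assort_by_ext(["scan.IMG", "scan.hdr"], ".img", {"header": ".hdr"}))
# ('scan.IMG', {'header': 'scan.hdr'})
```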
MonashBI/arcana
arcana/data/file_format.py
FileFormat.matches
def matches(self, fileset): """ Checks to see whether the format matches the given fileset Parameters ---------- fileset : Fileset The fileset to check """ if fileset._resource_name is not None: return (fileset._resource_name in self.resource_names( fileset.repository.type)) elif self.directory: if op.isdir(fileset.path): if self.within_dir_exts is None: return True else: # Get set of all extensions in the directory return self.within_dir_exts == frozenset( split_extension(f)[1] for f in os.listdir(fileset.path) if not f.startswith('.')) else: return False else: if op.isfile(fileset.path): all_paths = [fileset.path] + fileset._potential_aux_files try: primary_path = self.assort_files(all_paths)[0] except ArcanaFileFormatError: return False else: return primary_path == fileset.path else: return False
python
def matches(self, fileset): """ Checks to see whether the format matches the given fileset Parameters ---------- fileset : Fileset The fileset to check """ if fileset._resource_name is not None: return (fileset._resource_name in self.resource_names( fileset.repository.type)) elif self.directory: if op.isdir(fileset.path): if self.within_dir_exts is None: return True else: # Get set of all extensions in the directory return self.within_dir_exts == frozenset( split_extension(f)[1] for f in os.listdir(fileset.path) if not f.startswith('.')) else: return False else: if op.isfile(fileset.path): all_paths = [fileset.path] + fileset._potential_aux_files try: primary_path = self.assort_files(all_paths)[0] except ArcanaFileFormatError: return False else: return primary_path == fileset.path else: return False
[ "def", "matches", "(", "self", ",", "fileset", ")", ":", "if", "fileset", ".", "_resource_name", "is", "not", "None", ":", "return", "(", "fileset", ".", "_resource_name", "in", "self", ".", "resource_names", "(", "fileset", ".", "repository", ".", "type", ")", ")", "elif", "self", ".", "directory", ":", "if", "op", ".", "isdir", "(", "fileset", ".", "path", ")", ":", "if", "self", ".", "within_dir_exts", "is", "None", ":", "return", "True", "else", ":", "# Get set of all extensions in the directory", "return", "self", ".", "within_dir_exts", "==", "frozenset", "(", "split_extension", "(", "f", ")", "[", "1", "]", "for", "f", "in", "os", ".", "listdir", "(", "fileset", ".", "path", ")", "if", "not", "f", ".", "startswith", "(", "'.'", ")", ")", "else", ":", "return", "False", "else", ":", "if", "op", ".", "isfile", "(", "fileset", ".", "path", ")", ":", "all_paths", "=", "[", "fileset", ".", "path", "]", "+", "fileset", ".", "_potential_aux_files", "try", ":", "primary_path", "=", "self", ".", "assort_files", "(", "all_paths", ")", "[", "0", "]", "except", "ArcanaFileFormatError", ":", "return", "False", "else", ":", "return", "primary_path", "==", "fileset", ".", "path", "else", ":", "return", "False" ]
Checks to see whether the format matches the given fileset Parameters ---------- fileset : Fileset The fileset to check
[ "Checks", "to", "see", "whether", "the", "format", "matches", "the", "given", "fileset" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/file_format.py#L269-L302
MonashBI/arcana
arcana/data/file_format.py
FileFormat.set_converter
def set_converter(self, file_format, converter): """ Register a Converter and the FileFormat that it is able to convert from Parameters ---------- converter : Converter The converter to register file_format : FileFormat The file format that can be converted into this format """ self._converters[file_format.name] = (file_format, converter)
python
def set_converter(self, file_format, converter): """ Register a Converter and the FileFormat that it is able to convert from Parameters ---------- converter : Converter The converter to register file_format : FileFormat The file format that can be converted into this format """ self._converters[file_format.name] = (file_format, converter)
[ "def", "set_converter", "(", "self", ",", "file_format", ",", "converter", ")", ":", "self", ".", "_converters", "[", "file_format", ".", "name", "]", "=", "(", "file_format", ",", "converter", ")" ]
Register a Converter and the FileFormat that it is able to convert from Parameters ---------- converter : Converter The converter to register file_format : FileFormat The file format that can be converted into this format
[ "Register", "a", "Converter", "and", "the", "FileFormat", "that", "it", "is", "able", "to", "convert", "from" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/file_format.py#L304-L315
gwww/elkm1
elkm1_lib/util.py
parse_url
def parse_url(url): """Parse a Elk connection string """ scheme, dest = url.split('://') host = None ssl_context = None if scheme == 'elk': host, port = dest.split(':') if ':' in dest else (dest, 2101) elif scheme == 'elks': host, port = dest.split(':') if ':' in dest else (dest, 2601) ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ssl_context.verify_mode = ssl.CERT_NONE elif scheme == 'serial': host, port = dest.split(':') if ':' in dest else (dest, 115200) else: raise ValueError("Invalid scheme '%s'" % scheme) return (scheme, host, int(port), ssl_context)
python
def parse_url(url): """Parse a Elk connection string """ scheme, dest = url.split('://') host = None ssl_context = None if scheme == 'elk': host, port = dest.split(':') if ':' in dest else (dest, 2101) elif scheme == 'elks': host, port = dest.split(':') if ':' in dest else (dest, 2601) ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ssl_context.verify_mode = ssl.CERT_NONE elif scheme == 'serial': host, port = dest.split(':') if ':' in dest else (dest, 115200) else: raise ValueError("Invalid scheme '%s'" % scheme) return (scheme, host, int(port), ssl_context)
[ "def", "parse_url", "(", "url", ")", ":", "scheme", ",", "dest", "=", "url", ".", "split", "(", "'://'", ")", "host", "=", "None", "ssl_context", "=", "None", "if", "scheme", "==", "'elk'", ":", "host", ",", "port", "=", "dest", ".", "split", "(", "':'", ")", "if", "':'", "in", "dest", "else", "(", "dest", ",", "2101", ")", "elif", "scheme", "==", "'elks'", ":", "host", ",", "port", "=", "dest", ".", "split", "(", "':'", ")", "if", "':'", "in", "dest", "else", "(", "dest", ",", "2601", ")", "ssl_context", "=", "ssl", ".", "SSLContext", "(", "ssl", ".", "PROTOCOL_TLSv1", ")", "ssl_context", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "elif", "scheme", "==", "'serial'", ":", "host", ",", "port", "=", "dest", ".", "split", "(", "':'", ")", "if", "':'", "in", "dest", "else", "(", "dest", ",", "115200", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid scheme '%s'\"", "%", "scheme", ")", "return", "(", "scheme", ",", "host", ",", "int", "(", "port", ")", ",", "ssl_context", ")" ]
Parse a Elk connection string
[ "Parse", "a", "Elk", "connection", "string" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/util.py#L14-L29
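Assuming the `elkm1_lib` package is importable, `parse_url` above can be exercised directly. The defaults visible in the code give port 2101 for `elk://`, port 2601 plus a TLS context for `elks://`, and a 115200 baud rate for `serial://` when none is specified:

```python
from elkm1_lib.util import parse_url

print(parse_url("elk://192.168.1.20"))
# ('elk', '192.168.1.20', 2101, None)

print(parse_url("serial:///dev/ttyUSB0:38400"))
# ('serial', '/dev/ttyUSB0', 38400, None)

scheme, host, port, ctx = parse_url("elks://panel.local")
print(scheme, host, port, ctx is not None)
# elks panel.local 2601 True

# Any other scheme raises:
# parse_url("http://panel.local")  -> ValueError: Invalid scheme 'http'
```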
gwww/elkm1
elkm1_lib/util.py
pretty_const
def pretty_const(value): """Make a constant pretty for printing in GUI""" words = value.split('_') pretty = words[0].capitalize() for word in words[1:]: pretty += ' ' + word.lower() return pretty
python
def pretty_const(value): """Make a constant pretty for printing in GUI""" words = value.split('_') pretty = words[0].capitalize() for word in words[1:]: pretty += ' ' + word.lower() return pretty
[ "def", "pretty_const", "(", "value", ")", ":", "words", "=", "value", ".", "split", "(", "'_'", ")", "pretty", "=", "words", "[", "0", "]", ".", "capitalize", "(", ")", "for", "word", "in", "words", "[", "1", ":", "]", ":", "pretty", "+=", "' '", "+", "word", ".", "lower", "(", ")", "return", "pretty" ]
Make a constant pretty for printing in GUI
[ "Make", "a", "constant", "pretty", "for", "printing", "in", "GUI" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/util.py#L32-L38
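`pretty_const` above just lower-cases and space-joins the underscore-separated words after capitalising the first one, so it is easy to sanity-check (again assuming `elkm1_lib` is installed):

```python
from elkm1_lib.util import pretty_const

print(pretty_const("FIRE_ALARM"))        # Fire alarm
print(pretty_const("ENTRY_EXIT_DELAY"))  # Entry exit delay
print(pretty_const("DISARMED"))          # Disarmed
```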
gwww/elkm1
elkm1_lib/util.py
username
def username(elk, user_number): """Return name of user.""" if user_number >= 0 and user_number < elk.users.max_elements: return elk.users[user_number].name if user_number == 201: return "*Program*" if user_number == 202: return "*Elk RP*" if user_number == 203: return "*Quick arm*" return ""
python
def username(elk, user_number): """Return name of user.""" if user_number >= 0 and user_number < elk.users.max_elements: return elk.users[user_number].name if user_number == 201: return "*Program*" if user_number == 202: return "*Elk RP*" if user_number == 203: return "*Quick arm*" return ""
[ "def", "username", "(", "elk", ",", "user_number", ")", ":", "if", "user_number", ">=", "0", "and", "user_number", "<", "elk", ".", "users", ".", "max_elements", ":", "return", "elk", ".", "users", "[", "user_number", "]", ".", "name", "if", "user_number", "==", "201", ":", "return", "\"*Program*\"", "if", "user_number", "==", "202", ":", "return", "\"*Elk RP*\"", "if", "user_number", "==", "203", ":", "return", "\"*Quick arm*\"", "return", "\"\"" ]
Return name of user.
[ "Return", "name", "of", "user", "." ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/util.py#L40-L50
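`username` above falls back to a few well-known pseudo-user codes (201, 202, 203) when the number is past the real user table. The check below uses a hypothetical stub in place of a connected `Elk` instance; the stub only provides the two attributes the function touches, and the table size of 199 is an assumption for illustration.

```python
from types import SimpleNamespace
from elkm1_lib.util import username

class FakeUsers:
    max_elements = 199                       # assumed user-table size
    def __getitem__(self, idx):
        return SimpleNamespace(name="User {:03d}".format(idx + 1))

elk = SimpleNamespace(users=FakeUsers())     # stand-in for a real Elk object

print(username(elk, 0))      # User 001
print(username(elk, 201))    # *Program*
print(username(elk, 202))    # *Elk RP*
print(username(elk, 203))    # *Quick arm*
print(username(elk, 300))    # (empty string)
```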
gwww/elkm1
elkm1_lib/elk.py
Elk._connect
async def _connect(self, connection_lost_callbk=None): """Asyncio connection to Elk.""" self.connection_lost_callbk = connection_lost_callbk url = self._config['url'] LOG.info("Connecting to ElkM1 at %s", url) scheme, dest, param, ssl_context = parse_url(url) conn = partial(Connection, self.loop, self._connected, self._disconnected, self._got_data, self._timeout) try: if scheme == 'serial': await serial_asyncio.create_serial_connection( self.loop, conn, dest, baudrate=param) else: await asyncio.wait_for(self.loop.create_connection( conn, host=dest, port=param, ssl=ssl_context), timeout=30) except (ValueError, OSError, asyncio.TimeoutError) as err: LOG.warning("Could not connect to ElkM1 (%s). Retrying in %d seconds", err, self._connection_retry_timer) self.loop.call_later(self._connection_retry_timer, self.connect) self._connection_retry_timer = 2 * self._connection_retry_timer \ if self._connection_retry_timer < 32 else 60
python
async def _connect(self, connection_lost_callbk=None): """Asyncio connection to Elk.""" self.connection_lost_callbk = connection_lost_callbk url = self._config['url'] LOG.info("Connecting to ElkM1 at %s", url) scheme, dest, param, ssl_context = parse_url(url) conn = partial(Connection, self.loop, self._connected, self._disconnected, self._got_data, self._timeout) try: if scheme == 'serial': await serial_asyncio.create_serial_connection( self.loop, conn, dest, baudrate=param) else: await asyncio.wait_for(self.loop.create_connection( conn, host=dest, port=param, ssl=ssl_context), timeout=30) except (ValueError, OSError, asyncio.TimeoutError) as err: LOG.warning("Could not connect to ElkM1 (%s). Retrying in %d seconds", err, self._connection_retry_timer) self.loop.call_later(self._connection_retry_timer, self.connect) self._connection_retry_timer = 2 * self._connection_retry_timer \ if self._connection_retry_timer < 32 else 60
[ "async", "def", "_connect", "(", "self", ",", "connection_lost_callbk", "=", "None", ")", ":", "self", ".", "connection_lost_callbk", "=", "connection_lost_callbk", "url", "=", "self", ".", "_config", "[", "'url'", "]", "LOG", ".", "info", "(", "\"Connecting to ElkM1 at %s\"", ",", "url", ")", "scheme", ",", "dest", ",", "param", ",", "ssl_context", "=", "parse_url", "(", "url", ")", "conn", "=", "partial", "(", "Connection", ",", "self", ".", "loop", ",", "self", ".", "_connected", ",", "self", ".", "_disconnected", ",", "self", ".", "_got_data", ",", "self", ".", "_timeout", ")", "try", ":", "if", "scheme", "==", "'serial'", ":", "await", "serial_asyncio", ".", "create_serial_connection", "(", "self", ".", "loop", ",", "conn", ",", "dest", ",", "baudrate", "=", "param", ")", "else", ":", "await", "asyncio", ".", "wait_for", "(", "self", ".", "loop", ".", "create_connection", "(", "conn", ",", "host", "=", "dest", ",", "port", "=", "param", ",", "ssl", "=", "ssl_context", ")", ",", "timeout", "=", "30", ")", "except", "(", "ValueError", ",", "OSError", ",", "asyncio", ".", "TimeoutError", ")", "as", "err", ":", "LOG", ".", "warning", "(", "\"Could not connect to ElkM1 (%s). Retrying in %d seconds\"", ",", "err", ",", "self", ".", "_connection_retry_timer", ")", "self", ".", "loop", ".", "call_later", "(", "self", ".", "_connection_retry_timer", ",", "self", ".", "connect", ")", "self", ".", "_connection_retry_timer", "=", "2", "*", "self", ".", "_connection_retry_timer", "if", "self", ".", "_connection_retry_timer", "<", "32", "else", "60" ]
Asyncio connection to Elk.
[ "Asyncio", "connection", "to", "Elk", "." ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elk.py#L50-L70
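The retry logic in `_connect` above doubles the delay after each failed attempt while it is below 32 seconds and then pins it at 60. A tiny sketch of just that back-off schedule, outside of asyncio and assuming the timer starts at 1 second (as `_connected` resets it to below):

```python
def backoff_schedule(attempts, initial=1):
    """Reproduce the retry delays used above: 1, 2, 4, ... then capped at 60s."""
    delays, timer = [], initial
    for _ in range(attempts):
        delays.append(timer)
        timer = 2 * timer if timer < 32 else 60
    return delays

print(backoff_schedule(8))
# [1, 2, 4, 8, 16, 32, 60, 60]
```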
gwww/elkm1
elkm1_lib/elk.py
Elk._connected
def _connected(self, transport, conn): """Login and sync the ElkM1 panel to memory.""" LOG.info("Connected to ElkM1") self._conn = conn self._transport = transport self._connection_retry_timer = 1 if url_scheme_is_secure(self._config['url']): self._conn.write_data(self._config['userid'], raw=True) self._conn.write_data(self._config['password'], raw=True) self.call_sync_handlers() if not self._config['url'].startswith('serial://'): self._heartbeat = self.loop.call_later(120, self._reset_connection)
python
def _connected(self, transport, conn): """Login and sync the ElkM1 panel to memory.""" LOG.info("Connected to ElkM1") self._conn = conn self._transport = transport self._connection_retry_timer = 1 if url_scheme_is_secure(self._config['url']): self._conn.write_data(self._config['userid'], raw=True) self._conn.write_data(self._config['password'], raw=True) self.call_sync_handlers() if not self._config['url'].startswith('serial://'): self._heartbeat = self.loop.call_later(120, self._reset_connection)
[ "def", "_connected", "(", "self", ",", "transport", ",", "conn", ")", ":", "LOG", ".", "info", "(", "\"Connected to ElkM1\"", ")", "self", ".", "_conn", "=", "conn", "self", ".", "_transport", "=", "transport", "self", ".", "_connection_retry_timer", "=", "1", "if", "url_scheme_is_secure", "(", "self", ".", "_config", "[", "'url'", "]", ")", ":", "self", ".", "_conn", ".", "write_data", "(", "self", ".", "_config", "[", "'userid'", "]", ",", "raw", "=", "True", ")", "self", ".", "_conn", ".", "write_data", "(", "self", ".", "_config", "[", "'password'", "]", ",", "raw", "=", "True", ")", "self", ".", "call_sync_handlers", "(", ")", "if", "not", "self", ".", "_config", "[", "'url'", "]", ".", "startswith", "(", "'serial://'", ")", ":", "self", ".", "_heartbeat", "=", "self", ".", "loop", ".", "call_later", "(", "120", ",", "self", ".", "_reset_connection", ")" ]
Login and sync the ElkM1 panel to memory.
[ "Login", "and", "sync", "the", "ElkM1", "panel", "to", "memory", "." ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elk.py#L72-L83
gwww/elkm1
elkm1_lib/elk.py
Elk._sd_handler
def _sd_handler(self, desc_type, unit, desc, show_on_keypad): """Text description""" if desc_type not in self._descriptions_in_progress: LOG.debug("Text description response ignored for " + str(desc_type)) return (max_units, results, callback) = self._descriptions_in_progress[desc_type] if unit < 0 or unit >= max_units: callback(results) del self._descriptions_in_progress[desc_type] return results[unit] = desc self.send(sd_encode(desc_type=desc_type, unit=unit+1))
python
def _sd_handler(self, desc_type, unit, desc, show_on_keypad): """Text description""" if desc_type not in self._descriptions_in_progress: LOG.debug("Text description response ignored for " + str(desc_type)) return (max_units, results, callback) = self._descriptions_in_progress[desc_type] if unit < 0 or unit >= max_units: callback(results) del self._descriptions_in_progress[desc_type] return results[unit] = desc self.send(sd_encode(desc_type=desc_type, unit=unit+1))
[ "def", "_sd_handler", "(", "self", ",", "desc_type", ",", "unit", ",", "desc", ",", "show_on_keypad", ")", ":", "if", "desc_type", "not", "in", "self", ".", "_descriptions_in_progress", ":", "LOG", ".", "debug", "(", "\"Text description response ignored for \"", "+", "str", "(", "desc_type", ")", ")", "return", "(", "max_units", ",", "results", ",", "callback", ")", "=", "self", ".", "_descriptions_in_progress", "[", "desc_type", "]", "if", "unit", "<", "0", "or", "unit", ">=", "max_units", ":", "callback", "(", "results", ")", "del", "self", ".", "_descriptions_in_progress", "[", "desc_type", "]", "return", "results", "[", "unit", "]", "=", "desc", "self", ".", "send", "(", "sd_encode", "(", "desc_type", "=", "desc_type", ",", "unit", "=", "unit", "+", "1", ")", ")" ]
Text description
[ "Text", "description" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elk.py#L129-L142
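The handler above pages through text-description responses: results are accumulated per description type and a callback fires once the unit index falls outside the valid range. The following is a hedged, synchronous sketch of that accumulate-and-request-next pattern; the function names and the "zone" data are made up for illustration and are not the elkm1 API.

in_progress = {}

def start(desc_type, max_units, callback):
    # Track (max_units, results, callback) per description type, as in _sd_handler.
    in_progress[desc_type] = (max_units, [None] * max_units, callback)

def on_description(desc_type, unit, desc):
    if desc_type not in in_progress:
        return
    max_units, results, callback = in_progress[desc_type]
    if unit < 0 or unit >= max_units:
        callback(results)          # sequence finished: hand back everything collected
        del in_progress[desc_type]
        return
    results[unit] = desc
    # A real handler would now request unit + 1 from the panel.

start("zone", 3, lambda res: print("done:", res))
for i, name in enumerate(["Front door", "Hallway", "Garage"]):
    on_description("zone", i, name)
on_description("zone", 3, None)  # out-of-range unit terminates the sequence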
gwww/elkm1
elkm1_lib/elk.py
Elk.send
def send(self, msg): """Send a message to Elk panel.""" if self._conn: self._conn.write_data(msg.message, msg.response_command)
python
def send(self, msg): """Send a message to Elk panel.""" if self._conn: self._conn.write_data(msg.message, msg.response_command)
[ "def", "send", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "_conn", ":", "self", ".", "_conn", ".", "write_data", "(", "msg", ".", "message", ",", "msg", ".", "response_command", ")" ]
Send a message to Elk panel.
[ "Send", "a", "message", "to", "Elk", "panel", "." ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/elk.py#L156-L159
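Elk.send simply drops messages while no connection exists. A tiny illustration of that None-guard, with placeholder names rather than the library's real connection object:

class Sender:
    def __init__(self):
        self._conn = None

    def send(self, data):
        if self._conn:           # silently ignore sends while disconnected
            self._conn.write(data)

s = Sender()
s.send(b"06az0058")  # dropped: not connected yet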
gwww/elkm1
elkm1_lib/zones.py
Zones.sync
def sync(self): """Retrieve zones from ElkM1""" self.elk.send(az_encode()) self.elk.send(zd_encode()) self.elk.send(zp_encode()) self.elk.send(zs_encode()) self.get_descriptions(TextDescriptions.ZONE.value)
python
def sync(self): """Retrieve zones from ElkM1""" self.elk.send(az_encode()) self.elk.send(zd_encode()) self.elk.send(zp_encode()) self.elk.send(zs_encode()) self.get_descriptions(TextDescriptions.ZONE.value)
[ "def", "sync", "(", "self", ")", ":", "self", ".", "elk", ".", "send", "(", "az_encode", "(", ")", ")", "self", ".", "elk", ".", "send", "(", "zd_encode", "(", ")", ")", "self", ".", "elk", ".", "send", "(", "zp_encode", "(", ")", ")", "self", ".", "elk", ".", "send", "(", "zs_encode", "(", ")", ")", "self", ".", "get_descriptions", "(", "TextDescriptions", ".", "ZONE", ".", "value", ")" ]
Retrieve zones from ElkM1
[ "Retrieve", "zones", "from", "ElkM1" ]
train
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/zones.py#L50-L56
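Zones.sync fires every status request up front and lets the asynchronous response handlers fill in state later. A hedged sketch of that batch-request idiom, with lambdas standing in for az_encode()/zd_encode()/zp_encode()/zs_encode():

def sync(send, encoders):
    # Send each encoded request; responses arrive and are decoded elsewhere.
    for encode in encoders:
        send(encode())

sent = []
sync(sent.append, [lambda: "az", lambda: "zd", lambda: "zp", lambda: "zs"])
print(sent)  # -> ['az', 'zd', 'zp', 'zs']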
erikvw/django-collect-offline
django_collect_offline/offline_model.py
OfflineModel.has_offline_historical_manager_or_raise
def has_offline_historical_manager_or_raise(self): """Raises an exception if model uses a history manager and historical model history_id is not a UUIDField. Note: expected to use edc_model.HistoricalRecords instead of simple_history.HistoricalRecords. """ try: model = self.instance.__class__.history.model except AttributeError: model = self.instance.__class__ field = [field for field in model._meta.fields if field.name == "history_id"] if field and not isinstance(field[0], UUIDField): raise OfflineHistoricalManagerError( f"Field 'history_id' of historical model " f"'{model._meta.app_label}.{model._meta.model_name}' " "must be an UUIDfield. " "For history = HistoricalRecords() use edc_model.HistoricalRecords instead of " "simple_history.HistoricalRecords(). " f"See '{self.instance._meta.app_label}.{self.instance._meta.model_name}'." )
python
def has_offline_historical_manager_or_raise(self): """Raises an exception if model uses a history manager and historical model history_id is not a UUIDField. Note: expected to use edc_model.HistoricalRecords instead of simple_history.HistoricalRecords. """ try: model = self.instance.__class__.history.model except AttributeError: model = self.instance.__class__ field = [field for field in model._meta.fields if field.name == "history_id"] if field and not isinstance(field[0], UUIDField): raise OfflineHistoricalManagerError( f"Field 'history_id' of historical model " f"'{model._meta.app_label}.{model._meta.model_name}' " "must be an UUIDfield. " "For history = HistoricalRecords() use edc_model.HistoricalRecords instead of " "simple_history.HistoricalRecords(). " f"See '{self.instance._meta.app_label}.{self.instance._meta.model_name}'." )
[ "def", "has_offline_historical_manager_or_raise", "(", "self", ")", ":", "try", ":", "model", "=", "self", ".", "instance", ".", "__class__", ".", "history", ".", "model", "except", "AttributeError", ":", "model", "=", "self", ".", "instance", ".", "__class__", "field", "=", "[", "field", "for", "field", "in", "model", ".", "_meta", ".", "fields", "if", "field", ".", "name", "==", "\"history_id\"", "]", "if", "field", "and", "not", "isinstance", "(", "field", "[", "0", "]", ",", "UUIDField", ")", ":", "raise", "OfflineHistoricalManagerError", "(", "f\"Field 'history_id' of historical model \"", "f\"'{model._meta.app_label}.{model._meta.model_name}' \"", "\"must be an UUIDfield. \"", "\"For history = HistoricalRecords() use edc_model.HistoricalRecords instead of \"", "\"simple_history.HistoricalRecords(). \"", "f\"See '{self.instance._meta.app_label}.{self.instance._meta.model_name}'.\"", ")" ]
Raises an exception if model uses a history manager and historical model history_id is not a UUIDField. Note: expected to use edc_model.HistoricalRecords instead of simple_history.HistoricalRecords.
[ "Raises", "an", "exception", "if", "model", "uses", "a", "history", "manager", "and", "historical", "model", "history_id", "is", "not", "a", "UUIDField", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/offline_model.py#L71-L91
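The check above looks up a field by name on the (historical) model and raises a descriptive error when its type is not the expected one. Below is a framework-free sketch of that pattern, assuming stand-in objects that mimic Django's _meta API rather than real models:

class UUIDField: pass

class FakeField:
    def __init__(self, name, kind):
        self.name, self.kind = name, kind

class FakeMeta:
    def __init__(self, fields):
        self.fields = fields

def check_history_id(meta):
    # Mirror of the lookup: find 'history_id' and verify its field type.
    field = [f for f in meta.fields if f.name == "history_id"]
    if field and not isinstance(field[0].kind, UUIDField):
        raise TypeError("Field 'history_id' must be a UUIDField")

check_history_id(FakeMeta([FakeField("history_id", UUIDField())]))  # passes silently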
erikvw/django-collect-offline
django_collect_offline/offline_model.py
OfflineModel.primary_key_field
def primary_key_field(self): """Return the primary key field. Is `id` in most cases. Is `history_id` for Historical models. """ return [field for field in self.instance._meta.fields if field.primary_key][0]
python
def primary_key_field(self): """Return the primary key field. Is `id` in most cases. Is `history_id` for Historical models. """ return [field for field in self.instance._meta.fields if field.primary_key][0]
[ "def", "primary_key_field", "(", "self", ")", ":", "return", "[", "field", "for", "field", "in", "self", ".", "instance", ".", "_meta", ".", "fields", "if", "field", ".", "primary_key", "]", "[", "0", "]" ]
Return the primary key field. Is `id` in most cases. Is `history_id` for Historical models.
[ "Return", "the", "primary", "key", "field", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/offline_model.py#L103-L108
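The same "first field flagged as primary key" lookup, shown with plain objects instead of Django model _meta (purely illustrative):

class F:
    def __init__(self, name, primary_key=False):
        self.name, self.primary_key = name, primary_key

fields = [F("history_id", primary_key=True), F("created"), F("modified")]
pk = [f for f in fields if f.primary_key][0]
print(pk.name)  # -> history_id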
erikvw/django-collect-offline
django_collect_offline/offline_model.py
OfflineModel.to_outgoing_transaction
def to_outgoing_transaction(self, using, created=None, deleted=None): """ Serialize the model instance to an AES encrypted json object and saves the json object to the OutgoingTransaction model. """ OutgoingTransaction = django_apps.get_model( "django_collect_offline", "OutgoingTransaction" ) created = True if created is None else created action = INSERT if created else UPDATE timestamp_datetime = ( self.instance.created if created else self.instance.modified ) if not timestamp_datetime: timestamp_datetime = get_utcnow() if deleted: timestamp_datetime = get_utcnow() action = DELETE outgoing_transaction = None if self.is_serialized: hostname = socket.gethostname() outgoing_transaction = OutgoingTransaction.objects.using(using).create( tx_name=self.instance._meta.label_lower, tx_pk=getattr(self.instance, self.primary_key_field.name), tx=self.encrypted_json(), timestamp=timestamp_datetime.strftime("%Y%m%d%H%M%S%f"), producer=f"{hostname}-{using}", action=action, using=using, ) return outgoing_transaction
python
def to_outgoing_transaction(self, using, created=None, deleted=None): """ Serialize the model instance to an AES encrypted json object and saves the json object to the OutgoingTransaction model. """ OutgoingTransaction = django_apps.get_model( "django_collect_offline", "OutgoingTransaction" ) created = True if created is None else created action = INSERT if created else UPDATE timestamp_datetime = ( self.instance.created if created else self.instance.modified ) if not timestamp_datetime: timestamp_datetime = get_utcnow() if deleted: timestamp_datetime = get_utcnow() action = DELETE outgoing_transaction = None if self.is_serialized: hostname = socket.gethostname() outgoing_transaction = OutgoingTransaction.objects.using(using).create( tx_name=self.instance._meta.label_lower, tx_pk=getattr(self.instance, self.primary_key_field.name), tx=self.encrypted_json(), timestamp=timestamp_datetime.strftime("%Y%m%d%H%M%S%f"), producer=f"{hostname}-{using}", action=action, using=using, ) return outgoing_transaction
[ "def", "to_outgoing_transaction", "(", "self", ",", "using", ",", "created", "=", "None", ",", "deleted", "=", "None", ")", ":", "OutgoingTransaction", "=", "django_apps", ".", "get_model", "(", "\"django_collect_offline\"", ",", "\"OutgoingTransaction\"", ")", "created", "=", "True", "if", "created", "is", "None", "else", "created", "action", "=", "INSERT", "if", "created", "else", "UPDATE", "timestamp_datetime", "=", "(", "self", ".", "instance", ".", "created", "if", "created", "else", "self", ".", "instance", ".", "modified", ")", "if", "not", "timestamp_datetime", ":", "timestamp_datetime", "=", "get_utcnow", "(", ")", "if", "deleted", ":", "timestamp_datetime", "=", "get_utcnow", "(", ")", "action", "=", "DELETE", "outgoing_transaction", "=", "None", "if", "self", ".", "is_serialized", ":", "hostname", "=", "socket", ".", "gethostname", "(", ")", "outgoing_transaction", "=", "OutgoingTransaction", ".", "objects", ".", "using", "(", "using", ")", ".", "create", "(", "tx_name", "=", "self", ".", "instance", ".", "_meta", ".", "label_lower", ",", "tx_pk", "=", "getattr", "(", "self", ".", "instance", ",", "self", ".", "primary_key_field", ".", "name", ")", ",", "tx", "=", "self", ".", "encrypted_json", "(", ")", ",", "timestamp", "=", "timestamp_datetime", ".", "strftime", "(", "\"%Y%m%d%H%M%S%f\"", ")", ",", "producer", "=", "f\"{hostname}-{using}\"", ",", "action", "=", "action", ",", "using", "=", "using", ",", ")", "return", "outgoing_transaction" ]
Serialize the model instance to an AES encrypted json object and saves the json object to the OutgoingTransaction model.
[ "Serialize", "the", "model", "instance", "to", "an", "AES", "encrypted", "json", "object", "and", "saves", "the", "json", "object", "to", "the", "OutgoingTransaction", "model", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/offline_model.py#L110-L139
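The interesting part of to_outgoing_transaction is how it picks the action (INSERT/UPDATE/DELETE) and the timestamp before serializing. A hedged, Django-free sketch of just that selection logic, with placeholder names:

from datetime import datetime, timezone

INSERT, UPDATE, DELETE = "I", "U", "D"

def choose_action(created=None, deleted=None, created_at=None, modified_at=None):
    created = True if created is None else created
    action = INSERT if created else UPDATE
    timestamp = created_at if created else modified_at
    if not timestamp or deleted:
        timestamp = datetime.now(timezone.utc)   # fall back to "now", as get_utcnow() does
    if deleted:
        action = DELETE
    return action, timestamp.strftime("%Y%m%d%H%M%S%f")

print(choose_action(created=True, created_at=datetime(2020, 1, 1, tzinfo=timezone.utc)))
print(choose_action(created=False, deleted=True))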
erikvw/django-collect-offline
django_collect_offline/offline_model.py
OfflineModel.encrypted_json
def encrypted_json(self): """Returns an encrypted json serialized from self. """ json = serialize(objects=[self.instance]) encrypted_json = Cryptor().aes_encrypt(json, LOCAL_MODE) return encrypted_json
python
def encrypted_json(self): """Returns an encrypted json serialized from self. """ json = serialize(objects=[self.instance]) encrypted_json = Cryptor().aes_encrypt(json, LOCAL_MODE) return encrypted_json
[ "def", "encrypted_json", "(", "self", ")", ":", "json", "=", "serialize", "(", "objects", "=", "[", "self", ".", "instance", "]", ")", "encrypted_json", "=", "Cryptor", "(", ")", ".", "aes_encrypt", "(", "json", ",", "LOCAL_MODE", ")", "return", "encrypted_json" ]
Returns an encrypted json serialized from self.
[ "Returns", "an", "encrypted", "json", "serialized", "from", "self", "." ]
train
https://github.com/erikvw/django-collect-offline/blob/3d5efd66c68e2db4b060a82b070ae490dc399ca7/django_collect_offline/offline_model.py#L141-L146
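encrypted_json serializes the instance and then symmetrically encrypts the JSON. As a sketch of that shape, the snippet below uses Fernet from the `cryptography` package as a stand-in for the Cryptor().aes_encrypt() call (which comes from django_crypto_fields and is not shown here); the payload is invented for illustration.

import json
from cryptography.fernet import Fernet

key = Fernet.generate_key()           # in practice the key is persisted and shared
payload = json.dumps([{"model": "example.crf", "pk": "1234"}]).encode()
token = Fernet(key).encrypt(payload)  # opaque ciphertext, safe to store or ship
print(Fernet(key).decrypt(token) == payload)  # -> True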
MonashBI/arcana
arcana/environment/modules.py
ModulesEnv.map_req
def map_req(self, requirement): """ Maps the name of an Requirement class to the name of the corresponding module in the environment Parameters ---------- requirement : Requirement The requirement to map to the name of a module on the system """ if isinstance(self._packages_map, dict): local_name = self._packages_map.get(requirement, requirement.name) else: local_name = self._packages_map(requirement) return local_name
python
def map_req(self, requirement): """ Maps the name of an Requirement class to the name of the corresponding module in the environment Parameters ---------- requirement : Requirement The requirement to map to the name of a module on the system """ if isinstance(self._packages_map, dict): local_name = self._packages_map.get(requirement, requirement.name) else: local_name = self._packages_map(requirement) return local_name
[ "def", "map_req", "(", "self", ",", "requirement", ")", ":", "if", "isinstance", "(", "self", ".", "_packages_map", ",", "dict", ")", ":", "local_name", "=", "self", ".", "_packages_map", ".", "get", "(", "requirement", ",", "requirement", ".", "name", ")", "else", ":", "local_name", "=", "self", ".", "_packages_map", "(", "requirement", ")", "return", "local_name" ]
Maps the name of an Requirement class to the name of the corresponding module in the environment Parameters ---------- requirement : Requirement The requirement to map to the name of a module on the system
[ "Maps", "the", "name", "of", "an", "Requirement", "class", "to", "the", "name", "of", "the", "corresponding", "module", "in", "the", "environment" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/modules.py#L169-L183
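map_req accepts either a dict or a callable as the mapping. A standalone sketch of that dual-form idiom; the Requirement stand-in below has only a .name field and is not arcana's real class:

from collections import namedtuple

Requirement = namedtuple("Requirement", "name")

def map_req(packages_map, requirement):
    if isinstance(packages_map, dict):
        return packages_map.get(requirement, requirement.name)  # fall back to the canonical name
    return packages_map(requirement)                            # or delegate to a callable

fsl = Requirement("fsl")
print(map_req({fsl: "fsl-parallel"}, fsl))      # dict form -> 'fsl-parallel'
print(map_req(lambda r: r.name.upper(), fsl))   # callable form -> 'FSL'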
MonashBI/arcana
arcana/environment/modules.py
ModulesEnv.map_version
def map_version(self, requirement, local_version): """ Maps a local version name to one recognised by the Requirement class Parameters ---------- requirement : str Name of the requirement version : str version string """ if isinstance(self._versions_map, dict): version = self._versions_map.get(requirement, {}).get( local_version, local_version) else: version = self._versions_map(requirement, local_version) return version
python
def map_version(self, requirement, local_version): """ Maps a local version name to one recognised by the Requirement class Parameters ---------- requirement : str Name of the requirement version : str version string """ if isinstance(self._versions_map, dict): version = self._versions_map.get(requirement, {}).get( local_version, local_version) else: version = self._versions_map(requirement, local_version) return version
[ "def", "map_version", "(", "self", ",", "requirement", ",", "local_version", ")", ":", "if", "isinstance", "(", "self", ".", "_versions_map", ",", "dict", ")", ":", "version", "=", "self", ".", "_versions_map", ".", "get", "(", "requirement", ",", "{", "}", ")", ".", "get", "(", "local_version", ",", "local_version", ")", "else", ":", "version", "=", "self", ".", "_versions_map", "(", "requirement", ",", "local_version", ")", "return", "version" ]
Maps a local version name to one recognised by the Requirement class Parameters ---------- requirement : str Name of the requirement version : str version string
[ "Maps", "a", "local", "version", "name", "to", "one", "recognised", "by", "the", "Requirement", "class" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/environment/modules.py#L185-L201
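map_version uses a nested dict with a double .get() fallback, so unknown requirements and unknown local versions both pass through unchanged. The same logic standalone, with an invented version map:

def map_version(versions_map, requirement, local_version):
    if isinstance(versions_map, dict):
        return versions_map.get(requirement, {}).get(local_version, local_version)
    return versions_map(requirement, local_version)

vmap = {"mrtrix": {"3.0_RC3": "3.0rc3"}}
print(map_version(vmap, "mrtrix", "3.0_RC3"))   # mapped   -> '3.0rc3'
print(map_version(vmap, "fsl", "6.0.1"))        # no entry -> '6.0.1'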
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytagfile.py
TagfileProcessor.init_tag_processors
def init_tag_processors(self): '''Register the TagProcessors that are bundled with doxytag2zealdb.''' register = self.register_tag_processor register('class', classTagProcessor(**self.opts)) register('file', fileTagProcessor(**self.opts)) register('namespace', namespaceTagProcessor(**self.opts)) register('struct', structTagProcessor(**self.opts)) register('union', unionTagProcessor(**self.opts)) register('function', functionTagProcessor(**self.opts)) register('define', defineTagProcessor(**self.opts)) register('enumeration', enumerationTagProcessor(**self.opts)) register('enumvalue', enumvalueTagProcessor(**self.opts)) register('typedef', typedefTagProcessor(**self.opts)) register('variable', variableTagProcessor(**self.opts))
python
def init_tag_processors(self): '''Register the TagProcessors that are bundled with doxytag2zealdb.''' register = self.register_tag_processor register('class', classTagProcessor(**self.opts)) register('file', fileTagProcessor(**self.opts)) register('namespace', namespaceTagProcessor(**self.opts)) register('struct', structTagProcessor(**self.opts)) register('union', unionTagProcessor(**self.opts)) register('function', functionTagProcessor(**self.opts)) register('define', defineTagProcessor(**self.opts)) register('enumeration', enumerationTagProcessor(**self.opts)) register('enumvalue', enumvalueTagProcessor(**self.opts)) register('typedef', typedefTagProcessor(**self.opts)) register('variable', variableTagProcessor(**self.opts))
[ "def", "init_tag_processors", "(", "self", ")", ":", "register", "=", "self", ".", "register_tag_processor", "register", "(", "'class'", ",", "classTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'file'", ",", "fileTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'namespace'", ",", "namespaceTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'struct'", ",", "structTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'union'", ",", "unionTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'function'", ",", "functionTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'define'", ",", "defineTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'enumeration'", ",", "enumerationTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'enumvalue'", ",", "enumvalueTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'typedef'", ",", "typedefTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")", "register", "(", "'variable'", ",", "variableTagProcessor", "(", "*", "*", "self", ".", "opts", ")", ")" ]
Register the TagProcessors that are bundled with doxytag2zealdb.
[ "Register", "the", "TagProcessors", "that", "are", "bundled", "with", "doxytag2zealdb", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytagfile.py#L76-L90
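init_tag_processors is a registry: each tag kind maps to the object that knows how to handle it. A reduced sketch of that idiom; EchoProcessor is a placeholder, not one of doxytag2zealdb's real processor classes:

class EchoProcessor:
    def __init__(self, kind):
        self.kind = kind
    def handle(self, tag):
        return "%s:%s" % (self.kind, tag)

registry = {}

def register(kind, processor):
    registry[kind] = processor

for kind in ("class", "function", "typedef"):
    register(kind, EchoProcessor(kind))

print(registry["function"].handle("doStuff"))  # -> 'function:doStuff'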
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytagfile.py
TagfileProcessor.process
def process(self): '''Run all tag processors.''' for tag_proc in self.tag_procs: before_count = self.entry_count self.run_tag_processor(tag_proc) after_count = self.entry_count if self.verbose: print('Inserted %d entries for "%s" tag processor' % ( after_count - before_count, tag_proc), file=sys.stderr) if self.verbose: print('Inserted %d entries overall' % self.entry_count, file=sys.stderr)
python
def process(self): '''Run all tag processors.''' for tag_proc in self.tag_procs: before_count = self.entry_count self.run_tag_processor(tag_proc) after_count = self.entry_count if self.verbose: print('Inserted %d entries for "%s" tag processor' % ( after_count - before_count, tag_proc), file=sys.stderr) if self.verbose: print('Inserted %d entries overall' % self.entry_count, file=sys.stderr)
[ "def", "process", "(", "self", ")", ":", "for", "tag_proc", "in", "self", ".", "tag_procs", ":", "before_count", "=", "self", ".", "entry_count", "self", ".", "run_tag_processor", "(", "tag_proc", ")", "after_count", "=", "self", ".", "entry_count", "if", "self", ".", "verbose", ":", "print", "(", "'Inserted %d entries for \"%s\" tag processor'", "%", "(", "after_count", "-", "before_count", ",", "tag_proc", ")", ",", "file", "=", "sys", ".", "stderr", ")", "if", "self", ".", "verbose", ":", "print", "(", "'Inserted %d entries overall'", "%", "self", ".", "entry_count", ",", "file", "=", "sys", ".", "stderr", ")" ]
Run all tag processors.
[ "Run", "all", "tag", "processors", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytagfile.py#L111-L124
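process() reports per-processor contribution by diffing an entry counter before and after each run. A hedged sketch of that before/after accounting with invented inputs:

import sys

def process(processors, run_one, verbose=True):
    total = 0
    for name, proc in processors.items():
        added = run_one(proc)          # entries inserted by this processor
        total += added
        if verbose:
            print('Inserted %d entries for "%s" tag processor' % (added, name),
                  file=sys.stderr)
    if verbose:
        print("Inserted %d entries overall" % total, file=sys.stderr)

process({"class": 3, "function": 5}, run_one=lambda n: n)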
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytagfile.py
TagfileProcessor.run_tag_processor
def run_tag_processor(self, tag_proc_name): '''Run a tag processor. Args: tag_proc_name: A string key that maps to the TagProcessor to run. ''' tag_processor = self.tag_procs[tag_proc_name] for tag in tag_processor.find(self.soup): self.process_tag(tag_proc_name, tag)
python
def run_tag_processor(self, tag_proc_name): '''Run a tag processor. Args: tag_proc_name: A string key that maps to the TagProcessor to run. ''' tag_processor = self.tag_procs[tag_proc_name] for tag in tag_processor.find(self.soup): self.process_tag(tag_proc_name, tag)
[ "def", "run_tag_processor", "(", "self", ",", "tag_proc_name", ")", ":", "tag_processor", "=", "self", ".", "tag_procs", "[", "tag_proc_name", "]", "for", "tag", "in", "tag_processor", ".", "find", "(", "self", ".", "soup", ")", ":", "self", ".", "process_tag", "(", "tag_proc_name", ",", "tag", ")" ]
Run a tag processor. Args: tag_proc_name: A string key that maps to the TagProcessor to run.
[ "Run", "a", "tag", "processor", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytagfile.py#L126-L135
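run_tag_processor asks the processor to find its tags in the parsed soup and then handles each one. A small BeautifulSoup sketch of that find-then-process loop over a Doxygen-style tag file; the XML snippet and the kind filter are made up for illustration.

from bs4 import BeautifulSoup

xml = """
<tagfile>
  <compound kind="class"><name>Widget</name><filename>classWidget.html</filename></compound>
  <compound kind="namespace"><name>gui</name><filename>namespacegui.html</filename></compound>
</tagfile>
"""
soup = BeautifulSoup(xml, "html.parser")
for tag in soup.find_all("compound", kind="class"):
    print(tag.find("name").text, "->", tag.find("filename").text)
# -> Widget -> classWidget.html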
vedvyas/doxytag2zealdb
doxytag2zealdb/doxytagfile.py
TagfileProcessor.process_tag
def process_tag(self, tag_proc_name, tag): '''Process a tag with a tag processor and insert a DB entry. Args: tag_proc_name: A string key that maps to the TagProcessor to use. tag: A BeautifulSoup Tag to process. ''' tag_processor = self.tag_procs[tag_proc_name] db_entry = (tag_processor.get_name(tag), tag_processor.get_entry_type(tag), tag_processor.get_filename(tag)) self.zeal_db.insert(*db_entry) self.entry_count += 1
python
def process_tag(self, tag_proc_name, tag): '''Process a tag with a tag processor and insert a DB entry. Args: tag_proc_name: A string key that maps to the TagProcessor to use. tag: A BeautifulSoup Tag to process. ''' tag_processor = self.tag_procs[tag_proc_name] db_entry = (tag_processor.get_name(tag), tag_processor.get_entry_type(tag), tag_processor.get_filename(tag)) self.zeal_db.insert(*db_entry) self.entry_count += 1
[ "def", "process_tag", "(", "self", ",", "tag_proc_name", ",", "tag", ")", ":", "tag_processor", "=", "self", ".", "tag_procs", "[", "tag_proc_name", "]", "db_entry", "=", "(", "tag_processor", ".", "get_name", "(", "tag", ")", ",", "tag_processor", ".", "get_entry_type", "(", "tag", ")", ",", "tag_processor", ".", "get_filename", "(", "tag", ")", ")", "self", ".", "zeal_db", ".", "insert", "(", "*", "db_entry", ")", "self", ".", "entry_count", "+=", "1" ]
Process a tag with a tag processor and insert a DB entry. Args: tag_proc_name: A string key that maps to the TagProcessor to use. tag: A BeautifulSoup Tag to process.
[ "Process", "a", "tag", "with", "a", "tag", "processor", "and", "insert", "a", "DB", "entry", "." ]
train
https://github.com/vedvyas/doxytag2zealdb/blob/8b07a88af6794248f8cfdabb0fda9dd61c777127/doxytag2zealdb/doxytagfile.py#L137-L152
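process_tag builds a (name, type, path) row and hands it to the docset database. The sketch below uses sqlite3 in place of the project's ZealDB wrapper; the table layout is simplified and the row values are invented.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE searchIndex (name TEXT, type TEXT, path TEXT)")

def process_tag(name, entry_type, filename):
    # One DB row per processed tag, mirroring the insert above.
    conn.execute("INSERT INTO searchIndex VALUES (?, ?, ?)",
                 (name, entry_type, filename))

process_tag("Widget::draw", "Method", "classWidget.html#a1b2c3")
print(conn.execute("SELECT * FROM searchIndex").fetchall())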
brentp/toolshed
toolshed/__init__.py
groupby
def groupby(iterable, key=0, filter=None): """ wrapper to itertools.groupby that returns a list of each group, rather than a generator and accepts integers or strings as the key and automatically converts them to callables with itemgetter(key) Arguments: iterable: iterable key: string, int or callable that tells how to group Returns: an iterable where each item is the key and a *list* of that group. (itertools.groupby returns a generator of that group). e.g. groupby(iterable, 0) """ if isinstance(key, (basestring, int)): key = itemgetter(key) elif isinstance(key, (tuple, list)): key = itemgetter(*key) for label, grp in igroupby(iterable, key): yield label, list(grp)
python
def groupby(iterable, key=0, filter=None): """ wrapper to itertools.groupby that returns a list of each group, rather than a generator and accepts integers or strings as the key and automatically converts them to callables with itemgetter(key) Arguments: iterable: iterable key: string, int or callable that tells how to group Returns: an iterable where each item is the key and a *list* of that group. (itertools.groupby returns a generator of that group). e.g. groupby(iterable, 0) """ if isinstance(key, (basestring, int)): key = itemgetter(key) elif isinstance(key, (tuple, list)): key = itemgetter(*key) for label, grp in igroupby(iterable, key): yield label, list(grp)
[ "def", "groupby", "(", "iterable", ",", "key", "=", "0", ",", "filter", "=", "None", ")", ":", "if", "isinstance", "(", "key", ",", "(", "basestring", ",", "int", ")", ")", ":", "key", "=", "itemgetter", "(", "key", ")", "elif", "isinstance", "(", "key", ",", "(", "tuple", ",", "list", ")", ")", ":", "key", "=", "itemgetter", "(", "*", "key", ")", "for", "label", ",", "grp", "in", "igroupby", "(", "iterable", ",", "key", ")", ":", "yield", "label", ",", "list", "(", "grp", ")" ]
wrapper to itertools.groupby that returns a list of each group, rather than a generator and accepts integers or strings as the key and automatically converts them to callables with itemgetter(key) Arguments: iterable: iterable key: string, int or callable that tells how to group Returns: an iterable where each item is the key and a *list* of that group. (itertools.groupby returns a generator of that group). e.g. groupby(iterable, 0)
[ "wrapper", "to", "itertools", ".", "groupby", "that", "returns", "a", "list", "of", "each", "group", "rather", "than", "a", "generator", "and", "accepts", "integers", "or", "strings", "as", "the", "key", "and", "automatically", "converts", "them", "to", "callables", "with", "itemgetter", "(", "key", ")" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/__init__.py#L14-L35
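A usage sketch of what the groupby wrapper above produces: integer or string keys become itemgetter() calls and each group is materialized as a list. As with itertools.groupby, the input must already be sorted by the key; the rows below are invented sample data.

from itertools import groupby as igroupby
from operator import itemgetter

rows = [("chr1", 10), ("chr1", 25), ("chr2", 5)]
for label, grp in ((k, list(g)) for k, g in igroupby(rows, itemgetter(0))):
    print(label, grp)
# chr1 [('chr1', 10), ('chr1', 25)]
# chr2 [('chr2', 5)]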
brentp/toolshed
toolshed/__init__.py
groups_of
def groups_of(n, iterable): """ >>> groups_of(2, range(5)) """ args = [iter(iterable)] * n for x in izip_longest(*args): yield [v for v in x if v is not None]
python
def groups_of(n, iterable): """ >>> groups_of(2, range(5)) """ args = [iter(iterable)] * n for x in izip_longest(*args): yield [v for v in x if v is not None]
[ "def", "groups_of", "(", "n", ",", "iterable", ")", ":", "args", "=", "[", "iter", "(", "iterable", ")", "]", "*", "n", "for", "x", "in", "izip_longest", "(", "*", "args", ")", ":", "yield", "[", "v", "for", "v", "in", "x", "if", "v", "is", "not", "None", "]" ]
>>> groups_of(2, range(5))
[ ">>>", "groups_of", "(", "2", "range", "(", "5", "))" ]
train
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/__init__.py#L42-L48
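The same chunking written against itertools.zip_longest (the Python 3 spelling of izip_longest). One caveat of this fill-value approach, worth noting: filtering out None in the last chunk also drops any genuine None values in the input.

from itertools import zip_longest

def groups_of(n, iterable):
    args = [iter(iterable)] * n
    for chunk in zip_longest(*args):
        yield [v for v in chunk if v is not None]

print(list(groups_of(2, range(5))))  # -> [[0, 1], [2, 3], [4]]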
MonashBI/arcana
arcana/processor/base.py
Processor.run
def run(self, *pipelines, **kwargs): """ Connects all pipelines to that study's repository and runs them in the same NiPype workflow Parameters ---------- pipeline(s) : Pipeline, ... The pipeline to connect to repository required_outputs : list[set[str]] A set of required outputs for each pipeline. If None then all outputs are assumed to be required subject_ids : list[str] The subset of subject IDs to process. If None all available will be processed. Note this is not a duplication of the study and visit IDs passed to the Study __init__, as they define the scope of the analysis and these simply limit the scope of the current run (e.g. to break the analysis into smaller chunks and run separately). Therefore, if the analysis joins over subjects, then all subjects will be processed and this parameter will be ignored. visit_ids : list[str] The same as 'subject_ids' but for visit IDs session_ids : list[str,str] The same as 'subject_ids' and 'visit_ids', except specifies a set of specific combinations in tuples of (subject ID, visit ID). force : bool | 'all' A flag to force the reprocessing of all sessions in the filter array, regardless of whether the parameters|pipeline used to generate them matches the current ones. NB: if True only the final pipeline will be reprocessed (prerequisite pipelines won't run unless they don't match provenance). To process all prerequisite pipelines 'all' should be passed to force. Returns ------- report : ReportNode The final report node, which can be connected to subsequent pipelines """ if not pipelines: raise ArcanaUsageError("No pipelines provided to {}.run" .format(self)) # Get filter kwargs (NB: in Python 3 they could be in the kwarg list) subject_ids = kwargs.pop('subject_ids', None) visit_ids = kwargs.pop('visit_ids', None) session_ids = kwargs.pop('session_ids', None) clean_work_dir = kwargs.pop('clean_work_dir', self._clean_work_dir_between_runs) required_outputs = kwargs.pop('required_outputs', repeat(None)) # Create name by combining pipelines name = '_'.join(p.name for p in pipelines) # Clean work dir if required if clean_work_dir: workflow_work_dir = op.join(self.work_dir, name) if op.exists(workflow_work_dir): shutil.rmtree(workflow_work_dir) # Trim the end of very large names to avoid problems with # workflow names exceeding system limits. name = name[:WORKFLOW_MAX_NAME_LEN] workflow = pe.Workflow(name=name, base_dir=self.work_dir) # Generate filter array to optionally restrict the run to certain # subject and visit IDs. 
tree = self.study.tree # Create maps from the subject|visit IDs to an index used to represent # them in the filter array subject_inds = {s.id: i for i, s in enumerate(tree.subjects)} visit_inds = {v.id: i for i, v in enumerate(tree.visits)} if not subject_ids and not visit_ids and not session_ids: # No filters applied so create a full filter array filter_array = np.ones((len(subject_inds), len(visit_inds)), dtype=bool) else: # Filters applied so create an empty filter array and populate # from filter lists filter_array = np.zeros((len(subject_inds), len(visit_inds)), dtype=bool) if subject_ids is not None: for subj_id in subject_ids: filter_array[subject_inds[subj_id], :] = True if visit_ids is not None: for visit_id in visit_ids: filter_array[:, visit_inds[visit_id]] = True if session_ids is not None: for subj_id, visit_id in session_ids: filter_array[subject_inds[subj_id], visit_inds[visit_id]] = True if not filter_array.any(): raise ArcanaUsageError( "Provided filters:\n" + (" subject_ids: {}\n".format(', '.join(subject_ids)) if subject_ids is not None else '') + (" visit_ids: {}\n".format(', '.join(visit_ids)) if visit_ids is not None else '') + (" session_ids: {}\n".format(', '.join(session_ids)) if session_ids is not None else '') + "Did not match any sessions in the project:\n" + " subject_ids: {}\n".format(', '.join(subject_inds)) + " visit_ids: {}\n".format(', '.join(visit_inds))) # Stack of pipelines to process in reverse order of required execution stack = OrderedDict() def push_on_stack(pipeline, filt_array, req_outputs): if req_outputs is None: req_outputs = pipeline.output_names if pipeline.name in stack: # Pop pipeline from stack in order to add it to the end of the # stack and ensure it is run before all downstream pipelines prev_pipeline, prev_req_outputs, prev_filt_array = stack.pop( pipeline.name) if pipeline is not prev_pipeline and pipeline != prev_pipeline: raise ArcanaDesignError( "Attempting to run two different pipelines with the " "same name, {} and {}".format(pipeline, prev_pipeline)) # Combined required outputs and filter array with previous # references to the pipeline req_outputs = copy(req_outputs) req_outputs.update(prev_req_outputs) filt_array = filt_array | prev_filt_array # If the pipeline to process contains summary outputs (i.e. 'per- # subject|visit|study' frequency), then we need to "dialate" the # filter array to include IDs across the scope of the study, e.g. # all subjects for per-vist, or all visits for per-subject. 
output_freqs = set(pipeline.output_frequencies) dialated_filt_array = self._dialate_array(filt_array, pipeline.joins) added = dialated_filt_array ^ filt_array if added.any(): filt_array = dialated_filt_array # Invert the index dictionaries to get index-to-ID maps inv_subject_inds = {v: k for k, v in subject_inds.items()} inv_visit_inds = {v: k for k, v in visit_inds.items()} logger.warning( "Dialated filter array used to process '{}' pipeline to " "include {} subject/visit IDs due to its '{}' summary " "outputs ".format( pipeline.name, ', '.join('({},{})'.format(inv_subject_inds[s], inv_visit_inds[v]) for s, v in zip(*np.nonzero(added))), "' and '".join(output_freqs))) stack[pipeline.name] = pipeline, req_outputs, filt_array # Recursively add all prerequisites to stack for prq_getter, prq_req_outputs in pipeline.prerequisites.items(): try: prereq = pipeline.study.pipeline( prq_getter, prq_req_outputs) push_on_stack(prereq, filt_array, prq_req_outputs) except (ArcanaMissingDataException, ArcanaOutputNotProducedException) as e: e.msg += ("{}, which are required as inputs to the '{}' " "pipeline to produce '{}'".format( pipeline.name, "', '".join(req_outputs))) raise e # Add all primary pipelines to the stack along with their prereqs for pipeline, req_outputs in zip(pipelines, required_outputs): push_on_stack(pipeline, filter_array, req_outputs) # Iterate through stack of required pipelines from upstream to # downstream for pipeline, req_outputs, flt_array in reversed(list(stack.values())): try: self._connect_pipeline( pipeline, req_outputs, workflow, subject_inds, visit_inds, flt_array, **kwargs) except ArcanaNoRunRequiredException: logger.info("Not running '{}' pipeline as its outputs " "are already present in the repository" .format(pipeline.name)) # Save complete graph for debugging purposes # workflow.write_graph(graph2use='flat', format='svg') # print('Graph saved in {} directory'.format(os.getcwd())) # Actually run the generated workflow result = workflow.run(plugin=self._plugin) # Reset the cached tree of filesets in the repository as it will # change after the pipeline has run. self.study.clear_caches() return result
python
def run(self, *pipelines, **kwargs): """ Connects all pipelines to that study's repository and runs them in the same NiPype workflow Parameters ---------- pipeline(s) : Pipeline, ... The pipeline to connect to repository required_outputs : list[set[str]] A set of required outputs for each pipeline. If None then all outputs are assumed to be required subject_ids : list[str] The subset of subject IDs to process. If None all available will be processed. Note this is not a duplication of the study and visit IDs passed to the Study __init__, as they define the scope of the analysis and these simply limit the scope of the current run (e.g. to break the analysis into smaller chunks and run separately). Therefore, if the analysis joins over subjects, then all subjects will be processed and this parameter will be ignored. visit_ids : list[str] The same as 'subject_ids' but for visit IDs session_ids : list[str,str] The same as 'subject_ids' and 'visit_ids', except specifies a set of specific combinations in tuples of (subject ID, visit ID). force : bool | 'all' A flag to force the reprocessing of all sessions in the filter array, regardless of whether the parameters|pipeline used to generate them matches the current ones. NB: if True only the final pipeline will be reprocessed (prerequisite pipelines won't run unless they don't match provenance). To process all prerequisite pipelines 'all' should be passed to force. Returns ------- report : ReportNode The final report node, which can be connected to subsequent pipelines """ if not pipelines: raise ArcanaUsageError("No pipelines provided to {}.run" .format(self)) # Get filter kwargs (NB: in Python 3 they could be in the kwarg list) subject_ids = kwargs.pop('subject_ids', None) visit_ids = kwargs.pop('visit_ids', None) session_ids = kwargs.pop('session_ids', None) clean_work_dir = kwargs.pop('clean_work_dir', self._clean_work_dir_between_runs) required_outputs = kwargs.pop('required_outputs', repeat(None)) # Create name by combining pipelines name = '_'.join(p.name for p in pipelines) # Clean work dir if required if clean_work_dir: workflow_work_dir = op.join(self.work_dir, name) if op.exists(workflow_work_dir): shutil.rmtree(workflow_work_dir) # Trim the end of very large names to avoid problems with # workflow names exceeding system limits. name = name[:WORKFLOW_MAX_NAME_LEN] workflow = pe.Workflow(name=name, base_dir=self.work_dir) # Generate filter array to optionally restrict the run to certain # subject and visit IDs. 
tree = self.study.tree # Create maps from the subject|visit IDs to an index used to represent # them in the filter array subject_inds = {s.id: i for i, s in enumerate(tree.subjects)} visit_inds = {v.id: i for i, v in enumerate(tree.visits)} if not subject_ids and not visit_ids and not session_ids: # No filters applied so create a full filter array filter_array = np.ones((len(subject_inds), len(visit_inds)), dtype=bool) else: # Filters applied so create an empty filter array and populate # from filter lists filter_array = np.zeros((len(subject_inds), len(visit_inds)), dtype=bool) if subject_ids is not None: for subj_id in subject_ids: filter_array[subject_inds[subj_id], :] = True if visit_ids is not None: for visit_id in visit_ids: filter_array[:, visit_inds[visit_id]] = True if session_ids is not None: for subj_id, visit_id in session_ids: filter_array[subject_inds[subj_id], visit_inds[visit_id]] = True if not filter_array.any(): raise ArcanaUsageError( "Provided filters:\n" + (" subject_ids: {}\n".format(', '.join(subject_ids)) if subject_ids is not None else '') + (" visit_ids: {}\n".format(', '.join(visit_ids)) if visit_ids is not None else '') + (" session_ids: {}\n".format(', '.join(session_ids)) if session_ids is not None else '') + "Did not match any sessions in the project:\n" + " subject_ids: {}\n".format(', '.join(subject_inds)) + " visit_ids: {}\n".format(', '.join(visit_inds))) # Stack of pipelines to process in reverse order of required execution stack = OrderedDict() def push_on_stack(pipeline, filt_array, req_outputs): if req_outputs is None: req_outputs = pipeline.output_names if pipeline.name in stack: # Pop pipeline from stack in order to add it to the end of the # stack and ensure it is run before all downstream pipelines prev_pipeline, prev_req_outputs, prev_filt_array = stack.pop( pipeline.name) if pipeline is not prev_pipeline and pipeline != prev_pipeline: raise ArcanaDesignError( "Attempting to run two different pipelines with the " "same name, {} and {}".format(pipeline, prev_pipeline)) # Combined required outputs and filter array with previous # references to the pipeline req_outputs = copy(req_outputs) req_outputs.update(prev_req_outputs) filt_array = filt_array | prev_filt_array # If the pipeline to process contains summary outputs (i.e. 'per- # subject|visit|study' frequency), then we need to "dialate" the # filter array to include IDs across the scope of the study, e.g. # all subjects for per-vist, or all visits for per-subject. 
output_freqs = set(pipeline.output_frequencies) dialated_filt_array = self._dialate_array(filt_array, pipeline.joins) added = dialated_filt_array ^ filt_array if added.any(): filt_array = dialated_filt_array # Invert the index dictionaries to get index-to-ID maps inv_subject_inds = {v: k for k, v in subject_inds.items()} inv_visit_inds = {v: k for k, v in visit_inds.items()} logger.warning( "Dialated filter array used to process '{}' pipeline to " "include {} subject/visit IDs due to its '{}' summary " "outputs ".format( pipeline.name, ', '.join('({},{})'.format(inv_subject_inds[s], inv_visit_inds[v]) for s, v in zip(*np.nonzero(added))), "' and '".join(output_freqs))) stack[pipeline.name] = pipeline, req_outputs, filt_array # Recursively add all prerequisites to stack for prq_getter, prq_req_outputs in pipeline.prerequisites.items(): try: prereq = pipeline.study.pipeline( prq_getter, prq_req_outputs) push_on_stack(prereq, filt_array, prq_req_outputs) except (ArcanaMissingDataException, ArcanaOutputNotProducedException) as e: e.msg += ("{}, which are required as inputs to the '{}' " "pipeline to produce '{}'".format( pipeline.name, "', '".join(req_outputs))) raise e # Add all primary pipelines to the stack along with their prereqs for pipeline, req_outputs in zip(pipelines, required_outputs): push_on_stack(pipeline, filter_array, req_outputs) # Iterate through stack of required pipelines from upstream to # downstream for pipeline, req_outputs, flt_array in reversed(list(stack.values())): try: self._connect_pipeline( pipeline, req_outputs, workflow, subject_inds, visit_inds, flt_array, **kwargs) except ArcanaNoRunRequiredException: logger.info("Not running '{}' pipeline as its outputs " "are already present in the repository" .format(pipeline.name)) # Save complete graph for debugging purposes # workflow.write_graph(graph2use='flat', format='svg') # print('Graph saved in {} directory'.format(os.getcwd())) # Actually run the generated workflow result = workflow.run(plugin=self._plugin) # Reset the cached tree of filesets in the repository as it will # change after the pipeline has run. self.study.clear_caches() return result
[ "def", "run", "(", "self", ",", "*", "pipelines", ",", "*", "*", "kwargs", ")", ":", "if", "not", "pipelines", ":", "raise", "ArcanaUsageError", "(", "\"No pipelines provided to {}.run\"", ".", "format", "(", "self", ")", ")", "# Get filter kwargs (NB: in Python 3 they could be in the kwarg list)", "subject_ids", "=", "kwargs", ".", "pop", "(", "'subject_ids'", ",", "None", ")", "visit_ids", "=", "kwargs", ".", "pop", "(", "'visit_ids'", ",", "None", ")", "session_ids", "=", "kwargs", ".", "pop", "(", "'session_ids'", ",", "None", ")", "clean_work_dir", "=", "kwargs", ".", "pop", "(", "'clean_work_dir'", ",", "self", ".", "_clean_work_dir_between_runs", ")", "required_outputs", "=", "kwargs", ".", "pop", "(", "'required_outputs'", ",", "repeat", "(", "None", ")", ")", "# Create name by combining pipelines", "name", "=", "'_'", ".", "join", "(", "p", ".", "name", "for", "p", "in", "pipelines", ")", "# Clean work dir if required", "if", "clean_work_dir", ":", "workflow_work_dir", "=", "op", ".", "join", "(", "self", ".", "work_dir", ",", "name", ")", "if", "op", ".", "exists", "(", "workflow_work_dir", ")", ":", "shutil", ".", "rmtree", "(", "workflow_work_dir", ")", "# Trim the end of very large names to avoid problems with", "# workflow names exceeding system limits.", "name", "=", "name", "[", ":", "WORKFLOW_MAX_NAME_LEN", "]", "workflow", "=", "pe", ".", "Workflow", "(", "name", "=", "name", ",", "base_dir", "=", "self", ".", "work_dir", ")", "# Generate filter array to optionally restrict the run to certain", "# subject and visit IDs.", "tree", "=", "self", ".", "study", ".", "tree", "# Create maps from the subject|visit IDs to an index used to represent", "# them in the filter array", "subject_inds", "=", "{", "s", ".", "id", ":", "i", "for", "i", ",", "s", "in", "enumerate", "(", "tree", ".", "subjects", ")", "}", "visit_inds", "=", "{", "v", ".", "id", ":", "i", "for", "i", ",", "v", "in", "enumerate", "(", "tree", ".", "visits", ")", "}", "if", "not", "subject_ids", "and", "not", "visit_ids", "and", "not", "session_ids", ":", "# No filters applied so create a full filter array", "filter_array", "=", "np", ".", "ones", "(", "(", "len", "(", "subject_inds", ")", ",", "len", "(", "visit_inds", ")", ")", ",", "dtype", "=", "bool", ")", "else", ":", "# Filters applied so create an empty filter array and populate", "# from filter lists", "filter_array", "=", "np", ".", "zeros", "(", "(", "len", "(", "subject_inds", ")", ",", "len", "(", "visit_inds", ")", ")", ",", "dtype", "=", "bool", ")", "if", "subject_ids", "is", "not", "None", ":", "for", "subj_id", "in", "subject_ids", ":", "filter_array", "[", "subject_inds", "[", "subj_id", "]", ",", ":", "]", "=", "True", "if", "visit_ids", "is", "not", "None", ":", "for", "visit_id", "in", "visit_ids", ":", "filter_array", "[", ":", ",", "visit_inds", "[", "visit_id", "]", "]", "=", "True", "if", "session_ids", "is", "not", "None", ":", "for", "subj_id", ",", "visit_id", "in", "session_ids", ":", "filter_array", "[", "subject_inds", "[", "subj_id", "]", ",", "visit_inds", "[", "visit_id", "]", "]", "=", "True", "if", "not", "filter_array", ".", "any", "(", ")", ":", "raise", "ArcanaUsageError", "(", "\"Provided filters:\\n\"", "+", "(", "\" subject_ids: {}\\n\"", ".", "format", "(", "', '", ".", "join", "(", "subject_ids", ")", ")", "if", "subject_ids", "is", "not", "None", "else", "''", ")", "+", "(", "\" visit_ids: {}\\n\"", ".", "format", "(", "', '", ".", "join", "(", "visit_ids", ")", ")", "if", "visit_ids", "is", "not", "None", "else", 
"''", ")", "+", "(", "\" session_ids: {}\\n\"", ".", "format", "(", "', '", ".", "join", "(", "session_ids", ")", ")", "if", "session_ids", "is", "not", "None", "else", "''", ")", "+", "\"Did not match any sessions in the project:\\n\"", "+", "\" subject_ids: {}\\n\"", ".", "format", "(", "', '", ".", "join", "(", "subject_inds", ")", ")", "+", "\" visit_ids: {}\\n\"", ".", "format", "(", "', '", ".", "join", "(", "visit_inds", ")", ")", ")", "# Stack of pipelines to process in reverse order of required execution", "stack", "=", "OrderedDict", "(", ")", "def", "push_on_stack", "(", "pipeline", ",", "filt_array", ",", "req_outputs", ")", ":", "if", "req_outputs", "is", "None", ":", "req_outputs", "=", "pipeline", ".", "output_names", "if", "pipeline", ".", "name", "in", "stack", ":", "# Pop pipeline from stack in order to add it to the end of the", "# stack and ensure it is run before all downstream pipelines", "prev_pipeline", ",", "prev_req_outputs", ",", "prev_filt_array", "=", "stack", ".", "pop", "(", "pipeline", ".", "name", ")", "if", "pipeline", "is", "not", "prev_pipeline", "and", "pipeline", "!=", "prev_pipeline", ":", "raise", "ArcanaDesignError", "(", "\"Attempting to run two different pipelines with the \"", "\"same name, {} and {}\"", ".", "format", "(", "pipeline", ",", "prev_pipeline", ")", ")", "# Combined required outputs and filter array with previous", "# references to the pipeline", "req_outputs", "=", "copy", "(", "req_outputs", ")", "req_outputs", ".", "update", "(", "prev_req_outputs", ")", "filt_array", "=", "filt_array", "|", "prev_filt_array", "# If the pipeline to process contains summary outputs (i.e. 'per-", "# subject|visit|study' frequency), then we need to \"dialate\" the", "# filter array to include IDs across the scope of the study, e.g.", "# all subjects for per-vist, or all visits for per-subject.", "output_freqs", "=", "set", "(", "pipeline", ".", "output_frequencies", ")", "dialated_filt_array", "=", "self", ".", "_dialate_array", "(", "filt_array", ",", "pipeline", ".", "joins", ")", "added", "=", "dialated_filt_array", "^", "filt_array", "if", "added", ".", "any", "(", ")", ":", "filt_array", "=", "dialated_filt_array", "# Invert the index dictionaries to get index-to-ID maps", "inv_subject_inds", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "subject_inds", ".", "items", "(", ")", "}", "inv_visit_inds", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "visit_inds", ".", "items", "(", ")", "}", "logger", ".", "warning", "(", "\"Dialated filter array used to process '{}' pipeline to \"", "\"include {} subject/visit IDs due to its '{}' summary \"", "\"outputs \"", ".", "format", "(", "pipeline", ".", "name", ",", "', '", ".", "join", "(", "'({},{})'", ".", "format", "(", "inv_subject_inds", "[", "s", "]", ",", "inv_visit_inds", "[", "v", "]", ")", "for", "s", ",", "v", "in", "zip", "(", "*", "np", ".", "nonzero", "(", "added", ")", ")", ")", ",", "\"' and '\"", ".", "join", "(", "output_freqs", ")", ")", ")", "stack", "[", "pipeline", ".", "name", "]", "=", "pipeline", ",", "req_outputs", ",", "filt_array", "# Recursively add all prerequisites to stack", "for", "prq_getter", ",", "prq_req_outputs", "in", "pipeline", ".", "prerequisites", ".", "items", "(", ")", ":", "try", ":", "prereq", "=", "pipeline", ".", "study", ".", "pipeline", "(", "prq_getter", ",", "prq_req_outputs", ")", "push_on_stack", "(", "prereq", ",", "filt_array", ",", "prq_req_outputs", ")", "except", "(", "ArcanaMissingDataException", ",", 
"ArcanaOutputNotProducedException", ")", "as", "e", ":", "e", ".", "msg", "+=", "(", "\"{}, which are required as inputs to the '{}' \"", "\"pipeline to produce '{}'\"", ".", "format", "(", "pipeline", ".", "name", ",", "\"', '\"", ".", "join", "(", "req_outputs", ")", ")", ")", "raise", "e", "# Add all primary pipelines to the stack along with their prereqs", "for", "pipeline", ",", "req_outputs", "in", "zip", "(", "pipelines", ",", "required_outputs", ")", ":", "push_on_stack", "(", "pipeline", ",", "filter_array", ",", "req_outputs", ")", "# Iterate through stack of required pipelines from upstream to", "# downstream", "for", "pipeline", ",", "req_outputs", ",", "flt_array", "in", "reversed", "(", "list", "(", "stack", ".", "values", "(", ")", ")", ")", ":", "try", ":", "self", ".", "_connect_pipeline", "(", "pipeline", ",", "req_outputs", ",", "workflow", ",", "subject_inds", ",", "visit_inds", ",", "flt_array", ",", "*", "*", "kwargs", ")", "except", "ArcanaNoRunRequiredException", ":", "logger", ".", "info", "(", "\"Not running '{}' pipeline as its outputs \"", "\"are already present in the repository\"", ".", "format", "(", "pipeline", ".", "name", ")", ")", "# Save complete graph for debugging purposes", "# workflow.write_graph(graph2use='flat', format='svg')", "# print('Graph saved in {} directory'.format(os.getcwd()))", "# Actually run the generated workflow", "result", "=", "workflow", ".", "run", "(", "plugin", "=", "self", ".", "_plugin", ")", "# Reset the cached tree of filesets in the repository as it will", "# change after the pipeline has run.", "self", ".", "study", ".", "clear_caches", "(", ")", "return", "result" ]
Connects all pipelines to that study's repository and runs them in the same NiPype workflow Parameters ---------- pipeline(s) : Pipeline, ... The pipeline to connect to repository required_outputs : list[set[str]] A set of required outputs for each pipeline. If None then all outputs are assumed to be required subject_ids : list[str] The subset of subject IDs to process. If None all available will be processed. Note this is not a duplication of the study and visit IDs passed to the Study __init__, as they define the scope of the analysis and these simply limit the scope of the current run (e.g. to break the analysis into smaller chunks and run separately). Therefore, if the analysis joins over subjects, then all subjects will be processed and this parameter will be ignored. visit_ids : list[str] The same as 'subject_ids' but for visit IDs session_ids : list[str,str] The same as 'subject_ids' and 'visit_ids', except specifies a set of specific combinations in tuples of (subject ID, visit ID). force : bool | 'all' A flag to force the reprocessing of all sessions in the filter array, regardless of whether the parameters|pipeline used to generate them matches the current ones. NB: if True only the final pipeline will be reprocessed (prerequisite pipelines won't run unless they don't match provenance). To process all prerequisite pipelines 'all' should be passed to force. Returns ------- report : ReportNode The final report node, which can be connected to subsequent pipelines
[ "Connects", "all", "pipelines", "to", "that", "study", "s", "repository", "and", "runs", "them", "in", "the", "same", "NiPype", "workflow" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/base.py#L147-L328
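The core bookkeeping in Processor.run is the boolean subject-by-visit filter array: rows are subjects, columns are visits, and each kind of filter list switches on a row, a column, or a single cell. A standalone numpy sketch with invented IDs:

import numpy as np

subject_inds = {"S01": 0, "S02": 1, "S03": 2}
visit_inds = {"V1": 0, "V2": 1}

filter_array = np.zeros((len(subject_inds), len(visit_inds)), dtype=bool)
for subj_id in ["S02"]:                     # subject_ids: whole-row selection
    filter_array[subject_inds[subj_id], :] = True
for subj_id, visit_id in [("S03", "V1")]:   # session_ids: single-cell selection
    filter_array[subject_inds[subj_id], visit_inds[visit_id]] = True

print(filter_array)
# [[False False]
#  [ True  True]
#  [ True False]]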
MonashBI/arcana
arcana/processor/base.py
Processor._connect_pipeline
def _connect_pipeline(self, pipeline, required_outputs, workflow,
                      subject_inds, visit_inds, filter_array, force=False):
        """
        Connects a pipeline to a overarching workflow that sets up iterators
        over subjects|visits present in the repository (if required) and
        repository source and sink nodes

        Parameters
        ----------
        pipeline : Pipeline
            The pipeline to connect
        required_outputs : set[str] | None
            The outputs required to be produced by this pipeline. If None
            all are deemed to be required
        workflow : nipype.pipeline.engine.Workflow
            The overarching workflow to connect the pipeline to
        subject_inds : dct[str, int]
            A mapping of subject ID to row index in the filter array
        visit_inds : dct[str, int]
            A mapping of visit ID to column index in the filter array
        filter_array : 2-D numpy.array[bool]
            A two-dimensional boolean array, where rows correspond to
            subjects and columns correspond to visits in the repository. True
            values represent a combination of subject & visit ID to include
            in the current round of processing. Note that if the 'force'
            flag is not set, sessions won't be reprocessed unless the
            save provenance doesn't match that of the given pipeline.
        force : bool | 'all'
            A flag to force the processing of all sessions in the filter
            array, regardless of whether the parameters|pipeline used
            to generate existing data matches the given pipeline
        """
        if self.reprocess == 'force':
            force = True
        # Close-off construction of the pipeline and created, input and output
        # nodes and provenance dictionary
        pipeline.cap()
        # Prepend prerequisite pipelines to complete workflow if they need
        # to be (re)processed
        final_nodes = []
        # The array that represents the subject/visit pairs for which any
        # prerequisite pipeline will be (re)processed, and which therefore
        # needs to be included in the processing of the current pipeline. Row
        # indices correspond to subjects and column indices visits
        prqs_to_process_array = np.zeros((len(subject_inds), len(visit_inds)),
                                         dtype=bool)
        # The array that represents the subject/visit pairs for which any
        # prerequisite pipeline will be skipped due to missing inputs. Row
        # indices correspond to subjects and column indices visits
        prqs_to_skip_array = np.zeros((len(subject_inds), len(visit_inds)),
                                      dtype=bool)
        for getter_name in pipeline.prerequisites:
            prereq = pipeline.study.pipeline(getter_name)
            if prereq.to_process_array.any():
                final_nodes.append(prereq.node('final'))
                prqs_to_process_array |= prereq.to_process_array
            prqs_to_skip_array |= prereq.to_skip_array
        # Get list of sessions that need to be processed (i.e. if
        # they don't contain the outputs of this pipeline)
        to_process_array, to_protect_array, to_skip_array = self._to_process(
            pipeline, required_outputs, prqs_to_process_array,
            prqs_to_skip_array, filter_array, subject_inds, visit_inds, force)
        # Store the arrays signifying which nodes to process, protect or skip
        # so they can be passed to downstream pipelines
        pipeline.to_process_array = to_process_array
        pipeline.to_protect_array = to_protect_array
        pipeline.to_skip_array = to_skip_array
        # Check to see if there are any sessions to process
        if not to_process_array.any():
            raise ArcanaNoRunRequiredException(
                "No sessions to process for '{}' pipeline"
                .format(pipeline.name))
        # Set up workflow to run the pipeline, loading and saving from the
        # repository
        workflow.add_nodes([pipeline._workflow])
        # If prerequisite pipelines need to be processed, connect their
        # "final" nodes to the initial node of this pipeline to ensure that
        # they are all processed before this pipeline is run.
        if final_nodes:
            prereqs = pipeline.add('prereqs', Merge(len(final_nodes)))
            for i, final_node in enumerate(final_nodes, start=1):
                workflow.connect(final_node, 'out', prereqs, 'in{}'.format(i))
        else:
            prereqs = None
        # Construct iterator structure over subjects and sessions to be
        # processed
        iter_nodes = self._iterate(pipeline, to_process_array, subject_inds,
                                   visit_inds)
        sources = {}
        # Loop through each frequency present in the pipeline inputs and
        # create a corresponding source node
        for freq in pipeline.input_frequencies:
            try:
                inputs = list(pipeline.frequency_inputs(freq))
            except ArcanaMissingDataException as e:
                raise ArcanaMissingDataException(
                    str(e) + ", which is required for pipeline '{}'".format(
                        pipeline.name))
            inputnode = pipeline.inputnode(freq)
            sources[freq] = source = pipeline.add(
                '{}_source'.format(freq),
                RepositorySource(
                    i.collection for i in inputs),
                inputs=({'prereqs': (prereqs, 'out')}
                        if prereqs is not None else {}))
            # Connect iter_nodes to source and input nodes
            for iterator in pipeline.iterators(freq):
                pipeline.connect(iter_nodes[iterator], iterator, source,
                                 iterator)
                pipeline.connect(source, iterator, inputnode, iterator)
            for input in inputs:  # @ReservedAssignment
                pipeline.connect(source, input.suffixed_name, inputnode,
                                 input.name)
        deiter_nodes = {}

        def deiter_node_sort_key(it):
            """
            If there are two iter_nodes (i.e. both subject and visit ID) and
            one depends on the other (i.e. if the visit IDs per subject
            vary and vice-versa) we need to ensure that the dependent
            iterator is deiterated (joined) first.
            """
            return iter_nodes[it].itersource is None
        # Connect all outputs to the repository sink, creating a new sink for
        # each frequency level (i.e 'per_session', 'per_subject', 'per_visit',
        # or 'per_study')
        for freq in pipeline.output_frequencies:
            outputs = list(pipeline.frequency_outputs(freq))
            if pipeline.iterators(freq) - pipeline.iterators():
                raise ArcanaDesignError(
                    "Doesn't make sense to output '{}', which are of '{}' "
                    "frequency, when the pipeline only iterates over '{}'"
                    .format("', '".join(o.name for o in outputs), freq,
                            "', '".join(pipeline.iterators())))
            outputnode = pipeline.outputnode(freq)
            # Connect filesets/fields to sink to sink node, skipping outputs
            # that are study inputs
            to_connect = {o.suffixed_name: (outputnode, o.name)
                          for o in outputs if o.is_spec}
            # Connect iterators to sink node
            to_connect.update(
                {i: (iter_nodes[i], i) for i in pipeline.iterators()})
            # Connect checksums/values from sources to sink node in order to
            # save in provenance, joining where necessary
            for input_freq in pipeline.input_frequencies:
                checksums_to_connect = [
                    i.checksum_suffixed_name
                    for i in pipeline.frequency_inputs(input_freq)]
                if not checksums_to_connect:
                    # Rare case of a pipeline with no inputs only iter_nodes
                    # that will only occur in unittests in all likelihood
                    continue
                # Loop over iterators that need to be joined, i.e. that are
                # present in the input frequency but not the output frequency
                # and create join nodes
                source = sources[input_freq]
                for iterator in (pipeline.iterators(input_freq) -
                                 pipeline.iterators(freq)):
                    join = pipeline.add(
                        '{}_to_{}_{}_checksum_join'.format(
                            input_freq, freq, iterator),
                        IdentityInterface(
                            checksums_to_connect),
                        inputs={
                            tc: (source, tc) for tc in checksums_to_connect},
                        joinsource=iterator,
                        joinfield=checksums_to_connect)
                    source = join
                to_connect.update(
                    {tc: (source, tc) for tc in checksums_to_connect})
            # Add sink node
            sink = pipeline.add(
                '{}_sink'.format(freq),
                RepositorySink(
                    (o.collection for o in outputs), pipeline),
                inputs=to_connect)
            # "De-iterate" (join) over iterators to get back to single child
            # node by the time we connect to the final node of the pipeline Set
            # the sink and subject_id as the default deiterator if there are no
            # deiterates (i.e. per_study) or to use as the upstream node to
            # connect the first deiterator for every frequency
            deiter_nodes[freq] = sink  # for per_study the "deiterator" == sink
            for iterator in sorted(pipeline.iterators(freq),
                                   key=deiter_node_sort_key):
                # Connect to previous deiterator or sink
                # NB: we only need to keep a reference to the last one in the
                # chain in order to connect with the "final" node, so we can
                # overwrite the entry in the 'deiter_nodes' dict
                deiter_nodes[freq] = pipeline.add(
                    '{}_{}_deiter'.format(freq, iterator),
                    IdentityInterface(
                        ['checksums']),
                    inputs={
                        'checksums': (deiter_nodes[freq], 'checksums')},
                    joinsource=iterator,
                    joinfield='checksums')
        # Create a final node, which is used to connect with downstream
        # pipelines
        pipeline.add(
            'final',
            Merge(
                len(deiter_nodes)),
            inputs={
                'in{}'.format(i): (di, 'checksums')
                for i, di in enumerate(deiter_nodes.values(), start=1)})
python
def _connect_pipeline(self, pipeline, required_outputs, workflow,
                      subject_inds, visit_inds, filter_array, force=False):
        """
        Connects a pipeline to a overarching workflow that sets up iterators
        over subjects|visits present in the repository (if required) and
        repository source and sink nodes

        Parameters
        ----------
        pipeline : Pipeline
            The pipeline to connect
        required_outputs : set[str] | None
            The outputs required to be produced by this pipeline. If None
            all are deemed to be required
        workflow : nipype.pipeline.engine.Workflow
            The overarching workflow to connect the pipeline to
        subject_inds : dct[str, int]
            A mapping of subject ID to row index in the filter array
        visit_inds : dct[str, int]
            A mapping of visit ID to column index in the filter array
        filter_array : 2-D numpy.array[bool]
            A two-dimensional boolean array, where rows correspond to
            subjects and columns correspond to visits in the repository. True
            values represent a combination of subject & visit ID to include
            in the current round of processing. Note that if the 'force'
            flag is not set, sessions won't be reprocessed unless the
            save provenance doesn't match that of the given pipeline.
        force : bool | 'all'
            A flag to force the processing of all sessions in the filter
            array, regardless of whether the parameters|pipeline used
            to generate existing data matches the given pipeline
        """
        if self.reprocess == 'force':
            force = True
        # Close-off construction of the pipeline and created, input and output
        # nodes and provenance dictionary
        pipeline.cap()
        # Prepend prerequisite pipelines to complete workflow if they need
        # to be (re)processed
        final_nodes = []
        # The array that represents the subject/visit pairs for which any
        # prerequisite pipeline will be (re)processed, and which therefore
        # needs to be included in the processing of the current pipeline. Row
        # indices correspond to subjects and column indices visits
        prqs_to_process_array = np.zeros((len(subject_inds), len(visit_inds)),
                                         dtype=bool)
        # The array that represents the subject/visit pairs for which any
        # prerequisite pipeline will be skipped due to missing inputs. Row
        # indices correspond to subjects and column indices visits
        prqs_to_skip_array = np.zeros((len(subject_inds), len(visit_inds)),
                                      dtype=bool)
        for getter_name in pipeline.prerequisites:
            prereq = pipeline.study.pipeline(getter_name)
            if prereq.to_process_array.any():
                final_nodes.append(prereq.node('final'))
                prqs_to_process_array |= prereq.to_process_array
            prqs_to_skip_array |= prereq.to_skip_array
        # Get list of sessions that need to be processed (i.e. if
        # they don't contain the outputs of this pipeline)
        to_process_array, to_protect_array, to_skip_array = self._to_process(
            pipeline, required_outputs, prqs_to_process_array,
            prqs_to_skip_array, filter_array, subject_inds, visit_inds, force)
        # Store the arrays signifying which nodes to process, protect or skip
        # so they can be passed to downstream pipelines
        pipeline.to_process_array = to_process_array
        pipeline.to_protect_array = to_protect_array
        pipeline.to_skip_array = to_skip_array
        # Check to see if there are any sessions to process
        if not to_process_array.any():
            raise ArcanaNoRunRequiredException(
                "No sessions to process for '{}' pipeline"
                .format(pipeline.name))
        # Set up workflow to run the pipeline, loading and saving from the
        # repository
        workflow.add_nodes([pipeline._workflow])
        # If prerequisite pipelines need to be processed, connect their
        # "final" nodes to the initial node of this pipeline to ensure that
        # they are all processed before this pipeline is run.
        if final_nodes:
            prereqs = pipeline.add('prereqs', Merge(len(final_nodes)))
            for i, final_node in enumerate(final_nodes, start=1):
                workflow.connect(final_node, 'out', prereqs, 'in{}'.format(i))
        else:
            prereqs = None
        # Construct iterator structure over subjects and sessions to be
        # processed
        iter_nodes = self._iterate(pipeline, to_process_array, subject_inds,
                                   visit_inds)
        sources = {}
        # Loop through each frequency present in the pipeline inputs and
        # create a corresponding source node
        for freq in pipeline.input_frequencies:
            try:
                inputs = list(pipeline.frequency_inputs(freq))
            except ArcanaMissingDataException as e:
                raise ArcanaMissingDataException(
                    str(e) + ", which is required for pipeline '{}'".format(
                        pipeline.name))
            inputnode = pipeline.inputnode(freq)
            sources[freq] = source = pipeline.add(
                '{}_source'.format(freq),
                RepositorySource(
                    i.collection for i in inputs),
                inputs=({'prereqs': (prereqs, 'out')}
                        if prereqs is not None else {}))
            # Connect iter_nodes to source and input nodes
            for iterator in pipeline.iterators(freq):
                pipeline.connect(iter_nodes[iterator], iterator, source,
                                 iterator)
                pipeline.connect(source, iterator, inputnode, iterator)
            for input in inputs:  # @ReservedAssignment
                pipeline.connect(source, input.suffixed_name, inputnode,
                                 input.name)
        deiter_nodes = {}

        def deiter_node_sort_key(it):
            """
            If there are two iter_nodes (i.e. both subject and visit ID) and
            one depends on the other (i.e. if the visit IDs per subject
            vary and vice-versa) we need to ensure that the dependent
            iterator is deiterated (joined) first.
            """
            return iter_nodes[it].itersource is None
        # Connect all outputs to the repository sink, creating a new sink for
        # each frequency level (i.e 'per_session', 'per_subject', 'per_visit',
        # or 'per_study')
        for freq in pipeline.output_frequencies:
            outputs = list(pipeline.frequency_outputs(freq))
            if pipeline.iterators(freq) - pipeline.iterators():
                raise ArcanaDesignError(
                    "Doesn't make sense to output '{}', which are of '{}' "
                    "frequency, when the pipeline only iterates over '{}'"
                    .format("', '".join(o.name for o in outputs), freq,
                            "', '".join(pipeline.iterators())))
            outputnode = pipeline.outputnode(freq)
            # Connect filesets/fields to sink to sink node, skipping outputs
            # that are study inputs
            to_connect = {o.suffixed_name: (outputnode, o.name)
                          for o in outputs if o.is_spec}
            # Connect iterators to sink node
            to_connect.update(
                {i: (iter_nodes[i], i) for i in pipeline.iterators()})
            # Connect checksums/values from sources to sink node in order to
            # save in provenance, joining where necessary
            for input_freq in pipeline.input_frequencies:
                checksums_to_connect = [
                    i.checksum_suffixed_name
                    for i in pipeline.frequency_inputs(input_freq)]
                if not checksums_to_connect:
                    # Rare case of a pipeline with no inputs only iter_nodes
                    # that will only occur in unittests in all likelihood
                    continue
                # Loop over iterators that need to be joined, i.e. that are
                # present in the input frequency but not the output frequency
                # and create join nodes
                source = sources[input_freq]
                for iterator in (pipeline.iterators(input_freq) -
                                 pipeline.iterators(freq)):
                    join = pipeline.add(
                        '{}_to_{}_{}_checksum_join'.format(
                            input_freq, freq, iterator),
                        IdentityInterface(
                            checksums_to_connect),
                        inputs={
                            tc: (source, tc) for tc in checksums_to_connect},
                        joinsource=iterator,
                        joinfield=checksums_to_connect)
                    source = join
                to_connect.update(
                    {tc: (source, tc) for tc in checksums_to_connect})
            # Add sink node
            sink = pipeline.add(
                '{}_sink'.format(freq),
                RepositorySink(
                    (o.collection for o in outputs), pipeline),
                inputs=to_connect)
            # "De-iterate" (join) over iterators to get back to single child
            # node by the time we connect to the final node of the pipeline Set
            # the sink and subject_id as the default deiterator if there are no
            # deiterates (i.e. per_study) or to use as the upstream node to
            # connect the first deiterator for every frequency
            deiter_nodes[freq] = sink  # for per_study the "deiterator" == sink
            for iterator in sorted(pipeline.iterators(freq),
                                   key=deiter_node_sort_key):
                # Connect to previous deiterator or sink
                # NB: we only need to keep a reference to the last one in the
                # chain in order to connect with the "final" node, so we can
                # overwrite the entry in the 'deiter_nodes' dict
                deiter_nodes[freq] = pipeline.add(
                    '{}_{}_deiter'.format(freq, iterator),
                    IdentityInterface(
                        ['checksums']),
                    inputs={
                        'checksums': (deiter_nodes[freq], 'checksums')},
                    joinsource=iterator,
                    joinfield='checksums')
        # Create a final node, which is used to connect with downstream
        # pipelines
        pipeline.add(
            'final',
            Merge(
                len(deiter_nodes)),
            inputs={
                'in{}'.format(i): (di, 'checksums')
                for i, di in enumerate(deiter_nodes.values(), start=1)})
[ "def", "_connect_pipeline", "(", "self", ",", "pipeline", ",", "required_outputs", ",", "workflow", ",", "subject_inds", ",", "visit_inds", ",", "filter_array", ",", "force", "=", "False", ")", ":", "if", "self", ".", "reprocess", "==", "'force'", ":", "force", "=", "True", "# Close-off construction of the pipeline and created, input and output", "# nodes and provenance dictionary", "pipeline", ".", "cap", "(", ")", "# Prepend prerequisite pipelines to complete workflow if they need", "# to be (re)processed", "final_nodes", "=", "[", "]", "# The array that represents the subject/visit pairs for which any", "# prerequisite pipeline will be (re)processed, and which therefore", "# needs to be included in the processing of the current pipeline. Row", "# indices correspond to subjects and column indices visits", "prqs_to_process_array", "=", "np", ".", "zeros", "(", "(", "len", "(", "subject_inds", ")", ",", "len", "(", "visit_inds", ")", ")", ",", "dtype", "=", "bool", ")", "# The array that represents the subject/visit pairs for which any", "# prerequisite pipeline will be skipped due to missing inputs. Row", "# indices correspond to subjects and column indices visits", "prqs_to_skip_array", "=", "np", ".", "zeros", "(", "(", "len", "(", "subject_inds", ")", ",", "len", "(", "visit_inds", ")", ")", ",", "dtype", "=", "bool", ")", "for", "getter_name", "in", "pipeline", ".", "prerequisites", ":", "prereq", "=", "pipeline", ".", "study", ".", "pipeline", "(", "getter_name", ")", "if", "prereq", ".", "to_process_array", ".", "any", "(", ")", ":", "final_nodes", ".", "append", "(", "prereq", ".", "node", "(", "'final'", ")", ")", "prqs_to_process_array", "|=", "prereq", ".", "to_process_array", "prqs_to_skip_array", "|=", "prereq", ".", "to_skip_array", "# Get list of sessions that need to be processed (i.e. 
if", "# they don't contain the outputs of this pipeline)", "to_process_array", ",", "to_protect_array", ",", "to_skip_array", "=", "self", ".", "_to_process", "(", "pipeline", ",", "required_outputs", ",", "prqs_to_process_array", ",", "prqs_to_skip_array", ",", "filter_array", ",", "subject_inds", ",", "visit_inds", ",", "force", ")", "# Store the arrays signifying which nodes to process, protect or skip", "# so they can be passed to downstream pipelines", "pipeline", ".", "to_process_array", "=", "to_process_array", "pipeline", ".", "to_protect_array", "=", "to_protect_array", "pipeline", ".", "to_skip_array", "=", "to_skip_array", "# Check to see if there are any sessions to process", "if", "not", "to_process_array", ".", "any", "(", ")", ":", "raise", "ArcanaNoRunRequiredException", "(", "\"No sessions to process for '{}' pipeline\"", ".", "format", "(", "pipeline", ".", "name", ")", ")", "# Set up workflow to run the pipeline, loading and saving from the", "# repository", "workflow", ".", "add_nodes", "(", "[", "pipeline", ".", "_workflow", "]", ")", "# If prerequisite pipelines need to be processed, connect their", "# \"final\" nodes to the initial node of this pipeline to ensure that", "# they are all processed before this pipeline is run.", "if", "final_nodes", ":", "prereqs", "=", "pipeline", ".", "add", "(", "'prereqs'", ",", "Merge", "(", "len", "(", "final_nodes", ")", ")", ")", "for", "i", ",", "final_node", "in", "enumerate", "(", "final_nodes", ",", "start", "=", "1", ")", ":", "workflow", ".", "connect", "(", "final_node", ",", "'out'", ",", "prereqs", ",", "'in{}'", ".", "format", "(", "i", ")", ")", "else", ":", "prereqs", "=", "None", "# Construct iterator structure over subjects and sessions to be", "# processed", "iter_nodes", "=", "self", ".", "_iterate", "(", "pipeline", ",", "to_process_array", ",", "subject_inds", ",", "visit_inds", ")", "sources", "=", "{", "}", "# Loop through each frequency present in the pipeline inputs and", "# create a corresponding source node", "for", "freq", "in", "pipeline", ".", "input_frequencies", ":", "try", ":", "inputs", "=", "list", "(", "pipeline", ".", "frequency_inputs", "(", "freq", ")", ")", "except", "ArcanaMissingDataException", "as", "e", ":", "raise", "ArcanaMissingDataException", "(", "str", "(", "e", ")", "+", "\", which is required for pipeline '{}'\"", ".", "format", "(", "pipeline", ".", "name", ")", ")", "inputnode", "=", "pipeline", ".", "inputnode", "(", "freq", ")", "sources", "[", "freq", "]", "=", "source", "=", "pipeline", ".", "add", "(", "'{}_source'", ".", "format", "(", "freq", ")", ",", "RepositorySource", "(", "i", ".", "collection", "for", "i", "in", "inputs", ")", ",", "inputs", "=", "(", "{", "'prereqs'", ":", "(", "prereqs", ",", "'out'", ")", "}", "if", "prereqs", "is", "not", "None", "else", "{", "}", ")", ")", "# Connect iter_nodes to source and input nodes", "for", "iterator", "in", "pipeline", ".", "iterators", "(", "freq", ")", ":", "pipeline", ".", "connect", "(", "iter_nodes", "[", "iterator", "]", ",", "iterator", ",", "source", ",", "iterator", ")", "pipeline", ".", "connect", "(", "source", ",", "iterator", ",", "inputnode", ",", "iterator", ")", "for", "input", "in", "inputs", ":", "# @ReservedAssignment", "pipeline", ".", "connect", "(", "source", ",", "input", ".", "suffixed_name", ",", "inputnode", ",", "input", ".", "name", ")", "deiter_nodes", "=", "{", "}", "def", "deiter_node_sort_key", "(", "it", ")", ":", "\"\"\"\n If there are two iter_nodes (i.e. 
both subject and visit ID) and\n one depends on the other (i.e. if the visit IDs per subject\n vary and vice-versa) we need to ensure that the dependent\n iterator is deiterated (joined) first.\n \"\"\"", "return", "iter_nodes", "[", "it", "]", ".", "itersource", "is", "None", "# Connect all outputs to the repository sink, creating a new sink for", "# each frequency level (i.e 'per_session', 'per_subject', 'per_visit',", "# or 'per_study')", "for", "freq", "in", "pipeline", ".", "output_frequencies", ":", "outputs", "=", "list", "(", "pipeline", ".", "frequency_outputs", "(", "freq", ")", ")", "if", "pipeline", ".", "iterators", "(", "freq", ")", "-", "pipeline", ".", "iterators", "(", ")", ":", "raise", "ArcanaDesignError", "(", "\"Doesn't make sense to output '{}', which are of '{}' \"", "\"frequency, when the pipeline only iterates over '{}'\"", ".", "format", "(", "\"', '\"", ".", "join", "(", "o", ".", "name", "for", "o", "in", "outputs", ")", ",", "freq", ",", "\"', '\"", ".", "join", "(", "pipeline", ".", "iterators", "(", ")", ")", ")", ")", "outputnode", "=", "pipeline", ".", "outputnode", "(", "freq", ")", "# Connect filesets/fields to sink to sink node, skipping outputs", "# that are study inputs", "to_connect", "=", "{", "o", ".", "suffixed_name", ":", "(", "outputnode", ",", "o", ".", "name", ")", "for", "o", "in", "outputs", "if", "o", ".", "is_spec", "}", "# Connect iterators to sink node", "to_connect", ".", "update", "(", "{", "i", ":", "(", "iter_nodes", "[", "i", "]", ",", "i", ")", "for", "i", "in", "pipeline", ".", "iterators", "(", ")", "}", ")", "# Connect checksums/values from sources to sink node in order to", "# save in provenance, joining where necessary", "for", "input_freq", "in", "pipeline", ".", "input_frequencies", ":", "checksums_to_connect", "=", "[", "i", ".", "checksum_suffixed_name", "for", "i", "in", "pipeline", ".", "frequency_inputs", "(", "input_freq", ")", "]", "if", "not", "checksums_to_connect", ":", "# Rare case of a pipeline with no inputs only iter_nodes", "# that will only occur in unittests in all likelihood", "continue", "# Loop over iterators that need to be joined, i.e. that are", "# present in the input frequency but not the output frequency", "# and create join nodes", "source", "=", "sources", "[", "input_freq", "]", "for", "iterator", "in", "(", "pipeline", ".", "iterators", "(", "input_freq", ")", "-", "pipeline", ".", "iterators", "(", "freq", ")", ")", ":", "join", "=", "pipeline", ".", "add", "(", "'{}_to_{}_{}_checksum_join'", ".", "format", "(", "input_freq", ",", "freq", ",", "iterator", ")", ",", "IdentityInterface", "(", "checksums_to_connect", ")", ",", "inputs", "=", "{", "tc", ":", "(", "source", ",", "tc", ")", "for", "tc", "in", "checksums_to_connect", "}", ",", "joinsource", "=", "iterator", ",", "joinfield", "=", "checksums_to_connect", ")", "source", "=", "join", "to_connect", ".", "update", "(", "{", "tc", ":", "(", "source", ",", "tc", ")", "for", "tc", "in", "checksums_to_connect", "}", ")", "# Add sink node", "sink", "=", "pipeline", ".", "add", "(", "'{}_sink'", ".", "format", "(", "freq", ")", ",", "RepositorySink", "(", "(", "o", ".", "collection", "for", "o", "in", "outputs", ")", ",", "pipeline", ")", ",", "inputs", "=", "to_connect", ")", "# \"De-iterate\" (join) over iterators to get back to single child", "# node by the time we connect to the final node of the pipeline Set", "# the sink and subject_id as the default deiterator if there are no", "# deiterates (i.e. 
per_study) or to use as the upstream node to", "# connect the first deiterator for every frequency", "deiter_nodes", "[", "freq", "]", "=", "sink", "# for per_study the \"deiterator\" == sink", "for", "iterator", "in", "sorted", "(", "pipeline", ".", "iterators", "(", "freq", ")", ",", "key", "=", "deiter_node_sort_key", ")", ":", "# Connect to previous deiterator or sink", "# NB: we only need to keep a reference to the last one in the", "# chain in order to connect with the \"final\" node, so we can", "# overwrite the entry in the 'deiter_nodes' dict", "deiter_nodes", "[", "freq", "]", "=", "pipeline", ".", "add", "(", "'{}_{}_deiter'", ".", "format", "(", "freq", ",", "iterator", ")", ",", "IdentityInterface", "(", "[", "'checksums'", "]", ")", ",", "inputs", "=", "{", "'checksums'", ":", "(", "deiter_nodes", "[", "freq", "]", ",", "'checksums'", ")", "}", ",", "joinsource", "=", "iterator", ",", "joinfield", "=", "'checksums'", ")", "# Create a final node, which is used to connect with downstream", "# pipelines", "pipeline", ".", "add", "(", "'final'", ",", "Merge", "(", "len", "(", "deiter_nodes", ")", ")", ",", "inputs", "=", "{", "'in{}'", ".", "format", "(", "i", ")", ":", "(", "di", ",", "'checksums'", ")", "for", "i", ",", "di", "in", "enumerate", "(", "deiter_nodes", ".", "values", "(", ")", ",", "start", "=", "1", ")", "}", ")" ]
Connects a pipeline to a overarching workflow that sets up iterators
over subjects|visits present in the repository (if required) and
repository source and sink nodes

Parameters
----------
pipeline : Pipeline
    The pipeline to connect
required_outputs : set[str] | None
    The outputs required to be produced by this pipeline. If None
    all are deemed to be required
workflow : nipype.pipeline.engine.Workflow
    The overarching workflow to connect the pipeline to
subject_inds : dct[str, int]
    A mapping of subject ID to row index in the filter array
visit_inds : dct[str, int]
    A mapping of visit ID to column index in the filter array
filter_array : 2-D numpy.array[bool]
    A two-dimensional boolean array, where rows correspond to
    subjects and columns correspond to visits in the repository. True
    values represent a combination of subject & visit ID to include
    in the current round of processing. Note that if the 'force'
    flag is not set, sessions won't be reprocessed unless the
    save provenance doesn't match that of the given pipeline.
force : bool | 'all'
    A flag to force the processing of all sessions in the filter
    array, regardless of whether the parameters|pipeline used
    to generate existing data matches the given pipeline
[ "Connects", "a", "pipeline", "to", "a", "overarching", "workflow", "that", "sets", "up", "iterators", "over", "subjects|visits", "present", "in", "the", "repository", "(", "if", "required", ")", "and", "repository", "source", "and", "sink", "nodes" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/base.py#L330-L536
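Editorial note on the `_connect_pipeline` record above: the method's session bookkeeping is plain boolean-array logic, with `subject_inds` mapping subject IDs to rows and `visit_inds` mapping visit IDs to columns, and the `to_process` arrays of prerequisite pipelines OR-combined before `_to_process` is consulted. The sketch below replays that bookkeeping with numpy outside of arcana; the subject/visit IDs and the `prereq_arrays` values are invented for illustration and are not part of the arcana API.

import numpy as np

# Hypothetical subject/visit index maps; in arcana these are derived from
# the repository tree, so the IDs here are illustrative only.
subject_inds = {'SUBJ01': 0, 'SUBJ02': 1, 'SUBJ03': 2}
visit_inds = {'VISIT1': 0, 'VISIT2': 1}

# Filter array: True where a subject/visit pair is part of the current run.
filter_array = np.ones((len(subject_inds), len(visit_inds)), dtype=bool)

# Stand-ins for the to_process arrays of two prerequisite pipelines.
prereq_arrays = [
    np.array([[True, False], [False, False], [False, False]]),
    np.array([[False, False], [False, True], [False, False]])]

# OR-combine them, mirroring the '|=' accumulation in _connect_pipeline.
prqs_to_process_array = np.zeros((len(subject_inds), len(visit_inds)),
                                 dtype=bool)
for arr in prereq_arrays:
    if arr.any():
        prqs_to_process_array |= arr

# Candidate sessions: prerequisite reruns restricted to the filter array.
candidates = prqs_to_process_array & filter_array
inv_subj = {v: k for k, v in subject_inds.items()}
inv_visit = {v: k for k, v in visit_inds.items()}
for row, col in zip(*candidates.nonzero()):
    print(inv_subj[row], inv_visit[col])  # SUBJ01 VISIT1, SUBJ02 VISIT2

In the method itself this candidate set is only a starting point: `_to_process` then compares stored provenance against the pipeline before deciding which sessions actually need to be rerun.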
MonashBI/arcana
arcana/processor/base.py
Processor._iterate
def _iterate(self, pipeline, to_process_array, subject_inds, visit_inds):
        """
        Generate nodes that iterate over subjects and visits in the study that
        need to be processed by the pipeline

        Parameters
        ----------
        pipeline : Pipeline
            The pipeline to add iter_nodes for
        to_process_array : 2-D numpy.array[bool]
            A two-dimensional boolean array, where rows correspond to
            subjects and columns correspond to visits in the repository. True
            values represent a combination of subject & visit ID to process
            the session for
        subject_inds : dct[str, int]
            A mapping of subject ID to row index in the 'to_process' array
        visit_inds : dct[str, int]
            A mapping of visit ID to column index in the 'to_process' array

        Returns
        -------
        iter_nodes : dict[str, Node]
            A dictionary containing the nodes to iterate over all
            subject/visit IDs to process for each input frequency
        """
        # Check to see whether the subject/visit IDs to process (as specified
        # by the 'to_process' array) can be factorized into indepdent nodes,
        # i.e. all subjects to process have the same visits to process and
        # vice-versa.
        factorizable = True
        if len(list(pipeline.iterators())) == 2:
            nz_rows = to_process_array[to_process_array.any(axis=1), :]
            ref_row = nz_rows[0, :]
            factorizable = all((r == ref_row).all() for r in nz_rows)
        # If the subject/visit IDs to process cannot be factorized into
        # indepedent iterators, determine which to make make dependent on the
        # other in order to avoid/minimise duplicatation of download attempts
        dependent = None
        if not factorizable:
            input_freqs = list(pipeline.input_frequencies)
            # By default pick iterator the one with the most IDs to
            # iterate to be the dependent in order to reduce the number of
            # nodes created and any duplication of download attempts across
            # the nodes (if both 'per_visit' and 'per_subject' inputs are
            # required
            num_subjs, num_visits = nz_rows[:, nz_rows.any(axis=0)].shape
            if num_subjs > num_visits:
                dependent = self.study.SUBJECT_ID
            else:
                dependent = self.study.VISIT_ID
            if 'per_visit' in input_freqs:
                if 'per_subject' in input_freqs:
                    logger.warning(
                        "Cannot factorize sessions to process into independent"
                        " subject and visit iterators and both 'per_visit' and"
                        " 'per_subject' inputs are used by pipeline therefore"
                        " per_{} inputs may be cached twice".format(
                            dependent[:-3]))
                else:
                    dependent = self.study.SUBJECT_ID
            elif 'per_subject' in input_freqs:
                dependent = self.study.VISIT_ID
        # Invert the index dictionaries to get index-to-ID maps
        inv_subj_inds = {v: k for k, v in subject_inds.items()}
        inv_visit_inds = {v: k for k, v in visit_inds.items()}
        # Create iterator for subjects
        iter_nodes = {}
        if self.study.SUBJECT_ID in pipeline.iterators():
            fields = [self.study.SUBJECT_ID]
            if dependent == self.study.SUBJECT_ID:
                fields.append(self.study.VISIT_ID)
            # Add iterator node named after subject iterator
            subj_it = pipeline.add(self.study.SUBJECT_ID,
                                   IdentityInterface(fields))
            if dependent == self.study.SUBJECT_ID:
                # Subjects iterator is dependent on visit iterator (because of
                # non-factorizable IDs)
                subj_it.itersource = ('{}_{}'.format(pipeline.name,
                                                     self.study.VISIT_ID),
                                      self.study.VISIT_ID)
                subj_it.iterables = [(
                    self.study.SUBJECT_ID,
                    {inv_visit_inds[n]: [inv_subj_inds[m]
                                         for m in col.nonzero()[0]]
                     for n, col in enumerate(to_process_array.T)})]
            else:
                subj_it.iterables = (
                    self.study.SUBJECT_ID,
                    [inv_subj_inds[n]
                     for n in to_process_array.any(axis=1).nonzero()[0]])
            iter_nodes[self.study.SUBJECT_ID] = subj_it
        # Create iterator for visits
        if self.study.VISIT_ID in pipeline.iterators():
            fields = [self.study.VISIT_ID]
            if dependent == self.study.VISIT_ID:
                fields.append(self.study.SUBJECT_ID)
            # Add iterator node named after visit iterator
            visit_it = pipeline.add(self.study.VISIT_ID,
                                    IdentityInterface(fields))
            if dependent == self.study.VISIT_ID:
                visit_it.itersource = ('{}_{}'.format(pipeline.name,
                                                      self.study.SUBJECT_ID),
                                       self.study.SUBJECT_ID)
                visit_it.iterables = [(
                    self.study.VISIT_ID,
                    {inv_subj_inds[m]: [inv_visit_inds[n]
                                        for n in row.nonzero()[0]]
                     for m, row in enumerate(to_process_array)})]
            else:
                visit_it.iterables = (
                    self.study.VISIT_ID,
                    [inv_visit_inds[n]
                     for n in to_process_array.any(axis=0).nonzero()[0]])
            iter_nodes[self.study.VISIT_ID] = visit_it
        if dependent == self.study.SUBJECT_ID:
            pipeline.connect(visit_it, self.study.VISIT_ID,
                             subj_it, self.study.VISIT_ID)
        if dependent == self.study.VISIT_ID:
            pipeline.connect(subj_it, self.study.SUBJECT_ID,
                             visit_it, self.study.SUBJECT_ID)
        return iter_nodes
python
def _iterate(self, pipeline, to_process_array, subject_inds, visit_inds):
        """
        Generate nodes that iterate over subjects and visits in the study that
        need to be processed by the pipeline

        Parameters
        ----------
        pipeline : Pipeline
            The pipeline to add iter_nodes for
        to_process_array : 2-D numpy.array[bool]
            A two-dimensional boolean array, where rows correspond to
            subjects and columns correspond to visits in the repository. True
            values represent a combination of subject & visit ID to process
            the session for
        subject_inds : dct[str, int]
            A mapping of subject ID to row index in the 'to_process' array
        visit_inds : dct[str, int]
            A mapping of visit ID to column index in the 'to_process' array

        Returns
        -------
        iter_nodes : dict[str, Node]
            A dictionary containing the nodes to iterate over all
            subject/visit IDs to process for each input frequency
        """
        # Check to see whether the subject/visit IDs to process (as specified
        # by the 'to_process' array) can be factorized into indepdent nodes,
        # i.e. all subjects to process have the same visits to process and
        # vice-versa.
        factorizable = True
        if len(list(pipeline.iterators())) == 2:
            nz_rows = to_process_array[to_process_array.any(axis=1), :]
            ref_row = nz_rows[0, :]
            factorizable = all((r == ref_row).all() for r in nz_rows)
        # If the subject/visit IDs to process cannot be factorized into
        # indepedent iterators, determine which to make make dependent on the
        # other in order to avoid/minimise duplicatation of download attempts
        dependent = None
        if not factorizable:
            input_freqs = list(pipeline.input_frequencies)
            # By default pick iterator the one with the most IDs to
            # iterate to be the dependent in order to reduce the number of
            # nodes created and any duplication of download attempts across
            # the nodes (if both 'per_visit' and 'per_subject' inputs are
            # required
            num_subjs, num_visits = nz_rows[:, nz_rows.any(axis=0)].shape
            if num_subjs > num_visits:
                dependent = self.study.SUBJECT_ID
            else:
                dependent = self.study.VISIT_ID
            if 'per_visit' in input_freqs:
                if 'per_subject' in input_freqs:
                    logger.warning(
                        "Cannot factorize sessions to process into independent"
                        " subject and visit iterators and both 'per_visit' and"
                        " 'per_subject' inputs are used by pipeline therefore"
                        " per_{} inputs may be cached twice".format(
                            dependent[:-3]))
                else:
                    dependent = self.study.SUBJECT_ID
            elif 'per_subject' in input_freqs:
                dependent = self.study.VISIT_ID
        # Invert the index dictionaries to get index-to-ID maps
        inv_subj_inds = {v: k for k, v in subject_inds.items()}
        inv_visit_inds = {v: k for k, v in visit_inds.items()}
        # Create iterator for subjects
        iter_nodes = {}
        if self.study.SUBJECT_ID in pipeline.iterators():
            fields = [self.study.SUBJECT_ID]
            if dependent == self.study.SUBJECT_ID:
                fields.append(self.study.VISIT_ID)
            # Add iterator node named after subject iterator
            subj_it = pipeline.add(self.study.SUBJECT_ID,
                                   IdentityInterface(fields))
            if dependent == self.study.SUBJECT_ID:
                # Subjects iterator is dependent on visit iterator (because of
                # non-factorizable IDs)
                subj_it.itersource = ('{}_{}'.format(pipeline.name,
                                                     self.study.VISIT_ID),
                                      self.study.VISIT_ID)
                subj_it.iterables = [(
                    self.study.SUBJECT_ID,
                    {inv_visit_inds[n]: [inv_subj_inds[m]
                                         for m in col.nonzero()[0]]
                     for n, col in enumerate(to_process_array.T)})]
            else:
                subj_it.iterables = (
                    self.study.SUBJECT_ID,
                    [inv_subj_inds[n]
                     for n in to_process_array.any(axis=1).nonzero()[0]])
            iter_nodes[self.study.SUBJECT_ID] = subj_it
        # Create iterator for visits
        if self.study.VISIT_ID in pipeline.iterators():
            fields = [self.study.VISIT_ID]
            if dependent == self.study.VISIT_ID:
                fields.append(self.study.SUBJECT_ID)
            # Add iterator node named after visit iterator
            visit_it = pipeline.add(self.study.VISIT_ID,
                                    IdentityInterface(fields))
            if dependent == self.study.VISIT_ID:
                visit_it.itersource = ('{}_{}'.format(pipeline.name,
                                                      self.study.SUBJECT_ID),
                                       self.study.SUBJECT_ID)
                visit_it.iterables = [(
                    self.study.VISIT_ID,
                    {inv_subj_inds[m]: [inv_visit_inds[n]
                                        for n in row.nonzero()[0]]
                     for m, row in enumerate(to_process_array)})]
            else:
                visit_it.iterables = (
                    self.study.VISIT_ID,
                    [inv_visit_inds[n]
                     for n in to_process_array.any(axis=0).nonzero()[0]])
            iter_nodes[self.study.VISIT_ID] = visit_it
        if dependent == self.study.SUBJECT_ID:
            pipeline.connect(visit_it, self.study.VISIT_ID,
                             subj_it, self.study.VISIT_ID)
        if dependent == self.study.VISIT_ID:
            pipeline.connect(subj_it, self.study.SUBJECT_ID,
                             visit_it, self.study.SUBJECT_ID)
        return iter_nodes
[ "def", "_iterate", "(", "self", ",", "pipeline", ",", "to_process_array", ",", "subject_inds", ",", "visit_inds", ")", ":", "# Check to see whether the subject/visit IDs to process (as specified", "# by the 'to_process' array) can be factorized into indepdent nodes,", "# i.e. all subjects to process have the same visits to process and", "# vice-versa.", "factorizable", "=", "True", "if", "len", "(", "list", "(", "pipeline", ".", "iterators", "(", ")", ")", ")", "==", "2", ":", "nz_rows", "=", "to_process_array", "[", "to_process_array", ".", "any", "(", "axis", "=", "1", ")", ",", ":", "]", "ref_row", "=", "nz_rows", "[", "0", ",", ":", "]", "factorizable", "=", "all", "(", "(", "r", "==", "ref_row", ")", ".", "all", "(", ")", "for", "r", "in", "nz_rows", ")", "# If the subject/visit IDs to process cannot be factorized into", "# indepedent iterators, determine which to make make dependent on the", "# other in order to avoid/minimise duplicatation of download attempts", "dependent", "=", "None", "if", "not", "factorizable", ":", "input_freqs", "=", "list", "(", "pipeline", ".", "input_frequencies", ")", "# By default pick iterator the one with the most IDs to", "# iterate to be the dependent in order to reduce the number of", "# nodes created and any duplication of download attempts across", "# the nodes (if both 'per_visit' and 'per_subject' inputs are", "# required", "num_subjs", ",", "num_visits", "=", "nz_rows", "[", ":", ",", "nz_rows", ".", "any", "(", "axis", "=", "0", ")", "]", ".", "shape", "if", "num_subjs", ">", "num_visits", ":", "dependent", "=", "self", ".", "study", ".", "SUBJECT_ID", "else", ":", "dependent", "=", "self", ".", "study", ".", "VISIT_ID", "if", "'per_visit'", "in", "input_freqs", ":", "if", "'per_subject'", "in", "input_freqs", ":", "logger", ".", "warning", "(", "\"Cannot factorize sessions to process into independent\"", "\" subject and visit iterators and both 'per_visit' and\"", "\" 'per_subject' inputs are used by pipeline therefore\"", "\" per_{} inputs may be cached twice\"", ".", "format", "(", "dependent", "[", ":", "-", "3", "]", ")", ")", "else", ":", "dependent", "=", "self", ".", "study", ".", "SUBJECT_ID", "elif", "'per_subject'", "in", "input_freqs", ":", "dependent", "=", "self", ".", "study", ".", "VISIT_ID", "# Invert the index dictionaries to get index-to-ID maps", "inv_subj_inds", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "subject_inds", ".", "items", "(", ")", "}", "inv_visit_inds", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "visit_inds", ".", "items", "(", ")", "}", "# Create iterator for subjects", "iter_nodes", "=", "{", "}", "if", "self", ".", "study", ".", "SUBJECT_ID", "in", "pipeline", ".", "iterators", "(", ")", ":", "fields", "=", "[", "self", ".", "study", ".", "SUBJECT_ID", "]", "if", "dependent", "==", "self", ".", "study", ".", "SUBJECT_ID", ":", "fields", ".", "append", "(", "self", ".", "study", ".", "VISIT_ID", ")", "# Add iterator node named after subject iterator", "subj_it", "=", "pipeline", ".", "add", "(", "self", ".", "study", ".", "SUBJECT_ID", ",", "IdentityInterface", "(", "fields", ")", ")", "if", "dependent", "==", "self", ".", "study", ".", "SUBJECT_ID", ":", "# Subjects iterator is dependent on visit iterator (because of", "# non-factorizable IDs)", "subj_it", ".", "itersource", "=", "(", "'{}_{}'", ".", "format", "(", "pipeline", ".", "name", ",", "self", ".", "study", ".", "VISIT_ID", ")", ",", "self", ".", "study", ".", "VISIT_ID", ")", "subj_it", ".", "iterables", "=", "[", "(", 
"self", ".", "study", ".", "SUBJECT_ID", ",", "{", "inv_visit_inds", "[", "n", "]", ":", "[", "inv_subj_inds", "[", "m", "]", "for", "m", "in", "col", ".", "nonzero", "(", ")", "[", "0", "]", "]", "for", "n", ",", "col", "in", "enumerate", "(", "to_process_array", ".", "T", ")", "}", ")", "]", "else", ":", "subj_it", ".", "iterables", "=", "(", "self", ".", "study", ".", "SUBJECT_ID", ",", "[", "inv_subj_inds", "[", "n", "]", "for", "n", "in", "to_process_array", ".", "any", "(", "axis", "=", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", "]", ")", "iter_nodes", "[", "self", ".", "study", ".", "SUBJECT_ID", "]", "=", "subj_it", "# Create iterator for visits", "if", "self", ".", "study", ".", "VISIT_ID", "in", "pipeline", ".", "iterators", "(", ")", ":", "fields", "=", "[", "self", ".", "study", ".", "VISIT_ID", "]", "if", "dependent", "==", "self", ".", "study", ".", "VISIT_ID", ":", "fields", ".", "append", "(", "self", ".", "study", ".", "SUBJECT_ID", ")", "# Add iterator node named after visit iterator", "visit_it", "=", "pipeline", ".", "add", "(", "self", ".", "study", ".", "VISIT_ID", ",", "IdentityInterface", "(", "fields", ")", ")", "if", "dependent", "==", "self", ".", "study", ".", "VISIT_ID", ":", "visit_it", ".", "itersource", "=", "(", "'{}_{}'", ".", "format", "(", "pipeline", ".", "name", ",", "self", ".", "study", ".", "SUBJECT_ID", ")", ",", "self", ".", "study", ".", "SUBJECT_ID", ")", "visit_it", ".", "iterables", "=", "[", "(", "self", ".", "study", ".", "VISIT_ID", ",", "{", "inv_subj_inds", "[", "m", "]", ":", "[", "inv_visit_inds", "[", "n", "]", "for", "n", "in", "row", ".", "nonzero", "(", ")", "[", "0", "]", "]", "for", "m", ",", "row", "in", "enumerate", "(", "to_process_array", ")", "}", ")", "]", "else", ":", "visit_it", ".", "iterables", "=", "(", "self", ".", "study", ".", "VISIT_ID", ",", "[", "inv_visit_inds", "[", "n", "]", "for", "n", "in", "to_process_array", ".", "any", "(", "axis", "=", "0", ")", ".", "nonzero", "(", ")", "[", "0", "]", "]", ")", "iter_nodes", "[", "self", ".", "study", ".", "VISIT_ID", "]", "=", "visit_it", "if", "dependent", "==", "self", ".", "study", ".", "SUBJECT_ID", ":", "pipeline", ".", "connect", "(", "visit_it", ",", "self", ".", "study", ".", "VISIT_ID", ",", "subj_it", ",", "self", ".", "study", ".", "VISIT_ID", ")", "if", "dependent", "==", "self", ".", "study", ".", "VISIT_ID", ":", "pipeline", ".", "connect", "(", "subj_it", ",", "self", ".", "study", ".", "SUBJECT_ID", ",", "visit_it", ",", "self", ".", "study", ".", "SUBJECT_ID", ")", "return", "iter_nodes" ]
Generate nodes that iterate over subjects and visits in the study that
need to be processed by the pipeline

Parameters
----------
pipeline : Pipeline
    The pipeline to add iter_nodes for
to_process_array : 2-D numpy.array[bool]
    A two-dimensional boolean array, where rows correspond to
    subjects and columns correspond to visits in the repository. True
    values represent a combination of subject & visit ID to process
    the session for
subject_inds : dct[str, int]
    A mapping of subject ID to row index in the 'to_process' array
visit_inds : dct[str, int]
    A mapping of visit ID to column index in the 'to_process' array

Returns
-------
iter_nodes : dict[str, Node]
    A dictionary containing the nodes to iterate over all
    subject/visit IDs to process for each input frequency
[ "Generate", "nodes", "that", "iterate", "over", "subjects", "and", "visits", "in", "the", "study", "that", "need", "to", "be", "processed", "by", "the", "pipeline" ]
train
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/processor/base.py#L538-L658
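Editorial note on the `_iterate` record above: the `factorizable` test asks whether every subject still to be processed has exactly the same set of visits to process; only then can the subject and visit iterators stay independent, otherwise one is made dependent on the other (by default the axis with more IDs to iterate). The helper below is a standalone numpy illustration of that test; `factorizable_and_dependent` is an invented name and not part of arcana.

import numpy as np

def factorizable_and_dependent(to_process_array):
    """Return (factorizable, dependent_axis) for a boolean subject x visit array.

    dependent_axis is None when the array factorizes, otherwise 'subject'
    or 'visit' -- whichever has more IDs to iterate, mirroring the default
    heuristic in Processor._iterate (before its per_visit/per_subject
    input adjustments).
    """
    # Keep only rows (subjects) that have at least one session to process.
    nz_rows = to_process_array[to_process_array.any(axis=1), :]
    ref_row = nz_rows[0, :]
    # Factorizable iff every remaining subject has the same visit pattern.
    factorizable = all((r == ref_row).all() for r in nz_rows)
    if factorizable:
        return True, None
    # Otherwise pick the axis with more IDs to iterate as the dependent one.
    num_subjs, num_visits = nz_rows[:, nz_rows.any(axis=0)].shape
    return False, 'subject' if num_subjs > num_visits else 'visit'

# Same visits for every subject to process -> independent iterators.
print(factorizable_and_dependent(np.array([[True, True],
                                           [True, True],
                                           [False, False]])))  # (True, None)

# Ragged visits per subject -> one iterator must depend on the other.
print(factorizable_and_dependent(np.array([[True, False],
                                           [True, True],
                                           [False, True]])))  # (False, 'subject')

In `_iterate` itself the default choice is then adjusted when 'per_visit' or 'per_subject' inputs are present, to avoid caching the same per-frequency inputs more than once.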