Dataset columns (value lengths observed across the split):

repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: list, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: list, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
Authorization.authz_part2
def authz_part2(self, user, authn_event, request, **kwargs): """ After the authentication this is where you should end up :param user: :param request: The Authorization Request :param sid: Session key :param kwargs: possible other parameters :return: A redirect to the redirect_uri of the client """ sid = setup_session(self.endpoint_context, request, user, authn_event=authn_event) try: resp_info = self.post_authentication(user, request, sid, **kwargs) except Exception as err: return self.error_response({}, 'server_error', err) if "check_session_iframe" in self.endpoint_context.provider_info: ec = self.endpoint_context salt = rndstr() if ec.sdb.is_session_revoked(sid): pass else: authn_event = ec.sdb.get_authentication_event(sid) # use the last session _state = json.dumps({'authn_time': authn_event['authn_time']}) session_cookie = ec.cookie_dealer.create_cookie( json.dumps(_state), typ="session", cookie_name=ec.cookie_name['session_management']) opbs = session_cookie[ec.cookie_name['session_management']] _session_state = compute_session_state(opbs.value, salt, request["client_id"], resp_info['return_uri']) if 'cookie' in resp_info: if isinstance(resp_info['cookie'], list): resp_info['cookie'].append(session_cookie) else: append_cookie(resp_info['cookie'], session_cookie) else: resp_info['cookie'] = session_cookie resp_info['response_args']['session_state'] = _session_state # Mix-Up mitigation resp_info['response_args']['iss'] = self.endpoint_context.issuer resp_info['response_args']['client_id'] = request['client_id'] return resp_info
python
[ "def", "authz_part2", "(", "self", ",", "user", ",", "authn_event", ",", "request", ",", "*", "*", "kwargs", ")", ":", "sid", "=", "setup_session", "(", "self", ".", "endpoint_context", ",", "request", ",", "user", ",", "authn_event", "=", "authn_event", ")", "try", ":", "resp_info", "=", "self", ".", "post_authentication", "(", "user", ",", "request", ",", "sid", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "err", ":", "return", "self", ".", "error_response", "(", "{", "}", ",", "'server_error'", ",", "err", ")", "if", "\"check_session_iframe\"", "in", "self", ".", "endpoint_context", ".", "provider_info", ":", "ec", "=", "self", ".", "endpoint_context", "salt", "=", "rndstr", "(", ")", "if", "ec", ".", "sdb", ".", "is_session_revoked", "(", "sid", ")", ":", "pass", "else", ":", "authn_event", "=", "ec", ".", "sdb", ".", "get_authentication_event", "(", "sid", ")", "# use the last session", "_state", "=", "json", ".", "dumps", "(", "{", "'authn_time'", ":", "authn_event", "[", "'authn_time'", "]", "}", ")", "session_cookie", "=", "ec", ".", "cookie_dealer", ".", "create_cookie", "(", "json", ".", "dumps", "(", "_state", ")", ",", "typ", "=", "\"session\"", ",", "cookie_name", "=", "ec", ".", "cookie_name", "[", "'session_management'", "]", ")", "opbs", "=", "session_cookie", "[", "ec", ".", "cookie_name", "[", "'session_management'", "]", "]", "_session_state", "=", "compute_session_state", "(", "opbs", ".", "value", ",", "salt", ",", "request", "[", "\"client_id\"", "]", ",", "resp_info", "[", "'return_uri'", "]", ")", "if", "'cookie'", "in", "resp_info", ":", "if", "isinstance", "(", "resp_info", "[", "'cookie'", "]", ",", "list", ")", ":", "resp_info", "[", "'cookie'", "]", ".", "append", "(", "session_cookie", ")", "else", ":", "append_cookie", "(", "resp_info", "[", "'cookie'", "]", ",", "session_cookie", ")", "else", ":", "resp_info", "[", "'cookie'", "]", "=", "session_cookie", "resp_info", "[", "'response_args'", "]", "[", "'session_state'", "]", "=", "_session_state", "# Mix-Up mitigation", "resp_info", "[", "'response_args'", "]", "[", "'iss'", "]", "=", "self", ".", "endpoint_context", ".", "issuer", "resp_info", "[", "'response_args'", "]", "[", "'client_id'", "]", "=", "request", "[", "'client_id'", "]", "return", "resp_info" ]
After the authentication this is where you should end up :param user: :param request: The Authorization Request :param sid: Session key :param kwargs: possible other parameters :return: A redirect to the redirect_uri of the client
[ "After", "the", "authentication", "this", "is", "where", "you", "should", "end", "up" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L642-L693
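The session-management branch of authz_part2 above derives a session_state value from the OP browser state cookie (opbs), a salt, the client_id and the return URI. Below is a minimal sketch of how such a value can be computed, following the OpenID Connect Session Management draft; this is a local stand-in for compute_session_state, not the library's own implementation, and the exact encoding used by oidcendpoint may differ.

```python
import hashlib
import secrets
from urllib.parse import urlsplit


def compute_session_state(opbs, salt, client_id, redirect_uri):
    # Sketch of the draft's rule:
    # hash(client_id + " " + origin + " " + OP browser state + " " + salt) "." salt
    parts = urlsplit(redirect_uri)
    origin = "{}://{}".format(parts.scheme, parts.netloc)
    digest = hashlib.sha256(
        " ".join([client_id, origin, opbs, salt]).encode("utf-8")
    ).hexdigest()
    return "{}.{}".format(digest, salt)


# Example values; in authz_part2 the opbs value comes from the
# session-management cookie and the salt from rndstr()
opbs = secrets.token_urlsafe(16)
salt = secrets.token_urlsafe(8)
print(compute_session_state(opbs, salt, "client-1", "https://rp.example.com/cb"))
```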
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/authorization.py
Authorization.process_request
def process_request(self, request_info=None, **kwargs): """ The AuthorizationRequest endpoint :param request_info: The authorization request as a dictionary :return: dictionary """ if isinstance(request_info, AuthorizationErrorResponse): return request_info _cid = request_info["client_id"] cinfo = self.endpoint_context.cdb[_cid] try: cookie = kwargs['cookie'] except KeyError: cookie = '' else: del kwargs['cookie'] if proposed_user(request_info): kwargs['req_user'] = proposed_user(request_info) else: try: _login_hint = request_info['login_hint'] except KeyError: pass else: if self.endpoint_context.login_hint_lookup: kwargs['req_user'] = self.endpoint_context.login_hint_lookup[ _login_hint] info = self.setup_auth(request_info, request_info["redirect_uri"], cinfo, cookie, **kwargs) if 'error' in info: return info try: _function = info['function'] except KeyError: # already authenticated logger.debug("- authenticated -") logger.debug("AREQ keys: %s" % request_info.keys()) res = self.authz_part2(info['user'], info['authn_event'], request_info, cookie=cookie) return res else: try: # Run the authentication function return { 'http_response': _function(**info['args']), 'return_uri': request_info["redirect_uri"] } except Exception as err: logger.exception(err) return {'http_response': 'Internal error: {}'.format(err)}
python
[ "def", "process_request", "(", "self", ",", "request_info", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "request_info", ",", "AuthorizationErrorResponse", ")", ":", "return", "request_info", "_cid", "=", "request_info", "[", "\"client_id\"", "]", "cinfo", "=", "self", ".", "endpoint_context", ".", "cdb", "[", "_cid", "]", "try", ":", "cookie", "=", "kwargs", "[", "'cookie'", "]", "except", "KeyError", ":", "cookie", "=", "''", "else", ":", "del", "kwargs", "[", "'cookie'", "]", "if", "proposed_user", "(", "request_info", ")", ":", "kwargs", "[", "'req_user'", "]", "=", "proposed_user", "(", "request_info", ")", "else", ":", "try", ":", "_login_hint", "=", "request_info", "[", "'login_hint'", "]", "except", "KeyError", ":", "pass", "else", ":", "if", "self", ".", "endpoint_context", ".", "login_hint_lookup", ":", "kwargs", "[", "'req_user'", "]", "=", "self", ".", "endpoint_context", ".", "login_hint_lookup", "[", "_login_hint", "]", "info", "=", "self", ".", "setup_auth", "(", "request_info", ",", "request_info", "[", "\"redirect_uri\"", "]", ",", "cinfo", ",", "cookie", ",", "*", "*", "kwargs", ")", "if", "'error'", "in", "info", ":", "return", "info", "try", ":", "_function", "=", "info", "[", "'function'", "]", "except", "KeyError", ":", "# already authenticated", "logger", ".", "debug", "(", "\"- authenticated -\"", ")", "logger", ".", "debug", "(", "\"AREQ keys: %s\"", "%", "request_info", ".", "keys", "(", ")", ")", "res", "=", "self", ".", "authz_part2", "(", "info", "[", "'user'", "]", ",", "info", "[", "'authn_event'", "]", ",", "request_info", ",", "cookie", "=", "cookie", ")", "return", "res", "else", ":", "try", ":", "# Run the authentication function", "return", "{", "'http_response'", ":", "_function", "(", "*", "*", "info", "[", "'args'", "]", ")", ",", "'return_uri'", ":", "request_info", "[", "\"redirect_uri\"", "]", "}", "except", "Exception", "as", "err", ":", "logger", ".", "exception", "(", "err", ")", "return", "{", "'http_response'", ":", "'Internal error: {}'", ".", "format", "(", "err", ")", "}" ]
The AuthorizationRequest endpoint :param request_info: The authorization request as a dictionary :return: dictionary
[ "The", "AuthorizationRequest", "endpoint" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L695-L751
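process_request above either returns an error/authentication payload directly or hands off to authz_part2 once the user is already authenticated. The sketch below shows how a caller might dispatch on the returned dictionary; the function and the example dict shapes are hypothetical glue code, only the keys ('http_response', 'error', 'return_uri', 'response_args') mirror the code above.

```python
# Hypothetical caller-side dispatch on the dict returned by
# Authorization.process_request; not part of oidcendpoint.
def dispatch_authorization_result(info):
    if isinstance(info, dict) and 'http_response' in info:
        # Authentication still pending (a login page), or an
        # "Internal error: ..." string from the except branch.
        return ('show_page', info['http_response'])
    if isinstance(info, dict) and 'error' in info:
        # setup_auth reported a problem; relay the error response.
        return ('error', info)
    # Otherwise authz_part2 already ran: redirect back to the RP.
    return ('redirect', info.get('return_uri'), info.get('response_args'))


print(dispatch_authorization_result(
    {'http_response': '<html>login form</html>', 'return_uri': 'https://rp.example.com/cb'}))
print(dispatch_authorization_result(
    {'return_uri': 'https://rp.example.com/cb',
     'response_args': {'code': 'abc', 'iss': 'https://op.example.org'}}))
```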
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
setup_session
def setup_session(endpoint_context, areq, uid, client_id='', acr='', salt='salt', authn_event=None): """ Setting up a user session :param endpoint_context: :param areq: :param uid: :param acr: :param client_id: :param salt: :param authn_event: A already made AuthnEvent :return: """ if authn_event is None and acr: authn_event = AuthnEvent(uid=uid, salt=salt, authn_info=acr, authn_time=time.time()) if not client_id: client_id = areq['client_id'] sid = endpoint_context.sdb.create_authz_session(authn_event, areq, client_id=client_id, uid=uid) endpoint_context.sdb.do_sub(sid, uid, '') return sid
python
[ "def", "setup_session", "(", "endpoint_context", ",", "areq", ",", "uid", ",", "client_id", "=", "''", ",", "acr", "=", "''", ",", "salt", "=", "'salt'", ",", "authn_event", "=", "None", ")", ":", "if", "authn_event", "is", "None", "and", "acr", ":", "authn_event", "=", "AuthnEvent", "(", "uid", "=", "uid", ",", "salt", "=", "salt", ",", "authn_info", "=", "acr", ",", "authn_time", "=", "time", ".", "time", "(", ")", ")", "if", "not", "client_id", ":", "client_id", "=", "areq", "[", "'client_id'", "]", "sid", "=", "endpoint_context", ".", "sdb", ".", "create_authz_session", "(", "authn_event", ",", "areq", ",", "client_id", "=", "client_id", ",", "uid", "=", "uid", ")", "endpoint_context", ".", "sdb", ".", "do_sub", "(", "sid", ",", "uid", ",", "''", ")", "return", "sid" ]
Setting up a user session :param endpoint_context: :param areq: :param uid: :param acr: :param client_id: :param salt: :param authn_event: A already made AuthnEvent :return:
[ "Setting", "up", "a", "user", "session" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L44-L69
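The first decision setup_session makes is whether to fabricate an AuthnEvent: one is only created when no event was passed in and an acr value is available. A small standalone sketch of that rule follows; SimpleAuthnEvent is a stand-in for oidcendpoint's AuthnEvent class, kept minimal so the example runs on its own.

```python
import time


class SimpleAuthnEvent:
    # Stand-in for oidcendpoint's AuthnEvent, holding the same fields
    # that setup_session passes when it builds one itself.
    def __init__(self, uid, salt, authn_info, authn_time):
        self.uid, self.salt = uid, salt
        self.authn_info, self.authn_time = authn_info, authn_time


def make_authn_event(uid, acr='', salt='salt', authn_event=None):
    # Mirror of the guard at the top of setup_session: only fabricate an
    # event when none was supplied AND an acr is known.
    if authn_event is None and acr:
        authn_event = SimpleAuthnEvent(uid=uid, salt=salt,
                                       authn_info=acr,
                                       authn_time=time.time())
    return authn_event


print(make_authn_event('diana', acr='urn:oasis:names:tc:SAML:2.0:ac:classes:Password'))
print(make_authn_event('diana'))  # None: neither an event nor an acr was supplied
```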
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
dict_match
def dict_match(a, b): """ Check if all attribute/value pairs in a also appears in b :param a: A dictionary :param b: A dictionary :return: True/False """ res = [] for k, v in a.items(): try: res.append(b[k] == v) except KeyError: pass return all(res)
python
[ "def", "dict_match", "(", "a", ",", "b", ")", ":", "res", "=", "[", "]", "for", "k", ",", "v", "in", "a", ".", "items", "(", ")", ":", "try", ":", "res", ".", "append", "(", "b", "[", "k", "]", "==", "v", ")", "except", "KeyError", ":", "pass", "return", "all", "(", "res", ")" ]
Check if all attribute/value pairs in a also appears in b :param a: A dictionary :param b: A dictionary :return: True/False
[ "Check", "if", "all", "attribute", "/", "value", "pairs", "in", "a", "also", "appears", "in", "b" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L92-L106
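A few worked examples of the dict_match semantics: every key of a that also exists in b must map to an equal value, while keys missing from b are silently ignored. Note the corner case that follows from all([]) being True: if no keys overlap at all, the match still succeeds.

```python
# dict_match as defined above, reproduced so the examples run standalone.
def dict_match(a, b):
    res = []
    for k, v in a.items():
        try:
            res.append(b[k] == v)
        except KeyError:
            pass
    return all(res)


assert dict_match({'sub': 'abc'}, {'sub': 'abc', 'scope': 'openid'})   # subset match
assert not dict_match({'sub': 'abc'}, {'sub': 'xyz'})                  # value mismatch
assert dict_match({'sub': 'abc'}, {})   # no overlapping keys -> all([]) is True
```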
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
mint_sub
def mint_sub(client_salt, sector_id="", subject_type="public", uid='', user_salt=''): """ Mint a new sub (subject identifier) :param authn_event: Authentication event information :param client_salt: client specific salt - used in pairwise :param sector_id: Possible sector identifier :param subject_type: 'public'/'pairwise' :return: Subject identifier """ if subject_type == "public": sub = hashlib.sha256( "{}{}".format(uid, user_salt).encode("utf-8")).hexdigest() else: sub = pairwise_id(uid, sector_id, "{}{}".format(client_salt, user_salt)) return sub
python
[ "def", "mint_sub", "(", "client_salt", ",", "sector_id", "=", "\"\"", ",", "subject_type", "=", "\"public\"", ",", "uid", "=", "''", ",", "user_salt", "=", "''", ")", ":", "if", "subject_type", "==", "\"public\"", ":", "sub", "=", "hashlib", ".", "sha256", "(", "\"{}{}\"", ".", "format", "(", "uid", ",", "user_salt", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", "else", ":", "sub", "=", "pairwise_id", "(", "uid", ",", "sector_id", ",", "\"{}{}\"", ".", "format", "(", "client_salt", ",", "user_salt", ")", ")", "return", "sub" ]
Mint a new sub (subject identifier) :param authn_event: Authentication event information :param client_salt: client specific salt - used in pairwise :param sector_id: Possible sector identifier :param subject_type: 'public'/'pairwise' :return: Subject identifier
[ "Mint", "a", "new", "sub", "(", "subject", "identifier", ")" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L109-L127
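The "public" branch of mint_sub is a plain SHA-256 over uid plus user salt, while the "pairwise" branch delegates to oidcendpoint's pairwise_id(), which also folds in the sector identifier and a client-specific salt. A runnable sketch of the public branch only:

```python
import hashlib


def public_sub(uid, user_salt=''):
    # Same construction as the subject_type == "public" branch above.
    return hashlib.sha256(
        "{}{}".format(uid, user_salt).encode("utf-8")).hexdigest()


print(public_sub("diana", "2021salt"))
# The same uid and salt always yield the same sub, so the identifier is
# stable across all clients, which is what "public" means here.
print(public_sub("diana", "2021salt") == public_sub("diana", "2021salt"))  # True
```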
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.update
def update(self, sid, **kwargs): """ Add attribute value assertion to a special session :param sid: Session ID :param kwargs: """ item = self[sid] for attribute, value in kwargs.items(): item[attribute] = value self[sid] = item
python
[ "def", "update", "(", "self", ",", "sid", ",", "*", "*", "kwargs", ")", ":", "item", "=", "self", "[", "sid", "]", "for", "attribute", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":", "item", "[", "attribute", "]", "=", "value", "self", "[", "sid", "]", "=", "item" ]
Add attribute value assertion to a special session :param sid: Session ID :param kwargs:
[ "Add", "attribute", "value", "assertion", "to", "a", "special", "session" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L204-L214
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.update_by_token
def update_by_token(self, token, **kwargs): """ Updated the session info. Any type of known token can be used :param token: code/access token/refresh token/... :param kwargs: Key word arguements """ _sid = self.handler.sid(token) return self.update(_sid, **kwargs)
python
[ "def", "update_by_token", "(", "self", ",", "token", ",", "*", "*", "kwargs", ")", ":", "_sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "return", "self", ".", "update", "(", "_sid", ",", "*", "*", "kwargs", ")" ]
Updated the session info. Any type of known token can be used :param token: code/access token/refresh token/... :param kwargs: Key word arguements
[ "Updated", "the", "session", "info", ".", "Any", "type", "of", "known", "token", "can", "be", "used" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L216-L224
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.replace_token
def replace_token(self, sid, sinfo, token_type): """ Replace an old refresh_token with a new one :param sid: session ID :param sinfo: session info :param token_type: What type of tokens should be replaced :return: Updated session info """ try: # Mint a new one refresh_token = self.handler[token_type](sid, sinfo=sinfo) except KeyError: pass else: # blacklist the old is there is one try: self.handler[token_type].black_list(sinfo[token_type]) except KeyError: pass sinfo[token_type] = refresh_token return sinfo
python
[ "def", "replace_token", "(", "self", ",", "sid", ",", "sinfo", ",", "token_type", ")", ":", "try", ":", "# Mint a new one", "refresh_token", "=", "self", ".", "handler", "[", "token_type", "]", "(", "sid", ",", "sinfo", "=", "sinfo", ")", "except", "KeyError", ":", "pass", "else", ":", "# blacklist the old is there is one", "try", ":", "self", ".", "handler", "[", "token_type", "]", ".", "black_list", "(", "sinfo", "[", "token_type", "]", ")", "except", "KeyError", ":", "pass", "sinfo", "[", "token_type", "]", "=", "refresh_token", "return", "sinfo" ]
Replace an old refresh_token with a new one :param sid: session ID :param sinfo: session info :param token_type: What type of tokens should be replaced :return: Updated session info
[ "Replace", "an", "old", "refresh_token", "with", "a", "new", "one" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L268-L291
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.refresh_token
def refresh_token(self, token, new_refresh=False): """ Issue a new access token using a valid refresh token :param token: Refresh token :param new_refresh: Whether a new refresh token should be minted or not :return: Dictionary with session info :raises: ExpiredToken for invalid refresh token WrongTokenType for wrong token type """ try: _tinfo = self.handler['refresh_token'].info(token) except KeyError: return False if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']: raise ExpiredToken() _sid = _tinfo['sid'] session_info = self[_sid] session_info = self.replace_token(_sid, session_info, 'access_token') session_info["token_type"] = self.handler['access_token'].token_type if new_refresh: session_info = self.replace_token(_sid, session_info, 'refresh_token') self[_sid] = session_info return session_info
python
[ "def", "refresh_token", "(", "self", ",", "token", ",", "new_refresh", "=", "False", ")", ":", "try", ":", "_tinfo", "=", "self", ".", "handler", "[", "'refresh_token'", "]", ".", "info", "(", "token", ")", "except", "KeyError", ":", "return", "False", "if", "is_expired", "(", "int", "(", "_tinfo", "[", "'exp'", "]", ")", ")", "or", "_tinfo", "[", "'black_listed'", "]", ":", "raise", "ExpiredToken", "(", ")", "_sid", "=", "_tinfo", "[", "'sid'", "]", "session_info", "=", "self", "[", "_sid", "]", "session_info", "=", "self", ".", "replace_token", "(", "_sid", ",", "session_info", ",", "'access_token'", ")", "session_info", "[", "\"token_type\"", "]", "=", "self", ".", "handler", "[", "'access_token'", "]", ".", "token_type", "if", "new_refresh", ":", "session_info", "=", "self", ".", "replace_token", "(", "_sid", ",", "session_info", ",", "'refresh_token'", ")", "self", "[", "_sid", "]", "=", "session_info", "return", "session_info" ]
Issue a new access token using a valid refresh token :param token: Refresh token :param new_refresh: Whether a new refresh token should be minted or not :return: Dictionary with session info :raises: ExpiredToken for invalid refresh token WrongTokenType for wrong token type
[ "Issue", "a", "new", "access", "token", "using", "a", "valid", "refresh", "token" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L356-L387
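From a caller's perspective, refresh_token has three outcomes: a fresh session_info dict, False for an unknown token, or an ExpiredToken exception for an expired or black-listed one. The sketch below shows one way to wrap that contract; sdb stands for a SessionDB instance, and ExpiredToken is stubbed here only so the example stands alone (in real use it is the exception class imported from oidcendpoint).

```python
class ExpiredToken(Exception):
    # Local stub for oidcendpoint's ExpiredToken.
    pass


def refresh_access_token(sdb, refresh_token, rotate_refresh=False):
    try:
        session_info = sdb.refresh_token(refresh_token, new_refresh=rotate_refresh)
    except ExpiredToken:
        return {'error': 'invalid_grant',
                'error_description': 'refresh token expired or revoked'}
    if session_info is False:
        # refresh_token() returns False when the handler cannot resolve the token.
        return {'error': 'invalid_grant'}
    return {
        'access_token': session_info['access_token'],
        'token_type': session_info['token_type'],
        'refresh_token': session_info.get('refresh_token'),
    }
```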
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.is_token_valid
def is_token_valid(self, token): """ Checks validity of a given token :param token: Access or refresh token """ try: _tinfo = self.handler.info(token) except KeyError: return False if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']: return False # Dependent on what state the session is in. session_info = self[_tinfo['sid']] if session_info["oauth_state"] == "authz": if _tinfo['handler'] != self.handler['code']: return False elif session_info["oauth_state"] == "token": if _tinfo['handler'] != self.handler['access_token']: return False return True
python
[ "def", "is_token_valid", "(", "self", ",", "token", ")", ":", "try", ":", "_tinfo", "=", "self", ".", "handler", ".", "info", "(", "token", ")", "except", "KeyError", ":", "return", "False", "if", "is_expired", "(", "int", "(", "_tinfo", "[", "'exp'", "]", ")", ")", "or", "_tinfo", "[", "'black_listed'", "]", ":", "return", "False", "# Dependent on what state the session is in.", "session_info", "=", "self", "[", "_tinfo", "[", "'sid'", "]", "]", "if", "session_info", "[", "\"oauth_state\"", "]", "==", "\"authz\"", ":", "if", "_tinfo", "[", "'handler'", "]", "!=", "self", ".", "handler", "[", "'code'", "]", ":", "return", "False", "elif", "session_info", "[", "\"oauth_state\"", "]", "==", "\"token\"", ":", "if", "_tinfo", "[", "'handler'", "]", "!=", "self", ".", "handler", "[", "'access_token'", "]", ":", "return", "False", "return", "True" ]
Checks validity of a given token :param token: Access or refresh token
[ "Checks", "validity", "of", "a", "given", "token" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L389-L414
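The validity rules in is_token_valid depend on which phase the session is in: in the "authz" state only the code handler's tokens are acceptable, in the "token" state only the access-token handler's. A standalone restatement with plain dicts, assuming is_expired() means the expiry time lies in the past; handler_name plays the role of the handler identity comparison in the original.

```python
import time


def token_valid(tinfo, session_state, handler_name, now=None):
    # Restatement of is_token_valid() over plain values instead of the
    # SessionDB/handler machinery.
    now = now or time.time()
    if int(tinfo['exp']) < now or tinfo['black_listed']:
        return False
    # The handler that minted the token must match the session phase.
    if session_state == 'authz' and handler_name != 'code':
        return False
    if session_state == 'token' and handler_name != 'access_token':
        return False
    return True


print(token_valid({'exp': time.time() + 60, 'black_listed': False}, 'authz', 'code'))   # True
print(token_valid({'exp': time.time() + 60, 'black_listed': False}, 'token', 'code'))   # False
```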
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.revoke_token
def revoke_token(self, token, token_type=''): """ Revokes access token :param token: access token """ if token_type: self.handler[token_type].black_list(token) else: self.handler.black_list(token)
python
[ "def", "revoke_token", "(", "self", ",", "token", ",", "token_type", "=", "''", ")", ":", "if", "token_type", ":", "self", ".", "handler", "[", "token_type", "]", ".", "black_list", "(", "token", ")", "else", ":", "self", ".", "handler", ".", "black_list", "(", "token", ")" ]
Revokes access token :param token: access token
[ "Revokes", "access", "token" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L416-L425
IdentityPython/oidcendpoint
src/oidcendpoint/session.py
SessionDB.revoke_session
def revoke_session(self, sid='', token=''): """ Mark session as revoked but also explicitly revoke all issued tokens :param token: any token connected to the session :param sid: Session identifier """ if not sid: if token: sid = self.handler.sid(token) else: raise ValueError('Need one of "sid" or "token"') for typ in ['access_token', 'refresh_token', 'code']: try: self.revoke_token(self[sid][typ], typ) except KeyError: # If no such token has been issued pass self.update(sid, revoked=True)
python
[ "def", "revoke_session", "(", "self", ",", "sid", "=", "''", ",", "token", "=", "''", ")", ":", "if", "not", "sid", ":", "if", "token", ":", "sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "else", ":", "raise", "ValueError", "(", "'Need one of \"sid\" or \"token\"'", ")", "for", "typ", "in", "[", "'access_token'", ",", "'refresh_token'", ",", "'code'", "]", ":", "try", ":", "self", ".", "revoke_token", "(", "self", "[", "sid", "]", "[", "typ", "]", ",", "typ", ")", "except", "KeyError", ":", "# If no such token has been issued", "pass", "self", ".", "update", "(", "sid", ",", "revoked", "=", "True", ")" ]
Mark session as revoked but also explicitly revoke all issued tokens :param token: any token connected to the session :param sid: Session identifier
[ "Mark", "session", "as", "revoked", "but", "also", "explicitly", "revoke", "all", "issued", "tokens" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/session.py#L435-L454
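revoke_session accepts either a session id or any token bound to the session, and raises ValueError when given neither; token types that were never issued are skipped thanks to the KeyError guard. A small call-pattern sketch, where sdb is a SessionDB instance (not constructed here):

```python
def revoke(sdb, sid=None, token=None):
    # Illustrative wrapper showing the two accepted call forms.
    if sid:
        sdb.revoke_session(sid=sid)          # revoke by session id
    elif token:
        sdb.revoke_session(token=token)      # revoke by any code/access/refresh token
    else:
        # Same contract as the original: one identifier is required.
        raise ValueError('Need one of "sid" or "token"')
```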
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_add_longslit_model.py
rectwv_coeff_add_longslit_model
def rectwv_coeff_add_longslit_model(rectwv_coeff, geometry, debugplot=0): """Compute longslit_model coefficients for RectWaveCoeff object. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for a particular CSU configuration corresponding to a longslit observation. geometry : TBD debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Updated object with longslit_model coefficients computed. """ logger = logging.getLogger(__name__) # check grism and filter grism_name = rectwv_coeff.tags['grism'] logger.info('Grism: ' + grism_name) filter_name = rectwv_coeff.tags['filter'] logger.info('Filter: ' + filter_name) # list of slitlets to be computed list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in rectwv_coeff.missing_slitlets: list_valid_islitlets.remove(idel) if abs(debugplot) >= 10: print('>>> valid slitlet numbers:\n', list_valid_islitlets) # --- # check that the CSU configuration corresponds to longslit csu_bar_slit_center_list = [] for islitlet in list_valid_islitlets: csu_bar_slit_center_list.append( rectwv_coeff.contents[islitlet - 1]['csu_bar_slit_center'] ) if abs(debugplot) >= 10: logger.debug('Checking csu_bar_slit_center values:') summary(np.array(csu_bar_slit_center_list), debug=True) pause_debugplot(debugplot) # --- # polynomial coefficients corresponding to the wavelength calibration # step 0: determine poldeg_refined, checking that it is the same for # all the slitlets poldeg_refined_list = [] for islitlet in list_valid_islitlets: poldeg_refined_list.append( len(rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']) - 1 ) # remove duplicates poldeg_refined_list = list(set(poldeg_refined_list)) if len(poldeg_refined_list) != 1: raise ValueError('Unexpected different poldeg_refined found: ' + str(poldeg_refined_list)) poldeg_refined = poldeg_refined_list[0] # step 1: compute variation of each coefficient as a function of # y0_reference_middle of each slitlet list_poly = [] for i in range(poldeg_refined + 1): xp = [] yp = [] for islitlet in list_valid_islitlets: tmp_dict = rectwv_coeff.contents[islitlet - 1] wpoly_coeff = tmp_dict['wpoly_coeff'] if wpoly_coeff is not None: xp.append(tmp_dict['y0_reference_middle']) yp.append(wpoly_coeff[i]) poly, yres, reject = polfit_residuals_with_sigma_rejection( x=np.array(xp), y=np.array(yp), deg=2, times_sigma_reject=5, xlabel='y0_rectified', ylabel='coeff[' + str(i) + ']', title="Fit to refined wavelength calibration coefficients", geometry=geometry, debugplot=debugplot ) list_poly.append(poly) # step 2: use the variation of each polynomial coefficient with # y0_reference_middle to infer the expected wavelength calibration # polynomial for each rectifified slitlet for islitlet in list_valid_islitlets: tmp_dict = rectwv_coeff.contents[islitlet - 1] y0_reference_middle = tmp_dict['y0_reference_middle'] list_new_coeff = [] for i in range(poldeg_refined + 1): new_coeff = list_poly[i](y0_reference_middle) list_new_coeff.append(new_coeff) tmp_dict['wpoly_coeff_longslit_model'] = list_new_coeff # --- # rectification transformation coefficients aij and bij # step 0: determine order_fmap, checking that it is the same for # all the slitlets order_fmap_list = [] for islitlet in list_valid_islitlets: order_fmap_list.append( rectwv_coeff.contents[islitlet - 1]['ttd_order'] ) # remove duplicates order_fmap_list = list(set(order_fmap_list)) if len(order_fmap_list) != 1: 
raise ValueError('Unexpected different order_fmap found') order_fmap = order_fmap_list[0] # step 1: compute variation of each coefficient as a function of # y0_reference_middle of each slitlet list_poly_ttd_aij = [] list_poly_ttd_bij = [] list_poly_tti_aij = [] list_poly_tti_bij = [] ncoef_ttd = ncoef_fmap(order_fmap) for i in range(ncoef_ttd): xp = [] yp_ttd_aij = [] yp_ttd_bij = [] yp_tti_aij = [] yp_tti_bij = [] for islitlet in list_valid_islitlets: tmp_dict = rectwv_coeff.contents[islitlet - 1] ttd_aij = tmp_dict['ttd_aij'] ttd_bij = tmp_dict['ttd_bij'] tti_aij = tmp_dict['tti_aij'] tti_bij = tmp_dict['tti_bij'] if ttd_aij is not None: xp.append(tmp_dict['y0_reference_middle']) yp_ttd_aij.append(ttd_aij[i]) yp_ttd_bij.append(ttd_bij[i]) yp_tti_aij.append(tti_aij[i]) yp_tti_bij.append(tti_bij[i]) poly, yres, reject = polfit_residuals_with_sigma_rejection( x=np.array(xp), y=np.array(yp_ttd_aij), deg=5, times_sigma_reject=5, xlabel='y0_rectified', ylabel='ttd_aij[' + str(i) + ']', geometry=geometry, debugplot=debugplot ) list_poly_ttd_aij.append(poly) poly, yres, reject = polfit_residuals_with_sigma_rejection( x=np.array(xp), y=np.array(yp_ttd_bij), deg=5, times_sigma_reject=5, xlabel='y0_rectified', ylabel='ttd_bij[' + str(i) + ']', geometry=geometry, debugplot=debugplot ) list_poly_ttd_bij.append(poly) poly, yres, reject = polfit_residuals_with_sigma_rejection( x=np.array(xp), y=np.array(yp_tti_aij), deg=5, times_sigma_reject=5, xlabel='y0_rectified', ylabel='tti_aij[' + str(i) + ']', geometry=geometry, debugplot=debugplot ) list_poly_tti_aij.append(poly) poly, yres, reject = polfit_residuals_with_sigma_rejection( x=np.array(xp), y=np.array(yp_tti_bij), deg=5, times_sigma_reject=5, xlabel='y0_rectified', ylabel='tti_bij[' + str(i) + ']', geometry=geometry, debugplot=debugplot ) list_poly_tti_bij.append(poly) # step 2: use the variation of each coefficient with y0_reference_middle # to infer the expected rectification transformation for each slitlet for islitlet in list_valid_islitlets: tmp_dict = rectwv_coeff.contents[islitlet - 1] y0_reference_middle = tmp_dict['y0_reference_middle'] tmp_dict['ttd_order_longslit_model'] = order_fmap ttd_aij_longslit_model = [] ttd_bij_longslit_model = [] tti_aij_longslit_model = [] tti_bij_longslit_model = [] for i in range(ncoef_ttd): new_coeff = list_poly_ttd_aij[i](y0_reference_middle) ttd_aij_longslit_model.append(new_coeff) new_coeff = list_poly_ttd_bij[i](y0_reference_middle) ttd_bij_longslit_model.append(new_coeff) new_coeff = list_poly_tti_aij[i](y0_reference_middle) tti_aij_longslit_model.append(new_coeff) new_coeff = list_poly_tti_bij[i](y0_reference_middle) tti_bij_longslit_model.append(new_coeff) tmp_dict['ttd_aij_longslit_model'] = ttd_aij_longslit_model tmp_dict['ttd_bij_longslit_model'] = ttd_bij_longslit_model tmp_dict['tti_aij_longslit_model'] = tti_aij_longslit_model tmp_dict['tti_bij_longslit_model'] = tti_bij_longslit_model # --- # update uuid and meta_info in output JSON structure rectwv_coeff.uuid = str(uuid4()) rectwv_coeff.meta_info['creation_date'] = datetime.now().isoformat() # return updated object return rectwv_coeff
python
[ "def", "rectwv_coeff_add_longslit_model", "(", "rectwv_coeff", ",", "geometry", ",", "debugplot", "=", "0", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# check grism and filter", "grism_name", "=", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "logger", ".", "info", "(", "'Grism: '", "+", "grism_name", ")", "filter_name", "=", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "logger", ".", "info", "(", "'Filter: '", "+", "filter_name", ")", "# list of slitlets to be computed", "list_valid_islitlets", "=", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "for", "idel", "in", "rectwv_coeff", ".", "missing_slitlets", ":", "list_valid_islitlets", ".", "remove", "(", "idel", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> valid slitlet numbers:\\n'", ",", "list_valid_islitlets", ")", "# ---", "# check that the CSU configuration corresponds to longslit", "csu_bar_slit_center_list", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "csu_bar_slit_center_list", ".", "append", "(", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'csu_bar_slit_center'", "]", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "logger", ".", "debug", "(", "'Checking csu_bar_slit_center values:'", ")", "summary", "(", "np", ".", "array", "(", "csu_bar_slit_center_list", ")", ",", "debug", "=", "True", ")", "pause_debugplot", "(", "debugplot", ")", "# ---", "# polynomial coefficients corresponding to the wavelength calibration", "# step 0: determine poldeg_refined, checking that it is the same for", "# all the slitlets", "poldeg_refined_list", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "poldeg_refined_list", ".", "append", "(", "len", "(", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'wpoly_coeff'", "]", ")", "-", "1", ")", "# remove duplicates", "poldeg_refined_list", "=", "list", "(", "set", "(", "poldeg_refined_list", ")", ")", "if", "len", "(", "poldeg_refined_list", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Unexpected different poldeg_refined found: '", "+", "str", "(", "poldeg_refined_list", ")", ")", "poldeg_refined", "=", "poldeg_refined_list", "[", "0", "]", "# step 1: compute variation of each coefficient as a function of", "# y0_reference_middle of each slitlet", "list_poly", "=", "[", "]", "for", "i", "in", "range", "(", "poldeg_refined", "+", "1", ")", ":", "xp", "=", "[", "]", "yp", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "wpoly_coeff", "=", "tmp_dict", "[", "'wpoly_coeff'", "]", "if", "wpoly_coeff", "is", "not", "None", ":", "xp", ".", "append", "(", "tmp_dict", "[", "'y0_reference_middle'", "]", ")", "yp", ".", "append", "(", "wpoly_coeff", "[", "i", "]", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp", ")", ",", "deg", "=", "2", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'coeff['", "+", "str", "(", "i", ")", "+", "']'", ",", "title", "=", "\"Fit to refined wavelength calibration coefficients\"", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly", ".", "append", "(", "poly", ")", "# step 2: use the variation of each polynomial coefficient with", "# y0_reference_middle to infer 
the expected wavelength calibration", "# polynomial for each rectifified slitlet", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "y0_reference_middle", "=", "tmp_dict", "[", "'y0_reference_middle'", "]", "list_new_coeff", "=", "[", "]", "for", "i", "in", "range", "(", "poldeg_refined", "+", "1", ")", ":", "new_coeff", "=", "list_poly", "[", "i", "]", "(", "y0_reference_middle", ")", "list_new_coeff", ".", "append", "(", "new_coeff", ")", "tmp_dict", "[", "'wpoly_coeff_longslit_model'", "]", "=", "list_new_coeff", "# ---", "# rectification transformation coefficients aij and bij", "# step 0: determine order_fmap, checking that it is the same for", "# all the slitlets", "order_fmap_list", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "order_fmap_list", ".", "append", "(", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'ttd_order'", "]", ")", "# remove duplicates", "order_fmap_list", "=", "list", "(", "set", "(", "order_fmap_list", ")", ")", "if", "len", "(", "order_fmap_list", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Unexpected different order_fmap found'", ")", "order_fmap", "=", "order_fmap_list", "[", "0", "]", "# step 1: compute variation of each coefficient as a function of", "# y0_reference_middle of each slitlet", "list_poly_ttd_aij", "=", "[", "]", "list_poly_ttd_bij", "=", "[", "]", "list_poly_tti_aij", "=", "[", "]", "list_poly_tti_bij", "=", "[", "]", "ncoef_ttd", "=", "ncoef_fmap", "(", "order_fmap", ")", "for", "i", "in", "range", "(", "ncoef_ttd", ")", ":", "xp", "=", "[", "]", "yp_ttd_aij", "=", "[", "]", "yp_ttd_bij", "=", "[", "]", "yp_tti_aij", "=", "[", "]", "yp_tti_bij", "=", "[", "]", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "ttd_aij", "=", "tmp_dict", "[", "'ttd_aij'", "]", "ttd_bij", "=", "tmp_dict", "[", "'ttd_bij'", "]", "tti_aij", "=", "tmp_dict", "[", "'tti_aij'", "]", "tti_bij", "=", "tmp_dict", "[", "'tti_bij'", "]", "if", "ttd_aij", "is", "not", "None", ":", "xp", ".", "append", "(", "tmp_dict", "[", "'y0_reference_middle'", "]", ")", "yp_ttd_aij", ".", "append", "(", "ttd_aij", "[", "i", "]", ")", "yp_ttd_bij", ".", "append", "(", "ttd_bij", "[", "i", "]", ")", "yp_tti_aij", ".", "append", "(", "tti_aij", "[", "i", "]", ")", "yp_tti_bij", ".", "append", "(", "tti_bij", "[", "i", "]", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_ttd_aij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'ttd_aij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_ttd_aij", ".", "append", "(", "poly", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_ttd_bij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'ttd_bij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_ttd_bij", ".", "append", "(", "poly", ")", "poly", ",", "yres", ",", "reject", "=", 
"polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_tti_aij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'tti_aij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_tti_aij", ".", "append", "(", "poly", ")", "poly", ",", "yres", ",", "reject", "=", "polfit_residuals_with_sigma_rejection", "(", "x", "=", "np", ".", "array", "(", "xp", ")", ",", "y", "=", "np", ".", "array", "(", "yp_tti_bij", ")", ",", "deg", "=", "5", ",", "times_sigma_reject", "=", "5", ",", "xlabel", "=", "'y0_rectified'", ",", "ylabel", "=", "'tti_bij['", "+", "str", "(", "i", ")", "+", "']'", ",", "geometry", "=", "geometry", ",", "debugplot", "=", "debugplot", ")", "list_poly_tti_bij", ".", "append", "(", "poly", ")", "# step 2: use the variation of each coefficient with y0_reference_middle", "# to infer the expected rectification transformation for each slitlet", "for", "islitlet", "in", "list_valid_islitlets", ":", "tmp_dict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "y0_reference_middle", "=", "tmp_dict", "[", "'y0_reference_middle'", "]", "tmp_dict", "[", "'ttd_order_longslit_model'", "]", "=", "order_fmap", "ttd_aij_longslit_model", "=", "[", "]", "ttd_bij_longslit_model", "=", "[", "]", "tti_aij_longslit_model", "=", "[", "]", "tti_bij_longslit_model", "=", "[", "]", "for", "i", "in", "range", "(", "ncoef_ttd", ")", ":", "new_coeff", "=", "list_poly_ttd_aij", "[", "i", "]", "(", "y0_reference_middle", ")", "ttd_aij_longslit_model", ".", "append", "(", "new_coeff", ")", "new_coeff", "=", "list_poly_ttd_bij", "[", "i", "]", "(", "y0_reference_middle", ")", "ttd_bij_longslit_model", ".", "append", "(", "new_coeff", ")", "new_coeff", "=", "list_poly_tti_aij", "[", "i", "]", "(", "y0_reference_middle", ")", "tti_aij_longslit_model", ".", "append", "(", "new_coeff", ")", "new_coeff", "=", "list_poly_tti_bij", "[", "i", "]", "(", "y0_reference_middle", ")", "tti_bij_longslit_model", ".", "append", "(", "new_coeff", ")", "tmp_dict", "[", "'ttd_aij_longslit_model'", "]", "=", "ttd_aij_longslit_model", "tmp_dict", "[", "'ttd_bij_longslit_model'", "]", "=", "ttd_bij_longslit_model", "tmp_dict", "[", "'tti_aij_longslit_model'", "]", "=", "tti_aij_longslit_model", "tmp_dict", "[", "'tti_bij_longslit_model'", "]", "=", "tti_bij_longslit_model", "# ---", "# update uuid and meta_info in output JSON structure", "rectwv_coeff", ".", "uuid", "=", "str", "(", "uuid4", "(", ")", ")", "rectwv_coeff", ".", "meta_info", "[", "'creation_date'", "]", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "# return updated object", "return", "rectwv_coeff" ]
Compute longslit_model coefficients for RectWaveCoeff object. Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for a particular CSU configuration corresponding to a longslit observation. geometry : TBD debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Updated object with longslit_model coefficients computed.
[ "Compute", "longslit_model", "coefficients", "for", "RectWaveCoeff", "object", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_add_longslit_model.py#L44-L266
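The longslit model above is built by fitting, for each polynomial coefficient, its smooth variation with the slitlet's y0_reference_middle, and then evaluating that fit at every slitlet position. Below is a minimal numpy sketch of that idea with fabricated data; np.polynomial.Polynomial.fit stands in for numina's polfit_residuals_with_sigma_rejection, which additionally performs iterative sigma rejection of outliers.

```python
import numpy as np

rng = np.random.default_rng(0)
y0 = np.linspace(50, 2000, 55)                                   # one value per slitlet
coeff_i = 1.0e-3 * y0 + 2.5 + rng.normal(0, 0.05, y0.size)       # fake wpoly_coeff[i]

# Step 1: model the variation of this coefficient across the detector
# (degree 2, as used for the wavelength-calibration coefficients above).
poly = np.polynomial.Polynomial.fit(y0, coeff_i, deg=2)

# Step 2: evaluate the model at each slitlet position to obtain the
# longslit-model value of the same coefficient.
coeff_i_longslit_model = poly(y0)
print(np.max(np.abs(coeff_i_longslit_model - (1.0e-3 * y0 + 2.5))))  # small residual
```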
IdentityPython/oidcendpoint
src/oidcendpoint/user_authn/authn_context.py
pick_auth
def pick_auth(endpoint_context, areq, all=False): """ Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref """ acrs = [] try: if len(endpoint_context.authn_broker) == 1: return endpoint_context.authn_broker.default() if "acr_values" in areq: if not isinstance(areq["acr_values"], list): areq["acr_values"] = [areq["acr_values"]] acrs = areq["acr_values"] else: # same as any try: acrs = areq["claims"]["id_token"]["acr"]["values"] except KeyError: try: _ith = areq[verified_claim_name("id_token_hint")] except KeyError: try: _hint = areq['login_hint'] except KeyError: pass else: if endpoint_context.login_hint2acrs: acrs = endpoint_context.login_hint2acrs(_hint) else: try: acrs = [_ith['acr']] except KeyError: pass if not acrs: return endpoint_context.authn_broker.default() for acr in acrs: res = endpoint_context.authn_broker.pick(acr) logger.debug("Picked AuthN broker for ACR %s: %s" % ( str(acr), str(res))) if res: if all: return res else: # Return the first guess by pick. return res[0] except KeyError as exc: logger.debug( "An error occurred while picking the authN broker: %s" % str(exc)) return None
python
[ "def", "pick_auth", "(", "endpoint_context", ",", "areq", ",", "all", "=", "False", ")", ":", "acrs", "=", "[", "]", "try", ":", "if", "len", "(", "endpoint_context", ".", "authn_broker", ")", "==", "1", ":", "return", "endpoint_context", ".", "authn_broker", ".", "default", "(", ")", "if", "\"acr_values\"", "in", "areq", ":", "if", "not", "isinstance", "(", "areq", "[", "\"acr_values\"", "]", ",", "list", ")", ":", "areq", "[", "\"acr_values\"", "]", "=", "[", "areq", "[", "\"acr_values\"", "]", "]", "acrs", "=", "areq", "[", "\"acr_values\"", "]", "else", ":", "# same as any", "try", ":", "acrs", "=", "areq", "[", "\"claims\"", "]", "[", "\"id_token\"", "]", "[", "\"acr\"", "]", "[", "\"values\"", "]", "except", "KeyError", ":", "try", ":", "_ith", "=", "areq", "[", "verified_claim_name", "(", "\"id_token_hint\"", ")", "]", "except", "KeyError", ":", "try", ":", "_hint", "=", "areq", "[", "'login_hint'", "]", "except", "KeyError", ":", "pass", "else", ":", "if", "endpoint_context", ".", "login_hint2acrs", ":", "acrs", "=", "endpoint_context", ".", "login_hint2acrs", "(", "_hint", ")", "else", ":", "try", ":", "acrs", "=", "[", "_ith", "[", "'acr'", "]", "]", "except", "KeyError", ":", "pass", "if", "not", "acrs", ":", "return", "endpoint_context", ".", "authn_broker", ".", "default", "(", ")", "for", "acr", "in", "acrs", ":", "res", "=", "endpoint_context", ".", "authn_broker", ".", "pick", "(", "acr", ")", "logger", ".", "debug", "(", "\"Picked AuthN broker for ACR %s: %s\"", "%", "(", "str", "(", "acr", ")", ",", "str", "(", "res", ")", ")", ")", "if", "res", ":", "if", "all", ":", "return", "res", "else", ":", "# Return the first guess by pick.", "return", "res", "[", "0", "]", "except", "KeyError", "as", "exc", ":", "logger", ".", "debug", "(", "\"An error occurred while picking the authN broker: %s\"", "%", "str", "(", "exc", ")", ")", "return", "None" ]
Pick authentication method :param areq: AuthorizationRequest instance :return: A dictionary with the authentication method and its authn class ref
[ "Pick", "authentication", "method" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_authn/authn_context.py#L117-L172
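A hedged sketch of how pick_auth, catalogued above, might be driven; building the EndpointContext is omitted (it only needs to expose authn_broker and, optionally, login_hint2acrs), and the client_id, redirect_uri and ACR value are illustrative:

from oidcmsg.oidc import AuthorizationRequest
from oidcendpoint.user_authn.authn_context import pick_auth

def choose_authn_method(endpoint_context):
    areq = AuthorizationRequest(
        client_id='client_1',                      # illustrative client
        redirect_uri='https://rp.example.org/cb',
        response_type='code',
        scope='openid',
        acr_values=['urn:example:acr:mfa'],        # illustrative ACR
    )
    # default: the first matching entry found by the broker
    first = pick_auth(endpoint_context, areq)
    # all=True: every entry matching the requested ACR values
    every = pick_auth(endpoint_context, areq, all=True)
    return first, every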
IdentityPython/oidcendpoint
src/oidcendpoint/user_authn/authn_context.py
AuthnBroker.get_method
def get_method(self, cls_name): """ Generator that returns all registered authenticators based on a specific authentication class. :param acr: Authentication Class :return: generator """ for id, spec in self.db.items(): if spec["method"].__class__.__name__ == cls_name: yield spec["method"]
python
def get_method(self, cls_name): """ Generator that returns all registered authenticators based on a specific authentication class. :param acr: Authentication Class :return: generator """ for id, spec in self.db.items(): if spec["method"].__class__.__name__ == cls_name: yield spec["method"]
[ "def", "get_method", "(", "self", ",", "cls_name", ")", ":", "for", "id", ",", "spec", "in", "self", ".", "db", ".", "items", "(", ")", ":", "if", "spec", "[", "\"method\"", "]", ".", "__class__", ".", "__name__", "==", "cls_name", ":", "yield", "spec", "[", "\"method\"", "]" ]
Generator that returns all registered authenticators based on a specific authentication class. :param acr: Authentication Class :return: generator
[ "Generator", "that", "returns", "all", "registered", "authenticators", "based", "on", "a", "specific", "authentication", "class", "." ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_authn/authn_context.py#L63-L73
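A brief sketch of iterating over get_method from the record above; how the broker was populated and the authenticator class name are illustrative assumptions:

from oidcendpoint.user_authn.authn_context import AuthnBroker

def methods_of_class(broker: AuthnBroker, cls_name='UserPassJinja2'):
    # get_method is a generator, so materialise it to inspect the matches;
    # 'UserPassJinja2' is only an example of a class that might be registered
    return list(broker.get_method(cls_name))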
IdentityPython/oidcendpoint
src/oidcendpoint/user_authn/authn_context.py
AuthnBroker.pick
def pick(self, acr=None): """ Given the authentication context find zero or more authn methods that could be used. :param acr: The authentication class reference requested :return: An URL """ if acr is None: # Anything else doesn't make sense return self.db.values() else: return self._pick_by_class_ref(acr)
python
def pick(self, acr=None): """ Given the authentication context find zero or more authn methods that could be used. :param acr: The authentication class reference requested :return: An URL """ if acr is None: # Anything else doesn't make sense return self.db.values() else: return self._pick_by_class_ref(acr)
[ "def", "pick", "(", "self", ",", "acr", "=", "None", ")", ":", "if", "acr", "is", "None", ":", "# Anything else doesn't make sense", "return", "self", ".", "db", ".", "values", "(", ")", "else", ":", "return", "self", ".", "_pick_by_class_ref", "(", "acr", ")" ]
Given the authentication context find zero or more authn methods that could be used. :param acr: The authentication class reference requested :return: An URL
[ "Given", "the", "authentication", "context", "find", "zero", "or", "more", "authn", "methods", "that", "could", "be", "used", "." ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_authn/authn_context.py#L78-L91
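A short sketch of the two ways pick, shown above, is typically called; the ACR value is illustrative and the broker is assumed to be an already populated AuthnBroker:

def candidate_methods(broker, acr=None):
    # acr=None: every registered entry is returned;
    # with an ACR string the broker filters by authentication class reference
    return list(broker.pick(acr))

# e.g. candidate_methods(broker)                        -> all entries
#      candidate_methods(broker, 'urn:example:acr:mfa') -> filtered entries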
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.setup
def setup(self, path=None): """ Look for SExtractor program ('sextractor', or 'sex'). If a full path is provided, only this path is checked. Raise a SExtractorException if it failed. Return program and version if it succeed. """ # -- Finding sextractor program and its version # first look for 'sextractor', then 'sex' candidates = ['sextractor', 'sex'] if (path): candidates = [path] selected = None for candidate in candidates: try: p = subprocess.Popen(candidate, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) (_out_err, _in) = (p.stdout, p.stdin) versionline = _out_err.read() if (versionline.find("SExtractor") != -1): selected = candidate break except IOError: continue if not(selected): raise SExtractorException( """ Cannot find SExtractor program. Check your PATH, or provide the SExtractor program path in the constructor. """ ) _program = selected # print versionline _version_match = re.search("[Vv]ersion ([0-9\.])+", versionline) if not _version_match: raise SExtractorException( "Cannot determine SExtractor version." ) _version = _version_match.group()[8:] if not _version: raise SExtractorException( "Cannot determine SExtractor version." ) # print "Use " + self.program + " [" + self.version + "]" return _program, _version
python
def setup(self, path=None): """ Look for SExtractor program ('sextractor', or 'sex'). If a full path is provided, only this path is checked. Raise a SExtractorException if it failed. Return program and version if it succeed. """ # -- Finding sextractor program and its version # first look for 'sextractor', then 'sex' candidates = ['sextractor', 'sex'] if (path): candidates = [path] selected = None for candidate in candidates: try: p = subprocess.Popen(candidate, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) (_out_err, _in) = (p.stdout, p.stdin) versionline = _out_err.read() if (versionline.find("SExtractor") != -1): selected = candidate break except IOError: continue if not(selected): raise SExtractorException( """ Cannot find SExtractor program. Check your PATH, or provide the SExtractor program path in the constructor. """ ) _program = selected # print versionline _version_match = re.search("[Vv]ersion ([0-9\.])+", versionline) if not _version_match: raise SExtractorException( "Cannot determine SExtractor version." ) _version = _version_match.group()[8:] if not _version: raise SExtractorException( "Cannot determine SExtractor version." ) # print "Use " + self.program + " [" + self.version + "]" return _program, _version
[ "def", "setup", "(", "self", ",", "path", "=", "None", ")", ":", "# -- Finding sextractor program and its version", "# first look for 'sextractor', then 'sex'", "candidates", "=", "[", "'sextractor'", ",", "'sex'", "]", "if", "(", "path", ")", ":", "candidates", "=", "[", "path", "]", "selected", "=", "None", "for", "candidate", "in", "candidates", ":", "try", ":", "p", "=", "subprocess", ".", "Popen", "(", "candidate", ",", "shell", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "close_fds", "=", "True", ")", "(", "_out_err", ",", "_in", ")", "=", "(", "p", ".", "stdout", ",", "p", ".", "stdin", ")", "versionline", "=", "_out_err", ".", "read", "(", ")", "if", "(", "versionline", ".", "find", "(", "\"SExtractor\"", ")", "!=", "-", "1", ")", ":", "selected", "=", "candidate", "break", "except", "IOError", ":", "continue", "if", "not", "(", "selected", ")", ":", "raise", "SExtractorException", "(", "\"\"\"\n Cannot find SExtractor program. Check your PATH,\n or provide the SExtractor program path in the constructor.\n \"\"\"", ")", "_program", "=", "selected", "# print versionline", "_version_match", "=", "re", ".", "search", "(", "\"[Vv]ersion ([0-9\\.])+\"", ",", "versionline", ")", "if", "not", "_version_match", ":", "raise", "SExtractorException", "(", "\"Cannot determine SExtractor version.\"", ")", "_version", "=", "_version_match", ".", "group", "(", ")", "[", "8", ":", "]", "if", "not", "_version", ":", "raise", "SExtractorException", "(", "\"Cannot determine SExtractor version.\"", ")", "# print \"Use \" + self.program + \" [\" + self.version + \"]\"", "return", "_program", ",", "_version" ]
Look for SExtractor program ('sextractor', or 'sex'). If a full path is provided, only this path is checked. Raise a SExtractorException if it fails. Return program and version if it succeeds.
[ "Look", "for", "SExtractor", "program", "(", "sextractor", "or", "sex", ")", ".", "If", "a", "full", "path", "is", "provided", "only", "this", "path", "is", "checked", ".", "Raise", "a", "SExtractorException", "if", "it", "fails", ".", "Return", "program", "and", "version", "if", "it", "succeeds", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L419-L476
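A hedged sketch of locating the SExtractor binary with the setup method catalogued above; it assumes the SExtractor class can be instantiated without arguments, and the explicit binary path is illustrative:

from emirdrp.util.sextractor import SExtractor

sex = SExtractor()                        # assumed no-argument constructor
program, version = sex.setup()            # searches PATH for 'sextractor'/'sex'
# or pin a specific binary (path is illustrative):
# program, version = sex.setup(path='/usr/local/bin/sex')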
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.update_config
def update_config(self): """ Update the configuration files according to the current in-memory SExtractor configuration. """ # -- Write filter configuration file # First check the filter itself filter = self.config['FILTER_MASK'] rows = len(filter) cols = len(filter[0]) # May raise ValueError, OK filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w') filter_f.write("CONV NORM\n") filter_f.write("# %dx%d Generated from sextractor.py module.\n" % (rows, cols)) for row in filter: filter_f.write(" ".join(map(repr, row))) filter_f.write("\n") filter_f.close() # -- Write parameter list file parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w') for parameter in self.config['PARAMETERS_LIST']: print(parameter, file=parameters_f) parameters_f.close() # -- Write NNW configuration file nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w') nnw_f.write(nnw_config) nnw_f.close() # -- Write main configuration file main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w') for key in self.config.keys(): if (key in SExtractor._SE_config_special_keys): continue if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value value = " ".join(map(str, self.config[key])) else: value = str(self.config[key]) print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f) main_f.close()
python
def update_config(self): """ Update the configuration files according to the current in-memory SExtractor configuration. """ # -- Write filter configuration file # First check the filter itself filter = self.config['FILTER_MASK'] rows = len(filter) cols = len(filter[0]) # May raise ValueError, OK filter_f = __builtin__.open(self.config['FILTER_NAME'], 'w') filter_f.write("CONV NORM\n") filter_f.write("# %dx%d Generated from sextractor.py module.\n" % (rows, cols)) for row in filter: filter_f.write(" ".join(map(repr, row))) filter_f.write("\n") filter_f.close() # -- Write parameter list file parameters_f = __builtin__.open(self.config['PARAMETERS_NAME'], 'w') for parameter in self.config['PARAMETERS_LIST']: print(parameter, file=parameters_f) parameters_f.close() # -- Write NNW configuration file nnw_f = __builtin__.open(self.config['STARNNW_NAME'], 'w') nnw_f.write(nnw_config) nnw_f.close() # -- Write main configuration file main_f = __builtin__.open(self.config['CONFIG_FILE'], 'w') for key in self.config.keys(): if (key in SExtractor._SE_config_special_keys): continue if (key == "PHOT_AUTOPARAMS"): # tuple instead of a single value value = " ".join(map(str, self.config[key])) else: value = str(self.config[key]) print(("%-16s %-16s # %s" % (key, value, SExtractor._SE_config[key]['comment'])), file=main_f) main_f.close()
[ "def", "update_config", "(", "self", ")", ":", "# -- Write filter configuration file", "# First check the filter itself", "filter", "=", "self", ".", "config", "[", "'FILTER_MASK'", "]", "rows", "=", "len", "(", "filter", ")", "cols", "=", "len", "(", "filter", "[", "0", "]", ")", "# May raise ValueError, OK", "filter_f", "=", "__builtin__", ".", "open", "(", "self", ".", "config", "[", "'FILTER_NAME'", "]", ",", "'w'", ")", "filter_f", ".", "write", "(", "\"CONV NORM\\n\"", ")", "filter_f", ".", "write", "(", "\"# %dx%d Generated from sextractor.py module.\\n\"", "%", "(", "rows", ",", "cols", ")", ")", "for", "row", "in", "filter", ":", "filter_f", ".", "write", "(", "\" \"", ".", "join", "(", "map", "(", "repr", ",", "row", ")", ")", ")", "filter_f", ".", "write", "(", "\"\\n\"", ")", "filter_f", ".", "close", "(", ")", "# -- Write parameter list file", "parameters_f", "=", "__builtin__", ".", "open", "(", "self", ".", "config", "[", "'PARAMETERS_NAME'", "]", ",", "'w'", ")", "for", "parameter", "in", "self", ".", "config", "[", "'PARAMETERS_LIST'", "]", ":", "print", "(", "parameter", ",", "file", "=", "parameters_f", ")", "parameters_f", ".", "close", "(", ")", "# -- Write NNW configuration file", "nnw_f", "=", "__builtin__", ".", "open", "(", "self", ".", "config", "[", "'STARNNW_NAME'", "]", ",", "'w'", ")", "nnw_f", ".", "write", "(", "nnw_config", ")", "nnw_f", ".", "close", "(", ")", "# -- Write main configuration file", "main_f", "=", "__builtin__", ".", "open", "(", "self", ".", "config", "[", "'CONFIG_FILE'", "]", ",", "'w'", ")", "for", "key", "in", "self", ".", "config", ".", "keys", "(", ")", ":", "if", "(", "key", "in", "SExtractor", ".", "_SE_config_special_keys", ")", ":", "continue", "if", "(", "key", "==", "\"PHOT_AUTOPARAMS\"", ")", ":", "# tuple instead of a single value", "value", "=", "\" \"", ".", "join", "(", "map", "(", "str", ",", "self", ".", "config", "[", "key", "]", ")", ")", "else", ":", "value", "=", "str", "(", "self", ".", "config", "[", "key", "]", ")", "print", "(", "(", "\"%-16s %-16s # %s\"", "%", "(", "key", ",", "value", ",", "SExtractor", ".", "_SE_config", "[", "key", "]", "[", "'comment'", "]", ")", ")", ",", "file", "=", "main_f", ")", "main_f", ".", "close", "(", ")" ]
Update the configuration files according to the current in-memory SExtractor configuration.
[ "Update", "the", "configuration", "files", "according", "to", "the", "current", "in", "-", "memory", "SExtractor", "configuration", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L478-L531
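A sketch of adjusting the in-memory configuration before regenerating the files with update_config, shown above; it assumes self.config is a pre-populated dict (as the method's key accesses suggest), and the catalogue name and parameter list are illustrative:

from emirdrp.util.sextractor import SExtractor

sex = SExtractor()
sex.config['CATALOG_NAME'] = 'objects.cat'                     # illustrative
sex.config['PARAMETERS_LIST'] = ['NUMBER', 'X_IMAGE', 'Y_IMAGE', 'FLUX_AUTO']
sex.update_config()  # writes the filter, parameter, NNW and main config files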
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.run
def run(self, file, updateconfig=True, clean=False, path=None): """ Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates. """ if updateconfig: self.update_config() # Try to find SExtractor program # This will raise an exception if it failed self.program, self.version = self.setup(path) commandline = ( self.program + " -c " + self.config['CONFIG_FILE'] + " " + file) # print commandline rcode = os.system(commandline) if (rcode): raise SExtractorException( "SExtractor command [%s] failed." % commandline ) if clean: self.clean()
python
def run(self, file, updateconfig=True, clean=False, path=None): """ Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates. """ if updateconfig: self.update_config() # Try to find SExtractor program # This will raise an exception if it failed self.program, self.version = self.setup(path) commandline = ( self.program + " -c " + self.config['CONFIG_FILE'] + " " + file) # print commandline rcode = os.system(commandline) if (rcode): raise SExtractorException( "SExtractor command [%s] failed." % commandline ) if clean: self.clean()
[ "def", "run", "(", "self", ",", "file", ",", "updateconfig", "=", "True", ",", "clean", "=", "False", ",", "path", "=", "None", ")", ":", "if", "updateconfig", ":", "self", ".", "update_config", "(", ")", "# Try to find SExtractor program", "# This will raise an exception if it failed", "self", ".", "program", ",", "self", ".", "version", "=", "self", ".", "setup", "(", "path", ")", "commandline", "=", "(", "self", ".", "program", "+", "\" -c \"", "+", "self", ".", "config", "[", "'CONFIG_FILE'", "]", "+", "\" \"", "+", "file", ")", "# print commandline", "rcode", "=", "os", ".", "system", "(", "commandline", ")", "if", "(", "rcode", ")", ":", "raise", "SExtractorException", "(", "\"SExtractor command [%s] failed.\"", "%", "commandline", ")", "if", "clean", ":", "self", ".", "clean", "(", ")" ]
Run SExtractor. If updateconfig is True (default), the configuration files will be updated before running SExtractor. If clean is True (default: False), configuration files (if any) will be deleted after SExtractor terminates.
[ "Run", "SExtractor", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L533-L565
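A sketch of a full extraction using the run method above; the FITS file name is illustrative and, apart from clean, the keyword values mirror the documented defaults:

from emirdrp.util.sextractor import SExtractor

sex = SExtractor()
# update_config() is called implicitly because updateconfig=True;
# clean=True removes the generated configuration files afterwards
sex.run('science_image.fits', updateconfig=True, clean=True)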
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.catalog
def catalog(self): """ Read the output catalog produced by the last SExtractor run. Output is a list of dictionaries, with a dictionary for each star: {'param1': value, 'param2': value, ...}. """ output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r') c = output_f.read() output_f.close() return c
python
def catalog(self): """ Read the output catalog produced by the last SExtractor run. Output is a list of dictionaries, with a dictionary for each star: {'param1': value, 'param2': value, ...}. """ output_f = SExtractorfile(self.config['CATALOG_NAME'], 'r') c = output_f.read() output_f.close() return c
[ "def", "catalog", "(", "self", ")", ":", "output_f", "=", "SExtractorfile", "(", "self", ".", "config", "[", "'CATALOG_NAME'", "]", ",", "'r'", ")", "c", "=", "output_f", ".", "read", "(", ")", "output_f", ".", "close", "(", ")", "return", "c" ]
Read the output catalog produced by the last SExtractor run. Output is a list of dictionaries, with a dictionary for each star: {'param1': value, 'param2': value, ...}.
[ "Read", "the", "output", "catalog", "produced", "by", "the", "last", "SExtractor", "run", ".", "Output", "is", "a", "list", "of", "dictionaries", "with", "a", "dictionary", "for", "each", "star", ":", "{", "param1", ":", "value", "param2", ":", "value", "...", "}", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L567-L578
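A sketch of reading back the results with catalog, shown above; the available keys depend on the PARAMETERS_LIST used for the run, so the ones returned here are illustrative:

def positions_and_fluxes(sex):
    # sex: a SExtractor instance on which run() has already been called
    stars = sex.catalog()      # list of dicts, one per detected source
    return [(s['X_IMAGE'], s['Y_IMAGE'], s['FLUX_AUTO']) for s in stars]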
guaix-ucm/pyemir
emirdrp/util/sextractor.py
SExtractor.clean
def clean(self, config=True, catalog=False, check=False): """ Remove the generated SExtractor files (if any). If config is True, remove generated configuration files. If catalog is True, remove the output catalog. If check is True, remove output check image. """ try: if (config): os.unlink(self.config['FILTER_NAME']) os.unlink(self.config['PARAMETERS_NAME']) os.unlink(self.config['STARNNW_NAME']) os.unlink(self.config['CONFIG_FILE']) if (catalog): os.unlink(self.config['CATALOG_NAME']) if (check): os.unlink(self.config['CHECKIMAGE_NAME']) except OSError: pass
python
def clean(self, config=True, catalog=False, check=False): """ Remove the generated SExtractor files (if any). If config is True, remove generated configuration files. If catalog is True, remove the output catalog. If check is True, remove output check image. """ try: if (config): os.unlink(self.config['FILTER_NAME']) os.unlink(self.config['PARAMETERS_NAME']) os.unlink(self.config['STARNNW_NAME']) os.unlink(self.config['CONFIG_FILE']) if (catalog): os.unlink(self.config['CATALOG_NAME']) if (check): os.unlink(self.config['CHECKIMAGE_NAME']) except OSError: pass
[ "def", "clean", "(", "self", ",", "config", "=", "True", ",", "catalog", "=", "False", ",", "check", "=", "False", ")", ":", "try", ":", "if", "(", "config", ")", ":", "os", ".", "unlink", "(", "self", ".", "config", "[", "'FILTER_NAME'", "]", ")", "os", ".", "unlink", "(", "self", ".", "config", "[", "'PARAMETERS_NAME'", "]", ")", "os", ".", "unlink", "(", "self", ".", "config", "[", "'STARNNW_NAME'", "]", ")", "os", ".", "unlink", "(", "self", ".", "config", "[", "'CONFIG_FILE'", "]", ")", "if", "(", "catalog", ")", ":", "os", ".", "unlink", "(", "self", ".", "config", "[", "'CATALOG_NAME'", "]", ")", "if", "(", "check", ")", ":", "os", ".", "unlink", "(", "self", ".", "config", "[", "'CHECKIMAGE_NAME'", "]", ")", "except", "OSError", ":", "pass" ]
Remove the generated SExtractor files (if any). If config is True, remove generated configuration files. If catalog is True, remove the output catalog. If check is True, remove output check image.
[ "Remove", "the", "generated", "SExtractor", "files", "(", "if", "any", ")", ".", "If", "config", "is", "True", "remove", "generated", "configuration", "files", ".", "If", "catalog", "is", "True", "remove", "the", "output", "catalog", ".", "If", "check", "is", "True", "remove", "output", "check", "image", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/util/sextractor.py#L580-L600
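A sketch of tidying up with clean, shown above; which files exist depends on the preceding run, and missing files are silently ignored by the method:

def remove_sextractor_files(sex):
    # drop the generated configuration files and the output catalogue,
    # but keep any check image
    sex.clean(config=True, catalog=True, check=False)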
guaix-ucm/pyemir
emirdrp/processing/wavecal/refine_rectwv_coeff.py
refine_rectwv_coeff
def refine_rectwv_coeff(input_image, rectwv_coeff, refine_wavecalib_mode, minimum_slitlet_width_mm, maximum_slitlet_width_mm, save_intermediate_results=False, debugplot=0): """Refine RectWaveCoeff object using a catalogue of lines One and only one among refine_with_oh_lines_mode and refine_with_arc_lines must be different from zero. Parameters ---------- input_image : HDUList object Input 2D image. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. refine_wavecalib_mode : int Integer, indicating the type of refinement: 0 : no refinement 1 : apply the same global offset to all the slitlets (using ARC lines) 2 : apply individual offset to each slitlet (using ARC lines) 11 : apply the same global offset to all the slitlets (using OH lines) 12 : apply individual offset to each slitlet (using OH lines) minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. save_intermediate_results : bool If True, save plots in PDF files debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- refined_rectwv_coeff : RectWaveCoeff instance Refined rectification and wavelength calibration coefficients for the particular CSU configuration. expected_cat_image : HDUList object Output 2D image with the expected catalogued lines. """ logger = logging.getLogger(__name__) if save_intermediate_results: from matplotlib.backends.backend_pdf import PdfPages pdf = PdfPages('crosscorrelation.pdf') else: pdf = None # image header main_header = input_image[0].header filter_name = main_header['filter'] grism_name = main_header['grism'] # protections if refine_wavecalib_mode not in [1, 2, 11, 12]: logger.error('Wavelength calibration refinemente mode={}'. 
format( refine_wavecalib_mode )) raise ValueError("Invalid wavelength calibration refinement mode") # read tabulated lines if refine_wavecalib_mode in [1, 2]: # ARC lines if grism_name == 'LR': catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat' else: catlines_file = 'lines_argon_neon_xenon_empirical.dat' dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file) arc_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(arc_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = catlines[:, 0] catlines_all_flux = catlines[:, 1] mode = refine_wavecalib_mode elif refine_wavecalib_mode in [11, 12]: # OH lines dumdata = pkgutil.get_data( 'emirdrp.instrument.configs', 'Oliva_etal_2013.dat' ) oh_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(oh_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0])) catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2])) mode = refine_wavecalib_mode - 10 else: raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode)) # initialize output refined_rectwv_coeff = deepcopy(rectwv_coeff) logger.info('Computing median spectrum') # compute median spectrum and normalize it sp_median = median_slitlets_rectified( input_image, mode=2, minimum_slitlet_width_mm=minimum_slitlet_width_mm, maximum_slitlet_width_mm=maximum_slitlet_width_mm )[0].data sp_median /= sp_median.max() # determine minimum and maximum useful wavelength jmin, jmax = find_pix_borders(sp_median, 0) naxis1 = main_header['naxis1'] naxis2 = main_header['naxis2'] crpix1 = main_header['crpix1'] crval1 = main_header['crval1'] cdelt1 = main_header['cdelt1'] xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1 if grism_name == 'LR': wv_parameters = set_wv_parameters(filter_name, grism_name) wave_min = wv_parameters['wvmin_useful'] wave_max = wv_parameters['wvmax_useful'] else: wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1 wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1 logger.info('Setting wave_min to {}'.format(wave_min)) logger.info('Setting wave_max to {}'.format(wave_max)) # extract subset of catalogue lines within current wavelength range lok1 = catlines_all_wave >= wave_min lok2 = catlines_all_wave <= wave_max catlines_reference_wave = catlines_all_wave[lok1*lok2] catlines_reference_flux = catlines_all_flux[lok1*lok2] catlines_reference_flux /= catlines_reference_flux.max() # estimate sigma to broaden catalogue lines csu_config = CsuConfiguration.define_from_header(main_header) # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) # remove missing slitlets if len(refined_rectwv_coeff.missing_slitlets) > 0: for iremove in refined_rectwv_coeff.missing_slitlets: if iremove in list_useful_slitlets: list_useful_slitlets.remove(iremove) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] logger.info('list of useful slitlets: {}'.format( list_useful_slitlets)) logger.info('list of not useful slitlets: {}'.format( list_not_useful_slitlets)) tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet) for islitlet in list_useful_slitlets]) widths_summary = summary(tempwidths) logger.info('Statistics of useful slitlet widths (mm):') logger.info('- npoints....: {0:d}'.format(widths_summary['npoints'])) logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean'])) 
logger.info('- median.....: {0:7.3f}'.format(widths_summary['median'])) logger.info('- std........: {0:7.3f}'.format(widths_summary['std'])) logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std'])) # empirical transformation of slit width (mm) to pixels sigma_broadening = cdelt1 * widths_summary['median'] # convolve location of catalogue lines to generate expected spectrum xwave_reference, sp_reference = convolve_comb_lines( catlines_reference_wave, catlines_reference_flux, sigma_broadening, crpix1, crval1, cdelt1, naxis1 ) sp_reference /= sp_reference.max() # generate image2d with expected lines image2d_expected_lines = np.tile(sp_reference, (naxis2, 1)) hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header) expected_cat_image = fits.HDUList([hdu]) if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xwave, sp_median, 'C1-', xlabel='Wavelength (Angstroms, in vacuum)', ylabel='Normalized number of counts', title='Median spectrum', label='observed spectrum', show=False) # overplot reference catalogue lines ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-', markerfmt=' ', basefmt='C4-', label='tabulated lines') # overplot convolved reference lines ax.plot(xwave_reference, sp_reference, 'C0-', label='expected spectrum') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) # compute baseline signal in sp_median baseline = np.percentile(sp_median[sp_median > 0], q=10) if (abs(debugplot) % 10 != 0) or (pdf is not None): fig = plt.figure() ax = fig.add_subplot(111) ax.hist(sp_median, bins=1000, log=True) ax.set_xlabel('Normalized number of counts') ax.set_ylabel('Number of pixels') ax.set_title('Median spectrum') ax.axvline(float(baseline), linestyle='--', color='grey') if pdf is not None: pdf.savefig() else: geometry = (0, 0, 640, 480) set_window_geometry(geometry) plt.show() # subtract baseline to sp_median (only pixels with signal above zero) lok = np.where(sp_median > 0) sp_median[lok] -= baseline # compute global offset through periodic correlation logger.info('Computing global offset') global_offset, fpeak = periodic_corr1d( sp_reference=sp_reference, sp_offset=sp_median, fminmax=None, naround_zero=50, plottitle='Median spectrum (cross-correlation)', pdf=pdf, debugplot=debugplot ) logger.info('Global offset: {} pixels'.format(-global_offset)) missing_slitlets = rectwv_coeff.missing_slitlets if mode == 1: # apply computed offset to obtain refined_rectwv_coeff_global for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in missing_slitlets: i = islitlet - 1 dumdict = refined_rectwv_coeff.contents[i] dumdict['wpoly_coeff'][0] -= global_offset*cdelt1 elif mode == 2: # compute individual offset for each slitlet logger.info('Computing individual offsets') median_55sp = median_slitlets_rectified(input_image, mode=1) offset_array = np.zeros(EMIR_NBARS) xplot = [] yplot = [] xplot_skipped = [] yplot_skipped = [] cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet in list_useful_slitlets: i = islitlet - 1 sp_median = median_55sp[0].data[i, :] lok = np.where(sp_median > 0) if np.any(lok): baseline = np.percentile(sp_median[lok], q=10) sp_median[lok] -= baseline sp_median /= sp_median.max() offset_array[i], fpeak = periodic_corr1d( sp_reference=sp_reference, sp_offset=median_55sp[0].data[i, :], fminmax=None, naround_zero=50, plottitle='slitlet #{0} (cross-correlation)'.format( islitlet), pdf=pdf, debugplot=debugplot ) else: offset_array[i] = 0.0 dumdict = 
refined_rectwv_coeff.contents[i] dumdict['wpoly_coeff'][0] -= offset_array[i]*cdelt1 xplot.append(islitlet) yplot.append(-offset_array[i]) # second correction wpoly_coeff_refined = check_wlcalib_sp( sp=median_55sp[0].data[i, :], crpix1=crpix1, crval1=crval1-offset_array[i]*cdelt1, cdelt1=cdelt1, wv_master=catlines_reference_wave, coeff_ini=dumdict['wpoly_coeff'], naxis1_ini=EMIR_NAXIS1, title='slitlet #{0} (after applying offset)'.format( islitlet), ylogscale=False, pdf=pdf, debugplot=debugplot ) dumdict['wpoly_coeff'] = wpoly_coeff_refined cout += '.' else: xplot_skipped.append(islitlet) yplot_skipped.append(0) cout += 'i' if islitlet % 10 == 0: if cout != 'i': cout = str(islitlet // 10) logger.info(cout) # show offsets with opposite sign stat_summary = summary(np.array(yplot)) logger.info('Statistics of individual slitlet offsets (pixels):') logger.info('- npoints....: {0:d}'.format(stat_summary['npoints'])) logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean'])) logger.info('- median.....: {0:7.3f}'.format(stat_summary['median'])) logger.info('- std........: {0:7.3f}'.format(stat_summary['std'])) logger.info('- robust_std.: {0:7.3f}'.format(stat_summary[ 'robust_std'])) if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xplot, yplot, linestyle='', marker='o', color='C0', xlabel='slitlet number', ylabel='-offset (pixels) = offset to be applied', title='cross-correlation result', show=False, **{'label': 'individual slitlets'}) if len(xplot_skipped) > 0: ax.plot(xplot_skipped, yplot_skipped, 'mx') ax.axhline(-global_offset, linestyle='--', color='C1', label='global offset') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) else: raise ValueError('Unexpected mode={}'.format(mode)) # close output PDF file if pdf is not None: pdf.close() # return result return refined_rectwv_coeff, expected_cat_image
python
def refine_rectwv_coeff(input_image, rectwv_coeff, refine_wavecalib_mode, minimum_slitlet_width_mm, maximum_slitlet_width_mm, save_intermediate_results=False, debugplot=0): """Refine RectWaveCoeff object using a catalogue of lines One and only one among refine_with_oh_lines_mode and refine_with_arc_lines must be different from zero. Parameters ---------- input_image : HDUList object Input 2D image. rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. refine_wavecalib_mode : int Integer, indicating the type of refinement: 0 : no refinement 1 : apply the same global offset to all the slitlets (using ARC lines) 2 : apply individual offset to each slitlet (using ARC lines) 11 : apply the same global offset to all the slitlets (using OH lines) 12 : apply individual offset to each slitlet (using OH lines) minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. save_intermediate_results : bool If True, save plots in PDF files debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- refined_rectwv_coeff : RectWaveCoeff instance Refined rectification and wavelength calibration coefficients for the particular CSU configuration. expected_cat_image : HDUList object Output 2D image with the expected catalogued lines. """ logger = logging.getLogger(__name__) if save_intermediate_results: from matplotlib.backends.backend_pdf import PdfPages pdf = PdfPages('crosscorrelation.pdf') else: pdf = None # image header main_header = input_image[0].header filter_name = main_header['filter'] grism_name = main_header['grism'] # protections if refine_wavecalib_mode not in [1, 2, 11, 12]: logger.error('Wavelength calibration refinemente mode={}'. 
format( refine_wavecalib_mode )) raise ValueError("Invalid wavelength calibration refinement mode") # read tabulated lines if refine_wavecalib_mode in [1, 2]: # ARC lines if grism_name == 'LR': catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat' else: catlines_file = 'lines_argon_neon_xenon_empirical.dat' dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file) arc_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(arc_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = catlines[:, 0] catlines_all_flux = catlines[:, 1] mode = refine_wavecalib_mode elif refine_wavecalib_mode in [11, 12]: # OH lines dumdata = pkgutil.get_data( 'emirdrp.instrument.configs', 'Oliva_etal_2013.dat' ) oh_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(oh_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0])) catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2])) mode = refine_wavecalib_mode - 10 else: raise ValueError('Unexpected mode={}'.format(refine_wavecalib_mode)) # initialize output refined_rectwv_coeff = deepcopy(rectwv_coeff) logger.info('Computing median spectrum') # compute median spectrum and normalize it sp_median = median_slitlets_rectified( input_image, mode=2, minimum_slitlet_width_mm=minimum_slitlet_width_mm, maximum_slitlet_width_mm=maximum_slitlet_width_mm )[0].data sp_median /= sp_median.max() # determine minimum and maximum useful wavelength jmin, jmax = find_pix_borders(sp_median, 0) naxis1 = main_header['naxis1'] naxis2 = main_header['naxis2'] crpix1 = main_header['crpix1'] crval1 = main_header['crval1'] cdelt1 = main_header['cdelt1'] xwave = crval1 + (np.arange(naxis1) + 1.0 - crpix1) * cdelt1 if grism_name == 'LR': wv_parameters = set_wv_parameters(filter_name, grism_name) wave_min = wv_parameters['wvmin_useful'] wave_max = wv_parameters['wvmax_useful'] else: wave_min = crval1 + (jmin + 1 - crpix1) * cdelt1 wave_max = crval1 + (jmax + 1 - crpix1) * cdelt1 logger.info('Setting wave_min to {}'.format(wave_min)) logger.info('Setting wave_max to {}'.format(wave_max)) # extract subset of catalogue lines within current wavelength range lok1 = catlines_all_wave >= wave_min lok2 = catlines_all_wave <= wave_max catlines_reference_wave = catlines_all_wave[lok1*lok2] catlines_reference_flux = catlines_all_flux[lok1*lok2] catlines_reference_flux /= catlines_reference_flux.max() # estimate sigma to broaden catalogue lines csu_config = CsuConfiguration.define_from_header(main_header) # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) # remove missing slitlets if len(refined_rectwv_coeff.missing_slitlets) > 0: for iremove in refined_rectwv_coeff.missing_slitlets: if iremove in list_useful_slitlets: list_useful_slitlets.remove(iremove) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] logger.info('list of useful slitlets: {}'.format( list_useful_slitlets)) logger.info('list of not useful slitlets: {}'.format( list_not_useful_slitlets)) tempwidths = np.array([csu_config.csu_bar_slit_width(islitlet) for islitlet in list_useful_slitlets]) widths_summary = summary(tempwidths) logger.info('Statistics of useful slitlet widths (mm):') logger.info('- npoints....: {0:d}'.format(widths_summary['npoints'])) logger.info('- mean.......: {0:7.3f}'.format(widths_summary['mean'])) 
logger.info('- median.....: {0:7.3f}'.format(widths_summary['median'])) logger.info('- std........: {0:7.3f}'.format(widths_summary['std'])) logger.info('- robust_std.: {0:7.3f}'.format(widths_summary['robust_std'])) # empirical transformation of slit width (mm) to pixels sigma_broadening = cdelt1 * widths_summary['median'] # convolve location of catalogue lines to generate expected spectrum xwave_reference, sp_reference = convolve_comb_lines( catlines_reference_wave, catlines_reference_flux, sigma_broadening, crpix1, crval1, cdelt1, naxis1 ) sp_reference /= sp_reference.max() # generate image2d with expected lines image2d_expected_lines = np.tile(sp_reference, (naxis2, 1)) hdu = fits.PrimaryHDU(data=image2d_expected_lines, header=main_header) expected_cat_image = fits.HDUList([hdu]) if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xwave, sp_median, 'C1-', xlabel='Wavelength (Angstroms, in vacuum)', ylabel='Normalized number of counts', title='Median spectrum', label='observed spectrum', show=False) # overplot reference catalogue lines ax.stem(catlines_reference_wave, catlines_reference_flux, 'C4-', markerfmt=' ', basefmt='C4-', label='tabulated lines') # overplot convolved reference lines ax.plot(xwave_reference, sp_reference, 'C0-', label='expected spectrum') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) # compute baseline signal in sp_median baseline = np.percentile(sp_median[sp_median > 0], q=10) if (abs(debugplot) % 10 != 0) or (pdf is not None): fig = plt.figure() ax = fig.add_subplot(111) ax.hist(sp_median, bins=1000, log=True) ax.set_xlabel('Normalized number of counts') ax.set_ylabel('Number of pixels') ax.set_title('Median spectrum') ax.axvline(float(baseline), linestyle='--', color='grey') if pdf is not None: pdf.savefig() else: geometry = (0, 0, 640, 480) set_window_geometry(geometry) plt.show() # subtract baseline to sp_median (only pixels with signal above zero) lok = np.where(sp_median > 0) sp_median[lok] -= baseline # compute global offset through periodic correlation logger.info('Computing global offset') global_offset, fpeak = periodic_corr1d( sp_reference=sp_reference, sp_offset=sp_median, fminmax=None, naround_zero=50, plottitle='Median spectrum (cross-correlation)', pdf=pdf, debugplot=debugplot ) logger.info('Global offset: {} pixels'.format(-global_offset)) missing_slitlets = rectwv_coeff.missing_slitlets if mode == 1: # apply computed offset to obtain refined_rectwv_coeff_global for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in missing_slitlets: i = islitlet - 1 dumdict = refined_rectwv_coeff.contents[i] dumdict['wpoly_coeff'][0] -= global_offset*cdelt1 elif mode == 2: # compute individual offset for each slitlet logger.info('Computing individual offsets') median_55sp = median_slitlets_rectified(input_image, mode=1) offset_array = np.zeros(EMIR_NBARS) xplot = [] yplot = [] xplot_skipped = [] yplot_skipped = [] cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet in list_useful_slitlets: i = islitlet - 1 sp_median = median_55sp[0].data[i, :] lok = np.where(sp_median > 0) if np.any(lok): baseline = np.percentile(sp_median[lok], q=10) sp_median[lok] -= baseline sp_median /= sp_median.max() offset_array[i], fpeak = periodic_corr1d( sp_reference=sp_reference, sp_offset=median_55sp[0].data[i, :], fminmax=None, naround_zero=50, plottitle='slitlet #{0} (cross-correlation)'.format( islitlet), pdf=pdf, debugplot=debugplot ) else: offset_array[i] = 0.0 dumdict = 
refined_rectwv_coeff.contents[i] dumdict['wpoly_coeff'][0] -= offset_array[i]*cdelt1 xplot.append(islitlet) yplot.append(-offset_array[i]) # second correction wpoly_coeff_refined = check_wlcalib_sp( sp=median_55sp[0].data[i, :], crpix1=crpix1, crval1=crval1-offset_array[i]*cdelt1, cdelt1=cdelt1, wv_master=catlines_reference_wave, coeff_ini=dumdict['wpoly_coeff'], naxis1_ini=EMIR_NAXIS1, title='slitlet #{0} (after applying offset)'.format( islitlet), ylogscale=False, pdf=pdf, debugplot=debugplot ) dumdict['wpoly_coeff'] = wpoly_coeff_refined cout += '.' else: xplot_skipped.append(islitlet) yplot_skipped.append(0) cout += 'i' if islitlet % 10 == 0: if cout != 'i': cout = str(islitlet // 10) logger.info(cout) # show offsets with opposite sign stat_summary = summary(np.array(yplot)) logger.info('Statistics of individual slitlet offsets (pixels):') logger.info('- npoints....: {0:d}'.format(stat_summary['npoints'])) logger.info('- mean.......: {0:7.3f}'.format(stat_summary['mean'])) logger.info('- median.....: {0:7.3f}'.format(stat_summary['median'])) logger.info('- std........: {0:7.3f}'.format(stat_summary['std'])) logger.info('- robust_std.: {0:7.3f}'.format(stat_summary[ 'robust_std'])) if (abs(debugplot) % 10 != 0) or (pdf is not None): ax = ximplotxy(xplot, yplot, linestyle='', marker='o', color='C0', xlabel='slitlet number', ylabel='-offset (pixels) = offset to be applied', title='cross-correlation result', show=False, **{'label': 'individual slitlets'}) if len(xplot_skipped) > 0: ax.plot(xplot_skipped, yplot_skipped, 'mx') ax.axhline(-global_offset, linestyle='--', color='C1', label='global offset') ax.legend() if pdf is not None: pdf.savefig() else: pause_debugplot(debugplot=debugplot, pltshow=True) else: raise ValueError('Unexpected mode={}'.format(mode)) # close output PDF file if pdf is not None: pdf.close() # return result return refined_rectwv_coeff, expected_cat_image
[ "def", "refine_rectwv_coeff", "(", "input_image", ",", "rectwv_coeff", ",", "refine_wavecalib_mode", ",", "minimum_slitlet_width_mm", ",", "maximum_slitlet_width_mm", ",", "save_intermediate_results", "=", "False", ",", "debugplot", "=", "0", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "if", "save_intermediate_results", ":", "from", "matplotlib", ".", "backends", ".", "backend_pdf", "import", "PdfPages", "pdf", "=", "PdfPages", "(", "'crosscorrelation.pdf'", ")", "else", ":", "pdf", "=", "None", "# image header", "main_header", "=", "input_image", "[", "0", "]", ".", "header", "filter_name", "=", "main_header", "[", "'filter'", "]", "grism_name", "=", "main_header", "[", "'grism'", "]", "# protections", "if", "refine_wavecalib_mode", "not", "in", "[", "1", ",", "2", ",", "11", ",", "12", "]", ":", "logger", ".", "error", "(", "'Wavelength calibration refinemente mode={}'", ".", "format", "(", "refine_wavecalib_mode", ")", ")", "raise", "ValueError", "(", "\"Invalid wavelength calibration refinement mode\"", ")", "# read tabulated lines", "if", "refine_wavecalib_mode", "in", "[", "1", ",", "2", "]", ":", "# ARC lines", "if", "grism_name", "==", "'LR'", ":", "catlines_file", "=", "'lines_argon_neon_xenon_empirical_LR.dat'", "else", ":", "catlines_file", "=", "'lines_argon_neon_xenon_empirical.dat'", "dumdata", "=", "pkgutil", ".", "get_data", "(", "'emirdrp.instrument.configs'", ",", "catlines_file", ")", "arc_lines_tmpfile", "=", "StringIO", "(", "dumdata", ".", "decode", "(", "'utf8'", ")", ")", "catlines", "=", "np", ".", "genfromtxt", "(", "arc_lines_tmpfile", ")", "# define wavelength and flux as separate arrays", "catlines_all_wave", "=", "catlines", "[", ":", ",", "0", "]", "catlines_all_flux", "=", "catlines", "[", ":", ",", "1", "]", "mode", "=", "refine_wavecalib_mode", "elif", "refine_wavecalib_mode", "in", "[", "11", ",", "12", "]", ":", "# OH lines", "dumdata", "=", "pkgutil", ".", "get_data", "(", "'emirdrp.instrument.configs'", ",", "'Oliva_etal_2013.dat'", ")", "oh_lines_tmpfile", "=", "StringIO", "(", "dumdata", ".", "decode", "(", "'utf8'", ")", ")", "catlines", "=", "np", ".", "genfromtxt", "(", "oh_lines_tmpfile", ")", "# define wavelength and flux as separate arrays", "catlines_all_wave", "=", "np", ".", "concatenate", "(", "(", "catlines", "[", ":", ",", "1", "]", ",", "catlines", "[", ":", ",", "0", "]", ")", ")", "catlines_all_flux", "=", "np", ".", "concatenate", "(", "(", "catlines", "[", ":", ",", "2", "]", ",", "catlines", "[", ":", ",", "2", "]", ")", ")", "mode", "=", "refine_wavecalib_mode", "-", "10", "else", ":", "raise", "ValueError", "(", "'Unexpected mode={}'", ".", "format", "(", "refine_wavecalib_mode", ")", ")", "# initialize output", "refined_rectwv_coeff", "=", "deepcopy", "(", "rectwv_coeff", ")", "logger", ".", "info", "(", "'Computing median spectrum'", ")", "# compute median spectrum and normalize it", "sp_median", "=", "median_slitlets_rectified", "(", "input_image", ",", "mode", "=", "2", ",", "minimum_slitlet_width_mm", "=", "minimum_slitlet_width_mm", ",", "maximum_slitlet_width_mm", "=", "maximum_slitlet_width_mm", ")", "[", "0", "]", ".", "data", "sp_median", "/=", "sp_median", ".", "max", "(", ")", "# determine minimum and maximum useful wavelength", "jmin", ",", "jmax", "=", "find_pix_borders", "(", "sp_median", ",", "0", ")", "naxis1", "=", "main_header", "[", "'naxis1'", "]", "naxis2", "=", "main_header", "[", "'naxis2'", "]", "crpix1", "=", "main_header", "[", "'crpix1'", "]", "crval1", "=", 
"main_header", "[", "'crval1'", "]", "cdelt1", "=", "main_header", "[", "'cdelt1'", "]", "xwave", "=", "crval1", "+", "(", "np", ".", "arange", "(", "naxis1", ")", "+", "1.0", "-", "crpix1", ")", "*", "cdelt1", "if", "grism_name", "==", "'LR'", ":", "wv_parameters", "=", "set_wv_parameters", "(", "filter_name", ",", "grism_name", ")", "wave_min", "=", "wv_parameters", "[", "'wvmin_useful'", "]", "wave_max", "=", "wv_parameters", "[", "'wvmax_useful'", "]", "else", ":", "wave_min", "=", "crval1", "+", "(", "jmin", "+", "1", "-", "crpix1", ")", "*", "cdelt1", "wave_max", "=", "crval1", "+", "(", "jmax", "+", "1", "-", "crpix1", ")", "*", "cdelt1", "logger", ".", "info", "(", "'Setting wave_min to {}'", ".", "format", "(", "wave_min", ")", ")", "logger", ".", "info", "(", "'Setting wave_max to {}'", ".", "format", "(", "wave_max", ")", ")", "# extract subset of catalogue lines within current wavelength range", "lok1", "=", "catlines_all_wave", ">=", "wave_min", "lok2", "=", "catlines_all_wave", "<=", "wave_max", "catlines_reference_wave", "=", "catlines_all_wave", "[", "lok1", "*", "lok2", "]", "catlines_reference_flux", "=", "catlines_all_flux", "[", "lok1", "*", "lok2", "]", "catlines_reference_flux", "/=", "catlines_reference_flux", ".", "max", "(", ")", "# estimate sigma to broaden catalogue lines", "csu_config", "=", "CsuConfiguration", ".", "define_from_header", "(", "main_header", ")", "# segregate slitlets", "list_useful_slitlets", "=", "csu_config", ".", "widths_in_range_mm", "(", "minwidth", "=", "minimum_slitlet_width_mm", ",", "maxwidth", "=", "maximum_slitlet_width_mm", ")", "# remove missing slitlets", "if", "len", "(", "refined_rectwv_coeff", ".", "missing_slitlets", ")", ">", "0", ":", "for", "iremove", "in", "refined_rectwv_coeff", ".", "missing_slitlets", ":", "if", "iremove", "in", "list_useful_slitlets", ":", "list_useful_slitlets", ".", "remove", "(", "iremove", ")", "list_not_useful_slitlets", "=", "[", "i", "for", "i", "in", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "if", "i", "not", "in", "list_useful_slitlets", "]", "logger", ".", "info", "(", "'list of useful slitlets: {}'", ".", "format", "(", "list_useful_slitlets", ")", ")", "logger", ".", "info", "(", "'list of not useful slitlets: {}'", ".", "format", "(", "list_not_useful_slitlets", ")", ")", "tempwidths", "=", "np", ".", "array", "(", "[", "csu_config", ".", "csu_bar_slit_width", "(", "islitlet", ")", "for", "islitlet", "in", "list_useful_slitlets", "]", ")", "widths_summary", "=", "summary", "(", "tempwidths", ")", "logger", ".", "info", "(", "'Statistics of useful slitlet widths (mm):'", ")", "logger", ".", "info", "(", "'- npoints....: {0:d}'", ".", "format", "(", "widths_summary", "[", "'npoints'", "]", ")", ")", "logger", ".", "info", "(", "'- mean.......: {0:7.3f}'", ".", "format", "(", "widths_summary", "[", "'mean'", "]", ")", ")", "logger", ".", "info", "(", "'- median.....: {0:7.3f}'", ".", "format", "(", "widths_summary", "[", "'median'", "]", ")", ")", "logger", ".", "info", "(", "'- std........: {0:7.3f}'", ".", "format", "(", "widths_summary", "[", "'std'", "]", ")", ")", "logger", ".", "info", "(", "'- robust_std.: {0:7.3f}'", ".", "format", "(", "widths_summary", "[", "'robust_std'", "]", ")", ")", "# empirical transformation of slit width (mm) to pixels", "sigma_broadening", "=", "cdelt1", "*", "widths_summary", "[", "'median'", "]", "# convolve location of catalogue lines to generate expected spectrum", "xwave_reference", ",", "sp_reference", "=", 
"convolve_comb_lines", "(", "catlines_reference_wave", ",", "catlines_reference_flux", ",", "sigma_broadening", ",", "crpix1", ",", "crval1", ",", "cdelt1", ",", "naxis1", ")", "sp_reference", "/=", "sp_reference", ".", "max", "(", ")", "# generate image2d with expected lines", "image2d_expected_lines", "=", "np", ".", "tile", "(", "sp_reference", ",", "(", "naxis2", ",", "1", ")", ")", "hdu", "=", "fits", ".", "PrimaryHDU", "(", "data", "=", "image2d_expected_lines", ",", "header", "=", "main_header", ")", "expected_cat_image", "=", "fits", ".", "HDUList", "(", "[", "hdu", "]", ")", "if", "(", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ")", "or", "(", "pdf", "is", "not", "None", ")", ":", "ax", "=", "ximplotxy", "(", "xwave", ",", "sp_median", ",", "'C1-'", ",", "xlabel", "=", "'Wavelength (Angstroms, in vacuum)'", ",", "ylabel", "=", "'Normalized number of counts'", ",", "title", "=", "'Median spectrum'", ",", "label", "=", "'observed spectrum'", ",", "show", "=", "False", ")", "# overplot reference catalogue lines", "ax", ".", "stem", "(", "catlines_reference_wave", ",", "catlines_reference_flux", ",", "'C4-'", ",", "markerfmt", "=", "' '", ",", "basefmt", "=", "'C4-'", ",", "label", "=", "'tabulated lines'", ")", "# overplot convolved reference lines", "ax", ".", "plot", "(", "xwave_reference", ",", "sp_reference", ",", "'C0-'", ",", "label", "=", "'expected spectrum'", ")", "ax", ".", "legend", "(", ")", "if", "pdf", "is", "not", "None", ":", "pdf", ".", "savefig", "(", ")", "else", ":", "pause_debugplot", "(", "debugplot", "=", "debugplot", ",", "pltshow", "=", "True", ")", "# compute baseline signal in sp_median", "baseline", "=", "np", ".", "percentile", "(", "sp_median", "[", "sp_median", ">", "0", "]", ",", "q", "=", "10", ")", "if", "(", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ")", "or", "(", "pdf", "is", "not", "None", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "ax", ".", "hist", "(", "sp_median", ",", "bins", "=", "1000", ",", "log", "=", "True", ")", "ax", ".", "set_xlabel", "(", "'Normalized number of counts'", ")", "ax", ".", "set_ylabel", "(", "'Number of pixels'", ")", "ax", ".", "set_title", "(", "'Median spectrum'", ")", "ax", ".", "axvline", "(", "float", "(", "baseline", ")", ",", "linestyle", "=", "'--'", ",", "color", "=", "'grey'", ")", "if", "pdf", "is", "not", "None", ":", "pdf", ".", "savefig", "(", ")", "else", ":", "geometry", "=", "(", "0", ",", "0", ",", "640", ",", "480", ")", "set_window_geometry", "(", "geometry", ")", "plt", ".", "show", "(", ")", "# subtract baseline to sp_median (only pixels with signal above zero)", "lok", "=", "np", ".", "where", "(", "sp_median", ">", "0", ")", "sp_median", "[", "lok", "]", "-=", "baseline", "# compute global offset through periodic correlation", "logger", ".", "info", "(", "'Computing global offset'", ")", "global_offset", ",", "fpeak", "=", "periodic_corr1d", "(", "sp_reference", "=", "sp_reference", ",", "sp_offset", "=", "sp_median", ",", "fminmax", "=", "None", ",", "naround_zero", "=", "50", ",", "plottitle", "=", "'Median spectrum (cross-correlation)'", ",", "pdf", "=", "pdf", ",", "debugplot", "=", "debugplot", ")", "logger", ".", "info", "(", "'Global offset: {} pixels'", ".", "format", "(", "-", "global_offset", ")", ")", "missing_slitlets", "=", "rectwv_coeff", ".", "missing_slitlets", "if", "mode", "==", "1", ":", "# apply computed offset to obtain refined_rectwv_coeff_global", "for", "islitlet", "in", "range", "(", 
"1", ",", "EMIR_NBARS", "+", "1", ")", ":", "if", "islitlet", "not", "in", "missing_slitlets", ":", "i", "=", "islitlet", "-", "1", "dumdict", "=", "refined_rectwv_coeff", ".", "contents", "[", "i", "]", "dumdict", "[", "'wpoly_coeff'", "]", "[", "0", "]", "-=", "global_offset", "*", "cdelt1", "elif", "mode", "==", "2", ":", "# compute individual offset for each slitlet", "logger", ".", "info", "(", "'Computing individual offsets'", ")", "median_55sp", "=", "median_slitlets_rectified", "(", "input_image", ",", "mode", "=", "1", ")", "offset_array", "=", "np", ".", "zeros", "(", "EMIR_NBARS", ")", "xplot", "=", "[", "]", "yplot", "=", "[", "]", "xplot_skipped", "=", "[", "]", "yplot_skipped", "=", "[", "]", "cout", "=", "'0'", "for", "islitlet", "in", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ":", "if", "islitlet", "in", "list_useful_slitlets", ":", "i", "=", "islitlet", "-", "1", "sp_median", "=", "median_55sp", "[", "0", "]", ".", "data", "[", "i", ",", ":", "]", "lok", "=", "np", ".", "where", "(", "sp_median", ">", "0", ")", "if", "np", ".", "any", "(", "lok", ")", ":", "baseline", "=", "np", ".", "percentile", "(", "sp_median", "[", "lok", "]", ",", "q", "=", "10", ")", "sp_median", "[", "lok", "]", "-=", "baseline", "sp_median", "/=", "sp_median", ".", "max", "(", ")", "offset_array", "[", "i", "]", ",", "fpeak", "=", "periodic_corr1d", "(", "sp_reference", "=", "sp_reference", ",", "sp_offset", "=", "median_55sp", "[", "0", "]", ".", "data", "[", "i", ",", ":", "]", ",", "fminmax", "=", "None", ",", "naround_zero", "=", "50", ",", "plottitle", "=", "'slitlet #{0} (cross-correlation)'", ".", "format", "(", "islitlet", ")", ",", "pdf", "=", "pdf", ",", "debugplot", "=", "debugplot", ")", "else", ":", "offset_array", "[", "i", "]", "=", "0.0", "dumdict", "=", "refined_rectwv_coeff", ".", "contents", "[", "i", "]", "dumdict", "[", "'wpoly_coeff'", "]", "[", "0", "]", "-=", "offset_array", "[", "i", "]", "*", "cdelt1", "xplot", ".", "append", "(", "islitlet", ")", "yplot", ".", "append", "(", "-", "offset_array", "[", "i", "]", ")", "# second correction", "wpoly_coeff_refined", "=", "check_wlcalib_sp", "(", "sp", "=", "median_55sp", "[", "0", "]", ".", "data", "[", "i", ",", ":", "]", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", "-", "offset_array", "[", "i", "]", "*", "cdelt1", ",", "cdelt1", "=", "cdelt1", ",", "wv_master", "=", "catlines_reference_wave", ",", "coeff_ini", "=", "dumdict", "[", "'wpoly_coeff'", "]", ",", "naxis1_ini", "=", "EMIR_NAXIS1", ",", "title", "=", "'slitlet #{0} (after applying offset)'", ".", "format", "(", "islitlet", ")", ",", "ylogscale", "=", "False", ",", "pdf", "=", "pdf", ",", "debugplot", "=", "debugplot", ")", "dumdict", "[", "'wpoly_coeff'", "]", "=", "wpoly_coeff_refined", "cout", "+=", "'.'", "else", ":", "xplot_skipped", ".", "append", "(", "islitlet", ")", "yplot_skipped", ".", "append", "(", "0", ")", "cout", "+=", "'i'", "if", "islitlet", "%", "10", "==", "0", ":", "if", "cout", "!=", "'i'", ":", "cout", "=", "str", "(", "islitlet", "//", "10", ")", "logger", ".", "info", "(", "cout", ")", "# show offsets with opposite sign", "stat_summary", "=", "summary", "(", "np", ".", "array", "(", "yplot", ")", ")", "logger", ".", "info", "(", "'Statistics of individual slitlet offsets (pixels):'", ")", "logger", ".", "info", "(", "'- npoints....: {0:d}'", ".", "format", "(", "stat_summary", "[", "'npoints'", "]", ")", ")", "logger", ".", "info", "(", "'- mean.......: {0:7.3f}'", ".", "format", "(", "stat_summary", "[", "'mean'", 
"]", ")", ")", "logger", ".", "info", "(", "'- median.....: {0:7.3f}'", ".", "format", "(", "stat_summary", "[", "'median'", "]", ")", ")", "logger", ".", "info", "(", "'- std........: {0:7.3f}'", ".", "format", "(", "stat_summary", "[", "'std'", "]", ")", ")", "logger", ".", "info", "(", "'- robust_std.: {0:7.3f}'", ".", "format", "(", "stat_summary", "[", "'robust_std'", "]", ")", ")", "if", "(", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ")", "or", "(", "pdf", "is", "not", "None", ")", ":", "ax", "=", "ximplotxy", "(", "xplot", ",", "yplot", ",", "linestyle", "=", "''", ",", "marker", "=", "'o'", ",", "color", "=", "'C0'", ",", "xlabel", "=", "'slitlet number'", ",", "ylabel", "=", "'-offset (pixels) = offset to be applied'", ",", "title", "=", "'cross-correlation result'", ",", "show", "=", "False", ",", "*", "*", "{", "'label'", ":", "'individual slitlets'", "}", ")", "if", "len", "(", "xplot_skipped", ")", ">", "0", ":", "ax", ".", "plot", "(", "xplot_skipped", ",", "yplot_skipped", ",", "'mx'", ")", "ax", ".", "axhline", "(", "-", "global_offset", ",", "linestyle", "=", "'--'", ",", "color", "=", "'C1'", ",", "label", "=", "'global offset'", ")", "ax", ".", "legend", "(", ")", "if", "pdf", "is", "not", "None", ":", "pdf", ".", "savefig", "(", ")", "else", ":", "pause_debugplot", "(", "debugplot", "=", "debugplot", ",", "pltshow", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "'Unexpected mode={}'", ".", "format", "(", "mode", ")", ")", "# close output PDF file", "if", "pdf", "is", "not", "None", ":", "pdf", ".", "close", "(", ")", "# return result", "return", "refined_rectwv_coeff", ",", "expected_cat_image" ]
Refine RectWaveCoeff object using a catalogue of lines

    One and only one among refine_with_oh_lines_mode and
    refine_with_arc_lines must be different from zero.

    Parameters
    ----------
    input_image : HDUList object
        Input 2D image.
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    refine_wavecalib_mode : int
        Integer, indicating the type of refinement:
        0 : no refinement
        1 : apply the same global offset to all the slitlets (using ARC lines)
        2 : apply individual offset to each slitlet (using ARC lines)
        11 : apply the same global offset to all the slitlets (using OH lines)
        12 : apply individual offset to each slitlet (using OH lines)
    minimum_slitlet_width_mm : float
        Minimum slitlet width (mm) for a valid slitlet.
    maximum_slitlet_width_mm : float
        Maximum slitlet width (mm) for a valid slitlet.
    save_intermediate_results : bool
        If True, save plots in PDF files.
    debugplot : int
        Determines whether intermediate computations and/or plots
        are displayed. The valid codes are defined in
        numina.array.display.pause_debugplot.

    Returns
    -------
    refined_rectwv_coeff : RectWaveCoeff instance
        Refined rectification and wavelength calibration coefficients
        for the particular CSU configuration.
    expected_cat_image : HDUList object
        Output 2D image with the expected catalogued lines.
[ "Refine", "RectWaveCoeff", "object", "using", "a", "catalogue", "of", "lines" ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/refine_rectwv_coeff.py#L49-L381
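The record above documents the refinement modes of this wavelength-calibration routine: a pixel offset, measured by cross-correlating each slitlet's median spectrum against a reference spectrum, is converted to wavelength units with CDELT1 and subtracted from the constant term of the slitlet's wavelength polynomial (one common offset for every slitlet in modes 1/11, one offset per slitlet in modes 2/12). Below is a minimal, self-contained sketch of that correction; all names and numbers are illustrative and are not the pyemir API.

cdelt1 = 0.77            # example linear dispersion (Angstrom/pixel)
global_offset = 1.8      # example offset measured by cross-correlation (pixels)

# toy wavelength-calibration polynomials, one per slitlet: [c0, c1, ...]
wpoly_coeff = {1: [11200.0, 0.77], 2: [11180.0, 0.77]}

for islitlet, coeff in wpoly_coeff.items():
    # same shift applied to the zeroth-order coefficient of every slitlet
    # (this is what modes 1 and 11 do with the global offset)
    coeff[0] -= global_offset * cdelt1

print(wpoly_coeff)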
guaix-ucm/pyemir
emirdrp/tools/select_unrectified_slitlets.py
select_unrectified_slitlet
def select_unrectified_slitlet(image2d, islitlet, csu_bar_slit_center,
                               params, parmodel, maskonly):
    """Returns image with the indicated slitlet (zero anywhere else).

    Parameters
    ----------
    image2d : numpy array
        Initial image from which the slitlet data will be extracted.
    islitlet : int
        Slitlet number.
    csu_bar_slit_center : float
        CSU bar slit center.
    params : :class:`~lmfit.parameter.Parameters`
        Parameters to be employed in the prediction of the distorted
        boundaries.
    parmodel : str
        Model to be assumed. Allowed values are 'longslit' and 'multislit'.
    maskonly : bool
        If True, returns simply a mask (1 in the slitlet region and zero
        anywhere else).

    Returns
    -------
    image2d_output : numpy array
        2D image with the pixel information corresponding to the selected
        slitlet and zero everywhere else.

    """

    # protection
    if image2d.shape != (EMIR_NAXIS2, EMIR_NAXIS1):
        raise ValueError("NAXIS1, NAXIS2 unexpected for EMIR detector")

    # initialize image output
    image2d_output = np.zeros_like(image2d)

    # expected slitlet frontiers
    list_expected_frontiers = expected_distorted_frontiers(
        islitlet, csu_bar_slit_center,
        params, parmodel, numpts=101, deg=5, debugplot=0
    )
    pol_lower_expected = list_expected_frontiers[0].poly_funct
    pol_upper_expected = list_expected_frontiers[1].poly_funct

    # main loop: compute for each channel the minimum and maximum scan
    for j in range(EMIR_NAXIS1):
        xchannel = j + 1
        y0_lower = pol_lower_expected(xchannel)
        y0_upper = pol_upper_expected(xchannel)
        n1, n2 = nscan_minmax_frontiers(y0_frontier_lower=y0_lower,
                                        y0_frontier_upper=y0_upper,
                                        resize=True)
        # note that n1 and n2 are scans (ranging from 1 to NAXIS2)
        if maskonly:
            image2d_output[(n1 - 1):n2, j] = np.repeat(
                [1.0], (n2 - n1 + 1)
            )
        else:
            image2d_output[(n1 - 1):n2, j] = image2d[(n1 - 1):n2, j]

    return image2d_output
python
def select_unrectified_slitlet(image2d, islitlet, csu_bar_slit_center,
                               params, parmodel, maskonly):
    """Returns image with the indicated slitlet (zero anywhere else).

    Parameters
    ----------
    image2d : numpy array
        Initial image from which the slitlet data will be extracted.
    islitlet : int
        Slitlet number.
    csu_bar_slit_center : float
        CSU bar slit center.
    params : :class:`~lmfit.parameter.Parameters`
        Parameters to be employed in the prediction of the distorted
        boundaries.
    parmodel : str
        Model to be assumed. Allowed values are 'longslit' and 'multislit'.
    maskonly : bool
        If True, returns simply a mask (1 in the slitlet region and zero
        anywhere else).

    Returns
    -------
    image2d_output : numpy array
        2D image with the pixel information corresponding to the selected
        slitlet and zero everywhere else.

    """

    # protection
    if image2d.shape != (EMIR_NAXIS2, EMIR_NAXIS1):
        raise ValueError("NAXIS1, NAXIS2 unexpected for EMIR detector")

    # initialize image output
    image2d_output = np.zeros_like(image2d)

    # expected slitlet frontiers
    list_expected_frontiers = expected_distorted_frontiers(
        islitlet, csu_bar_slit_center,
        params, parmodel, numpts=101, deg=5, debugplot=0
    )
    pol_lower_expected = list_expected_frontiers[0].poly_funct
    pol_upper_expected = list_expected_frontiers[1].poly_funct

    # main loop: compute for each channel the minimum and maximum scan
    for j in range(EMIR_NAXIS1):
        xchannel = j + 1
        y0_lower = pol_lower_expected(xchannel)
        y0_upper = pol_upper_expected(xchannel)
        n1, n2 = nscan_minmax_frontiers(y0_frontier_lower=y0_lower,
                                        y0_frontier_upper=y0_upper,
                                        resize=True)
        # note that n1 and n2 are scans (ranging from 1 to NAXIS2)
        if maskonly:
            image2d_output[(n1 - 1):n2, j] = np.repeat(
                [1.0], (n2 - n1 + 1)
            )
        else:
            image2d_output[(n1 - 1):n2, j] = image2d[(n1 - 1):n2, j]

    return image2d_output
[ "def", "select_unrectified_slitlet", "(", "image2d", ",", "islitlet", ",", "csu_bar_slit_center", ",", "params", ",", "parmodel", ",", "maskonly", ")", ":", "# protection", "if", "image2d", ".", "shape", "!=", "(", "EMIR_NAXIS2", ",", "EMIR_NAXIS1", ")", ":", "raise", "ValueError", "(", "\"NAXIS1, NAXIS2 unexpected for EMIR detector\"", ")", "# initialize image output", "image2d_output", "=", "np", ".", "zeros_like", "(", "image2d", ")", "# expected slitlet frontiers", "list_expected_frontiers", "=", "expected_distorted_frontiers", "(", "islitlet", ",", "csu_bar_slit_center", ",", "params", ",", "parmodel", ",", "numpts", "=", "101", ",", "deg", "=", "5", ",", "debugplot", "=", "0", ")", "pol_lower_expected", "=", "list_expected_frontiers", "[", "0", "]", ".", "poly_funct", "pol_upper_expected", "=", "list_expected_frontiers", "[", "1", "]", ".", "poly_funct", "# main loop: compute for each channel the minimum and maximum scan", "for", "j", "in", "range", "(", "EMIR_NAXIS1", ")", ":", "xchannel", "=", "j", "+", "1", "y0_lower", "=", "pol_lower_expected", "(", "xchannel", ")", "y0_upper", "=", "pol_upper_expected", "(", "xchannel", ")", "n1", ",", "n2", "=", "nscan_minmax_frontiers", "(", "y0_frontier_lower", "=", "y0_lower", ",", "y0_frontier_upper", "=", "y0_upper", ",", "resize", "=", "True", ")", "# note that n1 and n2 are scans (ranging from 1 to NAXIS2)", "if", "maskonly", ":", "image2d_output", "[", "(", "n1", "-", "1", ")", ":", "n2", ",", "j", "]", "=", "np", ".", "repeat", "(", "[", "1.0", "]", ",", "(", "n2", "-", "n1", "+", "1", ")", ")", "else", ":", "image2d_output", "[", "(", "n1", "-", "1", ")", ":", "n2", ",", "j", "]", "=", "image2d", "[", "(", "n1", "-", "1", ")", ":", "n2", ",", "j", "]", "return", "image2d_output" ]
Returns image with the indicated slitlet (zero anywhere else).

    Parameters
    ----------
    image2d : numpy array
        Initial image from which the slitlet data will be extracted.
    islitlet : int
        Slitlet number.
    csu_bar_slit_center : float
        CSU bar slit center.
    params : :class:`~lmfit.parameter.Parameters`
        Parameters to be employed in the prediction of the distorted
        boundaries.
    parmodel : str
        Model to be assumed. Allowed values are 'longslit' and 'multislit'.
    maskonly : bool
        If True, returns simply a mask (1 in the slitlet region and zero
        anywhere else).

    Returns
    -------
    image2d_output : numpy array
        2D image with the pixel information corresponding to the selected
        slitlet and zero everywhere else.
[ "Returns", "image", "with", "the", "indicated", "slitlet", "(", "zero", "anywhere", "else", ")", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/select_unrectified_slitlets.py#L47-L108
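The select_unrectified_slitlet record above describes a per-column masking scheme: for every channel the distorted lower and upper frontiers give a scan range, and only those rows keep their pixel values (or are set to 1 when maskonly is True). Below is a minimal, self-contained sketch of that idea using toy frontier polynomials in place of the fitted boundary model; names and numbers are illustrative only and are not the pyemir API.

import numpy as np

NAXIS1, NAXIS2 = 2048, 2048
image2d = np.random.rand(NAXIS2, NAXIS1)

# toy frontier polynomials y(x) bounding a single slitlet (nearly horizontal)
pol_lower = np.polynomial.Polynomial([1000.0, 0.002])
pol_upper = np.polynomial.Polynomial([1035.0, 0.002])

mask = np.zeros_like(image2d)
for j in range(NAXIS1):
    xchannel = j + 1                         # 1-based channel, as in FITS
    n1 = int(np.floor(pol_lower(xchannel)))  # first scan inside the slitlet
    n2 = int(np.ceil(pol_upper(xchannel)))   # last scan inside the slitlet
    mask[(n1 - 1):n2, j] = 1.0               # scans are 1-based, array rows 0-based

isolated = image2d * mask                    # slitlet pixels kept, zero elsewhere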
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_from_arc_image.py
rectwv_coeff_from_arc_image
def rectwv_coeff_from_arc_image(reduced_image, bound_param, lines_catalog, args_nbrightlines=None, args_ymargin_bb=2, args_remove_sp_background=True, args_times_sigma_threshold=10, args_order_fmap=2, args_sigma_gaussian_filtering=2, args_margin_npix=50, args_poldeg_initial=3, args_poldeg_refined=5, args_interactive=False, args_threshold_wv=0, args_ylogscale=False, args_pdf=None, args_geometry=(0,0,640,480), debugplot=0): """Evaluate rect.+wavecal. coefficients from arc image Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. bound_param : RefinedBoundaryModelParam instance Refined boundary model. lines_catalog : Numpy array 2D numpy array with the contents of the master file with the expected arc line wavelengths. args_nbrightlines : int TBD args_ymargin_bb : int TBD args_remove_sp_background : bool TBD args_times_sigma_threshold : float TBD args_order_fmap : int TBD args_sigma_gaussian_filtering : float TBD args_margin_npix : int TBD args_poldeg_initial : int TBD args_poldeg_refined : int TBD args_interactive : bool TBD args_threshold_wv : float TBD args_ylogscale : bool TBD args_pdf : TBD args_geometry : TBD debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration of the input arc image. reduced_55sp : HDUList object Image with 55 spectra corresponding to the median spectrum for each slitlet, employed to derived the wavelength calibration polynomial. """ logger = logging.getLogger(__name__) # protections if args_interactive and args_pdf is not None: logger.error('--interactive and --pdf are incompatible options') raise ValueError('--interactive and --pdf are incompatible options') # header and data array header = reduced_image[0].header image2d = reduced_image[0].data # check grism and filter filter_name = header['filter'] logger.info('Filter: ' + filter_name) if filter_name != bound_param.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.info('Grism: ' + grism_name) if grism_name != bound_param.tags['grism']: raise ValueError('Grism name does not match!') # read the CSU configuration from the image header csu_conf = CsuConfiguration.define_from_header(header) logger.debug(csu_conf) # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) logger.debug(dtu_conf) # set boundary parameters parmodel = bound_param.meta_info['parmodel'] params = bound_params_from_dict(bound_param.__getstate__()) if abs(debugplot) >= 10: print('-' * 83) print('* FITTED BOUND PARAMETERS') params.pretty_print() pause_debugplot(debugplot) # determine parameters according to grism+filter combination wv_parameters = set_wv_parameters(filter_name, grism_name) islitlet_min = wv_parameters['islitlet_min'] islitlet_max = wv_parameters['islitlet_max'] if args_nbrightlines is None: nbrightlines = wv_parameters['nbrightlines'] else: nbrightlines = [int(idum) for idum in args_nbrightlines.split(',')] poly_crval1_linear = wv_parameters['poly_crval1_linear'] poly_cdelt1_linear = wv_parameters['poly_cdelt1_linear'] wvmin_expected = wv_parameters['wvmin_expected'] wvmax_expected = wv_parameters['wvmax_expected'] wvmin_useful = wv_parameters['wvmin_useful'] wvmax_useful = wv_parameters['wvmax_useful'] # list of slitlets to be computed 
logger.info('list_slitlets: [' + str(islitlet_min) + ',... ' + str(islitlet_max) + ']') # read master arc line wavelengths (only brightest lines) wv_master = read_wv_master_from_array( master_table=lines_catalog, lines='brightest', debugplot=debugplot ) # read master arc line wavelengths (whole data set) wv_master_all = read_wv_master_from_array( master_table=lines_catalog, lines='all', debugplot=debugplot ) # check that the arc lines in the master file are properly sorted # in ascending order for i in range(len(wv_master_all) - 1): if wv_master_all[i] >= wv_master_all[i + 1]: logger.error('>>> wavelengths: ' + str(wv_master_all[i]) + ' ' + str(wv_master_all[i+1])) raise ValueError('Arc lines are not sorted in master file') # --- image2d_55sp = np.zeros((EMIR_NBARS, EMIR_NAXIS1)) # compute rectification transformation and wavelength calibration # polynomials measured_slitlets = [] cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet_min <= islitlet <= islitlet_max: # define Slitlet2dArc object slt = Slitlet2dArc( islitlet=islitlet, csu_conf=csu_conf, ymargin_bb=args_ymargin_bb, params=params, parmodel=parmodel, debugplot=debugplot ) # extract 2D image corresponding to the selected slitlet, clipping # the image beyond the unrectified slitlet (in order to isolate # the arc lines of the current slitlet; otherwise there are # problems with arc lines from neighbour slitlets) image2d_tmp = select_unrectified_slitlet( image2d=image2d, islitlet=islitlet, csu_bar_slit_center=csu_conf.csu_bar_slit_center(islitlet), params=params, parmodel=parmodel, maskonly=False ) slitlet2d = slt.extract_slitlet2d(image2d_tmp) # subtract smooth background computed as follows: # - median collapsed spectrum of the whole slitlet2d # - independent median filtering of the previous spectrum in the # two halves in the spectral direction if args_remove_sp_background: spmedian = np.median(slitlet2d, axis=0) naxis1_tmp = spmedian.shape[0] jmidpoint = naxis1_tmp // 2 sp1 = medfilt(spmedian[:jmidpoint], [201]) sp2 = medfilt(spmedian[jmidpoint:], [201]) spbackground = np.concatenate((sp1, sp2)) slitlet2d -= spbackground # locate unknown arc lines slt.locate_unknown_arc_lines( slitlet2d=slitlet2d, times_sigma_threshold=args_times_sigma_threshold) # continue working with current slitlet only if arc lines have # been detected if slt.list_arc_lines is not None: # compute intersections between spectrum trails and arc lines slt.xy_spectrail_arc_intersections(slitlet2d=slitlet2d) # compute rectification transformation slt.estimate_tt_to_rectify(order=args_order_fmap, slitlet2d=slitlet2d) # rectify image slitlet2d_rect = slt.rectify(slitlet2d, resampling=2, transformation=1) # median spectrum and line peaks from rectified image sp_median, fxpeaks = slt.median_spectrum_from_rectified_image( slitlet2d_rect, sigma_gaussian_filtering=args_sigma_gaussian_filtering, nwinwidth_initial=5, nwinwidth_refined=5, times_sigma_threshold=5, npix_avoid_border=6, nbrightlines=nbrightlines ) image2d_55sp[islitlet - 1, :] = sp_median # determine expected wavelength limits prior to the wavelength # calibration csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) crval1_linear = poly_crval1_linear(csu_bar_slit_center) cdelt1_linear = poly_cdelt1_linear(csu_bar_slit_center) expected_wvmin = crval1_linear - \ args_margin_npix * cdelt1_linear naxis1_linear = sp_median.shape[0] crvaln_linear = crval1_linear + \ (naxis1_linear - 1) * cdelt1_linear expected_wvmax = crvaln_linear + \ args_margin_npix * cdelt1_linear # override previous 
estimates when necessary if wvmin_expected is not None: expected_wvmin = wvmin_expected if wvmax_expected is not None: expected_wvmax = wvmax_expected # clip initial master arc line list with bright lines to # the expected wavelength range lok1 = expected_wvmin <= wv_master lok2 = wv_master <= expected_wvmax lok = lok1 * lok2 wv_master_eff = wv_master[lok] # perform initial wavelength calibration solution_wv = wvcal_spectrum( sp=sp_median, fxpeaks=fxpeaks, poly_degree_wfit=args_poldeg_initial, wv_master=wv_master_eff, wv_ini_search=expected_wvmin, wv_end_search=expected_wvmax, wvmin_useful=wvmin_useful, wvmax_useful=wvmax_useful, geometry=args_geometry, debugplot=slt.debugplot ) # store initial wavelength calibration polynomial in current # slitlet instance slt.wpoly = np.polynomial.Polynomial(solution_wv.coeff) pause_debugplot(debugplot) # clip initial master arc line list with all the lines to # the expected wavelength range lok1 = expected_wvmin <= wv_master_all lok2 = wv_master_all <= expected_wvmax lok = lok1 * lok2 wv_master_all_eff = wv_master_all[lok] # clip master arc line list to useful region if wvmin_useful is not None: lok = wvmin_useful <= wv_master_all_eff wv_master_all_eff = wv_master_all_eff[lok] if wvmax_useful is not None: lok = wv_master_all_eff <= wvmax_useful wv_master_all_eff = wv_master_all_eff[lok] # refine wavelength calibration if args_poldeg_refined > 0: plottitle = '[slitlet#{}, refined]'.format(islitlet) poly_refined, yres_summary = refine_arccalibration( sp=sp_median, poly_initial=slt.wpoly, wv_master=wv_master_all_eff, poldeg=args_poldeg_refined, ntimes_match_wv=1, interactive=args_interactive, threshold=args_threshold_wv, plottitle=plottitle, ylogscale=args_ylogscale, geometry=args_geometry, pdf=args_pdf, debugplot=slt.debugplot ) # store refined wavelength calibration polynomial in # current slitlet instance slt.wpoly = poly_refined # compute approximate linear values for CRVAL1 and CDELT1 naxis1_linear = sp_median.shape[0] crmin1_linear = slt.wpoly(1) crmax1_linear = slt.wpoly(naxis1_linear) slt.crval1_linear = crmin1_linear slt.cdelt1_linear = \ (crmax1_linear - crmin1_linear) / (naxis1_linear - 1) # check that the trimming of wv_master and wv_master_all has # preserved the wavelength range [crmin1_linear, crmax1_linear] if crmin1_linear < expected_wvmin: logger.warning(">>> islitlet: " +str(islitlet)) logger.warning("expected_wvmin: " + str(expected_wvmin)) logger.warning("crmin1_linear.: " + str(crmin1_linear)) logger.warning("WARNING: Unexpected crmin1_linear < " "expected_wvmin") if crmax1_linear > expected_wvmax: logger.warning(">>> islitlet: " +str(islitlet)) logger.warning("expected_wvmax: " + str(expected_wvmax)) logger.warning("crmax1_linear.: " + str(crmax1_linear)) logger.warning("WARNING: Unexpected crmax1_linear > " "expected_wvmax") cout += '.' 
else: cout += 'x' if islitlet % 10 == 0: if cout != 'x': cout = str(islitlet // 10) if debugplot != 0: pause_debugplot(debugplot) else: # define Slitlet2dArc object slt = Slitlet2dArc( islitlet=islitlet, csu_conf=csu_conf, ymargin_bb=args_ymargin_bb, params=None, parmodel=None, debugplot=debugplot ) cout += 'i' # store current slitlet in list of measured slitlets measured_slitlets.append(slt) logger.info(cout) # --- # generate FITS file structure with 55 spectra corresponding to the # median spectrum for each slitlet reduced_55sp = fits.PrimaryHDU(data=image2d_55sp) reduced_55sp.header['crpix1'] = (0.0, 'reference pixel') reduced_55sp.header['crval1'] = (0.0, 'central value at crpix2') reduced_55sp.header['cdelt1'] = (1.0, 'increment') reduced_55sp.header['ctype1'] = 'PIXEL' reduced_55sp.header['cunit1'] = ('Pixel', 'units along axis2') reduced_55sp.header['crpix2'] = (0.0, 'reference pixel') reduced_55sp.header['crval2'] = (0.0, 'central value at crpix2') reduced_55sp.header['cdelt2'] = (1.0, 'increment') reduced_55sp.header['ctype2'] = 'PIXEL' reduced_55sp.header['cunit2'] = ('Pixel', 'units along axis2') # --- # Generate structure to store intermediate results outdict = {} outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'computation of rectification and wavelength calibration polynomial ' \ 'coefficients for a particular CSU configuration' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['bound_param_uuid'] = \ bound_param.uuid outdict['meta_info']['origin']['arc_image_uuid'] = 'undefined' outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name outdict['tags']['islitlet_min'] = islitlet_min outdict['tags']['islitlet_max'] = islitlet_max outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} missing_slitlets = [] for slt in measured_slitlets: islitlet = slt.islitlet if islitlet_min <= islitlet <= islitlet_max: # avoid error when creating a python list of coefficients from # numpy polynomials when the polynomials do not exist (note that # the JSON format doesn't handle numpy arrays and such arrays must # be transformed into native python lists) if slt.wpoly is None: wpoly_coeff = None else: wpoly_coeff = slt.wpoly.coef.tolist() if slt.wpoly_longslit_model is None: wpoly_coeff_longslit_model = None else: wpoly_coeff_longslit_model = \ slt.wpoly_longslit_model.coef.tolist() # avoid similar error when creating a python list of coefficients # when the numpy array does not exist; note that this problem # does not happen with tt?_aij_longslit_model and # tt?_bij_longslit_model because the latter have already been # created as native python lists if slt.ttd_aij is None: ttd_aij = None else: ttd_aij = slt.ttd_aij.tolist() if slt.ttd_bij is None: ttd_bij = None else: ttd_bij = slt.ttd_bij.tolist() if slt.tti_aij is None: tti_aij = None else: tti_aij = slt.tti_aij.tolist() if slt.tti_bij is None: tti_bij = None else: tti_bij = slt.tti_bij.tolist() # creating temporary dictionary with the information corresponding # to the current slitlett that will be saved in the JSON file tmp_dict = { 'csu_bar_left': slt.csu_bar_left, 'csu_bar_right': slt.csu_bar_right, 'csu_bar_slit_center': slt.csu_bar_slit_center, 'csu_bar_slit_width': slt.csu_bar_slit_width, 'x0_reference': slt.x0_reference, 'y0_reference_lower': slt.y0_reference_lower, 
'y0_reference_middle': slt.y0_reference_middle, 'y0_reference_upper': slt.y0_reference_upper, 'y0_reference_lower_expected': slt.y0_reference_lower_expected, 'y0_reference_middle_expected': slt.y0_reference_middle_expected, 'y0_reference_upper_expected': slt.y0_reference_upper_expected, 'y0_frontier_lower': slt.y0_frontier_lower, 'y0_frontier_upper': slt.y0_frontier_upper, 'y0_frontier_lower_expected': slt.y0_frontier_lower_expected, 'y0_frontier_upper_expected': slt.y0_frontier_upper_expected, 'corr_yrect_a': slt.corr_yrect_a, 'corr_yrect_b': slt.corr_yrect_b, 'min_row_rectified': slt.min_row_rectified, 'max_row_rectified': slt.max_row_rectified, 'ymargin_bb': slt.ymargin_bb, 'bb_nc1_orig': slt.bb_nc1_orig, 'bb_nc2_orig': slt.bb_nc2_orig, 'bb_ns1_orig': slt.bb_ns1_orig, 'bb_ns2_orig': slt.bb_ns2_orig, 'spectrail': { 'poly_coef_lower': slt.list_spectrails[ slt.i_lower_spectrail].poly_funct.coef.tolist(), 'poly_coef_middle': slt.list_spectrails[ slt.i_middle_spectrail].poly_funct.coef.tolist(), 'poly_coef_upper': slt.list_spectrails[ slt.i_upper_spectrail].poly_funct.coef.tolist(), }, 'frontier': { 'poly_coef_lower': slt.list_frontiers[0].poly_funct.coef.tolist(), 'poly_coef_upper': slt.list_frontiers[1].poly_funct.coef.tolist(), }, 'ttd_order': slt.ttd_order, 'ttd_aij': ttd_aij, 'ttd_bij': ttd_bij, 'tti_aij': tti_aij, 'tti_bij': tti_bij, 'ttd_order_longslit_model': slt.ttd_order_longslit_model, 'ttd_aij_longslit_model': slt.ttd_aij_longslit_model, 'ttd_bij_longslit_model': slt.ttd_bij_longslit_model, 'tti_aij_longslit_model': slt.tti_aij_longslit_model, 'tti_bij_longslit_model': slt.tti_bij_longslit_model, 'wpoly_coeff': wpoly_coeff, 'wpoly_coeff_longslit_model': wpoly_coeff_longslit_model, 'crval1_linear': slt.crval1_linear, 'cdelt1_linear': slt.cdelt1_linear } else: missing_slitlets.append(islitlet) tmp_dict = { 'csu_bar_left': slt.csu_bar_left, 'csu_bar_right': slt.csu_bar_right, 'csu_bar_slit_center': slt.csu_bar_slit_center, 'csu_bar_slit_width': slt.csu_bar_slit_width, 'x0_reference': slt.x0_reference, 'y0_frontier_lower_expected': slt.y0_frontier_lower_expected, 'y0_frontier_upper_expected': slt.y0_frontier_upper_expected } slitlet_label = "slitlet" + str(islitlet).zfill(2) outdict['contents'][slitlet_label] = tmp_dict # --- # OBSOLETE ''' # save JSON file needed to compute the MOS model with open(args.out_json.name, 'w') as fstream: json.dump(outdict, fstream, indent=2, sort_keys=True) print('>>> Saving file ' + args.out_json.name) ''' # --- # Create object of type RectWaveCoeff with coefficients for # rectification and wavelength calibration rectwv_coeff = RectWaveCoeff(instrument='EMIR') rectwv_coeff.quality_control = numina.types.qc.QC.GOOD rectwv_coeff.tags['grism'] = grism_name rectwv_coeff.tags['filter'] = filter_name rectwv_coeff.meta_info['origin']['bound_param'] = \ 'uuid' + bound_param.uuid rectwv_coeff.meta_info['dtu_configuration'] = outdict['dtu_configuration'] rectwv_coeff.total_slitlets = EMIR_NBARS rectwv_coeff.missing_slitlets = missing_slitlets for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: raise ValueError("Unexpected error") rectwv_coeff.contents.append(dumdict) # debugging __getstate__ and __setstate__ # rectwv_coeff.writeto(args.out_json.name) # print('>>> Saving file ' + args.out_json.name) # check_setstate_getstate(rectwv_coeff, args.out_json.name) logger.info('Generating RectWaveCoeff object 
with uuid=' + rectwv_coeff.uuid) return rectwv_coeff, reduced_55sp
python
def rectwv_coeff_from_arc_image(reduced_image, bound_param, lines_catalog, args_nbrightlines=None, args_ymargin_bb=2, args_remove_sp_background=True, args_times_sigma_threshold=10, args_order_fmap=2, args_sigma_gaussian_filtering=2, args_margin_npix=50, args_poldeg_initial=3, args_poldeg_refined=5, args_interactive=False, args_threshold_wv=0, args_ylogscale=False, args_pdf=None, args_geometry=(0,0,640,480), debugplot=0): """Evaluate rect.+wavecal. coefficients from arc image Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. bound_param : RefinedBoundaryModelParam instance Refined boundary model. lines_catalog : Numpy array 2D numpy array with the contents of the master file with the expected arc line wavelengths. args_nbrightlines : int TBD args_ymargin_bb : int TBD args_remove_sp_background : bool TBD args_times_sigma_threshold : float TBD args_order_fmap : int TBD args_sigma_gaussian_filtering : float TBD args_margin_npix : int TBD args_poldeg_initial : int TBD args_poldeg_refined : int TBD args_interactive : bool TBD args_threshold_wv : float TBD args_ylogscale : bool TBD args_pdf : TBD args_geometry : TBD debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration of the input arc image. reduced_55sp : HDUList object Image with 55 spectra corresponding to the median spectrum for each slitlet, employed to derived the wavelength calibration polynomial. """ logger = logging.getLogger(__name__) # protections if args_interactive and args_pdf is not None: logger.error('--interactive and --pdf are incompatible options') raise ValueError('--interactive and --pdf are incompatible options') # header and data array header = reduced_image[0].header image2d = reduced_image[0].data # check grism and filter filter_name = header['filter'] logger.info('Filter: ' + filter_name) if filter_name != bound_param.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.info('Grism: ' + grism_name) if grism_name != bound_param.tags['grism']: raise ValueError('Grism name does not match!') # read the CSU configuration from the image header csu_conf = CsuConfiguration.define_from_header(header) logger.debug(csu_conf) # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) logger.debug(dtu_conf) # set boundary parameters parmodel = bound_param.meta_info['parmodel'] params = bound_params_from_dict(bound_param.__getstate__()) if abs(debugplot) >= 10: print('-' * 83) print('* FITTED BOUND PARAMETERS') params.pretty_print() pause_debugplot(debugplot) # determine parameters according to grism+filter combination wv_parameters = set_wv_parameters(filter_name, grism_name) islitlet_min = wv_parameters['islitlet_min'] islitlet_max = wv_parameters['islitlet_max'] if args_nbrightlines is None: nbrightlines = wv_parameters['nbrightlines'] else: nbrightlines = [int(idum) for idum in args_nbrightlines.split(',')] poly_crval1_linear = wv_parameters['poly_crval1_linear'] poly_cdelt1_linear = wv_parameters['poly_cdelt1_linear'] wvmin_expected = wv_parameters['wvmin_expected'] wvmax_expected = wv_parameters['wvmax_expected'] wvmin_useful = wv_parameters['wvmin_useful'] wvmax_useful = wv_parameters['wvmax_useful'] # list of slitlets to be computed 
logger.info('list_slitlets: [' + str(islitlet_min) + ',... ' + str(islitlet_max) + ']') # read master arc line wavelengths (only brightest lines) wv_master = read_wv_master_from_array( master_table=lines_catalog, lines='brightest', debugplot=debugplot ) # read master arc line wavelengths (whole data set) wv_master_all = read_wv_master_from_array( master_table=lines_catalog, lines='all', debugplot=debugplot ) # check that the arc lines in the master file are properly sorted # in ascending order for i in range(len(wv_master_all) - 1): if wv_master_all[i] >= wv_master_all[i + 1]: logger.error('>>> wavelengths: ' + str(wv_master_all[i]) + ' ' + str(wv_master_all[i+1])) raise ValueError('Arc lines are not sorted in master file') # --- image2d_55sp = np.zeros((EMIR_NBARS, EMIR_NAXIS1)) # compute rectification transformation and wavelength calibration # polynomials measured_slitlets = [] cout = '0' for islitlet in range(1, EMIR_NBARS + 1): if islitlet_min <= islitlet <= islitlet_max: # define Slitlet2dArc object slt = Slitlet2dArc( islitlet=islitlet, csu_conf=csu_conf, ymargin_bb=args_ymargin_bb, params=params, parmodel=parmodel, debugplot=debugplot ) # extract 2D image corresponding to the selected slitlet, clipping # the image beyond the unrectified slitlet (in order to isolate # the arc lines of the current slitlet; otherwise there are # problems with arc lines from neighbour slitlets) image2d_tmp = select_unrectified_slitlet( image2d=image2d, islitlet=islitlet, csu_bar_slit_center=csu_conf.csu_bar_slit_center(islitlet), params=params, parmodel=parmodel, maskonly=False ) slitlet2d = slt.extract_slitlet2d(image2d_tmp) # subtract smooth background computed as follows: # - median collapsed spectrum of the whole slitlet2d # - independent median filtering of the previous spectrum in the # two halves in the spectral direction if args_remove_sp_background: spmedian = np.median(slitlet2d, axis=0) naxis1_tmp = spmedian.shape[0] jmidpoint = naxis1_tmp // 2 sp1 = medfilt(spmedian[:jmidpoint], [201]) sp2 = medfilt(spmedian[jmidpoint:], [201]) spbackground = np.concatenate((sp1, sp2)) slitlet2d -= spbackground # locate unknown arc lines slt.locate_unknown_arc_lines( slitlet2d=slitlet2d, times_sigma_threshold=args_times_sigma_threshold) # continue working with current slitlet only if arc lines have # been detected if slt.list_arc_lines is not None: # compute intersections between spectrum trails and arc lines slt.xy_spectrail_arc_intersections(slitlet2d=slitlet2d) # compute rectification transformation slt.estimate_tt_to_rectify(order=args_order_fmap, slitlet2d=slitlet2d) # rectify image slitlet2d_rect = slt.rectify(slitlet2d, resampling=2, transformation=1) # median spectrum and line peaks from rectified image sp_median, fxpeaks = slt.median_spectrum_from_rectified_image( slitlet2d_rect, sigma_gaussian_filtering=args_sigma_gaussian_filtering, nwinwidth_initial=5, nwinwidth_refined=5, times_sigma_threshold=5, npix_avoid_border=6, nbrightlines=nbrightlines ) image2d_55sp[islitlet - 1, :] = sp_median # determine expected wavelength limits prior to the wavelength # calibration csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) crval1_linear = poly_crval1_linear(csu_bar_slit_center) cdelt1_linear = poly_cdelt1_linear(csu_bar_slit_center) expected_wvmin = crval1_linear - \ args_margin_npix * cdelt1_linear naxis1_linear = sp_median.shape[0] crvaln_linear = crval1_linear + \ (naxis1_linear - 1) * cdelt1_linear expected_wvmax = crvaln_linear + \ args_margin_npix * cdelt1_linear # override previous 
estimates when necessary if wvmin_expected is not None: expected_wvmin = wvmin_expected if wvmax_expected is not None: expected_wvmax = wvmax_expected # clip initial master arc line list with bright lines to # the expected wavelength range lok1 = expected_wvmin <= wv_master lok2 = wv_master <= expected_wvmax lok = lok1 * lok2 wv_master_eff = wv_master[lok] # perform initial wavelength calibration solution_wv = wvcal_spectrum( sp=sp_median, fxpeaks=fxpeaks, poly_degree_wfit=args_poldeg_initial, wv_master=wv_master_eff, wv_ini_search=expected_wvmin, wv_end_search=expected_wvmax, wvmin_useful=wvmin_useful, wvmax_useful=wvmax_useful, geometry=args_geometry, debugplot=slt.debugplot ) # store initial wavelength calibration polynomial in current # slitlet instance slt.wpoly = np.polynomial.Polynomial(solution_wv.coeff) pause_debugplot(debugplot) # clip initial master arc line list with all the lines to # the expected wavelength range lok1 = expected_wvmin <= wv_master_all lok2 = wv_master_all <= expected_wvmax lok = lok1 * lok2 wv_master_all_eff = wv_master_all[lok] # clip master arc line list to useful region if wvmin_useful is not None: lok = wvmin_useful <= wv_master_all_eff wv_master_all_eff = wv_master_all_eff[lok] if wvmax_useful is not None: lok = wv_master_all_eff <= wvmax_useful wv_master_all_eff = wv_master_all_eff[lok] # refine wavelength calibration if args_poldeg_refined > 0: plottitle = '[slitlet#{}, refined]'.format(islitlet) poly_refined, yres_summary = refine_arccalibration( sp=sp_median, poly_initial=slt.wpoly, wv_master=wv_master_all_eff, poldeg=args_poldeg_refined, ntimes_match_wv=1, interactive=args_interactive, threshold=args_threshold_wv, plottitle=plottitle, ylogscale=args_ylogscale, geometry=args_geometry, pdf=args_pdf, debugplot=slt.debugplot ) # store refined wavelength calibration polynomial in # current slitlet instance slt.wpoly = poly_refined # compute approximate linear values for CRVAL1 and CDELT1 naxis1_linear = sp_median.shape[0] crmin1_linear = slt.wpoly(1) crmax1_linear = slt.wpoly(naxis1_linear) slt.crval1_linear = crmin1_linear slt.cdelt1_linear = \ (crmax1_linear - crmin1_linear) / (naxis1_linear - 1) # check that the trimming of wv_master and wv_master_all has # preserved the wavelength range [crmin1_linear, crmax1_linear] if crmin1_linear < expected_wvmin: logger.warning(">>> islitlet: " +str(islitlet)) logger.warning("expected_wvmin: " + str(expected_wvmin)) logger.warning("crmin1_linear.: " + str(crmin1_linear)) logger.warning("WARNING: Unexpected crmin1_linear < " "expected_wvmin") if crmax1_linear > expected_wvmax: logger.warning(">>> islitlet: " +str(islitlet)) logger.warning("expected_wvmax: " + str(expected_wvmax)) logger.warning("crmax1_linear.: " + str(crmax1_linear)) logger.warning("WARNING: Unexpected crmax1_linear > " "expected_wvmax") cout += '.' 
else: cout += 'x' if islitlet % 10 == 0: if cout != 'x': cout = str(islitlet // 10) if debugplot != 0: pause_debugplot(debugplot) else: # define Slitlet2dArc object slt = Slitlet2dArc( islitlet=islitlet, csu_conf=csu_conf, ymargin_bb=args_ymargin_bb, params=None, parmodel=None, debugplot=debugplot ) cout += 'i' # store current slitlet in list of measured slitlets measured_slitlets.append(slt) logger.info(cout) # --- # generate FITS file structure with 55 spectra corresponding to the # median spectrum for each slitlet reduced_55sp = fits.PrimaryHDU(data=image2d_55sp) reduced_55sp.header['crpix1'] = (0.0, 'reference pixel') reduced_55sp.header['crval1'] = (0.0, 'central value at crpix2') reduced_55sp.header['cdelt1'] = (1.0, 'increment') reduced_55sp.header['ctype1'] = 'PIXEL' reduced_55sp.header['cunit1'] = ('Pixel', 'units along axis2') reduced_55sp.header['crpix2'] = (0.0, 'reference pixel') reduced_55sp.header['crval2'] = (0.0, 'central value at crpix2') reduced_55sp.header['cdelt2'] = (1.0, 'increment') reduced_55sp.header['ctype2'] = 'PIXEL' reduced_55sp.header['cunit2'] = ('Pixel', 'units along axis2') # --- # Generate structure to store intermediate results outdict = {} outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'computation of rectification and wavelength calibration polynomial ' \ 'coefficients for a particular CSU configuration' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['bound_param_uuid'] = \ bound_param.uuid outdict['meta_info']['origin']['arc_image_uuid'] = 'undefined' outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name outdict['tags']['islitlet_min'] = islitlet_min outdict['tags']['islitlet_max'] = islitlet_max outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} missing_slitlets = [] for slt in measured_slitlets: islitlet = slt.islitlet if islitlet_min <= islitlet <= islitlet_max: # avoid error when creating a python list of coefficients from # numpy polynomials when the polynomials do not exist (note that # the JSON format doesn't handle numpy arrays and such arrays must # be transformed into native python lists) if slt.wpoly is None: wpoly_coeff = None else: wpoly_coeff = slt.wpoly.coef.tolist() if slt.wpoly_longslit_model is None: wpoly_coeff_longslit_model = None else: wpoly_coeff_longslit_model = \ slt.wpoly_longslit_model.coef.tolist() # avoid similar error when creating a python list of coefficients # when the numpy array does not exist; note that this problem # does not happen with tt?_aij_longslit_model and # tt?_bij_longslit_model because the latter have already been # created as native python lists if slt.ttd_aij is None: ttd_aij = None else: ttd_aij = slt.ttd_aij.tolist() if slt.ttd_bij is None: ttd_bij = None else: ttd_bij = slt.ttd_bij.tolist() if slt.tti_aij is None: tti_aij = None else: tti_aij = slt.tti_aij.tolist() if slt.tti_bij is None: tti_bij = None else: tti_bij = slt.tti_bij.tolist() # creating temporary dictionary with the information corresponding # to the current slitlett that will be saved in the JSON file tmp_dict = { 'csu_bar_left': slt.csu_bar_left, 'csu_bar_right': slt.csu_bar_right, 'csu_bar_slit_center': slt.csu_bar_slit_center, 'csu_bar_slit_width': slt.csu_bar_slit_width, 'x0_reference': slt.x0_reference, 'y0_reference_lower': slt.y0_reference_lower, 
'y0_reference_middle': slt.y0_reference_middle, 'y0_reference_upper': slt.y0_reference_upper, 'y0_reference_lower_expected': slt.y0_reference_lower_expected, 'y0_reference_middle_expected': slt.y0_reference_middle_expected, 'y0_reference_upper_expected': slt.y0_reference_upper_expected, 'y0_frontier_lower': slt.y0_frontier_lower, 'y0_frontier_upper': slt.y0_frontier_upper, 'y0_frontier_lower_expected': slt.y0_frontier_lower_expected, 'y0_frontier_upper_expected': slt.y0_frontier_upper_expected, 'corr_yrect_a': slt.corr_yrect_a, 'corr_yrect_b': slt.corr_yrect_b, 'min_row_rectified': slt.min_row_rectified, 'max_row_rectified': slt.max_row_rectified, 'ymargin_bb': slt.ymargin_bb, 'bb_nc1_orig': slt.bb_nc1_orig, 'bb_nc2_orig': slt.bb_nc2_orig, 'bb_ns1_orig': slt.bb_ns1_orig, 'bb_ns2_orig': slt.bb_ns2_orig, 'spectrail': { 'poly_coef_lower': slt.list_spectrails[ slt.i_lower_spectrail].poly_funct.coef.tolist(), 'poly_coef_middle': slt.list_spectrails[ slt.i_middle_spectrail].poly_funct.coef.tolist(), 'poly_coef_upper': slt.list_spectrails[ slt.i_upper_spectrail].poly_funct.coef.tolist(), }, 'frontier': { 'poly_coef_lower': slt.list_frontiers[0].poly_funct.coef.tolist(), 'poly_coef_upper': slt.list_frontiers[1].poly_funct.coef.tolist(), }, 'ttd_order': slt.ttd_order, 'ttd_aij': ttd_aij, 'ttd_bij': ttd_bij, 'tti_aij': tti_aij, 'tti_bij': tti_bij, 'ttd_order_longslit_model': slt.ttd_order_longslit_model, 'ttd_aij_longslit_model': slt.ttd_aij_longslit_model, 'ttd_bij_longslit_model': slt.ttd_bij_longslit_model, 'tti_aij_longslit_model': slt.tti_aij_longslit_model, 'tti_bij_longslit_model': slt.tti_bij_longslit_model, 'wpoly_coeff': wpoly_coeff, 'wpoly_coeff_longslit_model': wpoly_coeff_longslit_model, 'crval1_linear': slt.crval1_linear, 'cdelt1_linear': slt.cdelt1_linear } else: missing_slitlets.append(islitlet) tmp_dict = { 'csu_bar_left': slt.csu_bar_left, 'csu_bar_right': slt.csu_bar_right, 'csu_bar_slit_center': slt.csu_bar_slit_center, 'csu_bar_slit_width': slt.csu_bar_slit_width, 'x0_reference': slt.x0_reference, 'y0_frontier_lower_expected': slt.y0_frontier_lower_expected, 'y0_frontier_upper_expected': slt.y0_frontier_upper_expected } slitlet_label = "slitlet" + str(islitlet).zfill(2) outdict['contents'][slitlet_label] = tmp_dict # --- # OBSOLETE ''' # save JSON file needed to compute the MOS model with open(args.out_json.name, 'w') as fstream: json.dump(outdict, fstream, indent=2, sort_keys=True) print('>>> Saving file ' + args.out_json.name) ''' # --- # Create object of type RectWaveCoeff with coefficients for # rectification and wavelength calibration rectwv_coeff = RectWaveCoeff(instrument='EMIR') rectwv_coeff.quality_control = numina.types.qc.QC.GOOD rectwv_coeff.tags['grism'] = grism_name rectwv_coeff.tags['filter'] = filter_name rectwv_coeff.meta_info['origin']['bound_param'] = \ 'uuid' + bound_param.uuid rectwv_coeff.meta_info['dtu_configuration'] = outdict['dtu_configuration'] rectwv_coeff.total_slitlets = EMIR_NBARS rectwv_coeff.missing_slitlets = missing_slitlets for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: raise ValueError("Unexpected error") rectwv_coeff.contents.append(dumdict) # debugging __getstate__ and __setstate__ # rectwv_coeff.writeto(args.out_json.name) # print('>>> Saving file ' + args.out_json.name) # check_setstate_getstate(rectwv_coeff, args.out_json.name) logger.info('Generating RectWaveCoeff object 
with uuid=' + rectwv_coeff.uuid) return rectwv_coeff, reduced_55sp
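Before calibrating each slitlet, the function above predicts an expected wavelength range from linear CRVAL1/CDELT1 estimates widened by a pixel margin (args_margin_npix) on both ends; the master arc-line list is then clipped to that range. The following is a small, self-contained numerical sketch of that computation, with illustrative values only (not taken from the record).

args_margin_npix = 50          # extra margin, in pixels, at both ends
naxis1_linear = 3400           # number of spectral pixels in the median spectrum

crval1_linear = 11200.0        # predicted wavelength of the first pixel (Angstrom)
cdelt1_linear = 0.77           # predicted linear dispersion (Angstrom/pixel)

expected_wvmin = crval1_linear - args_margin_npix * cdelt1_linear
crvaln_linear = crval1_linear + (naxis1_linear - 1) * cdelt1_linear
expected_wvmax = crvaln_linear + args_margin_npix * cdelt1_linear

print(expected_wvmin, expected_wvmax)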
[ "def", "rectwv_coeff_from_arc_image", "(", "reduced_image", ",", "bound_param", ",", "lines_catalog", ",", "args_nbrightlines", "=", "None", ",", "args_ymargin_bb", "=", "2", ",", "args_remove_sp_background", "=", "True", ",", "args_times_sigma_threshold", "=", "10", ",", "args_order_fmap", "=", "2", ",", "args_sigma_gaussian_filtering", "=", "2", ",", "args_margin_npix", "=", "50", ",", "args_poldeg_initial", "=", "3", ",", "args_poldeg_refined", "=", "5", ",", "args_interactive", "=", "False", ",", "args_threshold_wv", "=", "0", ",", "args_ylogscale", "=", "False", ",", "args_pdf", "=", "None", ",", "args_geometry", "=", "(", "0", ",", "0", ",", "640", ",", "480", ")", ",", "debugplot", "=", "0", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# protections", "if", "args_interactive", "and", "args_pdf", "is", "not", "None", ":", "logger", ".", "error", "(", "'--interactive and --pdf are incompatible options'", ")", "raise", "ValueError", "(", "'--interactive and --pdf are incompatible options'", ")", "# header and data array", "header", "=", "reduced_image", "[", "0", "]", ".", "header", "image2d", "=", "reduced_image", "[", "0", "]", ".", "data", "# check grism and filter", "filter_name", "=", "header", "[", "'filter'", "]", "logger", ".", "info", "(", "'Filter: '", "+", "filter_name", ")", "if", "filter_name", "!=", "bound_param", ".", "tags", "[", "'filter'", "]", ":", "raise", "ValueError", "(", "'Filter name does not match!'", ")", "grism_name", "=", "header", "[", "'grism'", "]", "logger", ".", "info", "(", "'Grism: '", "+", "grism_name", ")", "if", "grism_name", "!=", "bound_param", ".", "tags", "[", "'grism'", "]", ":", "raise", "ValueError", "(", "'Grism name does not match!'", ")", "# read the CSU configuration from the image header", "csu_conf", "=", "CsuConfiguration", ".", "define_from_header", "(", "header", ")", "logger", ".", "debug", "(", "csu_conf", ")", "# read the DTU configuration from the image header", "dtu_conf", "=", "DtuConfiguration", ".", "define_from_header", "(", "header", ")", "logger", ".", "debug", "(", "dtu_conf", ")", "# set boundary parameters", "parmodel", "=", "bound_param", ".", "meta_info", "[", "'parmodel'", "]", "params", "=", "bound_params_from_dict", "(", "bound_param", ".", "__getstate__", "(", ")", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'-'", "*", "83", ")", "print", "(", "'* FITTED BOUND PARAMETERS'", ")", "params", ".", "pretty_print", "(", ")", "pause_debugplot", "(", "debugplot", ")", "# determine parameters according to grism+filter combination", "wv_parameters", "=", "set_wv_parameters", "(", "filter_name", ",", "grism_name", ")", "islitlet_min", "=", "wv_parameters", "[", "'islitlet_min'", "]", "islitlet_max", "=", "wv_parameters", "[", "'islitlet_max'", "]", "if", "args_nbrightlines", "is", "None", ":", "nbrightlines", "=", "wv_parameters", "[", "'nbrightlines'", "]", "else", ":", "nbrightlines", "=", "[", "int", "(", "idum", ")", "for", "idum", "in", "args_nbrightlines", ".", "split", "(", "','", ")", "]", "poly_crval1_linear", "=", "wv_parameters", "[", "'poly_crval1_linear'", "]", "poly_cdelt1_linear", "=", "wv_parameters", "[", "'poly_cdelt1_linear'", "]", "wvmin_expected", "=", "wv_parameters", "[", "'wvmin_expected'", "]", "wvmax_expected", "=", "wv_parameters", "[", "'wvmax_expected'", "]", "wvmin_useful", "=", "wv_parameters", "[", "'wvmin_useful'", "]", "wvmax_useful", "=", "wv_parameters", "[", "'wvmax_useful'", "]", "# list of slitlets to be computed", 
"logger", ".", "info", "(", "'list_slitlets: ['", "+", "str", "(", "islitlet_min", ")", "+", "',... '", "+", "str", "(", "islitlet_max", ")", "+", "']'", ")", "# read master arc line wavelengths (only brightest lines)", "wv_master", "=", "read_wv_master_from_array", "(", "master_table", "=", "lines_catalog", ",", "lines", "=", "'brightest'", ",", "debugplot", "=", "debugplot", ")", "# read master arc line wavelengths (whole data set)", "wv_master_all", "=", "read_wv_master_from_array", "(", "master_table", "=", "lines_catalog", ",", "lines", "=", "'all'", ",", "debugplot", "=", "debugplot", ")", "# check that the arc lines in the master file are properly sorted", "# in ascending order", "for", "i", "in", "range", "(", "len", "(", "wv_master_all", ")", "-", "1", ")", ":", "if", "wv_master_all", "[", "i", "]", ">=", "wv_master_all", "[", "i", "+", "1", "]", ":", "logger", ".", "error", "(", "'>>> wavelengths: '", "+", "str", "(", "wv_master_all", "[", "i", "]", ")", "+", "' '", "+", "str", "(", "wv_master_all", "[", "i", "+", "1", "]", ")", ")", "raise", "ValueError", "(", "'Arc lines are not sorted in master file'", ")", "# ---", "image2d_55sp", "=", "np", ".", "zeros", "(", "(", "EMIR_NBARS", ",", "EMIR_NAXIS1", ")", ")", "# compute rectification transformation and wavelength calibration", "# polynomials", "measured_slitlets", "=", "[", "]", "cout", "=", "'0'", "for", "islitlet", "in", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ":", "if", "islitlet_min", "<=", "islitlet", "<=", "islitlet_max", ":", "# define Slitlet2dArc object", "slt", "=", "Slitlet2dArc", "(", "islitlet", "=", "islitlet", ",", "csu_conf", "=", "csu_conf", ",", "ymargin_bb", "=", "args_ymargin_bb", ",", "params", "=", "params", ",", "parmodel", "=", "parmodel", ",", "debugplot", "=", "debugplot", ")", "# extract 2D image corresponding to the selected slitlet, clipping", "# the image beyond the unrectified slitlet (in order to isolate", "# the arc lines of the current slitlet; otherwise there are", "# problems with arc lines from neighbour slitlets)", "image2d_tmp", "=", "select_unrectified_slitlet", "(", "image2d", "=", "image2d", ",", "islitlet", "=", "islitlet", ",", "csu_bar_slit_center", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", ",", "params", "=", "params", ",", "parmodel", "=", "parmodel", ",", "maskonly", "=", "False", ")", "slitlet2d", "=", "slt", ".", "extract_slitlet2d", "(", "image2d_tmp", ")", "# subtract smooth background computed as follows:", "# - median collapsed spectrum of the whole slitlet2d", "# - independent median filtering of the previous spectrum in the", "# two halves in the spectral direction", "if", "args_remove_sp_background", ":", "spmedian", "=", "np", ".", "median", "(", "slitlet2d", ",", "axis", "=", "0", ")", "naxis1_tmp", "=", "spmedian", ".", "shape", "[", "0", "]", "jmidpoint", "=", "naxis1_tmp", "//", "2", "sp1", "=", "medfilt", "(", "spmedian", "[", ":", "jmidpoint", "]", ",", "[", "201", "]", ")", "sp2", "=", "medfilt", "(", "spmedian", "[", "jmidpoint", ":", "]", ",", "[", "201", "]", ")", "spbackground", "=", "np", ".", "concatenate", "(", "(", "sp1", ",", "sp2", ")", ")", "slitlet2d", "-=", "spbackground", "# locate unknown arc lines", "slt", ".", "locate_unknown_arc_lines", "(", "slitlet2d", "=", "slitlet2d", ",", "times_sigma_threshold", "=", "args_times_sigma_threshold", ")", "# continue working with current slitlet only if arc lines have", "# been detected", "if", "slt", ".", "list_arc_lines", "is", "not", "None", ":", "# compute 
intersections between spectrum trails and arc lines", "slt", ".", "xy_spectrail_arc_intersections", "(", "slitlet2d", "=", "slitlet2d", ")", "# compute rectification transformation", "slt", ".", "estimate_tt_to_rectify", "(", "order", "=", "args_order_fmap", ",", "slitlet2d", "=", "slitlet2d", ")", "# rectify image", "slitlet2d_rect", "=", "slt", ".", "rectify", "(", "slitlet2d", ",", "resampling", "=", "2", ",", "transformation", "=", "1", ")", "# median spectrum and line peaks from rectified image", "sp_median", ",", "fxpeaks", "=", "slt", ".", "median_spectrum_from_rectified_image", "(", "slitlet2d_rect", ",", "sigma_gaussian_filtering", "=", "args_sigma_gaussian_filtering", ",", "nwinwidth_initial", "=", "5", ",", "nwinwidth_refined", "=", "5", ",", "times_sigma_threshold", "=", "5", ",", "npix_avoid_border", "=", "6", ",", "nbrightlines", "=", "nbrightlines", ")", "image2d_55sp", "[", "islitlet", "-", "1", ",", ":", "]", "=", "sp_median", "# determine expected wavelength limits prior to the wavelength", "# calibration", "csu_bar_slit_center", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "crval1_linear", "=", "poly_crval1_linear", "(", "csu_bar_slit_center", ")", "cdelt1_linear", "=", "poly_cdelt1_linear", "(", "csu_bar_slit_center", ")", "expected_wvmin", "=", "crval1_linear", "-", "args_margin_npix", "*", "cdelt1_linear", "naxis1_linear", "=", "sp_median", ".", "shape", "[", "0", "]", "crvaln_linear", "=", "crval1_linear", "+", "(", "naxis1_linear", "-", "1", ")", "*", "cdelt1_linear", "expected_wvmax", "=", "crvaln_linear", "+", "args_margin_npix", "*", "cdelt1_linear", "# override previous estimates when necessary", "if", "wvmin_expected", "is", "not", "None", ":", "expected_wvmin", "=", "wvmin_expected", "if", "wvmax_expected", "is", "not", "None", ":", "expected_wvmax", "=", "wvmax_expected", "# clip initial master arc line list with bright lines to", "# the expected wavelength range", "lok1", "=", "expected_wvmin", "<=", "wv_master", "lok2", "=", "wv_master", "<=", "expected_wvmax", "lok", "=", "lok1", "*", "lok2", "wv_master_eff", "=", "wv_master", "[", "lok", "]", "# perform initial wavelength calibration", "solution_wv", "=", "wvcal_spectrum", "(", "sp", "=", "sp_median", ",", "fxpeaks", "=", "fxpeaks", ",", "poly_degree_wfit", "=", "args_poldeg_initial", ",", "wv_master", "=", "wv_master_eff", ",", "wv_ini_search", "=", "expected_wvmin", ",", "wv_end_search", "=", "expected_wvmax", ",", "wvmin_useful", "=", "wvmin_useful", ",", "wvmax_useful", "=", "wvmax_useful", ",", "geometry", "=", "args_geometry", ",", "debugplot", "=", "slt", ".", "debugplot", ")", "# store initial wavelength calibration polynomial in current", "# slitlet instance", "slt", ".", "wpoly", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "solution_wv", ".", "coeff", ")", "pause_debugplot", "(", "debugplot", ")", "# clip initial master arc line list with all the lines to", "# the expected wavelength range", "lok1", "=", "expected_wvmin", "<=", "wv_master_all", "lok2", "=", "wv_master_all", "<=", "expected_wvmax", "lok", "=", "lok1", "*", "lok2", "wv_master_all_eff", "=", "wv_master_all", "[", "lok", "]", "# clip master arc line list to useful region", "if", "wvmin_useful", "is", "not", "None", ":", "lok", "=", "wvmin_useful", "<=", "wv_master_all_eff", "wv_master_all_eff", "=", "wv_master_all_eff", "[", "lok", "]", "if", "wvmax_useful", "is", "not", "None", ":", "lok", "=", "wv_master_all_eff", "<=", "wvmax_useful", "wv_master_all_eff", "=", "wv_master_all_eff", "[", "lok", "]", 
"# refine wavelength calibration", "if", "args_poldeg_refined", ">", "0", ":", "plottitle", "=", "'[slitlet#{}, refined]'", ".", "format", "(", "islitlet", ")", "poly_refined", ",", "yres_summary", "=", "refine_arccalibration", "(", "sp", "=", "sp_median", ",", "poly_initial", "=", "slt", ".", "wpoly", ",", "wv_master", "=", "wv_master_all_eff", ",", "poldeg", "=", "args_poldeg_refined", ",", "ntimes_match_wv", "=", "1", ",", "interactive", "=", "args_interactive", ",", "threshold", "=", "args_threshold_wv", ",", "plottitle", "=", "plottitle", ",", "ylogscale", "=", "args_ylogscale", ",", "geometry", "=", "args_geometry", ",", "pdf", "=", "args_pdf", ",", "debugplot", "=", "slt", ".", "debugplot", ")", "# store refined wavelength calibration polynomial in", "# current slitlet instance", "slt", ".", "wpoly", "=", "poly_refined", "# compute approximate linear values for CRVAL1 and CDELT1", "naxis1_linear", "=", "sp_median", ".", "shape", "[", "0", "]", "crmin1_linear", "=", "slt", ".", "wpoly", "(", "1", ")", "crmax1_linear", "=", "slt", ".", "wpoly", "(", "naxis1_linear", ")", "slt", ".", "crval1_linear", "=", "crmin1_linear", "slt", ".", "cdelt1_linear", "=", "(", "crmax1_linear", "-", "crmin1_linear", ")", "/", "(", "naxis1_linear", "-", "1", ")", "# check that the trimming of wv_master and wv_master_all has", "# preserved the wavelength range [crmin1_linear, crmax1_linear]", "if", "crmin1_linear", "<", "expected_wvmin", ":", "logger", ".", "warning", "(", "\">>> islitlet: \"", "+", "str", "(", "islitlet", ")", ")", "logger", ".", "warning", "(", "\"expected_wvmin: \"", "+", "str", "(", "expected_wvmin", ")", ")", "logger", ".", "warning", "(", "\"crmin1_linear.: \"", "+", "str", "(", "crmin1_linear", ")", ")", "logger", ".", "warning", "(", "\"WARNING: Unexpected crmin1_linear < \"", "\"expected_wvmin\"", ")", "if", "crmax1_linear", ">", "expected_wvmax", ":", "logger", ".", "warning", "(", "\">>> islitlet: \"", "+", "str", "(", "islitlet", ")", ")", "logger", ".", "warning", "(", "\"expected_wvmax: \"", "+", "str", "(", "expected_wvmax", ")", ")", "logger", ".", "warning", "(", "\"crmax1_linear.: \"", "+", "str", "(", "crmax1_linear", ")", ")", "logger", ".", "warning", "(", "\"WARNING: Unexpected crmax1_linear > \"", "\"expected_wvmax\"", ")", "cout", "+=", "'.'", "else", ":", "cout", "+=", "'x'", "if", "islitlet", "%", "10", "==", "0", ":", "if", "cout", "!=", "'x'", ":", "cout", "=", "str", "(", "islitlet", "//", "10", ")", "if", "debugplot", "!=", "0", ":", "pause_debugplot", "(", "debugplot", ")", "else", ":", "# define Slitlet2dArc object", "slt", "=", "Slitlet2dArc", "(", "islitlet", "=", "islitlet", ",", "csu_conf", "=", "csu_conf", ",", "ymargin_bb", "=", "args_ymargin_bb", ",", "params", "=", "None", ",", "parmodel", "=", "None", ",", "debugplot", "=", "debugplot", ")", "cout", "+=", "'i'", "# store current slitlet in list of measured slitlets", "measured_slitlets", ".", "append", "(", "slt", ")", "logger", ".", "info", "(", "cout", ")", "# ---", "# generate FITS file structure with 55 spectra corresponding to the", "# median spectrum for each slitlet", "reduced_55sp", "=", "fits", ".", "PrimaryHDU", "(", "data", "=", "image2d_55sp", ")", "reduced_55sp", ".", "header", "[", "'crpix1'", "]", "=", "(", "0.0", ",", "'reference pixel'", ")", "reduced_55sp", ".", "header", "[", "'crval1'", "]", "=", "(", "0.0", ",", "'central value at crpix2'", ")", "reduced_55sp", ".", "header", "[", "'cdelt1'", "]", "=", "(", "1.0", ",", "'increment'", ")", "reduced_55sp", ".", "header", "[", 
"'ctype1'", "]", "=", "'PIXEL'", "reduced_55sp", ".", "header", "[", "'cunit1'", "]", "=", "(", "'Pixel'", ",", "'units along axis2'", ")", "reduced_55sp", ".", "header", "[", "'crpix2'", "]", "=", "(", "0.0", ",", "'reference pixel'", ")", "reduced_55sp", ".", "header", "[", "'crval2'", "]", "=", "(", "0.0", ",", "'central value at crpix2'", ")", "reduced_55sp", ".", "header", "[", "'cdelt2'", "]", "=", "(", "1.0", ",", "'increment'", ")", "reduced_55sp", ".", "header", "[", "'ctype2'", "]", "=", "'PIXEL'", "reduced_55sp", ".", "header", "[", "'cunit2'", "]", "=", "(", "'Pixel'", ",", "'units along axis2'", ")", "# ---", "# Generate structure to store intermediate results", "outdict", "=", "{", "}", "outdict", "[", "'instrument'", "]", "=", "'EMIR'", "outdict", "[", "'meta_info'", "]", "=", "{", "}", "outdict", "[", "'meta_info'", "]", "[", "'creation_date'", "]", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "outdict", "[", "'meta_info'", "]", "[", "'description'", "]", "=", "'computation of rectification and wavelength calibration polynomial '", "'coefficients for a particular CSU configuration'", "outdict", "[", "'meta_info'", "]", "[", "'recipe_name'", "]", "=", "'undefined'", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "=", "{", "}", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "[", "'bound_param_uuid'", "]", "=", "bound_param", ".", "uuid", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "[", "'arc_image_uuid'", "]", "=", "'undefined'", "outdict", "[", "'tags'", "]", "=", "{", "}", "outdict", "[", "'tags'", "]", "[", "'grism'", "]", "=", "grism_name", "outdict", "[", "'tags'", "]", "[", "'filter'", "]", "=", "filter_name", "outdict", "[", "'tags'", "]", "[", "'islitlet_min'", "]", "=", "islitlet_min", "outdict", "[", "'tags'", "]", "[", "'islitlet_max'", "]", "=", "islitlet_max", "outdict", "[", "'dtu_configuration'", "]", "=", "dtu_conf", ".", "outdict", "(", ")", "outdict", "[", "'uuid'", "]", "=", "str", "(", "uuid4", "(", ")", ")", "outdict", "[", "'contents'", "]", "=", "{", "}", "missing_slitlets", "=", "[", "]", "for", "slt", "in", "measured_slitlets", ":", "islitlet", "=", "slt", ".", "islitlet", "if", "islitlet_min", "<=", "islitlet", "<=", "islitlet_max", ":", "# avoid error when creating a python list of coefficients from", "# numpy polynomials when the polynomials do not exist (note that", "# the JSON format doesn't handle numpy arrays and such arrays must", "# be transformed into native python lists)", "if", "slt", ".", "wpoly", "is", "None", ":", "wpoly_coeff", "=", "None", "else", ":", "wpoly_coeff", "=", "slt", ".", "wpoly", ".", "coef", ".", "tolist", "(", ")", "if", "slt", ".", "wpoly_longslit_model", "is", "None", ":", "wpoly_coeff_longslit_model", "=", "None", "else", ":", "wpoly_coeff_longslit_model", "=", "slt", ".", "wpoly_longslit_model", ".", "coef", ".", "tolist", "(", ")", "# avoid similar error when creating a python list of coefficients", "# when the numpy array does not exist; note that this problem", "# does not happen with tt?_aij_longslit_model and", "# tt?_bij_longslit_model because the latter have already been", "# created as native python lists", "if", "slt", ".", "ttd_aij", "is", "None", ":", "ttd_aij", "=", "None", "else", ":", "ttd_aij", "=", "slt", ".", "ttd_aij", ".", "tolist", "(", ")", "if", "slt", ".", "ttd_bij", "is", "None", ":", "ttd_bij", "=", "None", "else", ":", "ttd_bij", "=", "slt", ".", "ttd_bij", ".", "tolist", "(", ")", "if", "slt", ".", "tti_aij", "is", "None", ":", 
"tti_aij", "=", "None", "else", ":", "tti_aij", "=", "slt", ".", "tti_aij", ".", "tolist", "(", ")", "if", "slt", ".", "tti_bij", "is", "None", ":", "tti_bij", "=", "None", "else", ":", "tti_bij", "=", "slt", ".", "tti_bij", ".", "tolist", "(", ")", "# creating temporary dictionary with the information corresponding", "# to the current slitlett that will be saved in the JSON file", "tmp_dict", "=", "{", "'csu_bar_left'", ":", "slt", ".", "csu_bar_left", ",", "'csu_bar_right'", ":", "slt", ".", "csu_bar_right", ",", "'csu_bar_slit_center'", ":", "slt", ".", "csu_bar_slit_center", ",", "'csu_bar_slit_width'", ":", "slt", ".", "csu_bar_slit_width", ",", "'x0_reference'", ":", "slt", ".", "x0_reference", ",", "'y0_reference_lower'", ":", "slt", ".", "y0_reference_lower", ",", "'y0_reference_middle'", ":", "slt", ".", "y0_reference_middle", ",", "'y0_reference_upper'", ":", "slt", ".", "y0_reference_upper", ",", "'y0_reference_lower_expected'", ":", "slt", ".", "y0_reference_lower_expected", ",", "'y0_reference_middle_expected'", ":", "slt", ".", "y0_reference_middle_expected", ",", "'y0_reference_upper_expected'", ":", "slt", ".", "y0_reference_upper_expected", ",", "'y0_frontier_lower'", ":", "slt", ".", "y0_frontier_lower", ",", "'y0_frontier_upper'", ":", "slt", ".", "y0_frontier_upper", ",", "'y0_frontier_lower_expected'", ":", "slt", ".", "y0_frontier_lower_expected", ",", "'y0_frontier_upper_expected'", ":", "slt", ".", "y0_frontier_upper_expected", ",", "'corr_yrect_a'", ":", "slt", ".", "corr_yrect_a", ",", "'corr_yrect_b'", ":", "slt", ".", "corr_yrect_b", ",", "'min_row_rectified'", ":", "slt", ".", "min_row_rectified", ",", "'max_row_rectified'", ":", "slt", ".", "max_row_rectified", ",", "'ymargin_bb'", ":", "slt", ".", "ymargin_bb", ",", "'bb_nc1_orig'", ":", "slt", ".", "bb_nc1_orig", ",", "'bb_nc2_orig'", ":", "slt", ".", "bb_nc2_orig", ",", "'bb_ns1_orig'", ":", "slt", ".", "bb_ns1_orig", ",", "'bb_ns2_orig'", ":", "slt", ".", "bb_ns2_orig", ",", "'spectrail'", ":", "{", "'poly_coef_lower'", ":", "slt", ".", "list_spectrails", "[", "slt", ".", "i_lower_spectrail", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", ",", "'poly_coef_middle'", ":", "slt", ".", "list_spectrails", "[", "slt", ".", "i_middle_spectrail", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", ",", "'poly_coef_upper'", ":", "slt", ".", "list_spectrails", "[", "slt", ".", "i_upper_spectrail", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", ",", "}", ",", "'frontier'", ":", "{", "'poly_coef_lower'", ":", "slt", ".", "list_frontiers", "[", "0", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", ",", "'poly_coef_upper'", ":", "slt", ".", "list_frontiers", "[", "1", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", ",", "}", ",", "'ttd_order'", ":", "slt", ".", "ttd_order", ",", "'ttd_aij'", ":", "ttd_aij", ",", "'ttd_bij'", ":", "ttd_bij", ",", "'tti_aij'", ":", "tti_aij", ",", "'tti_bij'", ":", "tti_bij", ",", "'ttd_order_longslit_model'", ":", "slt", ".", "ttd_order_longslit_model", ",", "'ttd_aij_longslit_model'", ":", "slt", ".", "ttd_aij_longslit_model", ",", "'ttd_bij_longslit_model'", ":", "slt", ".", "ttd_bij_longslit_model", ",", "'tti_aij_longslit_model'", ":", "slt", ".", "tti_aij_longslit_model", ",", "'tti_bij_longslit_model'", ":", "slt", ".", "tti_bij_longslit_model", ",", "'wpoly_coeff'", ":", "wpoly_coeff", ",", "'wpoly_coeff_longslit_model'", ":", "wpoly_coeff_longslit_model", ",", "'crval1_linear'", ":", "slt", ".", "crval1_linear", 
",", "'cdelt1_linear'", ":", "slt", ".", "cdelt1_linear", "}", "else", ":", "missing_slitlets", ".", "append", "(", "islitlet", ")", "tmp_dict", "=", "{", "'csu_bar_left'", ":", "slt", ".", "csu_bar_left", ",", "'csu_bar_right'", ":", "slt", ".", "csu_bar_right", ",", "'csu_bar_slit_center'", ":", "slt", ".", "csu_bar_slit_center", ",", "'csu_bar_slit_width'", ":", "slt", ".", "csu_bar_slit_width", ",", "'x0_reference'", ":", "slt", ".", "x0_reference", ",", "'y0_frontier_lower_expected'", ":", "slt", ".", "y0_frontier_lower_expected", ",", "'y0_frontier_upper_expected'", ":", "slt", ".", "y0_frontier_upper_expected", "}", "slitlet_label", "=", "\"slitlet\"", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "outdict", "[", "'contents'", "]", "[", "slitlet_label", "]", "=", "tmp_dict", "# ---", "# OBSOLETE", "'''\n # save JSON file needed to compute the MOS model\n with open(args.out_json.name, 'w') as fstream:\n json.dump(outdict, fstream, indent=2, sort_keys=True)\n print('>>> Saving file ' + args.out_json.name)\n '''", "# ---", "# Create object of type RectWaveCoeff with coefficients for", "# rectification and wavelength calibration", "rectwv_coeff", "=", "RectWaveCoeff", "(", "instrument", "=", "'EMIR'", ")", "rectwv_coeff", ".", "quality_control", "=", "numina", ".", "types", ".", "qc", ".", "QC", ".", "GOOD", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "=", "grism_name", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "=", "filter_name", "rectwv_coeff", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "=", "'uuid'", "+", "bound_param", ".", "uuid", "rectwv_coeff", ".", "meta_info", "[", "'dtu_configuration'", "]", "=", "outdict", "[", "'dtu_configuration'", "]", "rectwv_coeff", ".", "total_slitlets", "=", "EMIR_NBARS", "rectwv_coeff", ".", "missing_slitlets", "=", "missing_slitlets", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "islitlet", "=", "i", "+", "1", "dumdict", "=", "{", "'islitlet'", ":", "islitlet", "}", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "if", "cslitlet", "in", "outdict", "[", "'contents'", "]", ":", "dumdict", ".", "update", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Unexpected error\"", ")", "rectwv_coeff", ".", "contents", ".", "append", "(", "dumdict", ")", "# debugging __getstate__ and __setstate__", "# rectwv_coeff.writeto(args.out_json.name)", "# print('>>> Saving file ' + args.out_json.name)", "# check_setstate_getstate(rectwv_coeff, args.out_json.name)", "logger", ".", "info", "(", "'Generating RectWaveCoeff object with uuid='", "+", "rectwv_coeff", ".", "uuid", ")", "return", "rectwv_coeff", ",", "reduced_55sp" ]
Evaluate rect.+wavecal. coefficients from arc image Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. bound_param : RefinedBoundaryModelParam instance Refined boundary model. lines_catalog : Numpy array 2D numpy array with the contents of the master file with the expected arc line wavelengths. args_nbrightlines : int TBD args_ymargin_bb : int TBD args_remove_sp_background : bool TBD args_times_sigma_threshold : float TBD args_order_fmap : int TBD args_sigma_gaussian_filtering : float TBD args_margin_npix : int TBD args_poldeg_initial : int TBD args_poldeg_refined : int TBD args_interactive : bool TBD args_threshold_wv : float TBD args_ylogscale : bool TBD args_pdf : TBD args_geometry : TBD debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration of the input arc image. reduced_55sp : HDUList object Image with 55 spectra corresponding to the median spectrum for each slitlet, employed to derive the wavelength calibration polynomial.
[ "Evaluate", "rect", ".", "+", "wavecal", ".", "coefficients", "from", "arc", "image" ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_from_arc_image.py#L60-L625
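A minimal usage sketch for the rectwv_coeff_from_arc_image record above, assuming the import path implied by the record's file path; the file names, the loader for bound_param and every args_* value below are placeholders for illustration, not values taken from the record:

import numpy as np
from astropy.io import fits

# module path inferred from emirdrp/processing/wavecal/rectwv_coeff_from_arc_image.py
from emirdrp.processing.wavecal.rectwv_coeff_from_arc_image import \
    rectwv_coeff_from_arc_image

reduced_image = fits.open('reduced_arc.fits')          # bpm, bias, dark and flatfield already applied
bound_param = load_refined_boundary_model()            # hypothetical helper returning a RefinedBoundaryModelParam
lines_catalog = np.genfromtxt('master_arc_lines.dat')  # expected arc line wavelengths

rectwv_coeff, reduced_55sp = rectwv_coeff_from_arc_image(
    reduced_image, bound_param, lines_catalog,
    args_nbrightlines=None, args_ymargin_bb=2,
    args_remove_sp_background=True, args_times_sigma_threshold=10,
    args_order_fmap=2, args_sigma_gaussian_filtering=2,
    args_margin_npix=50, args_poldeg_initial=3, args_poldeg_refined=5,
    args_interactive=False, args_threshold_wv=0, args_ylogscale=False,
    args_pdf=None, args_geometry=None, debugplot=0,
)

rectwv_coeff.writeto('rectwv_coeff.json')  # RectWaveCoeff for this CSU configuration
reduced_55sp.writeto('reduced_55sp.fits')  # median spectrum per slitlet (55 rows)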
guaix-ucm/pyemir
emirdrp/processing/wcs.py
offsets_from_wcs
def offsets_from_wcs(frames, pixref): '''Compute offsets between frames using WCS information. :parameter frames: sequence of FITS filenames or file descriptors :parameter pixref: numpy array used as reference pixel The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a numpy array with the difference between the computed pixel value and the reference pixel. The first line of the array is [0, 0], being the offset from the first image to itself. ''' result = numpy.zeros((len(frames), pixref.shape[1])) with frames[0].open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) skyref = wcsh.wcs_pix2world(pixref, 1) for idx, frame in enumerate(frames[1:]): with frame.open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) pixval = wcsh.wcs_world2pix(skyref, 1) result[idx + 1] = -(pixval[0] - pixref[0]) return result
python
def offsets_from_wcs(frames, pixref): '''Compute offsets between frames using WCS information. :parameter frames: sequence of FITS filenames or file descriptors :parameter pixref: numpy array used as reference pixel The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a numpy array with the difference between the computed pixel value and the reference pixel. The first line of the array is [0, 0], being the offset from the first image to itself. ''' result = numpy.zeros((len(frames), pixref.shape[1])) with frames[0].open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) skyref = wcsh.wcs_pix2world(pixref, 1) for idx, frame in enumerate(frames[1:]): with frame.open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) pixval = wcsh.wcs_world2pix(skyref, 1) result[idx + 1] = -(pixval[0] - pixref[0]) return result
[ "def", "offsets_from_wcs", "(", "frames", ",", "pixref", ")", ":", "result", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "frames", ")", ",", "pixref", ".", "shape", "[", "1", "]", ")", ")", "with", "frames", "[", "0", "]", ".", "open", "(", ")", "as", "hdulist", ":", "wcsh", "=", "wcs", ".", "WCS", "(", "hdulist", "[", "0", "]", ".", "header", ")", "skyref", "=", "wcsh", ".", "wcs_pix2world", "(", "pixref", ",", "1", ")", "for", "idx", ",", "frame", "in", "enumerate", "(", "frames", "[", "1", ":", "]", ")", ":", "with", "frame", ".", "open", "(", ")", "as", "hdulist", ":", "wcsh", "=", "wcs", ".", "WCS", "(", "hdulist", "[", "0", "]", ".", "header", ")", "pixval", "=", "wcsh", ".", "wcs_world2pix", "(", "skyref", ",", "1", ")", "result", "[", "idx", "+", "1", "]", "=", "-", "(", "pixval", "[", "0", "]", "-", "pixref", "[", "0", "]", ")", "return", "result" ]
Compute offsets between frames using WCS information. :parameter frames: sequence of FITS filenames or file descriptors :parameter pixref: numpy array used as reference pixel The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The result is a numpy array with the difference between the computed pixel value and the reference pixel. The first line of the array is [0, 0], being the offset from the first image to itself.
[ "Compute", "offsets", "between", "frames", "using", "WCS", "information", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wcs.py#L24-L54
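A self-contained sketch of how offsets_from_wcs above can be exercised: the function expects frame objects exposing an open() context manager that yields an HDUList, so the small FrameAdapter below (an assumption for illustration, not part of the record) wraps in-memory HDULists; the synthetic headers differ only in CRPIX, so the expected offset is simply minus that shift:

import contextlib

import numpy
from astropy.io import fits

from emirdrp.processing.wcs import offsets_from_wcs  # module path taken from the record


class FrameAdapter(object):
    """Hypothetical wrapper giving an HDUList the open() interface used above."""

    def __init__(self, hdulist):
        self._hdulist = hdulist

    @contextlib.contextmanager
    def open(self):
        yield self._hdulist


def make_frame(crpix1, crpix2):
    # Simple TAN WCS; only CRPIX differs between frames.
    hdu = fits.PrimaryHDU(numpy.zeros((100, 100)))
    hdu.header['CTYPE1'], hdu.header['CTYPE2'] = 'RA---TAN', 'DEC--TAN'
    hdu.header['CRVAL1'], hdu.header['CRVAL2'] = 10.0, 20.0
    hdu.header['CRPIX1'], hdu.header['CRPIX2'] = crpix1, crpix2
    hdu.header['CDELT1'], hdu.header['CDELT2'] = -0.0001, 0.0001
    return FrameAdapter(fits.HDUList([hdu]))


frames = [make_frame(50.0, 50.0), make_frame(53.0, 48.0)]
pixref = numpy.array([[50.0, 50.0]])
print(offsets_from_wcs(frames, pixref))
# [[ 0.  0.]
#  [-3.  2.]]   i.e. minus the (3, -2) pixel shift of the reference point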
guaix-ucm/pyemir
emirdrp/processing/wcs.py
reference_pix_from_wcs
def reference_pix_from_wcs(frames, pixref, origin=1): """Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a list with the position of the reference pixel in each image """ result = [] with frames[0].open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) skyref = wcsh.wcs_pix2world([pixref], origin) result.append(pixref) for idx, frame in enumerate(frames[1:]): with frame.open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) pixval = wcsh.wcs_world2pix(skyref, origin) result.append(tuple(pixval[0])) return result
python
def reference_pix_from_wcs(frames, pixref, origin=1): """Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a list with the position of the reference pixel in each image """ result = [] with frames[0].open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) skyref = wcsh.wcs_pix2world([pixref], origin) result.append(pixref) for idx, frame in enumerate(frames[1:]): with frame.open() as hdulist: wcsh = wcs.WCS(hdulist[0].header) pixval = wcsh.wcs_world2pix(skyref, origin) result.append(tuple(pixval[0])) return result
[ "def", "reference_pix_from_wcs", "(", "frames", ",", "pixref", ",", "origin", "=", "1", ")", ":", "result", "=", "[", "]", "with", "frames", "[", "0", "]", ".", "open", "(", ")", "as", "hdulist", ":", "wcsh", "=", "wcs", ".", "WCS", "(", "hdulist", "[", "0", "]", ".", "header", ")", "skyref", "=", "wcsh", ".", "wcs_pix2world", "(", "[", "pixref", "]", ",", "origin", ")", "result", ".", "append", "(", "pixref", ")", "for", "idx", ",", "frame", "in", "enumerate", "(", "frames", "[", "1", ":", "]", ")", ":", "with", "frame", ".", "open", "(", ")", "as", "hdulist", ":", "wcsh", "=", "wcs", ".", "WCS", "(", "hdulist", "[", "0", "]", ".", "header", ")", "pixval", "=", "wcsh", ".", "wcs_world2pix", "(", "skyref", ",", "origin", ")", "result", ".", "append", "(", "tuple", "(", "pixval", "[", "0", "]", ")", ")", "return", "result" ]
Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The result is a list with the position of the reference pixel in each image.
[ "Compute", "reference", "pixels", "between", "frames", "using", "WCS", "information", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wcs.py#L73-L99
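reference_pix_from_wcs performs the same WCS round trip but returns the absolute pixel position of a single reference point in every frame; a short sketch reusing the hypothetical FrameAdapter/make_frame helpers defined in the offsets_from_wcs example above:

from emirdrp.processing.wcs import reference_pix_from_wcs

# FrameAdapter and make_frame are the illustrative helpers from the previous
# sketch; they are assumptions for the example, not package API.
frames = [make_frame(50.0, 50.0), make_frame(53.0, 48.0)]
positions = reference_pix_from_wcs(frames, (50.0, 50.0), origin=1)
# positions[0] is the input pixref itself; positions[1] == (53.0, 48.0),
# the pixel where the same sky coordinate falls on the second frame.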
guaix-ucm/pyemir
emirdrp/processing/wcs.py
reference_pix_from_wcs_imgs
def reference_pix_from_wcs_imgs(imgs, pixref, origin=1): """Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a list with the position of the reference pixel in each image """ result = [] refimg = imgs[0] wcsh = wcs.WCS(refimg[0].header) skyref = wcsh.wcs_pix2world([pixref], origin) result.append(pixref) for idx, img in enumerate(imgs[1:]): wcsh = wcs.WCS(img[0].header) pixval = wcsh.wcs_world2pix(skyref, origin) result.append(tuple(pixval[0])) return result
python
def reference_pix_from_wcs_imgs(imgs, pixref, origin=1): """Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The results is a list with the position of the reference pixel in each image """ result = [] refimg = imgs[0] wcsh = wcs.WCS(refimg[0].header) skyref = wcsh.wcs_pix2world([pixref], origin) result.append(pixref) for idx, img in enumerate(imgs[1:]): wcsh = wcs.WCS(img[0].header) pixval = wcsh.wcs_world2pix(skyref, origin) result.append(tuple(pixval[0])) return result
[ "def", "reference_pix_from_wcs_imgs", "(", "imgs", ",", "pixref", ",", "origin", "=", "1", ")", ":", "result", "=", "[", "]", "refimg", "=", "imgs", "[", "0", "]", "wcsh", "=", "wcs", ".", "WCS", "(", "refimg", "[", "0", "]", ".", "header", ")", "skyref", "=", "wcsh", ".", "wcs_pix2world", "(", "[", "pixref", "]", ",", "origin", ")", "result", ".", "append", "(", "pixref", ")", "for", "idx", ",", "img", "in", "enumerate", "(", "imgs", "[", "1", ":", "]", ")", ":", "wcsh", "=", "wcs", ".", "WCS", "(", "img", "[", "0", "]", ".", "header", ")", "pixval", "=", "wcsh", ".", "wcs_world2pix", "(", "skyref", ",", "origin", ")", "result", ".", "append", "(", "tuple", "(", "pixval", "[", "0", "]", ")", ")", "return", "result" ]
Compute reference pixels between frames using WCS information. The sky world coordinates are computed on *pixref* using the WCS of the first frame in the sequence. Then, the pixel coordinates of the reference sky world-coordinates are computed for the rest of the frames. The result is a list with the position of the reference pixel in each image.
[ "Compute", "reference", "pixels", "between", "frames", "using", "WCS", "information", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wcs.py#L102-L128
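reference_pix_from_wcs_imgs is the variant that works directly on already opened HDUList objects, so no wrapper is needed; a minimal sketch with the same kind of synthetic headers (all values are illustrative only):

import numpy
from astropy.io import fits

from emirdrp.processing.wcs import reference_pix_from_wcs_imgs


def make_img(crpix1, crpix2):
    # Synthetic TAN WCS header; only CRPIX differs between the two images.
    hdu = fits.PrimaryHDU(numpy.zeros((100, 100)))
    hdu.header['CTYPE1'], hdu.header['CTYPE2'] = 'RA---TAN', 'DEC--TAN'
    hdu.header['CRVAL1'], hdu.header['CRVAL2'] = 10.0, 20.0
    hdu.header['CRPIX1'], hdu.header['CRPIX2'] = crpix1, crpix2
    hdu.header['CDELT1'], hdu.header['CDELT2'] = -0.0001, 0.0001
    return fits.HDUList([hdu])


imgs = [make_img(50.0, 50.0), make_img(53.0, 48.0)]
print(reference_pix_from_wcs_imgs(imgs, (50.0, 50.0)))
# [(50.0, 50.0), (53.0, 48.0)] for this synthetic pair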
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/session.py
Session.process_request
def process_request(self, request=None, cookie=None, **kwargs): """ Perform user logout :param request: :param cookie: :param kwargs: :return: """ _cntx = self.endpoint_context _sdb = _cntx.sdb if 'post_logout_redirect_uri' in request: if 'id_token_hint' not in request: raise InvalidRequest( "If post_logout_redirect_uri then id_token_hint is a MUST") _cookie_name = self.endpoint_context.cookie_name['session'] try: part = self.endpoint_context.cookie_dealer.get_cookie_value( cookie, cookie_name=_cookie_name) except IndexError: raise InvalidRequest('Cookie error') except KeyError: part = None if part: # value is a base64 encoded JSON document _cookie_info = json.loads(as_unicode(b64d(as_bytes(part[0])))) logger.debug('Cookie info: {}'.format(_cookie_info)) _sid = _cookie_info['sid'] else: logger.debug('No relevant cookie') _sid = '' _cookie_info = {} if 'id_token_hint' in request: logger.debug( 'ID token hint: {}'.format( request[verified_claim_name("id_token_hint")])) auds = request[verified_claim_name("id_token_hint")]['aud'] _ith_sid = '' _sids = _sdb.sso_db.get_sids_by_sub( request[verified_claim_name("id_token_hint")]['sub']) if _sids is None: raise ValueError('Unknown subject identifier') for _isid in _sids: if _sdb[_isid]['authn_req']['client_id'] in auds: _ith_sid = _isid break if not _ith_sid: raise ValueError('Unknown subject') if _sid: if _ith_sid != _sid: # someone's messing with me raise ValueError('Wrong ID Token hint') else: _sid = _ith_sid else: auds = [] try: session = _sdb[_sid] except KeyError: raise ValueError("Can't find any corresponding session") client_id = session['authn_req']['client_id'] # Does this match what's in the cookie ? if _cookie_info: if client_id != _cookie_info['client_id']: logger.warning( 'Client ID in authz request and in cookie does not match') raise ValueError("Wrong Client") if auds: if client_id not in auds: raise ValueError('Incorrect ID Token hint') _cinfo = _cntx.cdb[client_id] # verify that the post_logout_redirect_uri if present are among the ones # registered try: _uri = request['post_logout_redirect_uri'] except KeyError: if _cntx.issuer.endswith('/'): _uri = "{}{}".format(_cntx.issuer, self.kwargs['post_logout_uri_path']) else: _uri = "{}/{}".format(_cntx.issuer, self.kwargs['post_logout_uri_path']) plur = False else: plur = True verify_uri(_cntx, request, 'post_logout_redirect_uri', client_id=client_id) payload = { 'sid': _sid, 'client_id': client_id, 'user': session['authn_event']['uid'] } # redirect user to OP logout verification page if plur and 'state' in request: _uri = '{}?{}'.format(_uri, urlencode({'state': request['state']})) payload['state'] = request['state'] payload['redirect_uri'] = _uri logger.debug('JWS payload: {}'.format(payload)) # From me to me _jws = JWT(_cntx.keyjar, iss=_cntx.issuer, lifetime=86400, sign_alg=self.kwargs['signing_alg']) sjwt = _jws.pack(payload=payload, recv=_cntx.issuer) location = '{}?{}'.format(self.kwargs['logout_verify_url'], urlencode({'sjwt': sjwt})) return {'redirect_location': location}
python
def process_request(self, request=None, cookie=None, **kwargs): """ Perform user logout :param request: :param cookie: :param kwargs: :return: """ _cntx = self.endpoint_context _sdb = _cntx.sdb if 'post_logout_redirect_uri' in request: if 'id_token_hint' not in request: raise InvalidRequest( "If post_logout_redirect_uri then id_token_hint is a MUST") _cookie_name = self.endpoint_context.cookie_name['session'] try: part = self.endpoint_context.cookie_dealer.get_cookie_value( cookie, cookie_name=_cookie_name) except IndexError: raise InvalidRequest('Cookie error') except KeyError: part = None if part: # value is a base64 encoded JSON document _cookie_info = json.loads(as_unicode(b64d(as_bytes(part[0])))) logger.debug('Cookie info: {}'.format(_cookie_info)) _sid = _cookie_info['sid'] else: logger.debug('No relevant cookie') _sid = '' _cookie_info = {} if 'id_token_hint' in request: logger.debug( 'ID token hint: {}'.format( request[verified_claim_name("id_token_hint")])) auds = request[verified_claim_name("id_token_hint")]['aud'] _ith_sid = '' _sids = _sdb.sso_db.get_sids_by_sub( request[verified_claim_name("id_token_hint")]['sub']) if _sids is None: raise ValueError('Unknown subject identifier') for _isid in _sids: if _sdb[_isid]['authn_req']['client_id'] in auds: _ith_sid = _isid break if not _ith_sid: raise ValueError('Unknown subject') if _sid: if _ith_sid != _sid: # someone's messing with me raise ValueError('Wrong ID Token hint') else: _sid = _ith_sid else: auds = [] try: session = _sdb[_sid] except KeyError: raise ValueError("Can't find any corresponding session") client_id = session['authn_req']['client_id'] # Does this match what's in the cookie ? if _cookie_info: if client_id != _cookie_info['client_id']: logger.warning( 'Client ID in authz request and in cookie does not match') raise ValueError("Wrong Client") if auds: if client_id not in auds: raise ValueError('Incorrect ID Token hint') _cinfo = _cntx.cdb[client_id] # verify that the post_logout_redirect_uri if present are among the ones # registered try: _uri = request['post_logout_redirect_uri'] except KeyError: if _cntx.issuer.endswith('/'): _uri = "{}{}".format(_cntx.issuer, self.kwargs['post_logout_uri_path']) else: _uri = "{}/{}".format(_cntx.issuer, self.kwargs['post_logout_uri_path']) plur = False else: plur = True verify_uri(_cntx, request, 'post_logout_redirect_uri', client_id=client_id) payload = { 'sid': _sid, 'client_id': client_id, 'user': session['authn_event']['uid'] } # redirect user to OP logout verification page if plur and 'state' in request: _uri = '{}?{}'.format(_uri, urlencode({'state': request['state']})) payload['state'] = request['state'] payload['redirect_uri'] = _uri logger.debug('JWS payload: {}'.format(payload)) # From me to me _jws = JWT(_cntx.keyjar, iss=_cntx.issuer, lifetime=86400, sign_alg=self.kwargs['signing_alg']) sjwt = _jws.pack(payload=payload, recv=_cntx.issuer) location = '{}?{}'.format(self.kwargs['logout_verify_url'], urlencode({'sjwt': sjwt})) return {'redirect_location': location}
[ "def", "process_request", "(", "self", ",", "request", "=", "None", ",", "cookie", "=", "None", ",", "*", "*", "kwargs", ")", ":", "_cntx", "=", "self", ".", "endpoint_context", "_sdb", "=", "_cntx", ".", "sdb", "if", "'post_logout_redirect_uri'", "in", "request", ":", "if", "'id_token_hint'", "not", "in", "request", ":", "raise", "InvalidRequest", "(", "\"If post_logout_redirect_uri then id_token_hint is a MUST\"", ")", "_cookie_name", "=", "self", ".", "endpoint_context", ".", "cookie_name", "[", "'session'", "]", "try", ":", "part", "=", "self", ".", "endpoint_context", ".", "cookie_dealer", ".", "get_cookie_value", "(", "cookie", ",", "cookie_name", "=", "_cookie_name", ")", "except", "IndexError", ":", "raise", "InvalidRequest", "(", "'Cookie error'", ")", "except", "KeyError", ":", "part", "=", "None", "if", "part", ":", "# value is a base64 encoded JSON document", "_cookie_info", "=", "json", ".", "loads", "(", "as_unicode", "(", "b64d", "(", "as_bytes", "(", "part", "[", "0", "]", ")", ")", ")", ")", "logger", ".", "debug", "(", "'Cookie info: {}'", ".", "format", "(", "_cookie_info", ")", ")", "_sid", "=", "_cookie_info", "[", "'sid'", "]", "else", ":", "logger", ".", "debug", "(", "'No relevant cookie'", ")", "_sid", "=", "''", "_cookie_info", "=", "{", "}", "if", "'id_token_hint'", "in", "request", ":", "logger", ".", "debug", "(", "'ID token hint: {}'", ".", "format", "(", "request", "[", "verified_claim_name", "(", "\"id_token_hint\"", ")", "]", ")", ")", "auds", "=", "request", "[", "verified_claim_name", "(", "\"id_token_hint\"", ")", "]", "[", "'aud'", "]", "_ith_sid", "=", "''", "_sids", "=", "_sdb", ".", "sso_db", ".", "get_sids_by_sub", "(", "request", "[", "verified_claim_name", "(", "\"id_token_hint\"", ")", "]", "[", "'sub'", "]", ")", "if", "_sids", "is", "None", ":", "raise", "ValueError", "(", "'Unknown subject identifier'", ")", "for", "_isid", "in", "_sids", ":", "if", "_sdb", "[", "_isid", "]", "[", "'authn_req'", "]", "[", "'client_id'", "]", "in", "auds", ":", "_ith_sid", "=", "_isid", "break", "if", "not", "_ith_sid", ":", "raise", "ValueError", "(", "'Unknown subject'", ")", "if", "_sid", ":", "if", "_ith_sid", "!=", "_sid", ":", "# someone's messing with me", "raise", "ValueError", "(", "'Wrong ID Token hint'", ")", "else", ":", "_sid", "=", "_ith_sid", "else", ":", "auds", "=", "[", "]", "try", ":", "session", "=", "_sdb", "[", "_sid", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Can't find any corresponding session\"", ")", "client_id", "=", "session", "[", "'authn_req'", "]", "[", "'client_id'", "]", "# Does this match what's in the cookie ?", "if", "_cookie_info", ":", "if", "client_id", "!=", "_cookie_info", "[", "'client_id'", "]", ":", "logger", ".", "warning", "(", "'Client ID in authz request and in cookie does not match'", ")", "raise", "ValueError", "(", "\"Wrong Client\"", ")", "if", "auds", ":", "if", "client_id", "not", "in", "auds", ":", "raise", "ValueError", "(", "'Incorrect ID Token hint'", ")", "_cinfo", "=", "_cntx", ".", "cdb", "[", "client_id", "]", "# verify that the post_logout_redirect_uri if present are among the ones", "# registered", "try", ":", "_uri", "=", "request", "[", "'post_logout_redirect_uri'", "]", "except", "KeyError", ":", "if", "_cntx", ".", "issuer", ".", "endswith", "(", "'/'", ")", ":", "_uri", "=", "\"{}{}\"", ".", "format", "(", "_cntx", ".", "issuer", ",", "self", ".", "kwargs", "[", "'post_logout_uri_path'", "]", ")", "else", ":", "_uri", "=", "\"{}/{}\"", ".", "format", "(", "_cntx", ".", 
"issuer", ",", "self", ".", "kwargs", "[", "'post_logout_uri_path'", "]", ")", "plur", "=", "False", "else", ":", "plur", "=", "True", "verify_uri", "(", "_cntx", ",", "request", ",", "'post_logout_redirect_uri'", ",", "client_id", "=", "client_id", ")", "payload", "=", "{", "'sid'", ":", "_sid", ",", "'client_id'", ":", "client_id", ",", "'user'", ":", "session", "[", "'authn_event'", "]", "[", "'uid'", "]", "}", "# redirect user to OP logout verification page", "if", "plur", "and", "'state'", "in", "request", ":", "_uri", "=", "'{}?{}'", ".", "format", "(", "_uri", ",", "urlencode", "(", "{", "'state'", ":", "request", "[", "'state'", "]", "}", ")", ")", "payload", "[", "'state'", "]", "=", "request", "[", "'state'", "]", "payload", "[", "'redirect_uri'", "]", "=", "_uri", "logger", ".", "debug", "(", "'JWS payload: {}'", ".", "format", "(", "payload", ")", ")", "# From me to me", "_jws", "=", "JWT", "(", "_cntx", ".", "keyjar", ",", "iss", "=", "_cntx", ".", "issuer", ",", "lifetime", "=", "86400", ",", "sign_alg", "=", "self", ".", "kwargs", "[", "'signing_alg'", "]", ")", "sjwt", "=", "_jws", ".", "pack", "(", "payload", "=", "payload", ",", "recv", "=", "_cntx", ".", "issuer", ")", "location", "=", "'{}?{}'", ".", "format", "(", "self", ".", "kwargs", "[", "'logout_verify_url'", "]", ",", "urlencode", "(", "{", "'sjwt'", ":", "sjwt", "}", ")", ")", "return", "{", "'redirect_location'", ":", "location", "}" ]
Perform user logout :param request: :param cookie: :param kwargs: :return:
[ "Perform", "user", "logout" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/session.py#L193-L314
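The tail of process_request above packs the payload into a signed JWT and redirects the browser to the OP's logout verification page; a stdlib-only sketch of that final step, where the URLs, identifiers and the sjwt string are placeholders (the real endpoint signs the payload with its key jar via JWT(...).pack rather than using a literal string):

from urllib.parse import urlencode

logout_verify_url = 'https://op.example.com/verify_logout'  # placeholder
post_logout_uri = 'https://rp.example.com/logged_out'       # placeholder
state = 'abcde'                                             # state sent by the RP, if any

payload = {
    'sid': 'session-id',       # placeholder session key
    'client_id': 'client-1',   # placeholder client
    'user': 'diana',           # uid from the authentication event
    'state': state,
    'redirect_uri': '{}?{}'.format(post_logout_uri, urlencode({'state': state})),
}

sjwt = '<signed JWT over payload>'  # in the endpoint: JWT(keyjar, iss=issuer, ...).pack(payload=payload, recv=issuer)

location = '{}?{}'.format(logout_verify_url, urlencode({'sjwt': sjwt}))
print({'redirect_location': location})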
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_from_mos_library.py
rectwv_coeff_from_mos_library
def rectwv_coeff_from_mos_library(reduced_image, master_rectwv, ignore_dtu_configuration=True, debugplot=0): """Evaluate rect.+wavecal. coefficients from MOS library Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. master_rectwv : MasterRectWave instance Rectification and Wavelength Calibrartion Library product. Contains the library of polynomial coefficients necessary to generate an instance of RectWaveCoeff with the rectification and wavelength calibration coefficients for the particular CSU configuration. ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. """ logger = logging.getLogger(__name__) logger.info('Computing expected RectWaveCoeff from CSU configuration') # header header = reduced_image[0].header # read the CSU configuration from the image header csu_conf = CsuConfiguration.define_from_header(header) # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) # retrieve DTU configuration from MasterRectWave object dtu_conf_calib = DtuConfiguration.define_from_dictionary( master_rectwv.meta_info['dtu_configuration'] ) # check that the DTU configuration employed to obtain the calibration # corresponds to the DTU configuration in the input FITS file if dtu_conf != dtu_conf_calib: if ignore_dtu_configuration: logger.warning('DTU configuration differences found!') else: logger.info('DTU configuration from image header:') logger.info(dtu_conf) logger.info('DTU configuration from master calibration:') logger.info(dtu_conf_calib) raise ValueError("DTU configurations do not match!") else: logger.info('DTU configuration match!') # check grism and filter filter_name = header['filter'] logger.debug('Filter: ' + filter_name) if filter_name != master_rectwv.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.debug('Grism: ' + grism_name) if grism_name != master_rectwv.tags['grism']: raise ValueError('Grism name does not match!') # valid slitlet numbers list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in master_rectwv.missing_slitlets: list_valid_islitlets.remove(idel) logger.debug('valid slitlet numbers: ' + str(list_valid_islitlets)) # initialize intermediate dictionary with relevant information # (note: this dictionary corresponds to an old structure employed to # store the information in a JSON file; this is no longer necessary, # but here we reuse that dictionary for convenience) outdict = {} outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'computation of rectification and wavelength calibration polynomial ' \ 'coefficients for a particular CSU configuration from a MOS model ' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['fits_frame_uuid'] = 'TBD' outdict['meta_info']['origin']['rect_wpoly_mos_uuid'] = \ master_rectwv.uuid outdict['meta_info']['origin']['fitted_boundary_param_uuid'] = \ master_rectwv.meta_info['origin']['bound_param'] outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name 
outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} # compute rectification and wavelength calibration coefficients for each # slitlet according to its csu_bar_slit_center value for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # input data structure tmpdict = master_rectwv.contents[islitlet - 1] list_csu_bar_slit_center = tmpdict['list_csu_bar_slit_center'] # check extrapolations if csu_bar_slit_center < min(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('minimum tabulated value: ' + str(min(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) if csu_bar_slit_center > max(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('maximum tabulated value: ' + str(max(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) # rectification coefficients ttd_order = tmpdict['ttd_order'] ncoef = ncoef_fmap(ttd_order) outdict['contents'][cslitlet] = {} outdict['contents'][cslitlet]['ttd_order'] = ttd_order outdict['contents'][cslitlet]['ttd_order_longslit_model'] = None for keycoef in ['ttd_aij', 'ttd_bij', 'tti_aij', 'tti_bij']: coef_out = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_' + keycoef + '_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) coef_out.append(dum[0]) outdict['contents'][cslitlet][keycoef] = coef_out outdict['contents'][cslitlet][keycoef + '_longslit_model'] = None # wavelength calibration coefficients ncoef = tmpdict['wpoly_degree'] + 1 wpoly_coeff = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_wpoly_coeff_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) wpoly_coeff.append(dum[0]) outdict['contents'][cslitlet]['wpoly_coeff'] = wpoly_coeff outdict['contents'][cslitlet]['wpoly_coeff_longslit_model'] = None # update cdelt1_linear and crval1_linear wpoly_function = np.polynomial.Polynomial(wpoly_coeff) crmin1_linear = wpoly_function(1) crmax1_linear = wpoly_function(EMIR_NAXIS1) cdelt1_linear = (crmax1_linear - crmin1_linear) / (EMIR_NAXIS1 - 1) crval1_linear = crmin1_linear outdict['contents'][cslitlet]['crval1_linear'] = crval1_linear outdict['contents'][cslitlet]['cdelt1_linear'] = cdelt1_linear # update CSU keywords outdict['contents'][cslitlet]['csu_bar_left'] = \ csu_conf.csu_bar_left(islitlet) outdict['contents'][cslitlet]['csu_bar_right'] = \ csu_conf.csu_bar_right(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_center'] = \ csu_conf.csu_bar_slit_center(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_width'] = \ csu_conf.csu_bar_slit_width(islitlet) # for each slitlet compute spectrum trails and frontiers using the # fitted boundary parameters fitted_bound_param_json = { 'contents': master_rectwv.meta_info['refined_boundary_model'] } parmodel = fitted_bound_param_json['contents']['parmodel'] fitted_bound_param_json.update({'meta_info': {'parmodel': parmodel}}) params = 
bound_params_from_dict(fitted_bound_param_json) if abs(debugplot) >= 10: logger.debug('Fitted boundary parameters:') logger.debug(params.pretty_print()) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # compute and store x0_reference value x0_reference = float(EMIR_NAXIS1) / 2.0 + 0.5 outdict['contents'][cslitlet]['x0_reference'] = x0_reference # compute spectrum trails (lower, middle and upper) list_spectrails = expected_distorted_boundaries( islitlet, csu_bar_slit_center, [0, 0.5, 1], params, parmodel, numpts=101, deg=5, debugplot=0 ) # store spectrails in output JSON file outdict['contents'][cslitlet]['spectrail'] = {} for idum, cdum in zip(range(3), ['lower', 'middle', 'upper']): outdict['contents'][cslitlet]['spectrail']['poly_coef_' + cdum] = \ list_spectrails[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_reference_' + cdum] = \ list_spectrails[idum].poly_funct(x0_reference) # compute frontiers (lower, upper) list_frontiers = expected_distorted_frontiers( islitlet, csu_bar_slit_center, params, parmodel, numpts=101, deg=5, debugplot=0 ) # store frontiers in output JSON outdict['contents'][cslitlet]['frontier'] = {} for idum, cdum in zip(range(2), ['lower', 'upper']): outdict['contents'][cslitlet]['frontier']['poly_coef_' + cdum] = \ list_frontiers[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_frontier_' + cdum] = \ list_frontiers[idum].poly_funct(x0_reference) # store bounding box parameters for each slitlet xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # parameters already available in the input JSON file for par in ['bb_nc1_orig', 'bb_nc2_orig', 'ymargin_bb']: outdict['contents'][cslitlet][par] = \ master_rectwv.contents[islitlet - 1][par] # estimate bb_ns1_orig and bb_ns2_orig using the already computed # frontiers and the value of ymargin_bb, following the same approach # employed in Slitlet2dArc.__init__() poly_lower_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_lower'] ) poly_upper_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_upper'] ) ylower = poly_lower_frontier(xdum) yupper = poly_upper_frontier(xdum) ymargin_bb = master_rectwv.contents[islitlet - 1]['ymargin_bb'] bb_ns1_orig = int(ylower.min() + 0.5) - ymargin_bb if bb_ns1_orig < 1: bb_ns1_orig = 1 bb_ns2_orig = int(yupper.max() + 0.5) + ymargin_bb if bb_ns2_orig > EMIR_NAXIS2: bb_ns2_orig = EMIR_NAXIS2 outdict['contents'][cslitlet]['bb_ns1_orig'] = bb_ns1_orig outdict['contents'][cslitlet]['bb_ns2_orig'] = bb_ns2_orig # additional parameters (see Slitlet2dArc.__init__) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # define expected frontier ordinates at x0_reference for the rectified # image imposing the vertical length of the slitlet to be constant # and equal to EMIR_NPIXPERSLIT_RECTIFIED outdict['contents'][cslitlet]['y0_frontier_lower_expected'] = \ expected_y0_lower_frontier(islitlet) outdict['contents'][cslitlet]['y0_frontier_upper_expected'] = \ expected_y0_upper_frontier(islitlet) # compute linear transformation to place the rectified slitlet at # the center of the current slitlet bounding box tmpdict = outdict['contents'][cslitlet] xdum1 = tmpdict['y0_frontier_lower'] ydum1 = 
tmpdict['y0_frontier_lower_expected'] xdum2 = tmpdict['y0_frontier_upper'] ydum2 = tmpdict['y0_frontier_upper_expected'] corr_yrect_b = (ydum2 - ydum1) / (xdum2 - xdum1) corr_yrect_a = ydum1 - corr_yrect_b * xdum1 # compute expected location of rectified boundaries y0_reference_lower_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_lower'] y0_reference_middle_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_middle'] y0_reference_upper_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_upper'] # shift transformation to center the rectified slitlet within the # slitlet bounding box ydummid = (ydum1 + ydum2) / 2 ioffset = int( ydummid - (tmpdict['bb_ns1_orig'] + tmpdict['bb_ns2_orig']) / 2.0) corr_yrect_a -= ioffset # minimum and maximum row in the rectified slitlet encompassing # EMIR_NPIXPERSLIT_RECTIFIED pixels # a) scan number (in pixels, from 1 to NAXIS2) xdum1 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_lower'] xdum2 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_upper'] # b) row number (starting from zero) min_row_rectified = \ int((round(xdum1 * 10) + 5) / 10) - tmpdict['bb_ns1_orig'] max_row_rectified = \ int((round(xdum2 * 10) - 5) / 10) - tmpdict['bb_ns1_orig'] # save previous results in outdict outdict['contents'][cslitlet]['y0_reference_lower_expected'] = \ y0_reference_lower_expected outdict['contents'][cslitlet]['y0_reference_middle_expected'] = \ y0_reference_middle_expected outdict['contents'][cslitlet]['y0_reference_upper_expected'] = \ y0_reference_upper_expected outdict['contents'][cslitlet]['corr_yrect_a'] = corr_yrect_a outdict['contents'][cslitlet]['corr_yrect_b'] = corr_yrect_b outdict['contents'][cslitlet]['min_row_rectified'] = min_row_rectified outdict['contents'][cslitlet]['max_row_rectified'] = max_row_rectified # --- # Create object of type RectWaveCoeff with coefficients for # rectification and wavelength calibration rectwv_coeff = RectWaveCoeff(instrument='EMIR') rectwv_coeff.quality_control = numina.types.qc.QC.GOOD rectwv_coeff.tags['grism'] = grism_name rectwv_coeff.tags['filter'] = filter_name rectwv_coeff.meta_info['origin']['bound_param'] = \ master_rectwv.meta_info['origin']['bound_param'] rectwv_coeff.meta_info['origin']['master_rectwv'] = \ 'uuid' + master_rectwv.uuid rectwv_coeff.meta_info['dtu_configuration'] = outdict['dtu_configuration'] rectwv_coeff.total_slitlets = EMIR_NBARS for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: dumdict.update({ 'csu_bar_left': csu_conf.csu_bar_left(islitlet), 'csu_bar_right': csu_conf.csu_bar_right(islitlet), 'csu_bar_slit_center': csu_conf.csu_bar_slit_center(islitlet), 'csu_bar_slit_width': csu_conf.csu_bar_slit_width(islitlet), 'x0_reference': float(EMIR_NAXIS1) / 2.0 + 0.5, 'y0_frontier_lower_expected': expected_y0_lower_frontier(islitlet), 'y0_frontier_upper_expected': expected_y0_upper_frontier(islitlet) }) rectwv_coeff.missing_slitlets.append(islitlet) rectwv_coeff.contents.append(dumdict) # debugging __getstate__ and __setstate__ # rectwv_coeff.writeto(args.out_rect_wpoly.name) # print('>>> Saving file ' + args.out_rect_wpoly.name) # check_setstate_getstate(rectwv_coeff, args.out_rect_wpoly.name) logger.info('Generating RectWaveCoeff object with uuid=' + rectwv_coeff.uuid) return rectwv_coeff
python
def rectwv_coeff_from_mos_library(reduced_image, master_rectwv, ignore_dtu_configuration=True, debugplot=0): """Evaluate rect.+wavecal. coefficients from MOS library Parameters ---------- reduced_image : HDUList object Image with preliminary basic reduction: bpm, bias, dark and flatfield. master_rectwv : MasterRectWave instance Rectification and Wavelength Calibrartion Library product. Contains the library of polynomial coefficients necessary to generate an instance of RectWaveCoeff with the rectification and wavelength calibration coefficients for the particular CSU configuration. ignore_dtu_configuration : bool If True, ignore differences in DTU configuration. debugplot : int Debugging level for messages and plots. For details see 'numina.array.display.pause_debugplot.py'. Returns ------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. """ logger = logging.getLogger(__name__) logger.info('Computing expected RectWaveCoeff from CSU configuration') # header header = reduced_image[0].header # read the CSU configuration from the image header csu_conf = CsuConfiguration.define_from_header(header) # read the DTU configuration from the image header dtu_conf = DtuConfiguration.define_from_header(header) # retrieve DTU configuration from MasterRectWave object dtu_conf_calib = DtuConfiguration.define_from_dictionary( master_rectwv.meta_info['dtu_configuration'] ) # check that the DTU configuration employed to obtain the calibration # corresponds to the DTU configuration in the input FITS file if dtu_conf != dtu_conf_calib: if ignore_dtu_configuration: logger.warning('DTU configuration differences found!') else: logger.info('DTU configuration from image header:') logger.info(dtu_conf) logger.info('DTU configuration from master calibration:') logger.info(dtu_conf_calib) raise ValueError("DTU configurations do not match!") else: logger.info('DTU configuration match!') # check grism and filter filter_name = header['filter'] logger.debug('Filter: ' + filter_name) if filter_name != master_rectwv.tags['filter']: raise ValueError('Filter name does not match!') grism_name = header['grism'] logger.debug('Grism: ' + grism_name) if grism_name != master_rectwv.tags['grism']: raise ValueError('Grism name does not match!') # valid slitlet numbers list_valid_islitlets = list(range(1, EMIR_NBARS + 1)) for idel in master_rectwv.missing_slitlets: list_valid_islitlets.remove(idel) logger.debug('valid slitlet numbers: ' + str(list_valid_islitlets)) # initialize intermediate dictionary with relevant information # (note: this dictionary corresponds to an old structure employed to # store the information in a JSON file; this is no longer necessary, # but here we reuse that dictionary for convenience) outdict = {} outdict['instrument'] = 'EMIR' outdict['meta_info'] = {} outdict['meta_info']['creation_date'] = datetime.now().isoformat() outdict['meta_info']['description'] = \ 'computation of rectification and wavelength calibration polynomial ' \ 'coefficients for a particular CSU configuration from a MOS model ' outdict['meta_info']['recipe_name'] = 'undefined' outdict['meta_info']['origin'] = {} outdict['meta_info']['origin']['fits_frame_uuid'] = 'TBD' outdict['meta_info']['origin']['rect_wpoly_mos_uuid'] = \ master_rectwv.uuid outdict['meta_info']['origin']['fitted_boundary_param_uuid'] = \ master_rectwv.meta_info['origin']['bound_param'] outdict['tags'] = {} outdict['tags']['grism'] = grism_name outdict['tags']['filter'] = filter_name 
outdict['dtu_configuration'] = dtu_conf.outdict() outdict['uuid'] = str(uuid4()) outdict['contents'] = {} # compute rectification and wavelength calibration coefficients for each # slitlet according to its csu_bar_slit_center value for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # input data structure tmpdict = master_rectwv.contents[islitlet - 1] list_csu_bar_slit_center = tmpdict['list_csu_bar_slit_center'] # check extrapolations if csu_bar_slit_center < min(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('minimum tabulated value: ' + str(min(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) if csu_bar_slit_center > max(list_csu_bar_slit_center): logger.warning('extrapolating table with ' + cslitlet) logger.warning('maximum tabulated value: ' + str(max(list_csu_bar_slit_center))) logger.warning('sought value...........: ' + str(csu_bar_slit_center)) # rectification coefficients ttd_order = tmpdict['ttd_order'] ncoef = ncoef_fmap(ttd_order) outdict['contents'][cslitlet] = {} outdict['contents'][cslitlet]['ttd_order'] = ttd_order outdict['contents'][cslitlet]['ttd_order_longslit_model'] = None for keycoef in ['ttd_aij', 'ttd_bij', 'tti_aij', 'tti_bij']: coef_out = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_' + keycoef + '_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) coef_out.append(dum[0]) outdict['contents'][cslitlet][keycoef] = coef_out outdict['contents'][cslitlet][keycoef + '_longslit_model'] = None # wavelength calibration coefficients ncoef = tmpdict['wpoly_degree'] + 1 wpoly_coeff = [] for icoef in range(ncoef): ccoef = str(icoef).zfill(2) list_cij = tmpdict['list_wpoly_coeff_' + ccoef] funinterp_coef = interp1d(list_csu_bar_slit_center, list_cij, kind='linear', fill_value='extrapolate') # note: funinterp_coef expects a numpy array dum = funinterp_coef([csu_bar_slit_center]) wpoly_coeff.append(dum[0]) outdict['contents'][cslitlet]['wpoly_coeff'] = wpoly_coeff outdict['contents'][cslitlet]['wpoly_coeff_longslit_model'] = None # update cdelt1_linear and crval1_linear wpoly_function = np.polynomial.Polynomial(wpoly_coeff) crmin1_linear = wpoly_function(1) crmax1_linear = wpoly_function(EMIR_NAXIS1) cdelt1_linear = (crmax1_linear - crmin1_linear) / (EMIR_NAXIS1 - 1) crval1_linear = crmin1_linear outdict['contents'][cslitlet]['crval1_linear'] = crval1_linear outdict['contents'][cslitlet]['cdelt1_linear'] = cdelt1_linear # update CSU keywords outdict['contents'][cslitlet]['csu_bar_left'] = \ csu_conf.csu_bar_left(islitlet) outdict['contents'][cslitlet]['csu_bar_right'] = \ csu_conf.csu_bar_right(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_center'] = \ csu_conf.csu_bar_slit_center(islitlet) outdict['contents'][cslitlet]['csu_bar_slit_width'] = \ csu_conf.csu_bar_slit_width(islitlet) # for each slitlet compute spectrum trails and frontiers using the # fitted boundary parameters fitted_bound_param_json = { 'contents': master_rectwv.meta_info['refined_boundary_model'] } parmodel = fitted_bound_param_json['contents']['parmodel'] fitted_bound_param_json.update({'meta_info': {'parmodel': parmodel}}) params = 
bound_params_from_dict(fitted_bound_param_json) if abs(debugplot) >= 10: logger.debug('Fitted boundary parameters:') logger.debug(params.pretty_print()) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # csu_bar_slit_center of current slitlet in initial FITS image csu_bar_slit_center = csu_conf.csu_bar_slit_center(islitlet) # compute and store x0_reference value x0_reference = float(EMIR_NAXIS1) / 2.0 + 0.5 outdict['contents'][cslitlet]['x0_reference'] = x0_reference # compute spectrum trails (lower, middle and upper) list_spectrails = expected_distorted_boundaries( islitlet, csu_bar_slit_center, [0, 0.5, 1], params, parmodel, numpts=101, deg=5, debugplot=0 ) # store spectrails in output JSON file outdict['contents'][cslitlet]['spectrail'] = {} for idum, cdum in zip(range(3), ['lower', 'middle', 'upper']): outdict['contents'][cslitlet]['spectrail']['poly_coef_' + cdum] = \ list_spectrails[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_reference_' + cdum] = \ list_spectrails[idum].poly_funct(x0_reference) # compute frontiers (lower, upper) list_frontiers = expected_distorted_frontiers( islitlet, csu_bar_slit_center, params, parmodel, numpts=101, deg=5, debugplot=0 ) # store frontiers in output JSON outdict['contents'][cslitlet]['frontier'] = {} for idum, cdum in zip(range(2), ['lower', 'upper']): outdict['contents'][cslitlet]['frontier']['poly_coef_' + cdum] = \ list_frontiers[idum].poly_funct.coef.tolist() outdict['contents'][cslitlet]['y0_frontier_' + cdum] = \ list_frontiers[idum].poly_funct(x0_reference) # store bounding box parameters for each slitlet xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # parameters already available in the input JSON file for par in ['bb_nc1_orig', 'bb_nc2_orig', 'ymargin_bb']: outdict['contents'][cslitlet][par] = \ master_rectwv.contents[islitlet - 1][par] # estimate bb_ns1_orig and bb_ns2_orig using the already computed # frontiers and the value of ymargin_bb, following the same approach # employed in Slitlet2dArc.__init__() poly_lower_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_lower'] ) poly_upper_frontier = np.polynomial.Polynomial( outdict['contents'][cslitlet]['frontier']['poly_coef_upper'] ) ylower = poly_lower_frontier(xdum) yupper = poly_upper_frontier(xdum) ymargin_bb = master_rectwv.contents[islitlet - 1]['ymargin_bb'] bb_ns1_orig = int(ylower.min() + 0.5) - ymargin_bb if bb_ns1_orig < 1: bb_ns1_orig = 1 bb_ns2_orig = int(yupper.max() + 0.5) + ymargin_bb if bb_ns2_orig > EMIR_NAXIS2: bb_ns2_orig = EMIR_NAXIS2 outdict['contents'][cslitlet]['bb_ns1_orig'] = bb_ns1_orig outdict['contents'][cslitlet]['bb_ns2_orig'] = bb_ns2_orig # additional parameters (see Slitlet2dArc.__init__) for islitlet in list_valid_islitlets: cslitlet = 'slitlet' + str(islitlet).zfill(2) # define expected frontier ordinates at x0_reference for the rectified # image imposing the vertical length of the slitlet to be constant # and equal to EMIR_NPIXPERSLIT_RECTIFIED outdict['contents'][cslitlet]['y0_frontier_lower_expected'] = \ expected_y0_lower_frontier(islitlet) outdict['contents'][cslitlet]['y0_frontier_upper_expected'] = \ expected_y0_upper_frontier(islitlet) # compute linear transformation to place the rectified slitlet at # the center of the current slitlet bounding box tmpdict = outdict['contents'][cslitlet] xdum1 = tmpdict['y0_frontier_lower'] ydum1 = 
tmpdict['y0_frontier_lower_expected'] xdum2 = tmpdict['y0_frontier_upper'] ydum2 = tmpdict['y0_frontier_upper_expected'] corr_yrect_b = (ydum2 - ydum1) / (xdum2 - xdum1) corr_yrect_a = ydum1 - corr_yrect_b * xdum1 # compute expected location of rectified boundaries y0_reference_lower_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_lower'] y0_reference_middle_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_middle'] y0_reference_upper_expected = \ corr_yrect_a + corr_yrect_b * tmpdict['y0_reference_upper'] # shift transformation to center the rectified slitlet within the # slitlet bounding box ydummid = (ydum1 + ydum2) / 2 ioffset = int( ydummid - (tmpdict['bb_ns1_orig'] + tmpdict['bb_ns2_orig']) / 2.0) corr_yrect_a -= ioffset # minimum and maximum row in the rectified slitlet encompassing # EMIR_NPIXPERSLIT_RECTIFIED pixels # a) scan number (in pixels, from 1 to NAXIS2) xdum1 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_lower'] xdum2 = corr_yrect_a + \ corr_yrect_b * tmpdict['y0_frontier_upper'] # b) row number (starting from zero) min_row_rectified = \ int((round(xdum1 * 10) + 5) / 10) - tmpdict['bb_ns1_orig'] max_row_rectified = \ int((round(xdum2 * 10) - 5) / 10) - tmpdict['bb_ns1_orig'] # save previous results in outdict outdict['contents'][cslitlet]['y0_reference_lower_expected'] = \ y0_reference_lower_expected outdict['contents'][cslitlet]['y0_reference_middle_expected'] = \ y0_reference_middle_expected outdict['contents'][cslitlet]['y0_reference_upper_expected'] = \ y0_reference_upper_expected outdict['contents'][cslitlet]['corr_yrect_a'] = corr_yrect_a outdict['contents'][cslitlet]['corr_yrect_b'] = corr_yrect_b outdict['contents'][cslitlet]['min_row_rectified'] = min_row_rectified outdict['contents'][cslitlet]['max_row_rectified'] = max_row_rectified # --- # Create object of type RectWaveCoeff with coefficients for # rectification and wavelength calibration rectwv_coeff = RectWaveCoeff(instrument='EMIR') rectwv_coeff.quality_control = numina.types.qc.QC.GOOD rectwv_coeff.tags['grism'] = grism_name rectwv_coeff.tags['filter'] = filter_name rectwv_coeff.meta_info['origin']['bound_param'] = \ master_rectwv.meta_info['origin']['bound_param'] rectwv_coeff.meta_info['origin']['master_rectwv'] = \ 'uuid' + master_rectwv.uuid rectwv_coeff.meta_info['dtu_configuration'] = outdict['dtu_configuration'] rectwv_coeff.total_slitlets = EMIR_NBARS for i in range(EMIR_NBARS): islitlet = i + 1 dumdict = {'islitlet': islitlet} cslitlet = 'slitlet' + str(islitlet).zfill(2) if cslitlet in outdict['contents']: dumdict.update(outdict['contents'][cslitlet]) else: dumdict.update({ 'csu_bar_left': csu_conf.csu_bar_left(islitlet), 'csu_bar_right': csu_conf.csu_bar_right(islitlet), 'csu_bar_slit_center': csu_conf.csu_bar_slit_center(islitlet), 'csu_bar_slit_width': csu_conf.csu_bar_slit_width(islitlet), 'x0_reference': float(EMIR_NAXIS1) / 2.0 + 0.5, 'y0_frontier_lower_expected': expected_y0_lower_frontier(islitlet), 'y0_frontier_upper_expected': expected_y0_upper_frontier(islitlet) }) rectwv_coeff.missing_slitlets.append(islitlet) rectwv_coeff.contents.append(dumdict) # debugging __getstate__ and __setstate__ # rectwv_coeff.writeto(args.out_rect_wpoly.name) # print('>>> Saving file ' + args.out_rect_wpoly.name) # check_setstate_getstate(rectwv_coeff, args.out_rect_wpoly.name) logger.info('Generating RectWaveCoeff object with uuid=' + rectwv_coeff.uuid) return rectwv_coeff
[ "def", "rectwv_coeff_from_mos_library", "(", "reduced_image", ",", "master_rectwv", ",", "ignore_dtu_configuration", "=", "True", ",", "debugplot", "=", "0", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "info", "(", "'Computing expected RectWaveCoeff from CSU configuration'", ")", "# header", "header", "=", "reduced_image", "[", "0", "]", ".", "header", "# read the CSU configuration from the image header", "csu_conf", "=", "CsuConfiguration", ".", "define_from_header", "(", "header", ")", "# read the DTU configuration from the image header", "dtu_conf", "=", "DtuConfiguration", ".", "define_from_header", "(", "header", ")", "# retrieve DTU configuration from MasterRectWave object", "dtu_conf_calib", "=", "DtuConfiguration", ".", "define_from_dictionary", "(", "master_rectwv", ".", "meta_info", "[", "'dtu_configuration'", "]", ")", "# check that the DTU configuration employed to obtain the calibration", "# corresponds to the DTU configuration in the input FITS file", "if", "dtu_conf", "!=", "dtu_conf_calib", ":", "if", "ignore_dtu_configuration", ":", "logger", ".", "warning", "(", "'DTU configuration differences found!'", ")", "else", ":", "logger", ".", "info", "(", "'DTU configuration from image header:'", ")", "logger", ".", "info", "(", "dtu_conf", ")", "logger", ".", "info", "(", "'DTU configuration from master calibration:'", ")", "logger", ".", "info", "(", "dtu_conf_calib", ")", "raise", "ValueError", "(", "\"DTU configurations do not match!\"", ")", "else", ":", "logger", ".", "info", "(", "'DTU configuration match!'", ")", "# check grism and filter", "filter_name", "=", "header", "[", "'filter'", "]", "logger", ".", "debug", "(", "'Filter: '", "+", "filter_name", ")", "if", "filter_name", "!=", "master_rectwv", ".", "tags", "[", "'filter'", "]", ":", "raise", "ValueError", "(", "'Filter name does not match!'", ")", "grism_name", "=", "header", "[", "'grism'", "]", "logger", ".", "debug", "(", "'Grism: '", "+", "grism_name", ")", "if", "grism_name", "!=", "master_rectwv", ".", "tags", "[", "'grism'", "]", ":", "raise", "ValueError", "(", "'Grism name does not match!'", ")", "# valid slitlet numbers", "list_valid_islitlets", "=", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "for", "idel", "in", "master_rectwv", ".", "missing_slitlets", ":", "list_valid_islitlets", ".", "remove", "(", "idel", ")", "logger", ".", "debug", "(", "'valid slitlet numbers: '", "+", "str", "(", "list_valid_islitlets", ")", ")", "# initialize intermediate dictionary with relevant information", "# (note: this dictionary corresponds to an old structure employed to", "# store the information in a JSON file; this is no longer necessary,", "# but here we reuse that dictionary for convenience)", "outdict", "=", "{", "}", "outdict", "[", "'instrument'", "]", "=", "'EMIR'", "outdict", "[", "'meta_info'", "]", "=", "{", "}", "outdict", "[", "'meta_info'", "]", "[", "'creation_date'", "]", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "outdict", "[", "'meta_info'", "]", "[", "'description'", "]", "=", "'computation of rectification and wavelength calibration polynomial '", "'coefficients for a particular CSU configuration from a MOS model '", "outdict", "[", "'meta_info'", "]", "[", "'recipe_name'", "]", "=", "'undefined'", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "=", "{", "}", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "[", "'fits_frame_uuid'", "]", "=", "'TBD'", "outdict", "[", 
"'meta_info'", "]", "[", "'origin'", "]", "[", "'rect_wpoly_mos_uuid'", "]", "=", "master_rectwv", ".", "uuid", "outdict", "[", "'meta_info'", "]", "[", "'origin'", "]", "[", "'fitted_boundary_param_uuid'", "]", "=", "master_rectwv", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "outdict", "[", "'tags'", "]", "=", "{", "}", "outdict", "[", "'tags'", "]", "[", "'grism'", "]", "=", "grism_name", "outdict", "[", "'tags'", "]", "[", "'filter'", "]", "=", "filter_name", "outdict", "[", "'dtu_configuration'", "]", "=", "dtu_conf", ".", "outdict", "(", ")", "outdict", "[", "'uuid'", "]", "=", "str", "(", "uuid4", "(", ")", ")", "outdict", "[", "'contents'", "]", "=", "{", "}", "# compute rectification and wavelength calibration coefficients for each", "# slitlet according to its csu_bar_slit_center value", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# csu_bar_slit_center of current slitlet in initial FITS image", "csu_bar_slit_center", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "# input data structure", "tmpdict", "=", "master_rectwv", ".", "contents", "[", "islitlet", "-", "1", "]", "list_csu_bar_slit_center", "=", "tmpdict", "[", "'list_csu_bar_slit_center'", "]", "# check extrapolations", "if", "csu_bar_slit_center", "<", "min", "(", "list_csu_bar_slit_center", ")", ":", "logger", ".", "warning", "(", "'extrapolating table with '", "+", "cslitlet", ")", "logger", ".", "warning", "(", "'minimum tabulated value: '", "+", "str", "(", "min", "(", "list_csu_bar_slit_center", ")", ")", ")", "logger", ".", "warning", "(", "'sought value...........: '", "+", "str", "(", "csu_bar_slit_center", ")", ")", "if", "csu_bar_slit_center", ">", "max", "(", "list_csu_bar_slit_center", ")", ":", "logger", ".", "warning", "(", "'extrapolating table with '", "+", "cslitlet", ")", "logger", ".", "warning", "(", "'maximum tabulated value: '", "+", "str", "(", "max", "(", "list_csu_bar_slit_center", ")", ")", ")", "logger", ".", "warning", "(", "'sought value...........: '", "+", "str", "(", "csu_bar_slit_center", ")", ")", "# rectification coefficients", "ttd_order", "=", "tmpdict", "[", "'ttd_order'", "]", "ncoef", "=", "ncoef_fmap", "(", "ttd_order", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "=", "{", "}", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'ttd_order'", "]", "=", "ttd_order", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'ttd_order_longslit_model'", "]", "=", "None", "for", "keycoef", "in", "[", "'ttd_aij'", ",", "'ttd_bij'", ",", "'tti_aij'", ",", "'tti_bij'", "]", ":", "coef_out", "=", "[", "]", "for", "icoef", "in", "range", "(", "ncoef", ")", ":", "ccoef", "=", "str", "(", "icoef", ")", ".", "zfill", "(", "2", ")", "list_cij", "=", "tmpdict", "[", "'list_'", "+", "keycoef", "+", "'_'", "+", "ccoef", "]", "funinterp_coef", "=", "interp1d", "(", "list_csu_bar_slit_center", ",", "list_cij", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "'extrapolate'", ")", "# note: funinterp_coef expects a numpy array", "dum", "=", "funinterp_coef", "(", "[", "csu_bar_slit_center", "]", ")", "coef_out", ".", "append", "(", "dum", "[", "0", "]", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "keycoef", "]", "=", "coef_out", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "keycoef", "+", "'_longslit_model'", "]", "=", "None", "# wavelength calibration coefficients", "ncoef", 
"=", "tmpdict", "[", "'wpoly_degree'", "]", "+", "1", "wpoly_coeff", "=", "[", "]", "for", "icoef", "in", "range", "(", "ncoef", ")", ":", "ccoef", "=", "str", "(", "icoef", ")", ".", "zfill", "(", "2", ")", "list_cij", "=", "tmpdict", "[", "'list_wpoly_coeff_'", "+", "ccoef", "]", "funinterp_coef", "=", "interp1d", "(", "list_csu_bar_slit_center", ",", "list_cij", ",", "kind", "=", "'linear'", ",", "fill_value", "=", "'extrapolate'", ")", "# note: funinterp_coef expects a numpy array", "dum", "=", "funinterp_coef", "(", "[", "csu_bar_slit_center", "]", ")", "wpoly_coeff", ".", "append", "(", "dum", "[", "0", "]", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'wpoly_coeff'", "]", "=", "wpoly_coeff", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'wpoly_coeff_longslit_model'", "]", "=", "None", "# update cdelt1_linear and crval1_linear", "wpoly_function", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "wpoly_coeff", ")", "crmin1_linear", "=", "wpoly_function", "(", "1", ")", "crmax1_linear", "=", "wpoly_function", "(", "EMIR_NAXIS1", ")", "cdelt1_linear", "=", "(", "crmax1_linear", "-", "crmin1_linear", ")", "/", "(", "EMIR_NAXIS1", "-", "1", ")", "crval1_linear", "=", "crmin1_linear", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'crval1_linear'", "]", "=", "crval1_linear", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'cdelt1_linear'", "]", "=", "cdelt1_linear", "# update CSU keywords", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_left'", "]", "=", "csu_conf", ".", "csu_bar_left", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_right'", "]", "=", "csu_conf", ".", "csu_bar_right", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_slit_center'", "]", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'csu_bar_slit_width'", "]", "=", "csu_conf", ".", "csu_bar_slit_width", "(", "islitlet", ")", "# for each slitlet compute spectrum trails and frontiers using the", "# fitted boundary parameters", "fitted_bound_param_json", "=", "{", "'contents'", ":", "master_rectwv", ".", "meta_info", "[", "'refined_boundary_model'", "]", "}", "parmodel", "=", "fitted_bound_param_json", "[", "'contents'", "]", "[", "'parmodel'", "]", "fitted_bound_param_json", ".", "update", "(", "{", "'meta_info'", ":", "{", "'parmodel'", ":", "parmodel", "}", "}", ")", "params", "=", "bound_params_from_dict", "(", "fitted_bound_param_json", ")", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "logger", ".", "debug", "(", "'Fitted boundary parameters:'", ")", "logger", ".", "debug", "(", "params", ".", "pretty_print", "(", ")", ")", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# csu_bar_slit_center of current slitlet in initial FITS image", "csu_bar_slit_center", "=", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", "# compute and store x0_reference value", "x0_reference", "=", "float", "(", "EMIR_NAXIS1", ")", "/", "2.0", "+", "0.5", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'x0_reference'", "]", "=", "x0_reference", "# compute spectrum trails (lower, middle and upper)", "list_spectrails", "=", "expected_distorted_boundaries", "(", "islitlet", ",", "csu_bar_slit_center", ",", "[", "0", ",", "0.5", ",", "1", "]", 
",", "params", ",", "parmodel", ",", "numpts", "=", "101", ",", "deg", "=", "5", ",", "debugplot", "=", "0", ")", "# store spectrails in output JSON file", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'spectrail'", "]", "=", "{", "}", "for", "idum", ",", "cdum", "in", "zip", "(", "range", "(", "3", ")", ",", "[", "'lower'", ",", "'middle'", ",", "'upper'", "]", ")", ":", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'spectrail'", "]", "[", "'poly_coef_'", "+", "cdum", "]", "=", "list_spectrails", "[", "idum", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_'", "+", "cdum", "]", "=", "list_spectrails", "[", "idum", "]", ".", "poly_funct", "(", "x0_reference", ")", "# compute frontiers (lower, upper)", "list_frontiers", "=", "expected_distorted_frontiers", "(", "islitlet", ",", "csu_bar_slit_center", ",", "params", ",", "parmodel", ",", "numpts", "=", "101", ",", "deg", "=", "5", ",", "debugplot", "=", "0", ")", "# store frontiers in output JSON", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "=", "{", "}", "for", "idum", ",", "cdum", "in", "zip", "(", "range", "(", "2", ")", ",", "[", "'lower'", ",", "'upper'", "]", ")", ":", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "[", "'poly_coef_'", "+", "cdum", "]", "=", "list_frontiers", "[", "idum", "]", ".", "poly_funct", ".", "coef", ".", "tolist", "(", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_frontier_'", "+", "cdum", "]", "=", "list_frontiers", "[", "idum", "]", ".", "poly_funct", "(", "x0_reference", ")", "# store bounding box parameters for each slitlet", "xdum", "=", "np", ".", "linspace", "(", "1", ",", "EMIR_NAXIS1", ",", "num", "=", "EMIR_NAXIS1", ")", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# parameters already available in the input JSON file", "for", "par", "in", "[", "'bb_nc1_orig'", ",", "'bb_nc2_orig'", ",", "'ymargin_bb'", "]", ":", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "par", "]", "=", "master_rectwv", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "par", "]", "# estimate bb_ns1_orig and bb_ns2_orig using the already computed", "# frontiers and the value of ymargin_bb, following the same approach", "# employed in Slitlet2dArc.__init__()", "poly_lower_frontier", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "[", "'poly_coef_lower'", "]", ")", "poly_upper_frontier", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'frontier'", "]", "[", "'poly_coef_upper'", "]", ")", "ylower", "=", "poly_lower_frontier", "(", "xdum", ")", "yupper", "=", "poly_upper_frontier", "(", "xdum", ")", "ymargin_bb", "=", "master_rectwv", ".", "contents", "[", "islitlet", "-", "1", "]", "[", "'ymargin_bb'", "]", "bb_ns1_orig", "=", "int", "(", "ylower", ".", "min", "(", ")", "+", "0.5", ")", "-", "ymargin_bb", "if", "bb_ns1_orig", "<", "1", ":", "bb_ns1_orig", "=", "1", "bb_ns2_orig", "=", "int", "(", "yupper", ".", "max", "(", ")", "+", "0.5", ")", "+", "ymargin_bb", "if", "bb_ns2_orig", ">", "EMIR_NAXIS2", ":", "bb_ns2_orig", "=", "EMIR_NAXIS2", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'bb_ns1_orig'", "]", "=", 
"bb_ns1_orig", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'bb_ns2_orig'", "]", "=", "bb_ns2_orig", "# additional parameters (see Slitlet2dArc.__init__)", "for", "islitlet", "in", "list_valid_islitlets", ":", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "# define expected frontier ordinates at x0_reference for the rectified", "# image imposing the vertical length of the slitlet to be constant", "# and equal to EMIR_NPIXPERSLIT_RECTIFIED", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_frontier_lower_expected'", "]", "=", "expected_y0_lower_frontier", "(", "islitlet", ")", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_frontier_upper_expected'", "]", "=", "expected_y0_upper_frontier", "(", "islitlet", ")", "# compute linear transformation to place the rectified slitlet at", "# the center of the current slitlet bounding box", "tmpdict", "=", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "xdum1", "=", "tmpdict", "[", "'y0_frontier_lower'", "]", "ydum1", "=", "tmpdict", "[", "'y0_frontier_lower_expected'", "]", "xdum2", "=", "tmpdict", "[", "'y0_frontier_upper'", "]", "ydum2", "=", "tmpdict", "[", "'y0_frontier_upper_expected'", "]", "corr_yrect_b", "=", "(", "ydum2", "-", "ydum1", ")", "/", "(", "xdum2", "-", "xdum1", ")", "corr_yrect_a", "=", "ydum1", "-", "corr_yrect_b", "*", "xdum1", "# compute expected location of rectified boundaries", "y0_reference_lower_expected", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_reference_lower'", "]", "y0_reference_middle_expected", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_reference_middle'", "]", "y0_reference_upper_expected", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_reference_upper'", "]", "# shift transformation to center the rectified slitlet within the", "# slitlet bounding box", "ydummid", "=", "(", "ydum1", "+", "ydum2", ")", "/", "2", "ioffset", "=", "int", "(", "ydummid", "-", "(", "tmpdict", "[", "'bb_ns1_orig'", "]", "+", "tmpdict", "[", "'bb_ns2_orig'", "]", ")", "/", "2.0", ")", "corr_yrect_a", "-=", "ioffset", "# minimum and maximum row in the rectified slitlet encompassing", "# EMIR_NPIXPERSLIT_RECTIFIED pixels", "# a) scan number (in pixels, from 1 to NAXIS2)", "xdum1", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_frontier_lower'", "]", "xdum2", "=", "corr_yrect_a", "+", "corr_yrect_b", "*", "tmpdict", "[", "'y0_frontier_upper'", "]", "# b) row number (starting from zero)", "min_row_rectified", "=", "int", "(", "(", "round", "(", "xdum1", "*", "10", ")", "+", "5", ")", "/", "10", ")", "-", "tmpdict", "[", "'bb_ns1_orig'", "]", "max_row_rectified", "=", "int", "(", "(", "round", "(", "xdum2", "*", "10", ")", "-", "5", ")", "/", "10", ")", "-", "tmpdict", "[", "'bb_ns1_orig'", "]", "# save previous results in outdict", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_lower_expected'", "]", "=", "y0_reference_lower_expected", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_middle_expected'", "]", "=", "y0_reference_middle_expected", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'y0_reference_upper_expected'", "]", "=", "y0_reference_upper_expected", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'corr_yrect_a'", "]", "=", "corr_yrect_a", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'corr_yrect_b'", "]", "=", 
"corr_yrect_b", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'min_row_rectified'", "]", "=", "min_row_rectified", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", "[", "'max_row_rectified'", "]", "=", "max_row_rectified", "# ---", "# Create object of type RectWaveCoeff with coefficients for", "# rectification and wavelength calibration", "rectwv_coeff", "=", "RectWaveCoeff", "(", "instrument", "=", "'EMIR'", ")", "rectwv_coeff", ".", "quality_control", "=", "numina", ".", "types", ".", "qc", ".", "QC", ".", "GOOD", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "=", "grism_name", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "=", "filter_name", "rectwv_coeff", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "=", "master_rectwv", ".", "meta_info", "[", "'origin'", "]", "[", "'bound_param'", "]", "rectwv_coeff", ".", "meta_info", "[", "'origin'", "]", "[", "'master_rectwv'", "]", "=", "'uuid'", "+", "master_rectwv", ".", "uuid", "rectwv_coeff", ".", "meta_info", "[", "'dtu_configuration'", "]", "=", "outdict", "[", "'dtu_configuration'", "]", "rectwv_coeff", ".", "total_slitlets", "=", "EMIR_NBARS", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "islitlet", "=", "i", "+", "1", "dumdict", "=", "{", "'islitlet'", ":", "islitlet", "}", "cslitlet", "=", "'slitlet'", "+", "str", "(", "islitlet", ")", ".", "zfill", "(", "2", ")", "if", "cslitlet", "in", "outdict", "[", "'contents'", "]", ":", "dumdict", ".", "update", "(", "outdict", "[", "'contents'", "]", "[", "cslitlet", "]", ")", "else", ":", "dumdict", ".", "update", "(", "{", "'csu_bar_left'", ":", "csu_conf", ".", "csu_bar_left", "(", "islitlet", ")", ",", "'csu_bar_right'", ":", "csu_conf", ".", "csu_bar_right", "(", "islitlet", ")", ",", "'csu_bar_slit_center'", ":", "csu_conf", ".", "csu_bar_slit_center", "(", "islitlet", ")", ",", "'csu_bar_slit_width'", ":", "csu_conf", ".", "csu_bar_slit_width", "(", "islitlet", ")", ",", "'x0_reference'", ":", "float", "(", "EMIR_NAXIS1", ")", "/", "2.0", "+", "0.5", ",", "'y0_frontier_lower_expected'", ":", "expected_y0_lower_frontier", "(", "islitlet", ")", ",", "'y0_frontier_upper_expected'", ":", "expected_y0_upper_frontier", "(", "islitlet", ")", "}", ")", "rectwv_coeff", ".", "missing_slitlets", ".", "append", "(", "islitlet", ")", "rectwv_coeff", ".", "contents", ".", "append", "(", "dumdict", ")", "# debugging __getstate__ and __setstate__", "# rectwv_coeff.writeto(args.out_rect_wpoly.name)", "# print('>>> Saving file ' + args.out_rect_wpoly.name)", "# check_setstate_getstate(rectwv_coeff, args.out_rect_wpoly.name)", "logger", ".", "info", "(", "'Generating RectWaveCoeff object with uuid='", "+", "rectwv_coeff", ".", "uuid", ")", "return", "rectwv_coeff" ]
Evaluate rect.+wavecal. coefficients from MOS library

Parameters
----------
reduced_image : HDUList object
    Image with preliminary basic reduction: bpm, bias, dark and flatfield.
master_rectwv : MasterRectWave instance
    Rectification and Wavelength Calibration Library product. Contains the
    library of polynomial coefficients necessary to generate an instance of
    RectWaveCoeff with the rectification and wavelength calibration
    coefficients for the particular CSU configuration.
ignore_dtu_configuration : bool
    If True, ignore differences in DTU configuration.
debugplot : int
    Debugging level for messages and plots. For details see
    'numina.array.display.pause_debugplot.py'.

Returns
-------
rectwv_coeff : RectWaveCoeff instance
    Rectification and wavelength calibration coefficients for the particular
    CSU configuration.
[ "Evaluate", "rect", ".", "+", "wavecal", ".", "coefficients", "from", "MOS", "library" ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_from_mos_library.py#L55-L410
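A minimal driver sketch for the function above. The file names and the helper used to load the MasterRectWave calibration are illustrative assumptions, not part of the pyemir record; only the call signature comes from the code shown.

from astropy.io import fits

# Hypothetical inputs: a basic-reduced MOS frame and a MOS calibration library.
reduced_image = fits.open('reduced_mos_frame.fits')
master_rectwv = load_master_rectwv('rect_wpoly_mos.json')  # assumed loader returning a MasterRectWave

rectwv_coeff = rectwv_coeff_from_mos_library(
    reduced_image,
    master_rectwv,
    ignore_dtu_configuration=True,  # tolerate DTU offsets, as allowed by the docstring
    debugplot=0,
)
print(rectwv_coeff.uuid)  # a fresh uuid is assigned to each generated RectWaveCoeff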
guaix-ucm/pyemir
emirdrp/processing/wavecal/median_slitlets_rectified.py
median_slitlets_rectified
def median_slitlets_rectified( input_image, mode=0, minimum_slitlet_width_mm=EMIR_MINIMUM_SLITLET_WIDTH_MM, maximum_slitlet_width_mm=EMIR_MAXIMUM_SLITLET_WIDTH_MM, debugplot=0 ): """Compute median spectrum for each slitlet. Parameters ---------- input_image : HDUList object Input 2D image. mode : int Indicate desired result: 0 : image with the same size as the input image, with the median spectrum of each slitlet spanning all the spectra of the corresponding slitlet 1 : image with 55 spectra, containing the median spectra of each slitlet 2 : single collapsed median spectrum, using exclusively the useful slitlets from the input image minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- image_median : HDUList object Output image. """ image_header = input_image[0].header image2d = input_image[0].data # check image dimensions naxis2_expected = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED naxis2, naxis1 = image2d.shape if naxis2 != naxis2_expected: raise ValueError("NAXIS2={0} should be {1}".format( naxis2, naxis2_expected )) # check that the FITS file has been obtained with EMIR instrument = image_header['instrume'] if instrument != 'EMIR': raise ValueError("INSTRUME keyword is not 'EMIR'!") # initialize output image if mode == 0: image2d_median = np.zeros((naxis2, naxis1)) else: image2d_median = np.zeros((EMIR_NBARS, naxis1)) # main loop for i in range(EMIR_NBARS): ns1 = i * EMIR_NPIXPERSLIT_RECTIFIED + 1 ns2 = ns1 + EMIR_NPIXPERSLIT_RECTIFIED - 1 sp_median = np.median(image2d[(ns1-1):ns2, :], axis=0) if mode == 0: image2d_median[(ns1-1):ns2, :] = np.tile( sp_median, (EMIR_NPIXPERSLIT_RECTIFIED, 1) ) else: image2d_median[i] = np.copy(sp_median) if mode == 2: # get CSU configuration from FITS header csu_config = CsuConfiguration.define_from_header(image_header) # define wavelength calibration parameters crpix1 = image_header['crpix1'] crval1 = image_header['crval1'] cdelt1 = image_header['cdelt1'] # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] if abs(debugplot) != 0: print('>>> list_useful_slitlets....:', list_useful_slitlets) print('>>> list_not_useful_slitlets:', list_not_useful_slitlets) # define mask from array data mask2d, borders = define_mask_borders(image2d_median, sought_value=0) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # update mask with unused slitlets for islitlet in list_not_useful_slitlets: mask2d[islitlet - 1, :] = np.array([True] * naxis1) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # useful image pixels image2d_masked = image2d_median * (1 - mask2d.astype(int)) if abs(debugplot) % 10 != 0: ximshow(image2d_masked, crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # masked image image2d_masked = np.ma.masked_array(image2d_median, mask=mask2d) # median spectrum image1d_median = np.ma.median(image2d_masked, axis=0).data image_median = fits.PrimaryHDU(data=image1d_median, header=image_header) else: 
image_median = fits.PrimaryHDU(data=image2d_median, header=image_header) return fits.HDUList([image_median])
python
def median_slitlets_rectified( input_image, mode=0, minimum_slitlet_width_mm=EMIR_MINIMUM_SLITLET_WIDTH_MM, maximum_slitlet_width_mm=EMIR_MAXIMUM_SLITLET_WIDTH_MM, debugplot=0 ): """Compute median spectrum for each slitlet. Parameters ---------- input_image : HDUList object Input 2D image. mode : int Indicate desired result: 0 : image with the same size as the input image, with the median spectrum of each slitlet spanning all the spectra of the corresponding slitlet 1 : image with 55 spectra, containing the median spectra of each slitlet 2 : single collapsed median spectrum, using exclusively the useful slitlets from the input image minimum_slitlet_width_mm : float Minimum slitlet width (mm) for a valid slitlet. maximum_slitlet_width_mm : float Maximum slitlet width (mm) for a valid slitlet. debugplot : int Determines whether intermediate computations and/or plots are displayed. The valid codes are defined in numina.array.display.pause_debugplot. Returns ------- image_median : HDUList object Output image. """ image_header = input_image[0].header image2d = input_image[0].data # check image dimensions naxis2_expected = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED naxis2, naxis1 = image2d.shape if naxis2 != naxis2_expected: raise ValueError("NAXIS2={0} should be {1}".format( naxis2, naxis2_expected )) # check that the FITS file has been obtained with EMIR instrument = image_header['instrume'] if instrument != 'EMIR': raise ValueError("INSTRUME keyword is not 'EMIR'!") # initialize output image if mode == 0: image2d_median = np.zeros((naxis2, naxis1)) else: image2d_median = np.zeros((EMIR_NBARS, naxis1)) # main loop for i in range(EMIR_NBARS): ns1 = i * EMIR_NPIXPERSLIT_RECTIFIED + 1 ns2 = ns1 + EMIR_NPIXPERSLIT_RECTIFIED - 1 sp_median = np.median(image2d[(ns1-1):ns2, :], axis=0) if mode == 0: image2d_median[(ns1-1):ns2, :] = np.tile( sp_median, (EMIR_NPIXPERSLIT_RECTIFIED, 1) ) else: image2d_median[i] = np.copy(sp_median) if mode == 2: # get CSU configuration from FITS header csu_config = CsuConfiguration.define_from_header(image_header) # define wavelength calibration parameters crpix1 = image_header['crpix1'] crval1 = image_header['crval1'] cdelt1 = image_header['cdelt1'] # segregate slitlets list_useful_slitlets = csu_config.widths_in_range_mm( minwidth=minimum_slitlet_width_mm, maxwidth=maximum_slitlet_width_mm ) list_not_useful_slitlets = [i for i in list(range(1, EMIR_NBARS + 1)) if i not in list_useful_slitlets] if abs(debugplot) != 0: print('>>> list_useful_slitlets....:', list_useful_slitlets) print('>>> list_not_useful_slitlets:', list_not_useful_slitlets) # define mask from array data mask2d, borders = define_mask_borders(image2d_median, sought_value=0) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # update mask with unused slitlets for islitlet in list_not_useful_slitlets: mask2d[islitlet - 1, :] = np.array([True] * naxis1) if abs(debugplot) % 10 != 0: ximshow(mask2d.astype(int), z1z2=(-.2, 1.2), crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # useful image pixels image2d_masked = image2d_median * (1 - mask2d.astype(int)) if abs(debugplot) % 10 != 0: ximshow(image2d_masked, crpix1=crpix1, crval1=crval1, cdelt1=cdelt1, debugplot=debugplot) # masked image image2d_masked = np.ma.masked_array(image2d_median, mask=mask2d) # median spectrum image1d_median = np.ma.median(image2d_masked, axis=0).data image_median = fits.PrimaryHDU(data=image1d_median, header=image_header) else: 
image_median = fits.PrimaryHDU(data=image2d_median, header=image_header) return fits.HDUList([image_median])
[ "def", "median_slitlets_rectified", "(", "input_image", ",", "mode", "=", "0", ",", "minimum_slitlet_width_mm", "=", "EMIR_MINIMUM_SLITLET_WIDTH_MM", ",", "maximum_slitlet_width_mm", "=", "EMIR_MAXIMUM_SLITLET_WIDTH_MM", ",", "debugplot", "=", "0", ")", ":", "image_header", "=", "input_image", "[", "0", "]", ".", "header", "image2d", "=", "input_image", "[", "0", "]", ".", "data", "# check image dimensions", "naxis2_expected", "=", "EMIR_NBARS", "*", "EMIR_NPIXPERSLIT_RECTIFIED", "naxis2", ",", "naxis1", "=", "image2d", ".", "shape", "if", "naxis2", "!=", "naxis2_expected", ":", "raise", "ValueError", "(", "\"NAXIS2={0} should be {1}\"", ".", "format", "(", "naxis2", ",", "naxis2_expected", ")", ")", "# check that the FITS file has been obtained with EMIR", "instrument", "=", "image_header", "[", "'instrume'", "]", "if", "instrument", "!=", "'EMIR'", ":", "raise", "ValueError", "(", "\"INSTRUME keyword is not 'EMIR'!\"", ")", "# initialize output image", "if", "mode", "==", "0", ":", "image2d_median", "=", "np", ".", "zeros", "(", "(", "naxis2", ",", "naxis1", ")", ")", "else", ":", "image2d_median", "=", "np", ".", "zeros", "(", "(", "EMIR_NBARS", ",", "naxis1", ")", ")", "# main loop", "for", "i", "in", "range", "(", "EMIR_NBARS", ")", ":", "ns1", "=", "i", "*", "EMIR_NPIXPERSLIT_RECTIFIED", "+", "1", "ns2", "=", "ns1", "+", "EMIR_NPIXPERSLIT_RECTIFIED", "-", "1", "sp_median", "=", "np", ".", "median", "(", "image2d", "[", "(", "ns1", "-", "1", ")", ":", "ns2", ",", ":", "]", ",", "axis", "=", "0", ")", "if", "mode", "==", "0", ":", "image2d_median", "[", "(", "ns1", "-", "1", ")", ":", "ns2", ",", ":", "]", "=", "np", ".", "tile", "(", "sp_median", ",", "(", "EMIR_NPIXPERSLIT_RECTIFIED", ",", "1", ")", ")", "else", ":", "image2d_median", "[", "i", "]", "=", "np", ".", "copy", "(", "sp_median", ")", "if", "mode", "==", "2", ":", "# get CSU configuration from FITS header", "csu_config", "=", "CsuConfiguration", ".", "define_from_header", "(", "image_header", ")", "# define wavelength calibration parameters", "crpix1", "=", "image_header", "[", "'crpix1'", "]", "crval1", "=", "image_header", "[", "'crval1'", "]", "cdelt1", "=", "image_header", "[", "'cdelt1'", "]", "# segregate slitlets", "list_useful_slitlets", "=", "csu_config", ".", "widths_in_range_mm", "(", "minwidth", "=", "minimum_slitlet_width_mm", ",", "maxwidth", "=", "maximum_slitlet_width_mm", ")", "list_not_useful_slitlets", "=", "[", "i", "for", "i", "in", "list", "(", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ")", "if", "i", "not", "in", "list_useful_slitlets", "]", "if", "abs", "(", "debugplot", ")", "!=", "0", ":", "print", "(", "'>>> list_useful_slitlets....:'", ",", "list_useful_slitlets", ")", "print", "(", "'>>> list_not_useful_slitlets:'", ",", "list_not_useful_slitlets", ")", "# define mask from array data", "mask2d", ",", "borders", "=", "define_mask_borders", "(", "image2d_median", ",", "sought_value", "=", "0", ")", "if", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ":", "ximshow", "(", "mask2d", ".", "astype", "(", "int", ")", ",", "z1z2", "=", "(", "-", ".2", ",", "1.2", ")", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", ",", "cdelt1", "=", "cdelt1", ",", "debugplot", "=", "debugplot", ")", "# update mask with unused slitlets", "for", "islitlet", "in", "list_not_useful_slitlets", ":", "mask2d", "[", "islitlet", "-", "1", ",", ":", "]", "=", "np", ".", "array", "(", "[", "True", "]", "*", "naxis1", ")", "if", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ":", "ximshow", 
"(", "mask2d", ".", "astype", "(", "int", ")", ",", "z1z2", "=", "(", "-", ".2", ",", "1.2", ")", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", ",", "cdelt1", "=", "cdelt1", ",", "debugplot", "=", "debugplot", ")", "# useful image pixels", "image2d_masked", "=", "image2d_median", "*", "(", "1", "-", "mask2d", ".", "astype", "(", "int", ")", ")", "if", "abs", "(", "debugplot", ")", "%", "10", "!=", "0", ":", "ximshow", "(", "image2d_masked", ",", "crpix1", "=", "crpix1", ",", "crval1", "=", "crval1", ",", "cdelt1", "=", "cdelt1", ",", "debugplot", "=", "debugplot", ")", "# masked image", "image2d_masked", "=", "np", ".", "ma", ".", "masked_array", "(", "image2d_median", ",", "mask", "=", "mask2d", ")", "# median spectrum", "image1d_median", "=", "np", ".", "ma", ".", "median", "(", "image2d_masked", ",", "axis", "=", "0", ")", ".", "data", "image_median", "=", "fits", ".", "PrimaryHDU", "(", "data", "=", "image1d_median", ",", "header", "=", "image_header", ")", "else", ":", "image_median", "=", "fits", ".", "PrimaryHDU", "(", "data", "=", "image2d_median", ",", "header", "=", "image_header", ")", "return", "fits", ".", "HDUList", "(", "[", "image_median", "]", ")" ]
Compute median spectrum for each slitlet.

Parameters
----------
input_image : HDUList object
    Input 2D image.
mode : int
    Indicate desired result:
    0 : image with the same size as the input image, with the median
        spectrum of each slitlet spanning all the spectra of the
        corresponding slitlet
    1 : image with 55 spectra, containing the median spectra of each
        slitlet
    2 : single collapsed median spectrum, using exclusively the useful
        slitlets from the input image
minimum_slitlet_width_mm : float
    Minimum slitlet width (mm) for a valid slitlet.
maximum_slitlet_width_mm : float
    Maximum slitlet width (mm) for a valid slitlet.
debugplot : int
    Determines whether intermediate computations and/or plots are
    displayed. The valid codes are defined in
    numina.array.display.pause_debugplot.

Returns
-------
image_median : HDUList object
    Output image.
[ "Compute", "median", "spectrum", "for", "each", "slitlet", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/median_slitlets_rectified.py#L42-L167
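A hedged usage sketch for median_slitlets_rectified; the input file name is invented for illustration, and the function itself is assumed to be importable from the module listed above.

from astropy.io import fits

# The input must be a rectified EMIR frame whose NAXIS2 equals
# EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED, as checked by the function.
input_image = fits.open('rectified_mos.fits')

# mode=2 collapses the useful slitlets into a single median spectrum.
result = median_slitlets_rectified(input_image, mode=2)

# The function returns an HDUList, so it can be written straight back to disk.
result.writeto('median_spectrum.fits', overwrite=True)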
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/api.py
_check_deployed_nodes
def _check_deployed_nodes(nodes):
    """This is borrowed from execo."""
    deployed = []
    undeployed = []
    cmd = "! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')"

    deployed_check = get_execo_remote(
        cmd,
        nodes,
        DEFAULT_CONN_PARAMS)

    for p in deployed_check.processes:
        p.nolog_exit_code = True
        p.nolog_timeout = True
        p.nolog_error = True
        p.timeout = 10
    deployed_check.run()

    for p in deployed_check.processes:
        if p.ok:
            deployed.append(p.host.address)
        else:
            undeployed.append(p.host.address)

    return deployed, undeployed
python
def _check_deployed_nodes(nodes):
    """This is borrowed from execo."""
    deployed = []
    undeployed = []
    cmd = "! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')"

    deployed_check = get_execo_remote(
        cmd,
        nodes,
        DEFAULT_CONN_PARAMS)

    for p in deployed_check.processes:
        p.nolog_exit_code = True
        p.nolog_timeout = True
        p.nolog_error = True
        p.timeout = 10
    deployed_check.run()

    for p in deployed_check.processes:
        if p.ok:
            deployed.append(p.host.address)
        else:
            undeployed.append(p.host.address)

    return deployed, undeployed
[ "def", "_check_deployed_nodes", "(", "nodes", ")", ":", "deployed", "=", "[", "]", "undeployed", "=", "[", "]", "cmd", "=", "\"! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')\"", "deployed_check", "=", "get_execo_remote", "(", "cmd", ",", "nodes", ",", "DEFAULT_CONN_PARAMS", ")", "for", "p", "in", "deployed_check", ".", "processes", ":", "p", ".", "nolog_exit_code", "=", "True", "p", ".", "nolog_timeout", "=", "True", "p", ".", "nolog_error", "=", "True", "p", ".", "timeout", "=", "10", "deployed_check", ".", "run", "(", ")", "for", "p", "in", "deployed_check", ".", "processes", ":", "if", "p", ".", "ok", ":", "deployed", ".", "append", "(", "p", ".", "host", ".", "address", ")", "else", ":", "undeployed", ".", "append", "(", "p", ".", "host", ".", "address", ")", "return", "deployed", ",", "undeployed" ]
This is borrowed from execo.
[ "This", "is", "borrowed", "from", "execo", "." ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/api.py#L30-L54
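A short sketch of how the helper above might be used after a deployment; the node names are placeholders, not values from the record.

# Hypothetical post-deployment check on two Grid'5000 nodes.
nodes = ["paravance-1.rennes.grid5000.fr", "paravance-2.rennes.grid5000.fr"]
deployed, undeployed = _check_deployed_nodes(nodes)

if undeployed:
    # A caller would typically retry the deployment only on the hosts
    # that still show the default (non-deployed) root partition.
    print("nodes to redeploy: %s" % undeployed)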
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/api.py
Resources.get_networks
def get_networks(self):
    """Get the networks associated with the resource description.

    Returns
        list of tuple roles, network
    """
    networks = self.c_resources["networks"]
    result = []
    for net in networks:
        _c_network = net.get("_c_network")
        if _c_network is None:
            continue
        roles = utils.get_roles_as_list(net)
        result.append((roles, _c_network))
    return result
python
def get_networks(self):
    """Get the networks associated with the resource description.

    Returns
        list of tuple roles, network
    """
    networks = self.c_resources["networks"]
    result = []
    for net in networks:
        _c_network = net.get("_c_network")
        if _c_network is None:
            continue
        roles = utils.get_roles_as_list(net)
        result.append((roles, _c_network))
    return result
[ "def", "get_networks", "(", "self", ")", ":", "networks", "=", "self", ".", "c_resources", "[", "\"networks\"", "]", "result", "=", "[", "]", "for", "net", "in", "networks", ":", "_c_network", "=", "net", ".", "get", "(", "\"_c_network\"", ")", "if", "_c_network", "is", "None", ":", "continue", "roles", "=", "utils", ".", "get_roles_as_list", "(", "net", ")", "result", ".", "append", "(", "(", "roles", ",", "_c_network", ")", ")", "return", "result" ]
Get the networks associated with the resource description. Returns list of tuple roles, network
[ "Get", "the", "networks", "assoiated", "with", "the", "resource", "description", "." ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/api.py#L167-L181
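The method filters the internal resource description, keeping only networks that already carry a concrete "_c_network" entry. A stand-alone sketch of that filtering; the dictionary shapes and the simplified roles lookup are assumptions for illustration, not the real enoslib data model.

def networks_from_description(c_resources):
    """Stand-alone sketch of the filtering done by Resources.get_networks()."""
    result = []
    for net in c_resources["networks"]:
        concrete = net.get("_c_network")
        if concrete is None:
            continue  # network not yet concretized by the provider
        result.append((net.get("roles", []), concrete))
    return result


print(networks_from_description({
    "networks": [
        {"roles": ["control_network"], "_c_network": {"site": "rennes"}},
        {"roles": ["private_network"]},  # skipped: no _c_network yet
    ]
}))
# -> [(['control_network'], {'site': 'rennes'})]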
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/api.py
Resources.get_roles
def get_roles(self):
    """Get the roles associated with the hosts.

    Returns
        dict of role -> [host]
    """
    machines = self.c_resources["machines"]
    result = {}
    for desc in machines:
        roles = utils.get_roles_as_list(desc)
        hosts = self._denormalize(desc)
        for role in roles:
            result.setdefault(role, [])
            result[role].extend(hosts)
    return result
python
def get_roles(self):
    """Get the roles associated with the hosts.

    Returns
        dict of role -> [host]
    """
    machines = self.c_resources["machines"]
    result = {}
    for desc in machines:
        roles = utils.get_roles_as_list(desc)
        hosts = self._denormalize(desc)
        for role in roles:
            result.setdefault(role, [])
            result[role].extend(hosts)
    return result
[ "def", "get_roles", "(", "self", ")", ":", "machines", "=", "self", ".", "c_resources", "[", "\"machines\"", "]", "result", "=", "{", "}", "for", "desc", "in", "machines", ":", "roles", "=", "utils", ".", "get_roles_as_list", "(", "desc", ")", "hosts", "=", "self", ".", "_denormalize", "(", "desc", ")", "for", "role", "in", "roles", ":", "result", ".", "setdefault", "(", "role", ",", "[", "]", ")", "result", "[", "role", "]", ".", "extend", "(", "hosts", ")", "return", "result" ]
Get the roles associated with the hosts. Returns dict of role -> [host]
[ "Get", "the", "roles", "associated", "with", "the", "hosts", "." ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/api.py#L183-L198
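A self-contained sketch of the grouping performed by get_roles(). Hosts are plain strings here and the "_c_nodes" key stands in for whatever _denormalize() reads from a machine description, so both are illustrative assumptions.

def roles_from_description(machines):
    """Group hosts under every role they are tagged with (simplified sketch)."""
    result = {}
    for desc in machines:
        for role in desc.get("roles", []):
            result.setdefault(role, [])
            result[role].extend(desc.get("_c_nodes", []))
    return result


print(roles_from_description([
    {"roles": ["control", "compute"], "_c_nodes": ["paravance-1"]},
    {"roles": ["compute"], "_c_nodes": ["paravance-2", "paravance-3"]},
]))
# -> {'control': ['paravance-1'], 'compute': ['paravance-1', 'paravance-2', 'paravance-3']}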
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.start_client
async def start_client(self, sock: anyio.abc.SocketStream, addr, path: str,
                       headers: Optional[List] = None,
                       subprotocols: Optional[List[str]] = None):
    """Start a client WS connection on this socket.

    Returns: the AcceptConnection message.
    """
    self._sock = sock
    self._connection = WSConnection(ConnectionType.CLIENT)
    if headers is None:
        headers = []
    if subprotocols is None:
        subprotocols = []
    data = self._connection.send(
        Request(
            host=addr[0], target=path, extra_headers=headers,
            subprotocols=subprotocols))
    await self._sock.send_all(data)

    assert self._scope is None
    self._scope = True
    try:
        event = await self._next_event()
        if not isinstance(event, AcceptConnection):
            raise ConnectionError("Failed to establish a connection", event)
        return event
    finally:
        self._scope = None
python
async def start_client(self, sock: anyio.abc.SocketStream, addr, path: str,
                       headers: Optional[List] = None,
                       subprotocols: Optional[List[str]] = None):
    """Start a client WS connection on this socket.

    Returns: the AcceptConnection message.
    """
    self._sock = sock
    self._connection = WSConnection(ConnectionType.CLIENT)
    if headers is None:
        headers = []
    if subprotocols is None:
        subprotocols = []
    data = self._connection.send(
        Request(
            host=addr[0], target=path, extra_headers=headers,
            subprotocols=subprotocols))
    await self._sock.send_all(data)

    assert self._scope is None
    self._scope = True
    try:
        event = await self._next_event()
        if not isinstance(event, AcceptConnection):
            raise ConnectionError("Failed to establish a connection", event)
        return event
    finally:
        self._scope = None
[ "async", "def", "start_client", "(", "self", ",", "sock", ":", "anyio", ".", "abc", ".", "SocketStream", ",", "addr", ",", "path", ":", "str", ",", "headers", ":", "Optional", "[", "List", "]", "=", "None", ",", "subprotocols", ":", "Optional", "[", "List", "[", "str", "]", "]", "=", "None", ")", ":", "self", ".", "_sock", "=", "sock", "self", ".", "_connection", "=", "WSConnection", "(", "ConnectionType", ".", "CLIENT", ")", "if", "headers", "is", "None", ":", "headers", "=", "[", "]", "if", "subprotocols", "is", "None", ":", "subprotocols", "=", "[", "]", "data", "=", "self", ".", "_connection", ".", "send", "(", "Request", "(", "host", "=", "addr", "[", "0", "]", ",", "target", "=", "path", ",", "extra_headers", "=", "headers", ",", "subprotocols", "=", "subprotocols", ")", ")", "await", "self", ".", "_sock", ".", "send_all", "(", "data", ")", "assert", "self", ".", "_scope", "is", "None", "self", ".", "_scope", "=", "True", "try", ":", "event", "=", "await", "self", ".", "_next_event", "(", ")", "if", "not", "isinstance", "(", "event", ",", "AcceptConnection", ")", ":", "raise", "ConnectionError", "(", "\"Failed to establish a connection\"", ",", "event", ")", "return", "event", "finally", ":", "self", ".", "_scope", "=", "None" ]
Start a client WS connection on this socket. Returns: the AcceptConnection message.
[ "Start", "a", "client", "WS", "connection", "on", "this", "socket", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L42-L75
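A hedged connection sketch built only from the method shown above. The anyio.connect_tcp call (anyio 1.x style), the no-argument Websocket() constructor, and the host/path values are assumptions about the surrounding library; start_client itself only needs a stream exposing send_all/receive_some.

import anyio

async def open_ws():
    # Assumed anyio 1.x-style call returning a SocketStream.
    sock = await anyio.connect_tcp("echo.example.org", 80)
    ws = Websocket()  # assumed no-argument constructor
    await ws.start_client(
        sock,
        addr=("echo.example.org", 80),
        path="/ws",
        subprotocols=["chat"],
    )
    return ws

anyio.run(open_ws)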
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.start_server
async def start_server(self, sock: anyio.abc.SocketStream, filter=None):  # pylint: disable=W0622
    """Start a server WS connection on this socket.

    Filter: an async callable that gets passed the initial Request. It may
    return an AcceptConnection message, a bool, or a string (the subprotocol
    to use).

    Returns: the Request message.
    """
    assert self._scope is None
    self._scope = True
    self._sock = sock
    self._connection = WSConnection(ConnectionType.SERVER)
    try:
        event = await self._next_event()
        if not isinstance(event, Request):
            raise ConnectionError("Failed to establish a connection", event)
        msg = None
        if filter is not None:
            msg = await filter(event)
            if not msg:
                msg = RejectConnection()
            elif msg is True:
                msg = None
            elif isinstance(msg, str):
                msg = AcceptConnection(subprotocol=msg)
        if not msg:
            msg = AcceptConnection(subprotocol=event.subprotocols[0])
        data = self._connection.send(msg)
        await self._sock.send_all(data)
        if not isinstance(msg, AcceptConnection):
            raise ConnectionError("Not accepted", msg)
    finally:
        self._scope = None
python
async def start_server(self, sock: anyio.abc.SocketStream, filter=None):  # pylint: disable=W0622
    """Start a server WS connection on this socket.

    Filter: an async callable that gets passed the initial Request. It may
    return an AcceptConnection message, a bool, or a string (the subprotocol
    to use).

    Returns: the Request message.
    """
    assert self._scope is None
    self._scope = True
    self._sock = sock
    self._connection = WSConnection(ConnectionType.SERVER)
    try:
        event = await self._next_event()
        if not isinstance(event, Request):
            raise ConnectionError("Failed to establish a connection", event)
        msg = None
        if filter is not None:
            msg = await filter(event)
            if not msg:
                msg = RejectConnection()
            elif msg is True:
                msg = None
            elif isinstance(msg, str):
                msg = AcceptConnection(subprotocol=msg)
        if not msg:
            msg = AcceptConnection(subprotocol=event.subprotocols[0])
        data = self._connection.send(msg)
        await self._sock.send_all(data)
        if not isinstance(msg, AcceptConnection):
            raise ConnectionError("Not accepted", msg)
    finally:
        self._scope = None
[ "async", "def", "start_server", "(", "self", ",", "sock", ":", "anyio", ".", "abc", ".", "SocketStream", ",", "filter", "=", "None", ")", ":", "# pylint: disable=W0622", "assert", "self", ".", "_scope", "is", "None", "self", ".", "_scope", "=", "True", "self", ".", "_sock", "=", "sock", "self", ".", "_connection", "=", "WSConnection", "(", "ConnectionType", ".", "SERVER", ")", "try", ":", "event", "=", "await", "self", ".", "_next_event", "(", ")", "if", "not", "isinstance", "(", "event", ",", "Request", ")", ":", "raise", "ConnectionError", "(", "\"Failed to establish a connection\"", ",", "event", ")", "msg", "=", "None", "if", "filter", "is", "not", "None", ":", "msg", "=", "await", "filter", "(", "event", ")", "if", "not", "msg", ":", "msg", "=", "RejectConnection", "(", ")", "elif", "msg", "is", "True", ":", "msg", "=", "None", "elif", "isinstance", "(", "msg", ",", "str", ")", ":", "msg", "=", "AcceptConnection", "(", "subprotocol", "=", "msg", ")", "if", "not", "msg", ":", "msg", "=", "AcceptConnection", "(", "subprotocol", "=", "event", ".", "subprotocols", "[", "0", "]", ")", "data", "=", "self", ".", "_connection", ".", "send", "(", "msg", ")", "await", "self", ".", "_sock", ".", "send_all", "(", "data", ")", "if", "not", "isinstance", "(", "msg", ",", "AcceptConnection", ")", ":", "raise", "ConnectionError", "(", "\"Not accepted\"", ",", "msg", ")", "finally", ":", "self", ".", "_scope", "=", "None" ]
Start a server WS connection on this socket. Filter: an async callable that gets passed the initial Request. It may return an AcceptConnection message, a bool, or a string (the subprotocol to use). Returns: the Request message.
[ "Start", "a", "server", "WS", "connection", "on", "this", "socket", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L77-L111
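A sketch of the filter hook described in the docstring: it receives the wsproto Request and may return False (reject), True (accept with defaults), a subprotocol string, or a full AcceptConnection event. The server_stream name is a placeholder for an already-accepted anyio socket.

async def only_chat(request):
    # Accept only clients offering the 'chat' subprotocol and pin it.
    if "chat" in request.subprotocols:
        return "chat"
    return False  # everything else gets a RejectConnection

# ws = Websocket()
# await ws.start_server(server_stream, filter=only_chat)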
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._next_event
async def _next_event(self):
    """
    Gets the next event.
    """
    while True:
        for event in self._connection.events():
            if isinstance(event, Message):
                # check if we need to buffer
                if event.message_finished:
                    return self._wrap_data(self._gather_buffers(event))
                self._buffer(event)
                break  # exit for loop
            else:
                return event

        data = await self._sock.receive_some(4096)
        if not data:
            return CloseConnection(code=500, reason="Socket closed")
        self._connection.receive_data(data)
python
async def _next_event(self):
    """
    Gets the next event.
    """
    while True:
        for event in self._connection.events():
            if isinstance(event, Message):
                # check if we need to buffer
                if event.message_finished:
                    return self._wrap_data(self._gather_buffers(event))
                self._buffer(event)
                break  # exit for loop
            else:
                return event

        data = await self._sock.receive_some(4096)
        if not data:
            return CloseConnection(code=500, reason="Socket closed")
        self._connection.receive_data(data)
[ "async", "def", "_next_event", "(", "self", ")", ":", "while", "True", ":", "for", "event", "in", "self", ".", "_connection", ".", "events", "(", ")", ":", "if", "isinstance", "(", "event", ",", "Message", ")", ":", "# check if we need to buffer", "if", "event", ".", "message_finished", ":", "return", "self", ".", "_wrap_data", "(", "self", ".", "_gather_buffers", "(", "event", ")", ")", "self", ".", "_buffer", "(", "event", ")", "break", "# exit for loop", "else", ":", "return", "event", "data", "=", "await", "self", ".", "_sock", ".", "receive_some", "(", "4096", ")", "if", "not", "data", ":", "return", "CloseConnection", "(", "code", "=", "500", ",", "reason", "=", "\"Socket closed\"", ")", "self", ".", "_connection", ".", "receive_data", "(", "data", ")" ]
Gets the next event.
[ "Gets", "the", "next", "event", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L113-L131
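A minimal consumer loop written directly against the internal _next_event() helper, only to illustrate the kinds of events it yields; a real application would presumably use whatever public iteration API the class exposes. The handle() call is a placeholder.

from wsproto.events import BytesMessage, CloseConnection, TextMessage

async def pump(ws):
    while True:
        event = await ws._next_event()
        if isinstance(event, CloseConnection):
            break  # peer closed, or the socket returned no data
        if isinstance(event, (TextMessage, BytesMessage)):
            handle(event.data)  # placeholder for application code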
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.close
async def close(self, code: int = 1006, reason: str = "Connection closed"):
    """
    Closes the websocket.
    """
    if self._closed:
        return

    self._closed = True

    if self._scope is not None:
        await self._scope.cancel()  # cancel any outstanding listeners

    data = self._connection.send(CloseConnection(code=code, reason=reason))
    await self._sock.send_all(data)

    # No, we don't wait for the correct reply
    await self._sock.close()
python
async def close(self, code: int = 1006, reason: str = "Connection closed"):
    """
    Closes the websocket.
    """
    if self._closed:
        return

    self._closed = True

    if self._scope is not None:
        await self._scope.cancel()  # cancel any outstanding listeners

    data = self._connection.send(CloseConnection(code=code, reason=reason))
    await self._sock.send_all(data)

    # No, we don't wait for the correct reply
    await self._sock.close()
[ "async", "def", "close", "(", "self", ",", "code", ":", "int", "=", "1006", ",", "reason", ":", "str", "=", "\"Connection closed\"", ")", ":", "if", "self", ".", "_closed", ":", "return", "self", ".", "_closed", "=", "True", "if", "self", ".", "_scope", "is", "not", "None", ":", "await", "self", ".", "_scope", ".", "cancel", "(", ")", "# cancel any outstanding listeners", "data", "=", "self", ".", "_connection", ".", "send", "(", "CloseConnection", "(", "code", "=", "code", ",", "reason", "=", "reason", ")", ")", "await", "self", ".", "_sock", ".", "send_all", "(", "data", ")", "# No, we don't wait for the correct reply", "await", "self", ".", "_sock", ".", "close", "(", ")" ]
Closes the websocket.
[ "Closes", "the", "websocket", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L133-L150
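A typical shutdown pattern around close(); 1000 is the normal-closure code from RFC 6455, whereas the default used above (1006) denotes an abnormal closure. The session body is illustrative.

async def run_session(ws):
    try:
        await ws.send("hello")
    finally:
        # Always send a proper close frame, even if the session body raised.
        await ws.close(code=1000, reason="client done")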
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket.send
async def send(self, data: Union[bytes, str], final: bool = True):
    """
    Sends some data down the connection.
    """
    MsgType = TextMessage if isinstance(data, str) else BytesMessage
    data = MsgType(data=data, message_finished=final)
    data = self._connection.send(event=data)
    await self._sock.send_all(data)
python
async def send(self, data: Union[bytes, str], final: bool = True):
    """
    Sends some data down the connection.
    """
    MsgType = TextMessage if isinstance(data, str) else BytesMessage
    data = MsgType(data=data, message_finished=final)
    data = self._connection.send(event=data)
    await self._sock.send_all(data)
[ "async", "def", "send", "(", "self", ",", "data", ":", "Union", "[", "bytes", ",", "str", "]", ",", "final", ":", "bool", "=", "True", ")", ":", "MsgType", "=", "TextMessage", "if", "isinstance", "(", "data", ",", "str", ")", "else", "BytesMessage", "data", "=", "MsgType", "(", "data", "=", "data", ",", "message_finished", "=", "final", ")", "data", "=", "self", ".", "_connection", ".", "send", "(", "event", "=", "data", ")", "await", "self", ".", "_sock", ".", "send_all", "(", "data", ")" ]
Sends some data down the connection.
[ "Sends", "some", "data", "down", "the", "connection", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L152-L159
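Because send() forwards str as a text frame and bytes as a binary frame, and final=False leaves the message open, a caller can stream a large payload in fragments. A small hedged helper illustrating that pattern:

async def send_in_chunks(ws, payload: bytes, chunk: int = 4096):
    # Emit continuation fragments until the last slice, which closes the message.
    for i in range(0, len(payload), chunk):
        final = i + chunk >= len(payload)
        await ws.send(payload[i:i + chunk], final=final)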
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._buffer
def _buffer(self, event: Message):
    """
    Buffers an event, if applicable.
    """
    if isinstance(event, BytesMessage):
        self._byte_buffer.write(event.data)
    elif isinstance(event, TextMessage):
        self._string_buffer.write(event.data)
python
def _buffer(self, event: Message):
    """
    Buffers an event, if applicable.
    """
    if isinstance(event, BytesMessage):
        self._byte_buffer.write(event.data)
    elif isinstance(event, TextMessage):
        self._string_buffer.write(event.data)
[ "def", "_buffer", "(", "self", ",", "event", ":", "Message", ")", ":", "if", "isinstance", "(", "event", ",", "BytesMessage", ")", ":", "self", ".", "_byte_buffer", ".", "write", "(", "event", ".", "data", ")", "elif", "isinstance", "(", "event", ",", "TextMessage", ")", ":", "self", ".", "_string_buffer", ".", "write", "(", "event", ".", "data", ")" ]
Buffers an event, if applicable.
[ "Buffers", "an", "event", "if", "applicable", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L161-L168
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._gather_buffers
def _gather_buffers(self, event: Message):
    """
    Gathers all the data from a buffer.
    """
    if isinstance(event, BytesMessage):
        buf = self._byte_buffer
    else:
        buf = self._string_buffer  # yay for code shortening

    buf.write(event.data)
    buf.seek(0)
    data = buf.read()
    buf.seek(0)
    buf.truncate()
    return data
python
def _gather_buffers(self, event: Message):
    """
    Gathers all the data from a buffer.
    """
    if isinstance(event, BytesMessage):
        buf = self._byte_buffer
    else:
        buf = self._string_buffer  # yay for code shortening

    buf.write(event.data)
    buf.seek(0)
    data = buf.read()
    buf.seek(0)
    buf.truncate()
    return data
[ "def", "_gather_buffers", "(", "self", ",", "event", ":", "Message", ")", ":", "if", "isinstance", "(", "event", ",", "BytesMessage", ")", ":", "buf", "=", "self", ".", "_byte_buffer", "else", ":", "buf", "=", "self", ".", "_string_buffer", "# yay for code shortening", "buf", ".", "write", "(", "event", ".", "data", ")", "buf", ".", "seek", "(", "0", ")", "data", "=", "buf", ".", "read", "(", ")", "buf", ".", "seek", "(", "0", ")", "buf", ".", "truncate", "(", ")", "return", "data" ]
Gathers all the data from a buffer.
[ "Gathers", "all", "the", "data", "from", "a", "buffer", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L170-L185
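The two buffer helpers above implement a simple accumulate-then-drain pattern: partial frames are written into an io buffer, and once the final frame of a message arrives the buffer is read out and truncated for reuse. A self-contained sketch of the same pattern, with a hypothetical gather() helper standing in for the class methods:

import io

_string_buffer = io.StringIO()

def gather(fragment, message_finished):
    _string_buffer.write(fragment)   # buffer the partial frame
    if not message_finished:
        return None                  # keep waiting for more frames
    _string_buffer.seek(0)
    data = _string_buffer.read()     # drain the complete message
    _string_buffer.seek(0)
    _string_buffer.truncate()        # reset for the next message
    return data

print(gather("hel", False))  # None
print(gather("lo", True))    # 'hello'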
Fuyukai/asyncwebsockets
asyncwebsockets/websocket.py
Websocket._wrap_data
def _wrap_data(data: Union[str, bytes]):
    """
    Wraps data into the right event.
    """
    MsgType = TextMessage if isinstance(data, str) else BytesMessage
    return MsgType(data=data, frame_finished=True, message_finished=True)
python
def _wrap_data(data: Union[str, bytes]):
    """
    Wraps data into the right event.
    """
    MsgType = TextMessage if isinstance(data, str) else BytesMessage
    return MsgType(data=data, frame_finished=True, message_finished=True)
[ "def", "_wrap_data", "(", "data", ":", "Union", "[", "str", ",", "bytes", "]", ")", ":", "MsgType", "=", "TextMessage", "if", "isinstance", "(", "data", ",", "str", ")", "else", "BytesMessage", "return", "MsgType", "(", "data", "=", "data", ",", "frame_finished", "=", "True", ",", "message_finished", "=", "True", ")" ]
Wraps data into the right event.
[ "Wraps", "data", "into", "the", "right", "event", "." ]
train
https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/websocket.py#L188-L193
Jaymon/prom
prom/interface/sqlite.py
SQLite.get_field_SQL
def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) if issubclass(field.type, bool): field_type = 'BOOLEAN' elif issubclass(field.type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(field.type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(field.type, datetime.date): field_type = 'DATE' elif issubclass(field.type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(field.type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(field.type, bytearray): field_type = 'BLOB' else: raise ValueError('unknown python type: {}'.format(field.type.__name__)) if field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type)
python
def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) if issubclass(field.type, bool): field_type = 'BOOLEAN' elif issubclass(field.type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(field.type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(field.type, datetime.date): field_type = 'DATE' elif issubclass(field.type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(field.type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(field.type, bytearray): field_type = 'BLOB' else: raise ValueError('unknown python type: {}'.format(field.type.__name__)) if field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type)
[ "def", "get_field_SQL", "(", "self", ",", "field_name", ",", "field", ")", ":", "field_type", "=", "\"\"", "is_pk", "=", "field", ".", "options", ".", "get", "(", "'pk'", ",", "False", ")", "if", "issubclass", "(", "field", ".", "type", ",", "bool", ")", ":", "field_type", "=", "'BOOLEAN'", "elif", "issubclass", "(", "field", ".", "type", ",", "long", ")", ":", "if", "is_pk", ":", "field_type", "=", "'INTEGER PRIMARY KEY'", "else", ":", "field_type", "=", "'BIGINT'", "elif", "issubclass", "(", "field", ".", "type", ",", "int", ")", ":", "field_type", "=", "'INTEGER'", "if", "is_pk", ":", "field_type", "+=", "' PRIMARY KEY'", "elif", "issubclass", "(", "field", ".", "type", ",", "basestring", ")", ":", "fo", "=", "field", ".", "options", "if", "field", ".", "is_ref", "(", ")", ":", "# TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance", "# would combine all the options of both the current field and the", "# foreign key field and return all those when Field.options is called", "# (with the current field's options taking precedence) but there are", "# lots of circular dependency things that happen when one field is", "# trying to get the schema of another field and I don't have time", "# to sort it all out right now", "ref_s", "=", "field", ".", "schema", "fo", "=", "ref_s", ".", "pk", ".", "options", "if", "'size'", "in", "fo", ":", "field_type", "=", "'CHARACTER({})'", ".", "format", "(", "fo", "[", "'size'", "]", ")", "elif", "'max_size'", "in", "fo", ":", "field_type", "=", "'VARCHAR({})'", ".", "format", "(", "fo", "[", "'max_size'", "]", ")", "else", ":", "field_type", "=", "'TEXT'", "if", "fo", ".", "get", "(", "'ignore_case'", ",", "False", ")", ":", "field_type", "+=", "' COLLATE NOCASE'", "if", "is_pk", ":", "field_type", "+=", "' PRIMARY KEY'", "elif", "issubclass", "(", "field", ".", "type", ",", "datetime", ".", "datetime", ")", ":", "#field_type = 'DATETIME'", "field_type", "=", "'TIMESTAMP'", "elif", "issubclass", "(", "field", ".", "type", ",", "datetime", ".", "date", ")", ":", "field_type", "=", "'DATE'", "elif", "issubclass", "(", "field", ".", "type", ",", "float", ")", ":", "field_type", "=", "'REAL'", "size", "=", "field", ".", "options", ".", "get", "(", "'size'", ",", "field", ".", "options", ".", "get", "(", "'max_size'", ",", "0", ")", ")", "if", "size", ">", "6", ":", "field_type", "=", "'DOUBLE PRECISION'", "elif", "issubclass", "(", "field", ".", "type", ",", "decimal", ".", "Decimal", ")", ":", "field_type", "=", "'NUMERIC'", "elif", "issubclass", "(", "field", ".", "type", ",", "bytearray", ")", ":", "field_type", "=", "'BLOB'", "else", ":", "raise", "ValueError", "(", "'unknown python type: {}'", ".", "format", "(", "field", ".", "type", ".", "__name__", ")", ")", "if", "field", ".", "required", ":", "field_type", "+=", "' NOT NULL'", "else", ":", "field_type", "+=", "' NULL'", "if", "not", "is_pk", ":", "if", "field", ".", "is_ref", "(", ")", ":", "ref_s", "=", "field", ".", "schema", "if", "field", ".", "required", ":", "# strong ref, it deletes on fk row removal", "field_type", "+=", "' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'", ".", "format", "(", "ref_s", ",", "ref_s", ".", "pk", ".", "name", ")", "else", ":", "# weak ref, it sets column to null on fk row removal", "field_type", "+=", "' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'", ".", "format", "(", "ref_s", ",", "ref_s", ".", "pk", ".", "name", ")", "return", "'{} {}'", ".", "format", "(", "self", ".", "_normalize_name", "(", "field_name", ")", 
",", "field_type", ")" ]
returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL)
[ "returns", "the", "SQL", "for", "a", "given", "field", "with", "full", "type", "information" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L266-L362
Jaymon/prom
prom/interface/sqlite.py
SQLite._set_table
def _set_table(self, schema, **kwargs): """ http://sqlite.org/lang_createtable.html """ query_str = [] query_str.append("CREATE TABLE {} (".format(self._normalize_table_name(schema))) query_fields = [] for field_name, field in schema.fields.items(): query_fields.append(' {}'.format(self.get_field_SQL(field_name, field))) query_str.append(",{}".format(os.linesep).join(query_fields)) query_str.append(')') query_str = os.linesep.join(query_str) ret = self._query(query_str, ignore_result=True, **kwargs)
python
def _set_table(self, schema, **kwargs): """ http://sqlite.org/lang_createtable.html """ query_str = [] query_str.append("CREATE TABLE {} (".format(self._normalize_table_name(schema))) query_fields = [] for field_name, field in schema.fields.items(): query_fields.append(' {}'.format(self.get_field_SQL(field_name, field))) query_str.append(",{}".format(os.linesep).join(query_fields)) query_str.append(')') query_str = os.linesep.join(query_str) ret = self._query(query_str, ignore_result=True, **kwargs)
[ "def", "_set_table", "(", "self", ",", "schema", ",", "*", "*", "kwargs", ")", ":", "query_str", "=", "[", "]", "query_str", ".", "append", "(", "\"CREATE TABLE {} (\"", ".", "format", "(", "self", ".", "_normalize_table_name", "(", "schema", ")", ")", ")", "query_fields", "=", "[", "]", "for", "field_name", ",", "field", "in", "schema", ".", "fields", ".", "items", "(", ")", ":", "query_fields", ".", "append", "(", "' {}'", ".", "format", "(", "self", ".", "get_field_SQL", "(", "field_name", ",", "field", ")", ")", ")", "query_str", ".", "append", "(", "\",{}\"", ".", "format", "(", "os", ".", "linesep", ")", ".", "join", "(", "query_fields", ")", ")", "query_str", ".", "append", "(", "')'", ")", "query_str", "=", "os", ".", "linesep", ".", "join", "(", "query_str", ")", "ret", "=", "self", ".", "_query", "(", "query_str", ",", "ignore_result", "=", "True", ",", "*", "*", "kwargs", ")" ]
http://sqlite.org/lang_createtable.html
[ "http", ":", "//", "sqlite", ".", "org", "/", "lang_createtable", ".", "html" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L364-L378
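_set_table() only assembles a CREATE TABLE statement by joining the per-field SQL that get_field_SQL() produces. A rough illustration of the assembled statement, with made-up field SQL, executed against an in-memory SQLite database to show it parses cleanly:

import os
import sqlite3

# Hypothetical output of get_field_SQL() for two fields.
query_fields = ['  "_id" INTEGER PRIMARY KEY', '  "name" VARCHAR(32) NOT NULL']

query_str = os.linesep.join([
    'CREATE TABLE "foo" (',
    (",%s" % os.linesep).join(query_fields),
    ')',
])
print(query_str)
sqlite3.connect(":memory:").execute(query_str)  # runs without error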
Jaymon/prom
prom/interface/sqlite.py
SQLite._set_index
def _set_index(self, schema, name, fields, **index_options): """ https://www.sqlite.org/lang_createindex.html """ query_str = "CREATE {}INDEX IF NOT EXISTS '{}_{}' ON {} ({})".format( 'UNIQUE ' if index_options.get('unique', False) else '', schema, name, self._normalize_table_name(schema), ', '.join((self._normalize_name(f) for f in fields)) ) return self._query(query_str, ignore_result=True, **index_options)
python
def _set_index(self, schema, name, fields, **index_options): """ https://www.sqlite.org/lang_createindex.html """ query_str = "CREATE {}INDEX IF NOT EXISTS '{}_{}' ON {} ({})".format( 'UNIQUE ' if index_options.get('unique', False) else '', schema, name, self._normalize_table_name(schema), ', '.join((self._normalize_name(f) for f in fields)) ) return self._query(query_str, ignore_result=True, **index_options)
[ "def", "_set_index", "(", "self", ",", "schema", ",", "name", ",", "fields", ",", "*", "*", "index_options", ")", ":", "query_str", "=", "\"CREATE {}INDEX IF NOT EXISTS '{}_{}' ON {} ({})\"", ".", "format", "(", "'UNIQUE '", "if", "index_options", ".", "get", "(", "'unique'", ",", "False", ")", "else", "''", ",", "schema", ",", "name", ",", "self", ".", "_normalize_table_name", "(", "schema", ")", ",", "', '", ".", "join", "(", "(", "self", ".", "_normalize_name", "(", "f", ")", "for", "f", "in", "fields", ")", ")", ")", "return", "self", ".", "_query", "(", "query_str", ",", "ignore_result", "=", "True", ",", "*", "*", "index_options", ")" ]
https://www.sqlite.org/lang_createindex.html
[ "https", ":", "//", "www", ".", "sqlite", ".", "org", "/", "lang_createindex", ".", "html" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L380-L392
Jaymon/prom
prom/interface/sqlite.py
SQLite._get_indexes
def _get_indexes(self, schema, **kwargs): """return all the indexes for the given schema""" # http://www.sqlite.org/pragma.html#schema # http://www.mail-archive.com/sqlite-users@sqlite.org/msg22055.html # http://stackoverflow.com/questions/604939/ ret = {} rs = self._query('PRAGMA index_list({})'.format(self._normalize_table_name(schema)), **kwargs) if rs: for r in rs: iname = r['name'] ret.setdefault(iname, []) indexes = self._query('PRAGMA index_info({})'.format(r['name']), **kwargs) for idict in indexes: ret[iname].append(idict['name']) return ret
python
def _get_indexes(self, schema, **kwargs): """return all the indexes for the given schema""" # http://www.sqlite.org/pragma.html#schema # http://www.mail-archive.com/sqlite-users@sqlite.org/msg22055.html # http://stackoverflow.com/questions/604939/ ret = {} rs = self._query('PRAGMA index_list({})'.format(self._normalize_table_name(schema)), **kwargs) if rs: for r in rs: iname = r['name'] ret.setdefault(iname, []) indexes = self._query('PRAGMA index_info({})'.format(r['name']), **kwargs) for idict in indexes: ret[iname].append(idict['name']) return ret
[ "def", "_get_indexes", "(", "self", ",", "schema", ",", "*", "*", "kwargs", ")", ":", "# http://www.sqlite.org/pragma.html#schema", "# http://www.mail-archive.com/sqlite-users@sqlite.org/msg22055.html", "# http://stackoverflow.com/questions/604939/", "ret", "=", "{", "}", "rs", "=", "self", ".", "_query", "(", "'PRAGMA index_list({})'", ".", "format", "(", "self", ".", "_normalize_table_name", "(", "schema", ")", ")", ",", "*", "*", "kwargs", ")", "if", "rs", ":", "for", "r", "in", "rs", ":", "iname", "=", "r", "[", "'name'", "]", "ret", ".", "setdefault", "(", "iname", ",", "[", "]", ")", "indexes", "=", "self", ".", "_query", "(", "'PRAGMA index_info({})'", ".", "format", "(", "r", "[", "'name'", "]", ")", ",", "*", "*", "kwargs", ")", "for", "idict", "in", "indexes", ":", "ret", "[", "iname", "]", ".", "append", "(", "idict", "[", "'name'", "]", ")", "return", "ret" ]
return all the indexes for the given schema
[ "return", "all", "the", "indexes", "for", "the", "given", "schema" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L394-L409
Jaymon/prom
prom/interface/sqlite.py
SQLite._get_fields
def _get_fields(self, table_name, **kwargs): """return all the fields for the given table""" ret = {} query_str = 'PRAGMA table_info({})'.format(self._normalize_table_name(table_name)) fields = self._query(query_str, **kwargs) #pout.v([dict(d) for d in fields]) query_str = 'PRAGMA foreign_key_list({})'.format(self._normalize_table_name(table_name)) fks = {f["from"]: f for f in self._query(query_str, **kwargs)} #pout.v([dict(d) for d in fks.values()]) pg_types = { "INTEGER": int, "BIGINT": long, "DOUBLE PRECISION": float, "FLOAT": float, "REAL": float, "NUMERIC": decimal.Decimal, "BOOLEAN": bool, "DATE": datetime.date, "TIMESTAMP": datetime.datetime, "CHARACTER": str, "VARCHAR": str, "TEXT": str, "BLOB": bytearray, } # the rows we can set: field_type, name, field_required, min_size, max_size, # size, unique, pk, <foreign key info> # These keys will roughly correspond with schema.Field # TODO -- we could actually use "type" to get the size because SQLite returns # a value like VARCHAR[32] for row in fields: field = { "name": row["name"], "field_required": bool(row["notnull"]) or bool(row["pk"]), "pk": bool(row["pk"]), } for tname, ty in pg_types.items(): if row["type"].startswith(tname): field["field_type"] = ty break if field["pk"] and field["field_type"] is int: # we compensate for SQLite internally setting pk to int field["field_type"] = long if row["name"] in fks: field["schema_table_name"] = fks[row["name"]]["table"] field["ref_table_name"] = fks[row["name"]]["table"] ret[field["name"]] = field return ret
python
def _get_fields(self, table_name, **kwargs): """return all the fields for the given table""" ret = {} query_str = 'PRAGMA table_info({})'.format(self._normalize_table_name(table_name)) fields = self._query(query_str, **kwargs) #pout.v([dict(d) for d in fields]) query_str = 'PRAGMA foreign_key_list({})'.format(self._normalize_table_name(table_name)) fks = {f["from"]: f for f in self._query(query_str, **kwargs)} #pout.v([dict(d) for d in fks.values()]) pg_types = { "INTEGER": int, "BIGINT": long, "DOUBLE PRECISION": float, "FLOAT": float, "REAL": float, "NUMERIC": decimal.Decimal, "BOOLEAN": bool, "DATE": datetime.date, "TIMESTAMP": datetime.datetime, "CHARACTER": str, "VARCHAR": str, "TEXT": str, "BLOB": bytearray, } # the rows we can set: field_type, name, field_required, min_size, max_size, # size, unique, pk, <foreign key info> # These keys will roughly correspond with schema.Field # TODO -- we could actually use "type" to get the size because SQLite returns # a value like VARCHAR[32] for row in fields: field = { "name": row["name"], "field_required": bool(row["notnull"]) or bool(row["pk"]), "pk": bool(row["pk"]), } for tname, ty in pg_types.items(): if row["type"].startswith(tname): field["field_type"] = ty break if field["pk"] and field["field_type"] is int: # we compensate for SQLite internally setting pk to int field["field_type"] = long if row["name"] in fks: field["schema_table_name"] = fks[row["name"]]["table"] field["ref_table_name"] = fks[row["name"]]["table"] ret[field["name"]] = field return ret
[ "def", "_get_fields", "(", "self", ",", "table_name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "}", "query_str", "=", "'PRAGMA table_info({})'", ".", "format", "(", "self", ".", "_normalize_table_name", "(", "table_name", ")", ")", "fields", "=", "self", ".", "_query", "(", "query_str", ",", "*", "*", "kwargs", ")", "#pout.v([dict(d) for d in fields])", "query_str", "=", "'PRAGMA foreign_key_list({})'", ".", "format", "(", "self", ".", "_normalize_table_name", "(", "table_name", ")", ")", "fks", "=", "{", "f", "[", "\"from\"", "]", ":", "f", "for", "f", "in", "self", ".", "_query", "(", "query_str", ",", "*", "*", "kwargs", ")", "}", "#pout.v([dict(d) for d in fks.values()])", "pg_types", "=", "{", "\"INTEGER\"", ":", "int", ",", "\"BIGINT\"", ":", "long", ",", "\"DOUBLE PRECISION\"", ":", "float", ",", "\"FLOAT\"", ":", "float", ",", "\"REAL\"", ":", "float", ",", "\"NUMERIC\"", ":", "decimal", ".", "Decimal", ",", "\"BOOLEAN\"", ":", "bool", ",", "\"DATE\"", ":", "datetime", ".", "date", ",", "\"TIMESTAMP\"", ":", "datetime", ".", "datetime", ",", "\"CHARACTER\"", ":", "str", ",", "\"VARCHAR\"", ":", "str", ",", "\"TEXT\"", ":", "str", ",", "\"BLOB\"", ":", "bytearray", ",", "}", "# the rows we can set: field_type, name, field_required, min_size, max_size,", "# size, unique, pk, <foreign key info>", "# These keys will roughly correspond with schema.Field", "# TODO -- we could actually use \"type\" to get the size because SQLite returns", "# a value like VARCHAR[32]", "for", "row", "in", "fields", ":", "field", "=", "{", "\"name\"", ":", "row", "[", "\"name\"", "]", ",", "\"field_required\"", ":", "bool", "(", "row", "[", "\"notnull\"", "]", ")", "or", "bool", "(", "row", "[", "\"pk\"", "]", ")", ",", "\"pk\"", ":", "bool", "(", "row", "[", "\"pk\"", "]", ")", ",", "}", "for", "tname", ",", "ty", "in", "pg_types", ".", "items", "(", ")", ":", "if", "row", "[", "\"type\"", "]", ".", "startswith", "(", "tname", ")", ":", "field", "[", "\"field_type\"", "]", "=", "ty", "break", "if", "field", "[", "\"pk\"", "]", "and", "field", "[", "\"field_type\"", "]", "is", "int", ":", "# we compensate for SQLite internally setting pk to int", "field", "[", "\"field_type\"", "]", "=", "long", "if", "row", "[", "\"name\"", "]", "in", "fks", ":", "field", "[", "\"schema_table_name\"", "]", "=", "fks", "[", "row", "[", "\"name\"", "]", "]", "[", "\"table\"", "]", "field", "[", "\"ref_table_name\"", "]", "=", "fks", "[", "row", "[", "\"name\"", "]", "]", "[", "\"table\"", "]", "ret", "[", "field", "[", "\"name\"", "]", "]", "=", "field", "return", "ret" ]
return all the fields for the given table
[ "return", "all", "the", "fields", "for", "the", "given", "table" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L473-L527
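The introspection above rests on two SQLite pragmas: table_info (one row per column, with name/type/notnull/pk) and foreign_key_list (one row per outgoing foreign key). A small standalone demonstration with an invented schema, standard sqlite3 module only:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE parent (_id INTEGER PRIMARY KEY);
    CREATE TABLE child (
        _id INTEGER PRIMARY KEY,
        parent_id INTEGER REFERENCES parent (_id),
        name VARCHAR(32) NOT NULL
    );
""")
conn.row_factory = sqlite3.Row

for row in conn.execute("PRAGMA table_info(child)"):
    # _id INTEGER False True / parent_id INTEGER False False / name VARCHAR(32) True False
    print(row["name"], row["type"], bool(row["notnull"]), bool(row["pk"]))

for fk in conn.execute("PRAGMA foreign_key_list(child)"):
    print(fk["from"], "->", fk["table"])  # parent_id -> parent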
Jaymon/prom
prom/interface/sqlite.py
SQLite._normalize_date_SQL
def _normalize_date_SQL(self, field_name, field_kwargs, symbol): """ allow extracting information from date http://www.sqlite.org/lang_datefunc.html """ fstrs = [] k_opts = { 'day': "CAST(strftime('%d', {}) AS integer)", 'hour': "CAST(strftime('%H', {}) AS integer)", 'doy': "CAST(strftime('%j', {}) AS integer)", # day of year 'julian_day': "strftime('%J', {})", # YYYY-MM-DD 'month': "CAST(strftime('%m', {}) AS integer)", 'minute': "CAST(strftime('%M', {}) AS integer)", 'dow': "CAST(strftime('%w', {}) AS integer)", # day of week 0 = sunday 'week': "CAST(strftime('%W', {}) AS integer)", 'year': "CAST(strftime('%Y', {}) AS integer)" } for k, v in field_kwargs.items(): fstrs.append([k_opts[k].format(self._normalize_name(field_name)), self.val_placeholder, v]) return fstrs
python
def _normalize_date_SQL(self, field_name, field_kwargs, symbol): """ allow extracting information from date http://www.sqlite.org/lang_datefunc.html """ fstrs = [] k_opts = { 'day': "CAST(strftime('%d', {}) AS integer)", 'hour': "CAST(strftime('%H', {}) AS integer)", 'doy': "CAST(strftime('%j', {}) AS integer)", # day of year 'julian_day': "strftime('%J', {})", # YYYY-MM-DD 'month': "CAST(strftime('%m', {}) AS integer)", 'minute': "CAST(strftime('%M', {}) AS integer)", 'dow': "CAST(strftime('%w', {}) AS integer)", # day of week 0 = sunday 'week': "CAST(strftime('%W', {}) AS integer)", 'year': "CAST(strftime('%Y', {}) AS integer)" } for k, v in field_kwargs.items(): fstrs.append([k_opts[k].format(self._normalize_name(field_name)), self.val_placeholder, v]) return fstrs
[ "def", "_normalize_date_SQL", "(", "self", ",", "field_name", ",", "field_kwargs", ",", "symbol", ")", ":", "fstrs", "=", "[", "]", "k_opts", "=", "{", "'day'", ":", "\"CAST(strftime('%d', {}) AS integer)\"", ",", "'hour'", ":", "\"CAST(strftime('%H', {}) AS integer)\"", ",", "'doy'", ":", "\"CAST(strftime('%j', {}) AS integer)\"", ",", "# day of year", "'julian_day'", ":", "\"strftime('%J', {})\"", ",", "# YYYY-MM-DD", "'month'", ":", "\"CAST(strftime('%m', {}) AS integer)\"", ",", "'minute'", ":", "\"CAST(strftime('%M', {}) AS integer)\"", ",", "'dow'", ":", "\"CAST(strftime('%w', {}) AS integer)\"", ",", "# day of week 0 = sunday", "'week'", ":", "\"CAST(strftime('%W', {}) AS integer)\"", ",", "'year'", ":", "\"CAST(strftime('%Y', {}) AS integer)\"", "}", "for", "k", ",", "v", "in", "field_kwargs", ".", "items", "(", ")", ":", "fstrs", ".", "append", "(", "[", "k_opts", "[", "k", "]", ".", "format", "(", "self", ".", "_normalize_name", "(", "field_name", ")", ")", ",", "self", ".", "val_placeholder", ",", "v", "]", ")", "return", "fstrs" ]
allow extracting information from date http://www.sqlite.org/lang_datefunc.html
[ "allow", "extracting", "information", "from", "date" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L529-L551
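Each entry in k_opts maps a date part onto SQLite's strftime(), with a CAST so the result can be compared as an integer. For example, using the standard sqlite3 module and a made-up date:

import sqlite3

conn = sqlite3.connect(":memory:")
year, month, doy = conn.execute(
    "SELECT CAST(strftime('%Y', ?) AS integer), "
    "CAST(strftime('%m', ?) AS integer), "
    "CAST(strftime('%j', ?) AS integer)",
    ("2021-06-15",) * 3,
).fetchone()
print(year, month, doy)  # 2021 6 166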
Jaymon/prom
prom/interface/sqlite.py
SQLite._normalize_sort_SQL
def _normalize_sort_SQL(self, field_name, field_vals, sort_dir_str): """ allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by """ fvi = None if sort_dir_str == 'ASC': fvi = (t for t in enumerate(field_vals)) else: fvi = (t for t in enumerate(reversed(field_vals))) query_sort_str = [' CASE {}'.format(self._normalize_name(field_name))] query_args = [] for i, v in fvi: query_sort_str.append(' WHEN {} THEN {}'.format(self.val_placeholder, i)) query_args.append(v) query_sort_str.append(' END') query_sort_str = "\n".join(query_sort_str) return query_sort_str, query_args
python
def _normalize_sort_SQL(self, field_name, field_vals, sort_dir_str): """ allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by """ fvi = None if sort_dir_str == 'ASC': fvi = (t for t in enumerate(field_vals)) else: fvi = (t for t in enumerate(reversed(field_vals))) query_sort_str = [' CASE {}'.format(self._normalize_name(field_name))] query_args = [] for i, v in fvi: query_sort_str.append(' WHEN {} THEN {}'.format(self.val_placeholder, i)) query_args.append(v) query_sort_str.append(' END') query_sort_str = "\n".join(query_sort_str) return query_sort_str, query_args
[ "def", "_normalize_sort_SQL", "(", "self", ",", "field_name", ",", "field_vals", ",", "sort_dir_str", ")", ":", "fvi", "=", "None", "if", "sort_dir_str", "==", "'ASC'", ":", "fvi", "=", "(", "t", "for", "t", "in", "enumerate", "(", "field_vals", ")", ")", "else", ":", "fvi", "=", "(", "t", "for", "t", "in", "enumerate", "(", "reversed", "(", "field_vals", ")", ")", ")", "query_sort_str", "=", "[", "' CASE {}'", ".", "format", "(", "self", ".", "_normalize_name", "(", "field_name", ")", ")", "]", "query_args", "=", "[", "]", "for", "i", ",", "v", "in", "fvi", ":", "query_sort_str", ".", "append", "(", "' WHEN {} THEN {}'", ".", "format", "(", "self", ".", "val_placeholder", ",", "i", ")", ")", "query_args", ".", "append", "(", "v", ")", "query_sort_str", ".", "append", "(", "' END'", ")", "query_sort_str", "=", "\"\\n\"", ".", "join", "(", "query_sort_str", ")", "return", "query_sort_str", ",", "query_args" ]
allow sorting by a set of values http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by
[ "allow", "sorting", "by", "a", "set", "of", "values" ]
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L553-L574
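The CASE expression built above is the standard SQLite trick for ordering rows by an explicit list of values. A compact, self-contained version of the same idea (table and values invented for the example):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE pet (name TEXT, species TEXT)")
conn.executemany("INSERT INTO pet VALUES (?, ?)",
                 [("Rex", "dog"), ("Tom", "cat"), ("Nemo", "fish")])

order = ["cat", "dog", "fish"]  # desired order of the species values
case_sql = " ".join("WHEN ? THEN %d" % i for i, _ in enumerate(order))
rows = conn.execute(
    "SELECT name FROM pet ORDER BY CASE species %s END" % case_sql, order
).fetchall()
print([r[0] for r in rows])  # ['Tom', 'Rex', 'Nemo']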
BreakingBytes/simkit
simkit/core/calculations.py
CalcRegistry.register
def register(self, new_calc, *args, **kwargs): """ Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation """ kwargs.update(zip(self.meta_names, args)) # dependencies should be a list of other calculations if isinstance(kwargs['dependencies'], basestring): kwargs['dependencies'] = [kwargs['dependencies']] # call super method, now meta can be passed as args or kwargs. super(CalcRegistry, self).register(new_calc, **kwargs)
python
def register(self, new_calc, *args, **kwargs): """ Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation """ kwargs.update(zip(self.meta_names, args)) # dependencies should be a list of other calculations if isinstance(kwargs['dependencies'], basestring): kwargs['dependencies'] = [kwargs['dependencies']] # call super method, now meta can be passed as args or kwargs. super(CalcRegistry, self).register(new_calc, **kwargs)
[ "def", "register", "(", "self", ",", "new_calc", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# dependencies should be a list of other calculations", "if", "isinstance", "(", "kwargs", "[", "'dependencies'", "]", ",", "basestring", ")", ":", "kwargs", "[", "'dependencies'", "]", "=", "[", "kwargs", "[", "'dependencies'", "]", "]", "# call super method, now meta can be passed as args or kwargs.", "super", "(", "CalcRegistry", ",", "self", ")", ".", "register", "(", "new_calc", ",", "*", "*", "kwargs", ")" ]
Register calculations and meta data. * ``dependencies`` - list of prerequisite calculations * ``always_calc`` - ``True`` if calculation ignores thresholds * ``frequency`` - frequency of calculation in intervals or units of time :param new_calc: register new calculation
[ "Register", "calculations", "and", "meta", "data", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculations.py#L41-L56
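The kwargs.update(zip(self.meta_names, args)) line is what lets the metadata be passed either positionally or by keyword: positional values are paired with the registry's meta_names in order. A stripped-down illustration with made-up calculation names (plain function instead of the registry class, str instead of Python 2's basestring):

meta_names = ("dependencies", "always_calc", "frequency")

def register(new_calc, *args, **kwargs):
    kwargs.update(zip(meta_names, args))  # positional meta -> keyword meta
    if isinstance(kwargs.get("dependencies"), str):
        kwargs["dependencies"] = [kwargs["dependencies"]]  # normalise to a list
    return kwargs

print(register("ac_power", "dc_power", True, 1))
# {'dependencies': ['dc_power'], 'always_calc': True, 'frequency': 1}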
guaix-ucm/pyemir
emirdrp/processing/bars.py
slits_to_ds9_reg
def slits_to_ds9_reg(ds9reg, slits): """Transform fiber traces to ds9-region format. Parameters ---------- ds9reg : BinaryIO Handle to output file name in ds9-region format. """ # open output file and insert header ds9reg.write('# Region file format: DS9 version 4.1\n') ds9reg.write( 'global color=green dashlist=8 3 width=1 font="helvetica 10 ' 'normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 ' 'move=1 delete=1 include=1 source=1\n') ds9reg.write('physical\n') for idx, slit in enumerate(slits, 1): xpos1, y2, xpos2, y2, xpos2, y1, xpos1, y1 = slit xc = 0.5 * (xpos1 + xpos2) + 1 yc = 0.5 * (y1 + y2) + 1 xd = (xpos2 - xpos1) yd = (y2 - y1) ds9reg.write('box({0},{1},{2},{3},0)\n'.format(xc, yc, xd, yd)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos1 - 5, yc, idx)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos2 + 5, yc, idx + EMIR_NBARS))
python
def slits_to_ds9_reg(ds9reg, slits): """Transform fiber traces to ds9-region format. Parameters ---------- ds9reg : BinaryIO Handle to output file name in ds9-region format. """ # open output file and insert header ds9reg.write('# Region file format: DS9 version 4.1\n') ds9reg.write( 'global color=green dashlist=8 3 width=1 font="helvetica 10 ' 'normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 ' 'move=1 delete=1 include=1 source=1\n') ds9reg.write('physical\n') for idx, slit in enumerate(slits, 1): xpos1, y2, xpos2, y2, xpos2, y1, xpos1, y1 = slit xc = 0.5 * (xpos1 + xpos2) + 1 yc = 0.5 * (y1 + y2) + 1 xd = (xpos2 - xpos1) yd = (y2 - y1) ds9reg.write('box({0},{1},{2},{3},0)\n'.format(xc, yc, xd, yd)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos1 - 5, yc, idx)) ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos2 + 5, yc, idx + EMIR_NBARS))
[ "def", "slits_to_ds9_reg", "(", "ds9reg", ",", "slits", ")", ":", "# open output file and insert header", "ds9reg", ".", "write", "(", "'# Region file format: DS9 version 4.1\\n'", ")", "ds9reg", ".", "write", "(", "'global color=green dashlist=8 3 width=1 font=\"helvetica 10 '", "'normal roman\" select=1 highlite=1 dash=0 fixed=0 edit=1 '", "'move=1 delete=1 include=1 source=1\\n'", ")", "ds9reg", ".", "write", "(", "'physical\\n'", ")", "for", "idx", ",", "slit", "in", "enumerate", "(", "slits", ",", "1", ")", ":", "xpos1", ",", "y2", ",", "xpos2", ",", "y2", ",", "xpos2", ",", "y1", ",", "xpos1", ",", "y1", "=", "slit", "xc", "=", "0.5", "*", "(", "xpos1", "+", "xpos2", ")", "+", "1", "yc", "=", "0.5", "*", "(", "y1", "+", "y2", ")", "+", "1", "xd", "=", "(", "xpos2", "-", "xpos1", ")", "yd", "=", "(", "y2", "-", "y1", ")", "ds9reg", ".", "write", "(", "'box({0},{1},{2},{3},0)\\n'", ".", "format", "(", "xc", ",", "yc", ",", "xd", ",", "yd", ")", ")", "ds9reg", ".", "write", "(", "'# text({0},{1}) color=red text={{{2}}}\\n'", ".", "format", "(", "xpos1", "-", "5", ",", "yc", ",", "idx", ")", ")", "ds9reg", ".", "write", "(", "'# text({0},{1}) color=red text={{{2}}}\\n'", ".", "format", "(", "xpos2", "+", "5", ",", "yc", ",", "idx", "+", "EMIR_NBARS", ")", ")" ]
Transform fiber traces to ds9-region format. Parameters ---------- ds9reg : BinaryIO Handle to output file name in ds9-region format.
[ "Transform", "fiber", "traces", "to", "ds9", "-", "region", "format", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/bars.py#L192-L218
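The function simply serialises one box and two text annotations per slit. A minimal sketch with a single invented slit tuple (x1, y2, x2, y2, x2, y1, x1, y1), writing the same kind of ds9 region file:

slit = (100.0, 220.0, 140.0, 220.0, 140.0, 200.0, 100.0, 200.0)
xpos1, y2, xpos2, _, _, y1, _, _ = slit

xc = 0.5 * (xpos1 + xpos2) + 1
yc = 0.5 * (y1 + y2) + 1

with open("slits.reg", "w") as ds9reg:
    ds9reg.write('# Region file format: DS9 version 4.1\n')
    ds9reg.write('physical\n')
    ds9reg.write('box({0},{1},{2},{3},0)\n'.format(xc, yc, xpos2 - xpos1, y2 - y1))
    ds9reg.write('# text({0},{1}) color=red text={{{2}}}\n'.format(xpos1 - 5, yc, 1))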
BreakingBytes/simkit
simkit/core/simulations.py
id_maker
def id_maker(obj): """ Makes an ID from the object's class name and the datetime now in ISO format. :param obj: the class from which to make the ID :return: ID """ dtfmt = '%Y%m%d-%H%M%S' return '%s-%s' % (obj.__class__.__name__, datetime.now().strftime(dtfmt))
python
def id_maker(obj): """ Makes an ID from the object's class name and the datetime now in ISO format. :param obj: the class from which to make the ID :return: ID """ dtfmt = '%Y%m%d-%H%M%S' return '%s-%s' % (obj.__class__.__name__, datetime.now().strftime(dtfmt))
[ "def", "id_maker", "(", "obj", ")", ":", "dtfmt", "=", "'%Y%m%d-%H%M%S'", "return", "'%s-%s'", "%", "(", "obj", ".", "__class__", ".", "__name__", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "dtfmt", ")", ")" ]
Makes an ID from the object's class name and the datetime now in ISO format. :param obj: the class from which to make the ID :return: ID
[ "Makes", "an", "ID", "from", "the", "object", "s", "class", "name", "and", "the", "datetime", "now", "in", "ISO", "format", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L38-L46
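For reference, the IDs produced look like ClassName-YYYYMMDD-HHMMSS, e.g. (class name invented for the example):

from datetime import datetime

class PVPowerSim(object):
    pass

dtfmt = '%Y%m%d-%H%M%S'
print('%s-%s' % (PVPowerSim().__class__.__name__, datetime.now().strftime(dtfmt)))
# e.g. PVPowerSim-20240101-120000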
BreakingBytes/simkit
simkit/core/simulations.py
topological_sort
def topological_sort(dag): """ topological sort :param dag: directed acyclic graph :type dag: dict .. seealso:: `Topographical Sorting <http://en.wikipedia.org/wiki/Topological_sorting>`_, `Directed Acyclic Graph (DAG) <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_ """ # find all edges of dag topsort = [node for node, edge in dag.iteritems() if not edge] # loop through nodes until topologically sorted while len(topsort) < len(dag): num_nodes = len(topsort) # number of nodes # unsorted nodes for node in dag.viewkeys() - set(topsort): # nodes with no incoming edges if set(dag[node]) <= set(topsort): topsort.append(node) break # circular dependencies if len(topsort) == num_nodes: raise CircularDependencyError(dag.viewkeys() - set(topsort)) return topsort
python
def topological_sort(dag): """ topological sort :param dag: directed acyclic graph :type dag: dict .. seealso:: `Topographical Sorting <http://en.wikipedia.org/wiki/Topological_sorting>`_, `Directed Acyclic Graph (DAG) <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_ """ # find all edges of dag topsort = [node for node, edge in dag.iteritems() if not edge] # loop through nodes until topologically sorted while len(topsort) < len(dag): num_nodes = len(topsort) # number of nodes # unsorted nodes for node in dag.viewkeys() - set(topsort): # nodes with no incoming edges if set(dag[node]) <= set(topsort): topsort.append(node) break # circular dependencies if len(topsort) == num_nodes: raise CircularDependencyError(dag.viewkeys() - set(topsort)) return topsort
[ "def", "topological_sort", "(", "dag", ")", ":", "# find all edges of dag", "topsort", "=", "[", "node", "for", "node", ",", "edge", "in", "dag", ".", "iteritems", "(", ")", "if", "not", "edge", "]", "# loop through nodes until topologically sorted", "while", "len", "(", "topsort", ")", "<", "len", "(", "dag", ")", ":", "num_nodes", "=", "len", "(", "topsort", ")", "# number of nodes", "# unsorted nodes", "for", "node", "in", "dag", ".", "viewkeys", "(", ")", "-", "set", "(", "topsort", ")", ":", "# nodes with no incoming edges", "if", "set", "(", "dag", "[", "node", "]", ")", "<=", "set", "(", "topsort", ")", ":", "topsort", ".", "append", "(", "node", ")", "break", "# circular dependencies", "if", "len", "(", "topsort", ")", "==", "num_nodes", ":", "raise", "CircularDependencyError", "(", "dag", ".", "viewkeys", "(", ")", "-", "set", "(", "topsort", ")", ")", "return", "topsort" ]
topological sort :param dag: directed acyclic graph :type dag: dict .. seealso:: `Topographical Sorting <http://en.wikipedia.org/wiki/Topological_sorting>`_, `Directed Acyclic Graph (DAG) <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_
[ "topological", "sort" ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L69-L95
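The sort expects a mapping of node -> list of prerequisite nodes and repeatedly appends any node whose prerequisites are already sorted. The dict methods used (iteritems, viewkeys) are Python 2; a Python 3 rendering of the same algorithm on a small example DAG:

def topological_sort(dag):
    # nodes with no prerequisites come first
    topsort = [node for node, deps in dag.items() if not deps]
    while len(topsort) < len(dag):
        num_nodes = len(topsort)
        for node in dag.keys() - set(topsort):
            if set(dag[node]) <= set(topsort):  # all prerequisites already sorted
                topsort.append(node)
                break
        if len(topsort) == num_nodes:           # no progress means a cycle
            raise ValueError("circular dependency: %r" % (dag.keys() - set(topsort)))
    return topsort

print(topological_sort({"energy": ["power"], "power": ["irradiance"], "irradiance": []}))
# ['irradiance', 'power', 'energy']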
BreakingBytes/simkit
simkit/core/simulations.py
SimRegistry.register
def register(self, sim, *args, **kwargs): """ register simulation and metadata. * ``commands`` - list of methods to callable from model :param sim: new simulation """ kwargs.update(zip(self.meta_names, args)) # call super method, now meta can be passed as args or kwargs. super(SimRegistry, self).register(sim, **kwargs)
python
def register(self, sim, *args, **kwargs): """ register simulation and metadata. * ``commands`` - list of methods to callable from model :param sim: new simulation """ kwargs.update(zip(self.meta_names, args)) # call super method, now meta can be passed as args or kwargs. super(SimRegistry, self).register(sim, **kwargs)
[ "def", "register", "(", "self", ",", "sim", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "zip", "(", "self", ".", "meta_names", ",", "args", ")", ")", "# call super method, now meta can be passed as args or kwargs.", "super", "(", "SimRegistry", ",", "self", ")", ".", "register", "(", "sim", ",", "*", "*", "kwargs", ")" ]
register simulation and metadata. * ``commands`` - list of methods to callable from model :param sim: new simulation
[ "register", "simulation", "and", "metadata", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L111-L121
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.check_data
def check_data(self, data): """ Check if data loaded for all sources in data layer. :param data: data layer from model :type data: :class:`~simkit.core.layer.Data` :return: dictionary of data sources and objects or `None` if not loaded """ data_objs = { data_src: data.objects.get(data_src) for data_src in data.layer } self._is_data_loaded = all(data_objs.values()) return data_objs
python
def check_data(self, data): """ Check if data loaded for all sources in data layer. :param data: data layer from model :type data: :class:`~simkit.core.layer.Data` :return: dictionary of data sources and objects or `None` if not loaded """ data_objs = { data_src: data.objects.get(data_src) for data_src in data.layer } self._is_data_loaded = all(data_objs.values()) return data_objs
[ "def", "check_data", "(", "self", ",", "data", ")", ":", "data_objs", "=", "{", "data_src", ":", "data", ".", "objects", ".", "get", "(", "data_src", ")", "for", "data_src", "in", "data", ".", "layer", "}", "self", ".", "_is_data_loaded", "=", "all", "(", "data_objs", ".", "values", "(", ")", ")", "return", "data_objs" ]
Check if data loaded for all sources in data layer. :param data: data layer from model :type data: :class:`~simkit.core.layer.Data` :return: dictionary of data sources and objects or `None` if not loaded
[ "Check", "if", "data", "loaded", "for", "all", "sources", "in", "data", "layer", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L303-L315
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.initialize
def initialize(self, calc_reg): """ Initialize the simulation. Organize calculations by dependency. :param calc_reg: Calculation registry. :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry` """ self._isinitialized = True # TODO: if calculations are edited, loaded, added, etc. then reset self.calc_order = topological_sort(calc_reg.dependencies)
python
def initialize(self, calc_reg): """ Initialize the simulation. Organize calculations by dependency. :param calc_reg: Calculation registry. :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry` """ self._isinitialized = True # TODO: if calculations are edited, loaded, added, etc. then reset self.calc_order = topological_sort(calc_reg.dependencies)
[ "def", "initialize", "(", "self", ",", "calc_reg", ")", ":", "self", ".", "_isinitialized", "=", "True", "# TODO: if calculations are edited, loaded, added, etc. then reset", "self", ".", "calc_order", "=", "topological_sort", "(", "calc_reg", ".", "dependencies", ")" ]
Initialize the simulation. Organize calculations by dependency. :param calc_reg: Calculation registry. :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry`
[ "Initialize", "the", "simulation", ".", "Organize", "calculations", "by", "dependency", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L317-L327
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.index_iterator
def index_iterator(self): """ Generator that resumes from same index, or restarts from sent index. """ idx = 0 # index while idx < self.number_intervals: new_idx = yield idx idx += 1 if new_idx: idx = new_idx - 1
python
def index_iterator(self): """ Generator that resumes from same index, or restarts from sent index. """ idx = 0 # index while idx < self.number_intervals: new_idx = yield idx idx += 1 if new_idx: idx = new_idx - 1
[ "def", "index_iterator", "(", "self", ")", ":", "idx", "=", "0", "# index", "while", "idx", "<", "self", ".", "number_intervals", ":", "new_idx", "=", "yield", "idx", "idx", "+=", "1", "if", "new_idx", ":", "idx", "=", "new_idx", "-", "1" ]
Generator that resumes from same index, or restarts from sent index.
[ "Generator", "that", "resumes", "from", "same", "index", "or", "restarts", "from", "sent", "index", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L329-L338
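Because the loop index is reset from whatever value is passed to send(), a caller can rewind the iteration to an earlier interval. A quick standalone demonstration of that behaviour, with number_intervals replaced by a plain argument:

def index_iterator(number_intervals):
    idx = 0  # index
    while idx < number_intervals:
        new_idx = yield idx
        idx += 1
        if new_idx:
            idx = new_idx - 1

it = index_iterator(5)
print(next(it))    # 0
print(next(it))    # 1
print(it.send(1))  # 0 -> rewound, resumes from interval 0 again
print(next(it))    # 1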
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.start
def start(self, model, progress_hook=None): """ Start the simulation from time zero. :param model: Model with layers and registries containing parameters :type: :class:`~simkit.core.models.Model` :param progress_hook: A function that receives either a string or a list containing the index followed by tuples of the data or outputs names and values specified by ``write_fields`` in the simfile. :type progress_hook: function The model registries should contain the following layer registries: * :class:`~simkit.core.data_sources.DataRegistry`, * :class:`~simkit.core.formulas.FormulaRegistry`, * :class:`~simkit.core.outputs.OutputRegistry`, * :class:`~simkit.core.calculation.CalcRegistry` """ # check if data loaded data_objs = self.check_data(model.data) if not self.is_data_loaded: raise MissingDataError([ds for ds in data_objs if ds is None]) # get layer registries data_reg = model.registries['data'] formula_reg = model.registries['formulas'] out_reg = model.registries['outputs'] calc_reg = model.registries['calculations'] # initialize if not self.isinitialized: self.initialize(calc_reg) # default progress hook if not progress_hook: progress_hook = functools.partial( sim_progress_hook, display_header=True ) # start, resume or restart if self.ispaused: # if paused, then resume, do not resize outputs again. self._ispaused = False # change pause state progress_hook('resume simulation') elif self.iscomplete: # if complete, then restart, do not resize outputs again. self._iscomplete = False # change pause state progress_hook('restart simulation') self.idx_iter = self.index_iterator() else: # resize outputs # assumes that self.write_frequency is immutable # TODO: allow self.write_frequency to be changed # only resize outputs first time simulation is started # repeat output rows to self.write_frequency # put initial conditions of outputs last so it's copied when # idx == 0 progress_hook('resize outputs') # display progress for k in out_reg: if out_reg.isconstant[k]: continue # repeat rows (axis=0) out_reg[k] = out_reg[k].repeat(self.write_frequency, 0) _initial_value = out_reg.initial_value[k] if not _initial_value: continue if isinstance(_initial_value, basestring): # initial value is from data registry # assign in a scalar to a vector fills in the vector, yes! 
out_reg[k][-1] = data_reg[_initial_value] else: out_reg[k][-1] = _initial_value * out_reg[k].units progress_hook('start simulation') # check and/or make SimKit_Simulations and simulation ID folders mkdir_p(self.path) sim_id_path = os.path.join(self.path, self.ID) mkdir_p(sim_id_path) # header & units for save files data_fields = self.write_fields.get('data', []) # any data fields out_fields = self.write_fields.get('outputs', []) # any outputs fields save_header = tuple(data_fields + out_fields) # concatenate fields # get units as strings from data & outputs data_units = [str(data_reg[f].dimensionality) for f in data_fields] out_units = [str(out_reg[f].dimensionality) for f in out_fields] save_units = tuple(data_units + out_units) # concatenate units # string format for header & units save_str = ('%s' + ',%s' * (len(save_header) - 1)) + '\n' # format save_header = (save_str * 2) % (save_header + save_units) # header save_header = save_header[:-1] # remove trailing new line # =================== # Static calculations # =================== progress_hook('static calcs') for calc in self.calc_order: if not calc_reg.is_dynamic[calc]: calc_reg.calculator[calc].calculate( calc_reg[calc], formula_reg, data_reg, out_reg ) # ==================== # Dynamic calculations # ==================== progress_hook('dynamic calcs') # TODO: assumes that interval size and indices are same, but should # interpolate for any size interval or indices for idx_tot in self.idx_iter: self.interval_idx = idx_tot # update simulation interval counter idx = idx_tot % self.write_frequency # update properties for k, v in out_reg.isproperty.iteritems(): # set properties from previous interval at night if v: out_reg[k][idx] = out_reg[k][idx - 1] # night if any threshold exceeded if self.thresholds: night = not all(limits[0] < data_reg[data][idx] < limits[1] for data, limits in self.thresholds.iteritems()) else: night = None # daytime or always calculated outputs for calc in self.calc_order: # Determine if calculation is scheduled for this timestep # TODO: add ``start_at`` parameter combined with ``frequency`` freq = calc_reg.frequency[calc] if not freq.dimensionality: is_scheduled = (idx_tot % freq) == 0 else: # Frequency with units of time is_scheduled = ((idx_tot * self.interval) % freq) == 0 is_scheduled = ( is_scheduled and (not night or calc_reg.always_calc[calc]) ) if calc_reg.is_dynamic[calc] and is_scheduled: calc_reg.calculator[calc].calculate( calc_reg[calc], formula_reg, data_reg, out_reg, timestep=self.interval, idx=idx ) # display progress if not (idx % self.display_frequency): progress_hook(self.format_progress(idx, data_reg, out_reg)) # disp_head = False # create an index for the save file, 0 if not saving if not ((idx_tot + 1) % self.write_frequency): savenum = (idx_tot + 1) / self.write_frequency elif idx_tot == self.number_intervals - 1: # save file index should be integer! savenum = int(np.ceil((idx_tot + 1) / float(self.write_frequency))) else: savenum = 0 # not saving this iteration # save file to disk if savenum: savename = self.ID + '_' + str(savenum) + '.csv' # filename savepath = os.path.join(sim_id_path, savename) # path # create array of all data & outputs to save save_array = self.format_write(data_reg, out_reg, idx + 1) # save as csv using default format & turn comments off np.savetxt(savepath, save_array, delimiter=',', header=save_header, comments='') try: cmd = self.cmd_queue.get_nowait() except Queue.Empty: continue if cmd == 'pause': self._ispaused = True return self._iscomplete = True
python
def start(self, model, progress_hook=None): """ Start the simulation from time zero. :param model: Model with layers and registries containing parameters :type: :class:`~simkit.core.models.Model` :param progress_hook: A function that receives either a string or a list containing the index followed by tuples of the data or outputs names and values specified by ``write_fields`` in the simfile. :type progress_hook: function The model registries should contain the following layer registries: * :class:`~simkit.core.data_sources.DataRegistry`, * :class:`~simkit.core.formulas.FormulaRegistry`, * :class:`~simkit.core.outputs.OutputRegistry`, * :class:`~simkit.core.calculation.CalcRegistry` """ # check if data loaded data_objs = self.check_data(model.data) if not self.is_data_loaded: raise MissingDataError([ds for ds in data_objs if ds is None]) # get layer registries data_reg = model.registries['data'] formula_reg = model.registries['formulas'] out_reg = model.registries['outputs'] calc_reg = model.registries['calculations'] # initialize if not self.isinitialized: self.initialize(calc_reg) # default progress hook if not progress_hook: progress_hook = functools.partial( sim_progress_hook, display_header=True ) # start, resume or restart if self.ispaused: # if paused, then resume, do not resize outputs again. self._ispaused = False # change pause state progress_hook('resume simulation') elif self.iscomplete: # if complete, then restart, do not resize outputs again. self._iscomplete = False # change pause state progress_hook('restart simulation') self.idx_iter = self.index_iterator() else: # resize outputs # assumes that self.write_frequency is immutable # TODO: allow self.write_frequency to be changed # only resize outputs first time simulation is started # repeat output rows to self.write_frequency # put initial conditions of outputs last so it's copied when # idx == 0 progress_hook('resize outputs') # display progress for k in out_reg: if out_reg.isconstant[k]: continue # repeat rows (axis=0) out_reg[k] = out_reg[k].repeat(self.write_frequency, 0) _initial_value = out_reg.initial_value[k] if not _initial_value: continue if isinstance(_initial_value, basestring): # initial value is from data registry # assign in a scalar to a vector fills in the vector, yes! 
out_reg[k][-1] = data_reg[_initial_value] else: out_reg[k][-1] = _initial_value * out_reg[k].units progress_hook('start simulation') # check and/or make SimKit_Simulations and simulation ID folders mkdir_p(self.path) sim_id_path = os.path.join(self.path, self.ID) mkdir_p(sim_id_path) # header & units for save files data_fields = self.write_fields.get('data', []) # any data fields out_fields = self.write_fields.get('outputs', []) # any outputs fields save_header = tuple(data_fields + out_fields) # concatenate fields # get units as strings from data & outputs data_units = [str(data_reg[f].dimensionality) for f in data_fields] out_units = [str(out_reg[f].dimensionality) for f in out_fields] save_units = tuple(data_units + out_units) # concatenate units # string format for header & units save_str = ('%s' + ',%s' * (len(save_header) - 1)) + '\n' # format save_header = (save_str * 2) % (save_header + save_units) # header save_header = save_header[:-1] # remove trailing new line # =================== # Static calculations # =================== progress_hook('static calcs') for calc in self.calc_order: if not calc_reg.is_dynamic[calc]: calc_reg.calculator[calc].calculate( calc_reg[calc], formula_reg, data_reg, out_reg ) # ==================== # Dynamic calculations # ==================== progress_hook('dynamic calcs') # TODO: assumes that interval size and indices are same, but should # interpolate for any size interval or indices for idx_tot in self.idx_iter: self.interval_idx = idx_tot # update simulation interval counter idx = idx_tot % self.write_frequency # update properties for k, v in out_reg.isproperty.iteritems(): # set properties from previous interval at night if v: out_reg[k][idx] = out_reg[k][idx - 1] # night if any threshold exceeded if self.thresholds: night = not all(limits[0] < data_reg[data][idx] < limits[1] for data, limits in self.thresholds.iteritems()) else: night = None # daytime or always calculated outputs for calc in self.calc_order: # Determine if calculation is scheduled for this timestep # TODO: add ``start_at`` parameter combined with ``frequency`` freq = calc_reg.frequency[calc] if not freq.dimensionality: is_scheduled = (idx_tot % freq) == 0 else: # Frequency with units of time is_scheduled = ((idx_tot * self.interval) % freq) == 0 is_scheduled = ( is_scheduled and (not night or calc_reg.always_calc[calc]) ) if calc_reg.is_dynamic[calc] and is_scheduled: calc_reg.calculator[calc].calculate( calc_reg[calc], formula_reg, data_reg, out_reg, timestep=self.interval, idx=idx ) # display progress if not (idx % self.display_frequency): progress_hook(self.format_progress(idx, data_reg, out_reg)) # disp_head = False # create an index for the save file, 0 if not saving if not ((idx_tot + 1) % self.write_frequency): savenum = (idx_tot + 1) / self.write_frequency elif idx_tot == self.number_intervals - 1: # save file index should be integer! savenum = int(np.ceil((idx_tot + 1) / float(self.write_frequency))) else: savenum = 0 # not saving this iteration # save file to disk if savenum: savename = self.ID + '_' + str(savenum) + '.csv' # filename savepath = os.path.join(sim_id_path, savename) # path # create array of all data & outputs to save save_array = self.format_write(data_reg, out_reg, idx + 1) # save as csv using default format & turn comments off np.savetxt(savepath, save_array, delimiter=',', header=save_header, comments='') try: cmd = self.cmd_queue.get_nowait() except Queue.Empty: continue if cmd == 'pause': self._ispaused = True return self._iscomplete = True
[ "def", "start", "(", "self", ",", "model", ",", "progress_hook", "=", "None", ")", ":", "# check if data loaded", "data_objs", "=", "self", ".", "check_data", "(", "model", ".", "data", ")", "if", "not", "self", ".", "is_data_loaded", ":", "raise", "MissingDataError", "(", "[", "ds", "for", "ds", "in", "data_objs", "if", "ds", "is", "None", "]", ")", "# get layer registries", "data_reg", "=", "model", ".", "registries", "[", "'data'", "]", "formula_reg", "=", "model", ".", "registries", "[", "'formulas'", "]", "out_reg", "=", "model", ".", "registries", "[", "'outputs'", "]", "calc_reg", "=", "model", ".", "registries", "[", "'calculations'", "]", "# initialize", "if", "not", "self", ".", "isinitialized", ":", "self", ".", "initialize", "(", "calc_reg", ")", "# default progress hook", "if", "not", "progress_hook", ":", "progress_hook", "=", "functools", ".", "partial", "(", "sim_progress_hook", ",", "display_header", "=", "True", ")", "# start, resume or restart", "if", "self", ".", "ispaused", ":", "# if paused, then resume, do not resize outputs again.", "self", ".", "_ispaused", "=", "False", "# change pause state", "progress_hook", "(", "'resume simulation'", ")", "elif", "self", ".", "iscomplete", ":", "# if complete, then restart, do not resize outputs again.", "self", ".", "_iscomplete", "=", "False", "# change pause state", "progress_hook", "(", "'restart simulation'", ")", "self", ".", "idx_iter", "=", "self", ".", "index_iterator", "(", ")", "else", ":", "# resize outputs", "# assumes that self.write_frequency is immutable", "# TODO: allow self.write_frequency to be changed", "# only resize outputs first time simulation is started", "# repeat output rows to self.write_frequency", "# put initial conditions of outputs last so it's copied when", "# idx == 0", "progress_hook", "(", "'resize outputs'", ")", "# display progress", "for", "k", "in", "out_reg", ":", "if", "out_reg", ".", "isconstant", "[", "k", "]", ":", "continue", "# repeat rows (axis=0)", "out_reg", "[", "k", "]", "=", "out_reg", "[", "k", "]", ".", "repeat", "(", "self", ".", "write_frequency", ",", "0", ")", "_initial_value", "=", "out_reg", ".", "initial_value", "[", "k", "]", "if", "not", "_initial_value", ":", "continue", "if", "isinstance", "(", "_initial_value", ",", "basestring", ")", ":", "# initial value is from data registry", "# assign in a scalar to a vector fills in the vector, yes!", "out_reg", "[", "k", "]", "[", "-", "1", "]", "=", "data_reg", "[", "_initial_value", "]", "else", ":", "out_reg", "[", "k", "]", "[", "-", "1", "]", "=", "_initial_value", "*", "out_reg", "[", "k", "]", ".", "units", "progress_hook", "(", "'start simulation'", ")", "# check and/or make SimKit_Simulations and simulation ID folders", "mkdir_p", "(", "self", ".", "path", ")", "sim_id_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "self", ".", "ID", ")", "mkdir_p", "(", "sim_id_path", ")", "# header & units for save files", "data_fields", "=", "self", ".", "write_fields", ".", "get", "(", "'data'", ",", "[", "]", ")", "# any data fields", "out_fields", "=", "self", ".", "write_fields", ".", "get", "(", "'outputs'", ",", "[", "]", ")", "# any outputs fields", "save_header", "=", "tuple", "(", "data_fields", "+", "out_fields", ")", "# concatenate fields", "# get units as strings from data & outputs", "data_units", "=", "[", "str", "(", "data_reg", "[", "f", "]", ".", "dimensionality", ")", "for", "f", "in", "data_fields", "]", "out_units", "=", "[", "str", "(", "out_reg", "[", "f", "]", ".", 
"dimensionality", ")", "for", "f", "in", "out_fields", "]", "save_units", "=", "tuple", "(", "data_units", "+", "out_units", ")", "# concatenate units", "# string format for header & units", "save_str", "=", "(", "'%s'", "+", "',%s'", "*", "(", "len", "(", "save_header", ")", "-", "1", ")", ")", "+", "'\\n'", "# format", "save_header", "=", "(", "save_str", "*", "2", ")", "%", "(", "save_header", "+", "save_units", ")", "# header", "save_header", "=", "save_header", "[", ":", "-", "1", "]", "# remove trailing new line", "# ===================", "# Static calculations", "# ===================", "progress_hook", "(", "'static calcs'", ")", "for", "calc", "in", "self", ".", "calc_order", ":", "if", "not", "calc_reg", ".", "is_dynamic", "[", "calc", "]", ":", "calc_reg", ".", "calculator", "[", "calc", "]", ".", "calculate", "(", "calc_reg", "[", "calc", "]", ",", "formula_reg", ",", "data_reg", ",", "out_reg", ")", "# ====================", "# Dynamic calculations", "# ====================", "progress_hook", "(", "'dynamic calcs'", ")", "# TODO: assumes that interval size and indices are same, but should", "# interpolate for any size interval or indices", "for", "idx_tot", "in", "self", ".", "idx_iter", ":", "self", ".", "interval_idx", "=", "idx_tot", "# update simulation interval counter", "idx", "=", "idx_tot", "%", "self", ".", "write_frequency", "# update properties", "for", "k", ",", "v", "in", "out_reg", ".", "isproperty", ".", "iteritems", "(", ")", ":", "# set properties from previous interval at night", "if", "v", ":", "out_reg", "[", "k", "]", "[", "idx", "]", "=", "out_reg", "[", "k", "]", "[", "idx", "-", "1", "]", "# night if any threshold exceeded", "if", "self", ".", "thresholds", ":", "night", "=", "not", "all", "(", "limits", "[", "0", "]", "<", "data_reg", "[", "data", "]", "[", "idx", "]", "<", "limits", "[", "1", "]", "for", "data", ",", "limits", "in", "self", ".", "thresholds", ".", "iteritems", "(", ")", ")", "else", ":", "night", "=", "None", "# daytime or always calculated outputs", "for", "calc", "in", "self", ".", "calc_order", ":", "# Determine if calculation is scheduled for this timestep", "# TODO: add ``start_at`` parameter combined with ``frequency``", "freq", "=", "calc_reg", ".", "frequency", "[", "calc", "]", "if", "not", "freq", ".", "dimensionality", ":", "is_scheduled", "=", "(", "idx_tot", "%", "freq", ")", "==", "0", "else", ":", "# Frequency with units of time", "is_scheduled", "=", "(", "(", "idx_tot", "*", "self", ".", "interval", ")", "%", "freq", ")", "==", "0", "is_scheduled", "=", "(", "is_scheduled", "and", "(", "not", "night", "or", "calc_reg", ".", "always_calc", "[", "calc", "]", ")", ")", "if", "calc_reg", ".", "is_dynamic", "[", "calc", "]", "and", "is_scheduled", ":", "calc_reg", ".", "calculator", "[", "calc", "]", ".", "calculate", "(", "calc_reg", "[", "calc", "]", ",", "formula_reg", ",", "data_reg", ",", "out_reg", ",", "timestep", "=", "self", ".", "interval", ",", "idx", "=", "idx", ")", "# display progress", "if", "not", "(", "idx", "%", "self", ".", "display_frequency", ")", ":", "progress_hook", "(", "self", ".", "format_progress", "(", "idx", ",", "data_reg", ",", "out_reg", ")", ")", "# disp_head = False", "# create an index for the save file, 0 if not saving", "if", "not", "(", "(", "idx_tot", "+", "1", ")", "%", "self", ".", "write_frequency", ")", ":", "savenum", "=", "(", "idx_tot", "+", "1", ")", "/", "self", ".", "write_frequency", "elif", "idx_tot", "==", "self", ".", "number_intervals", "-", "1", ":", "# save file 
index should be integer!", "savenum", "=", "int", "(", "np", ".", "ceil", "(", "(", "idx_tot", "+", "1", ")", "/", "float", "(", "self", ".", "write_frequency", ")", ")", ")", "else", ":", "savenum", "=", "0", "# not saving this iteration", "# save file to disk", "if", "savenum", ":", "savename", "=", "self", ".", "ID", "+", "'_'", "+", "str", "(", "savenum", ")", "+", "'.csv'", "# filename", "savepath", "=", "os", ".", "path", ".", "join", "(", "sim_id_path", ",", "savename", ")", "# path", "# create array of all data & outputs to save", "save_array", "=", "self", ".", "format_write", "(", "data_reg", ",", "out_reg", ",", "idx", "+", "1", ")", "# save as csv using default format & turn comments off", "np", ".", "savetxt", "(", "savepath", ",", "save_array", ",", "delimiter", "=", "','", ",", "header", "=", "save_header", ",", "comments", "=", "''", ")", "try", ":", "cmd", "=", "self", ".", "cmd_queue", ".", "get_nowait", "(", ")", "except", "Queue", ".", "Empty", ":", "continue", "if", "cmd", "==", "'pause'", ":", "self", ".", "_ispaused", "=", "True", "return", "self", ".", "_iscomplete", "=", "True" ]
Start the simulation from time zero. :param model: Model with layers and registries containing parameters :type: :class:`~simkit.core.models.Model` :param progress_hook: A function that receives either a string or a list containing the index followed by tuples of the data or outputs names and values specified by ``write_fields`` in the simfile. :type progress_hook: function The model registries should contain the following layer registries: * :class:`~simkit.core.data_sources.DataRegistry`, * :class:`~simkit.core.formulas.FormulaRegistry`, * :class:`~simkit.core.outputs.OutputRegistry`, * :class:`~simkit.core.calculation.CalcRegistry`
[ "Start", "the", "simulation", "from", "time", "zero", "." ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L342-L503
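A brief aside on the Simulation.start record above: its dynamic loop folds the running interval counter into a per-file row index and only emits a non-zero save-file number when a file's worth of rows is complete (or on the final interval). The following standalone sketch reproduces that bookkeeping in plain Python 3; the function and variable names are illustrative, not part of the simkit API.

import math

def bookkeeping(idx_tot, write_frequency, number_intervals):
    """Mimic the idx/savenum bookkeeping of Simulation.start's dynamic loop."""
    idx = idx_tot % write_frequency  # row index inside the current save file
    if (idx_tot + 1) % write_frequency == 0:
        savenum = (idx_tot + 1) // write_frequency  # file is full -> save it
    elif idx_tot == number_intervals - 1:
        # final interval: flush the partially filled file, rounding up
        savenum = int(math.ceil((idx_tot + 1) / float(write_frequency)))
    else:
        savenum = 0  # nothing is written this iteration
    return idx, savenum

# 10 intervals written 4 rows per file -> files 1 and 2, plus a short file 3
print([bookkeeping(i, 4, 10) for i in range(10)])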
BreakingBytes/simkit
simkit/core/simulations.py
Simulation.pause
def pause(self, progress_hook=None):
        """
        Pause the simulation. How is this different from stopping it?
        Maintain info sufficient to restart simulation. Sets ``is_paused`` to
        True. Will this state allow analysis? changing parameters? What can
        you do with a paused simulation?
        Should be capable of saving paused simulation for loading/resuming
        later, that is the main usage. EG: someone else need computer, or
        power goes out, so on battery backup quickly pause simulation, and
        save.
        Is save automatic? Should there be a parameter for auto save changed?
        """
        # default progress hook
        if progress_hook is None:
            progress_hook = sim_progress_hook
        progress_hook('simulation paused')
        self.cmd_queue.put('pause')
        self._ispaused = True
python
def pause(self, progress_hook=None):
        """
        Pause the simulation. How is this different from stopping it?
        Maintain info sufficient to restart simulation. Sets ``is_paused`` to
        True. Will this state allow analysis? changing parameters? What can
        you do with a paused simulation?
        Should be capable of saving paused simulation for loading/resuming
        later, that is the main usage. EG: someone else need computer, or
        power goes out, so on battery backup quickly pause simulation, and
        save.
        Is save automatic? Should there be a parameter for auto save changed?
        """
        # default progress hook
        if progress_hook is None:
            progress_hook = sim_progress_hook
        progress_hook('simulation paused')
        self.cmd_queue.put('pause')
        self._ispaused = True
[ "def", "pause", "(", "self", ",", "progress_hook", "=", "None", ")", ":", "# default progress hook", "if", "progress_hook", "is", "None", ":", "progress_hook", "=", "sim_progress_hook", "progress_hook", "(", "'simulation paused'", ")", "self", ".", "cmd_queue", ".", "put", "(", "'pause'", ")", "self", ".", "_ispaused", "=", "True" ]
Pause the simulation. How is this different from stopping it? Maintain info sufficient to restart simulation. Sets ``is_paused`` to True. Will this state allow analysis? changing parameters? What can you do with a paused simulation? Should be capable of saving paused simulation for loading/resuming later, that is the main usage. EG: someone else need computer, or power goes out, so on battery backup quickly pause simulation, and save. Is save automatic? Should there be a parameter for auto save changed?
[ "Pause", "the", "simulation", ".", "How", "is", "this", "different", "from", "stopping", "it?", "Maintain", "info", "sufficient", "to", "restart", "simulation", ".", "Sets", "is_paused", "to", "True", ".", "Will", "this", "state", "allow", "analysis?", "changing", "parameters?", "What", "can", "you", "do", "with", "a", "paused", "simulation?", "Should", "be", "capable", "of", "saving", "paused", "simulation", "for", "loading", "/", "resuming", "later", "that", "is", "the", "main", "usage", ".", "EG", ":", "someone", "else", "need", "computer", "or", "power", "goes", "out", "so", "on", "battery", "backup", "quickly", "pause", "simulation", "and", "save", ".", "Is", "save", "automatic?", "Should", "there", "be", "a", "parameter", "for", "auto", "save", "changed?" ]
train
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/simulations.py#L519-L535
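The pause mechanism above works by pushing a 'pause' command onto cmd_queue, which the start() loop polls once per interval with get_nowait(). A self-contained toy version of that pattern, using Python 3's queue and threading modules with purely illustrative names, could look like this:

import queue
import threading
import time

def worker(cmd_queue, results):
    """Toy loop that polls a command queue without blocking, like start()."""
    for step in range(100):
        results.append(step)
        time.sleep(0.01)
        try:
            cmd = cmd_queue.get_nowait()  # never block the simulation loop
        except queue.Empty:
            continue
        if cmd == 'pause':
            return  # leave state untouched so the loop can be resumed later

cmds, out = queue.Queue(), []
t = threading.Thread(target=worker, args=(cmds, out))
t.start()
time.sleep(0.05)
cmds.put('pause')  # roughly what Simulation.pause() does
t.join()
print('stopped after', len(out), 'steps')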
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
save_ds9
def save_ds9(output, filename):
    """Save ds9 region output info filename.

    Parameters
    ----------
    output : str
        String containing the full output to be exported as a ds9 region
        file.
    filename : str
        Output file name.

    """

    ds9_file = open(filename, 'wt')
    ds9_file.write(output)
    ds9_file.close()
python
def save_ds9(output, filename):
    """Save ds9 region output info filename.

    Parameters
    ----------
    output : str
        String containing the full output to be exported as a ds9 region
        file.
    filename : str
        Output file name.

    """

    ds9_file = open(filename, 'wt')
    ds9_file.write(output)
    ds9_file.close()
[ "def", "save_ds9", "(", "output", ",", "filename", ")", ":", "ds9_file", "=", "open", "(", "filename", ",", "'wt'", ")", "ds9_file", ".", "write", "(", "output", ")", "ds9_file", ".", "close", "(", ")" ]
Save ds9 region output info filename.

Parameters
----------
output : str
    String containing the full output to be exported as a ds9 region
    file.
filename : str
    Output file name.
[ "Save", "ds9", "region", "output", "info", "filename", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L41-L56
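save_ds9 above just writes the region text verbatim; a functionally equivalent sketch using a context manager, so the file handle is closed even if the write fails, might read as follows. This is a stylistic variant, not the pyemir implementation.

def save_ds9_cm(output, filename):
    """Write a ds9 region string to `filename`, closing the file on exit."""
    with open(filename, 'wt') as ds9_file:
        ds9_file.write(output)

# e.g. save_ds9_cm('# Region file format: DS9 version 4.1\nphysical\n', 'demo.reg')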
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
save_four_ds9
def save_four_ds9(rectwv_coeff, debugplot=0):
    """Save the 4 possible ds9 region files.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    """

    for limits, rectified, suffix in zip(
        ['frontiers', 'frontiers', 'boundaries', 'boundaries'],
        [False, True, False, True],
        ['rawimage', 'rectified', 'rawimage', 'rectified']
    ):
        output = rectwv_coeff_to_ds9(rectwv_coeff=rectwv_coeff,
                                     limits=limits,
                                     rectified=rectified)
        filename = 'ds9_' + limits + '_' + suffix + '.reg'
        if abs(debugplot) >= 10:
            print('>>> Saving: ', filename)
        save_ds9(output, filename)
python
def save_four_ds9(rectwv_coeff, debugplot=0):
    """Save the 4 possible ds9 region files.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    """

    for limits, rectified, suffix in zip(
        ['frontiers', 'frontiers', 'boundaries', 'boundaries'],
        [False, True, False, True],
        ['rawimage', 'rectified', 'rawimage', 'rectified']
    ):
        output = rectwv_coeff_to_ds9(rectwv_coeff=rectwv_coeff,
                                     limits=limits,
                                     rectified=rectified)
        filename = 'ds9_' + limits + '_' + suffix + '.reg'
        if abs(debugplot) >= 10:
            print('>>> Saving: ', filename)
        save_ds9(output, filename)
[ "def", "save_four_ds9", "(", "rectwv_coeff", ",", "debugplot", "=", "0", ")", ":", "for", "limits", ",", "rectified", ",", "suffix", "in", "zip", "(", "[", "'frontiers'", ",", "'frontiers'", ",", "'boundaries'", ",", "'boundaries'", "]", ",", "[", "False", ",", "True", ",", "False", ",", "True", "]", ",", "[", "'rawimage'", ",", "'rectified'", ",", "'rawimage'", ",", "'rectified'", "]", ")", ":", "output", "=", "rectwv_coeff_to_ds9", "(", "rectwv_coeff", "=", "rectwv_coeff", ",", "limits", "=", "limits", ",", "rectified", "=", "rectified", ")", "filename", "=", "'ds9_'", "+", "limits", "+", "'_'", "+", "suffix", "+", "'.reg'", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> Saving: '", ",", "filename", ")", "save_ds9", "(", "output", ",", "filename", ")" ]
Save the 4 possible ds9 region files.

Parameters
----------
rectwv_coeff : RectWaveCoeff instance
    Rectification and wavelength calibration coefficients for the
    particular CSU configuration.
debugplot : int
    Debugging level for messages and plots. For details see
    'numina.array.display.pause_debugplot.py'.
[ "Save", "the", "4", "possible", "ds9", "region", "files", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L59-L84
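save_four_ds9 simply sweeps the two limit types against the raw and rectified geometries, so the four output names follow the pattern 'ds9_<limits>_<suffix>.reg'. A one-off sketch listing those names (illustrative only, reproducing the same naming scheme):

from itertools import product

# the four files produced by save_four_ds9, in the same order as its zip()
for limits, rectified in product(['frontiers', 'boundaries'], [False, True]):
    suffix = 'rectified' if rectified else 'rawimage'
    print('ds9_' + limits + '_' + suffix + '.reg')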
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
rectwv_coeff_to_ds9
def rectwv_coeff_to_ds9(rectwv_coeff, limits=None, rectified=False, numpix=100): """Generate ds9 region output with requested slitlet limits Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. limits : str Region to be saved: the only two possibilities are 'boundaries' and 'frontiers'. rectified : bool If True, the regions correspond to the rectified image. numpix : int Number of points in which the X-range interval is subdivided in order to save each boundary as a connected set of line segments. This option is only relevant when rectified=False. Returns ------- output : str String containing the full output to be exported as a ds9 region file. """ # protections if limits not in ['boundaries', 'frontiers']: raise ValueError('Unexpect limits=' + str(limits)) # retrieve relevant wavelength calibration parameters grism_name = rectwv_coeff.tags['grism'] filter_name = rectwv_coeff.tags['filter'] wv_parameters = set_wv_parameters(filter_name, grism_name) naxis1_enlarged = wv_parameters['naxis1_enlarged'] crpix1_enlarged = wv_parameters['crpix1_enlarged'] crval1_enlarged = wv_parameters['crval1_enlarged'] cdelt1_enlarged = wv_parameters['cdelt1_enlarged'] ds9_output = '# Region file format: DS9 version 4.1\n' \ 'global color=green dashlist=2 4 width=2 ' \ 'font="helvetica 10 normal roman" select=1 ' \ 'highlite=1 dash=1 fixed=0 edit=1 ' \ 'move=1 delete=1 include=1 source=1\nphysical\n#\n' ds9_output += '#\n# uuid (rectwv_coeff): {0}\n'.format(rectwv_coeff.uuid) ds9_output += \ '# grism...............: {0}\n'.format(rectwv_coeff.tags['grism']) ds9_output += \ '# filter..............: {0}\n'.format(rectwv_coeff.tags['filter']) for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in rectwv_coeff.missing_slitlets: dumdict = rectwv_coeff.contents[islitlet - 1] if islitlet % 2 == 0: if limits == 'frontiers': colorbox = '#0000ff' # '#ff77ff' else: colorbox = '#ff00ff' # '#ff77ff' else: if limits == 'frontiers': colorbox = '#0000ff' # '#4444ff' else: colorbox = '#00ffff' # '#4444ff' ds9_output += '#\n# islitlet...........: {0}\n'.format(islitlet) ds9_output += '# csu_bar_slit_center: {0}\n'.format( dumdict['csu_bar_slit_center'] ) if rectified: crpix1_linear = 1.0 crval1_linear = dumdict['crval1_linear'] cdelt1_linear = dumdict['cdelt1_linear'] if limits == 'frontiers': ydum_lower = dumdict['y0_frontier_lower_expected'] ydum_upper = dumdict['y0_frontier_upper_expected'] else: ydum_lower = dumdict['y0_reference_lower_expected'] ydum_upper = dumdict['y0_reference_upper_expected'] wave_ini = crval1_linear + \ (0.5 - crpix1_linear) * cdelt1_linear xdum_ini = (wave_ini - crval1_enlarged) / cdelt1_enlarged xdum_ini += crpix1_enlarged wave_end = crval1_linear + \ (EMIR_NAXIS1 + 0.5 - crpix1_linear) * cdelt1_linear xdum_end = (wave_end - crval1_enlarged) / cdelt1_enlarged xdum_end += crpix1_enlarged for ydum in [ydum_lower, ydum_upper]: ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum_ini, ydum, xdum_end, ydum ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label ydum_label = (ydum_lower + ydum_upper) / 2.0 xdum_label = EMIR_NAXIS1 / 2 + 0.5 wave_center = crval1_linear + \ (xdum_label - crpix1_linear) * cdelt1_linear xdum_label = (wave_center - crval1_enlarged) / cdelt1_enlarged xdum_label += crpix1_enlarged ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) else: if limits == 'frontiers': pol_lower = 
Polynomial( dumdict['frontier']['poly_coef_lower'] ) pol_upper = Polynomial( dumdict['frontier']['poly_coef_upper'] ) else: pol_lower = Polynomial( dumdict['spectrail']['poly_coef_lower'] ) pol_upper = Polynomial( dumdict['spectrail']['poly_coef_upper'] ) xdum = np.linspace(1, EMIR_NAXIS1, num=numpix) ydum = pol_lower(xdum) for i in range(len(xdum) - 1): ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum[i], ydum[i], xdum[i + 1], ydum[i + 1] ) ds9_output += ' # color={0}\n'.format(colorbox) ydum = pol_upper(xdum) for i in range(len(xdum) - 1): ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum[i], ydum[i], xdum[i + 1], ydum[i + 1] ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label xdum_label = EMIR_NAXIS1 / 2 + 0.5 ydum_lower = pol_lower(xdum_label) ydum_upper = pol_upper(xdum_label) ydum_label = (ydum_lower + ydum_upper) / 2.0 ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) return ds9_output
python
def rectwv_coeff_to_ds9(rectwv_coeff, limits=None, rectified=False, numpix=100): """Generate ds9 region output with requested slitlet limits Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. limits : str Region to be saved: the only two possibilities are 'boundaries' and 'frontiers'. rectified : bool If True, the regions correspond to the rectified image. numpix : int Number of points in which the X-range interval is subdivided in order to save each boundary as a connected set of line segments. This option is only relevant when rectified=False. Returns ------- output : str String containing the full output to be exported as a ds9 region file. """ # protections if limits not in ['boundaries', 'frontiers']: raise ValueError('Unexpect limits=' + str(limits)) # retrieve relevant wavelength calibration parameters grism_name = rectwv_coeff.tags['grism'] filter_name = rectwv_coeff.tags['filter'] wv_parameters = set_wv_parameters(filter_name, grism_name) naxis1_enlarged = wv_parameters['naxis1_enlarged'] crpix1_enlarged = wv_parameters['crpix1_enlarged'] crval1_enlarged = wv_parameters['crval1_enlarged'] cdelt1_enlarged = wv_parameters['cdelt1_enlarged'] ds9_output = '# Region file format: DS9 version 4.1\n' \ 'global color=green dashlist=2 4 width=2 ' \ 'font="helvetica 10 normal roman" select=1 ' \ 'highlite=1 dash=1 fixed=0 edit=1 ' \ 'move=1 delete=1 include=1 source=1\nphysical\n#\n' ds9_output += '#\n# uuid (rectwv_coeff): {0}\n'.format(rectwv_coeff.uuid) ds9_output += \ '# grism...............: {0}\n'.format(rectwv_coeff.tags['grism']) ds9_output += \ '# filter..............: {0}\n'.format(rectwv_coeff.tags['filter']) for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in rectwv_coeff.missing_slitlets: dumdict = rectwv_coeff.contents[islitlet - 1] if islitlet % 2 == 0: if limits == 'frontiers': colorbox = '#0000ff' # '#ff77ff' else: colorbox = '#ff00ff' # '#ff77ff' else: if limits == 'frontiers': colorbox = '#0000ff' # '#4444ff' else: colorbox = '#00ffff' # '#4444ff' ds9_output += '#\n# islitlet...........: {0}\n'.format(islitlet) ds9_output += '# csu_bar_slit_center: {0}\n'.format( dumdict['csu_bar_slit_center'] ) if rectified: crpix1_linear = 1.0 crval1_linear = dumdict['crval1_linear'] cdelt1_linear = dumdict['cdelt1_linear'] if limits == 'frontiers': ydum_lower = dumdict['y0_frontier_lower_expected'] ydum_upper = dumdict['y0_frontier_upper_expected'] else: ydum_lower = dumdict['y0_reference_lower_expected'] ydum_upper = dumdict['y0_reference_upper_expected'] wave_ini = crval1_linear + \ (0.5 - crpix1_linear) * cdelt1_linear xdum_ini = (wave_ini - crval1_enlarged) / cdelt1_enlarged xdum_ini += crpix1_enlarged wave_end = crval1_linear + \ (EMIR_NAXIS1 + 0.5 - crpix1_linear) * cdelt1_linear xdum_end = (wave_end - crval1_enlarged) / cdelt1_enlarged xdum_end += crpix1_enlarged for ydum in [ydum_lower, ydum_upper]: ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum_ini, ydum, xdum_end, ydum ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label ydum_label = (ydum_lower + ydum_upper) / 2.0 xdum_label = EMIR_NAXIS1 / 2 + 0.5 wave_center = crval1_linear + \ (xdum_label - crpix1_linear) * cdelt1_linear xdum_label = (wave_center - crval1_enlarged) / cdelt1_enlarged xdum_label += crpix1_enlarged ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) else: if limits == 'frontiers': pol_lower = 
Polynomial( dumdict['frontier']['poly_coef_lower'] ) pol_upper = Polynomial( dumdict['frontier']['poly_coef_upper'] ) else: pol_lower = Polynomial( dumdict['spectrail']['poly_coef_lower'] ) pol_upper = Polynomial( dumdict['spectrail']['poly_coef_upper'] ) xdum = np.linspace(1, EMIR_NAXIS1, num=numpix) ydum = pol_lower(xdum) for i in range(len(xdum) - 1): ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum[i], ydum[i], xdum[i + 1], ydum[i + 1] ) ds9_output += ' # color={0}\n'.format(colorbox) ydum = pol_upper(xdum) for i in range(len(xdum) - 1): ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum[i], ydum[i], xdum[i + 1], ydum[i + 1] ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label xdum_label = EMIR_NAXIS1 / 2 + 0.5 ydum_lower = pol_lower(xdum_label) ydum_upper = pol_upper(xdum_label) ydum_label = (ydum_lower + ydum_upper) / 2.0 ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) return ds9_output
[ "def", "rectwv_coeff_to_ds9", "(", "rectwv_coeff", ",", "limits", "=", "None", ",", "rectified", "=", "False", ",", "numpix", "=", "100", ")", ":", "# protections", "if", "limits", "not", "in", "[", "'boundaries'", ",", "'frontiers'", "]", ":", "raise", "ValueError", "(", "'Unexpect limits='", "+", "str", "(", "limits", ")", ")", "# retrieve relevant wavelength calibration parameters", "grism_name", "=", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "filter_name", "=", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "wv_parameters", "=", "set_wv_parameters", "(", "filter_name", ",", "grism_name", ")", "naxis1_enlarged", "=", "wv_parameters", "[", "'naxis1_enlarged'", "]", "crpix1_enlarged", "=", "wv_parameters", "[", "'crpix1_enlarged'", "]", "crval1_enlarged", "=", "wv_parameters", "[", "'crval1_enlarged'", "]", "cdelt1_enlarged", "=", "wv_parameters", "[", "'cdelt1_enlarged'", "]", "ds9_output", "=", "'# Region file format: DS9 version 4.1\\n'", "'global color=green dashlist=2 4 width=2 '", "'font=\"helvetica 10 normal roman\" select=1 '", "'highlite=1 dash=1 fixed=0 edit=1 '", "'move=1 delete=1 include=1 source=1\\nphysical\\n#\\n'", "ds9_output", "+=", "'#\\n# uuid (rectwv_coeff): {0}\\n'", ".", "format", "(", "rectwv_coeff", ".", "uuid", ")", "ds9_output", "+=", "'# grism...............: {0}\\n'", ".", "format", "(", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", ")", "ds9_output", "+=", "'# filter..............: {0}\\n'", ".", "format", "(", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", ")", "for", "islitlet", "in", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ":", "if", "islitlet", "not", "in", "rectwv_coeff", ".", "missing_slitlets", ":", "dumdict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "if", "islitlet", "%", "2", "==", "0", ":", "if", "limits", "==", "'frontiers'", ":", "colorbox", "=", "'#0000ff'", "# '#ff77ff'", "else", ":", "colorbox", "=", "'#ff00ff'", "# '#ff77ff'", "else", ":", "if", "limits", "==", "'frontiers'", ":", "colorbox", "=", "'#0000ff'", "# '#4444ff'", "else", ":", "colorbox", "=", "'#00ffff'", "# '#4444ff'", "ds9_output", "+=", "'#\\n# islitlet...........: {0}\\n'", ".", "format", "(", "islitlet", ")", "ds9_output", "+=", "'# csu_bar_slit_center: {0}\\n'", ".", "format", "(", "dumdict", "[", "'csu_bar_slit_center'", "]", ")", "if", "rectified", ":", "crpix1_linear", "=", "1.0", "crval1_linear", "=", "dumdict", "[", "'crval1_linear'", "]", "cdelt1_linear", "=", "dumdict", "[", "'cdelt1_linear'", "]", "if", "limits", "==", "'frontiers'", ":", "ydum_lower", "=", "dumdict", "[", "'y0_frontier_lower_expected'", "]", "ydum_upper", "=", "dumdict", "[", "'y0_frontier_upper_expected'", "]", "else", ":", "ydum_lower", "=", "dumdict", "[", "'y0_reference_lower_expected'", "]", "ydum_upper", "=", "dumdict", "[", "'y0_reference_upper_expected'", "]", "wave_ini", "=", "crval1_linear", "+", "(", "0.5", "-", "crpix1_linear", ")", "*", "cdelt1_linear", "xdum_ini", "=", "(", "wave_ini", "-", "crval1_enlarged", ")", "/", "cdelt1_enlarged", "xdum_ini", "+=", "crpix1_enlarged", "wave_end", "=", "crval1_linear", "+", "(", "EMIR_NAXIS1", "+", "0.5", "-", "crpix1_linear", ")", "*", "cdelt1_linear", "xdum_end", "=", "(", "wave_end", "-", "crval1_enlarged", ")", "/", "cdelt1_enlarged", "xdum_end", "+=", "crpix1_enlarged", "for", "ydum", "in", "[", "ydum_lower", ",", "ydum_upper", "]", ":", "ds9_output", "+=", "'line {0} {1} {2} {3}'", ".", "format", "(", "xdum_ini", ",", "ydum", ",", "xdum_end", ",", "ydum", ")", "ds9_output", "+=", 
"' # color={0}\\n'", ".", "format", "(", "colorbox", ")", "# slitlet label", "ydum_label", "=", "(", "ydum_lower", "+", "ydum_upper", ")", "/", "2.0", "xdum_label", "=", "EMIR_NAXIS1", "/", "2", "+", "0.5", "wave_center", "=", "crval1_linear", "+", "(", "xdum_label", "-", "crpix1_linear", ")", "*", "cdelt1_linear", "xdum_label", "=", "(", "wave_center", "-", "crval1_enlarged", ")", "/", "cdelt1_enlarged", "xdum_label", "+=", "crpix1_enlarged", "ds9_output", "+=", "'text {0} {1} {{{2}}} # color={3} '", "'font=\"helvetica 10 bold '", "'roman\"\\n'", ".", "format", "(", "xdum_label", ",", "ydum_label", ",", "islitlet", ",", "colorbox", ")", "else", ":", "if", "limits", "==", "'frontiers'", ":", "pol_lower", "=", "Polynomial", "(", "dumdict", "[", "'frontier'", "]", "[", "'poly_coef_lower'", "]", ")", "pol_upper", "=", "Polynomial", "(", "dumdict", "[", "'frontier'", "]", "[", "'poly_coef_upper'", "]", ")", "else", ":", "pol_lower", "=", "Polynomial", "(", "dumdict", "[", "'spectrail'", "]", "[", "'poly_coef_lower'", "]", ")", "pol_upper", "=", "Polynomial", "(", "dumdict", "[", "'spectrail'", "]", "[", "'poly_coef_upper'", "]", ")", "xdum", "=", "np", ".", "linspace", "(", "1", ",", "EMIR_NAXIS1", ",", "num", "=", "numpix", ")", "ydum", "=", "pol_lower", "(", "xdum", ")", "for", "i", "in", "range", "(", "len", "(", "xdum", ")", "-", "1", ")", ":", "ds9_output", "+=", "'line {0} {1} {2} {3}'", ".", "format", "(", "xdum", "[", "i", "]", ",", "ydum", "[", "i", "]", ",", "xdum", "[", "i", "+", "1", "]", ",", "ydum", "[", "i", "+", "1", "]", ")", "ds9_output", "+=", "' # color={0}\\n'", ".", "format", "(", "colorbox", ")", "ydum", "=", "pol_upper", "(", "xdum", ")", "for", "i", "in", "range", "(", "len", "(", "xdum", ")", "-", "1", ")", ":", "ds9_output", "+=", "'line {0} {1} {2} {3}'", ".", "format", "(", "xdum", "[", "i", "]", ",", "ydum", "[", "i", "]", ",", "xdum", "[", "i", "+", "1", "]", ",", "ydum", "[", "i", "+", "1", "]", ")", "ds9_output", "+=", "' # color={0}\\n'", ".", "format", "(", "colorbox", ")", "# slitlet label", "xdum_label", "=", "EMIR_NAXIS1", "/", "2", "+", "0.5", "ydum_lower", "=", "pol_lower", "(", "xdum_label", ")", "ydum_upper", "=", "pol_upper", "(", "xdum_label", ")", "ydum_label", "=", "(", "ydum_lower", "+", "ydum_upper", ")", "/", "2.0", "ds9_output", "+=", "'text {0} {1} {{{2}}} # color={3} '", "'font=\"helvetica 10 bold '", "'roman\"\\n'", ".", "format", "(", "xdum_label", ",", "ydum_label", ",", "islitlet", ",", "colorbox", ")", "return", "ds9_output" ]
Generate ds9 region output with requested slitlet limits

Parameters
----------
rectwv_coeff : RectWaveCoeff instance
    Rectification and wavelength calibration coefficients for the
    particular CSU configuration.
limits : str
    Region to be saved: the only two possibilities are 'boundaries'
    and 'frontiers'.
rectified : bool
    If True, the regions correspond to the rectified image.
numpix : int
    Number of points in which the X-range interval is subdivided in
    order to save each boundary as a connected set of line segments.
    This option is only relevant when rectified=False.

Returns
-------
output : str
    String containing the full output to be exported as a ds9 region
    file.
[ "Generate", "ds9", "region", "output", "with", "requested", "slitlet", "limits" ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L87-L236
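The rectified branch of rectwv_coeff_to_ds9 relies on the usual linear wavelength calibration, wavelength = crval1 + (x - crpix1) * cdelt1, and its inverse to place each slitlet on the enlarged wavelength grid. A standalone round-trip sketch with made-up numbers (not an actual EMIR grism/filter configuration):

def pix_to_wave(x, crpix1, crval1, cdelt1):
    """Wavelength at (fractional) pixel x under a linear calibration."""
    return crval1 + (x - crpix1) * cdelt1

def wave_to_pix(wave, crpix1, crval1, cdelt1):
    """Inverse mapping, as used to project onto the enlarged grid."""
    return (wave - crval1) / cdelt1 + crpix1

crpix1, crval1, cdelt1 = 1.0, 11000.0, 0.77  # illustrative values only
w = pix_to_wave(1024.5, crpix1, crval1, cdelt1)
print(w, wave_to_pix(w, crpix1, crval1, cdelt1))  # second value returns 1024.5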
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
save_spectral_lines_ds9
def save_spectral_lines_ds9(rectwv_coeff, debugplot=0):
    """Save expected location of arc and OH airglow to ds9 region files.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    """

    for spectral_lines, rectified, suffix in zip(
        ['arc', 'arc', 'oh', 'oh'],
        [False, True, False, True],
        ['rawimage', 'rectified', 'rawimage', 'rectified']
    ):
        output = spectral_lines_to_ds9(rectwv_coeff=rectwv_coeff,
                                       spectral_lines=spectral_lines,
                                       rectified=rectified)
        filename = 'ds9_' + spectral_lines + '_' + suffix + '.reg'
        if abs(debugplot) >= 10:
            print('>>> Saving: ', filename)
        save_ds9(output, filename)
python
def save_spectral_lines_ds9(rectwv_coeff, debugplot=0):
    """Save expected location of arc and OH airglow to ds9 region files.

    Parameters
    ----------
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    """

    for spectral_lines, rectified, suffix in zip(
        ['arc', 'arc', 'oh', 'oh'],
        [False, True, False, True],
        ['rawimage', 'rectified', 'rawimage', 'rectified']
    ):
        output = spectral_lines_to_ds9(rectwv_coeff=rectwv_coeff,
                                       spectral_lines=spectral_lines,
                                       rectified=rectified)
        filename = 'ds9_' + spectral_lines + '_' + suffix + '.reg'
        if abs(debugplot) >= 10:
            print('>>> Saving: ', filename)
        save_ds9(output, filename)
[ "def", "save_spectral_lines_ds9", "(", "rectwv_coeff", ",", "debugplot", "=", "0", ")", ":", "for", "spectral_lines", ",", "rectified", ",", "suffix", "in", "zip", "(", "[", "'arc'", ",", "'arc'", ",", "'oh'", ",", "'oh'", "]", ",", "[", "False", ",", "True", ",", "False", ",", "True", "]", ",", "[", "'rawimage'", ",", "'rectified'", ",", "'rawimage'", ",", "'rectified'", "]", ")", ":", "output", "=", "spectral_lines_to_ds9", "(", "rectwv_coeff", "=", "rectwv_coeff", ",", "spectral_lines", "=", "spectral_lines", ",", "rectified", "=", "rectified", ")", "filename", "=", "'ds9_'", "+", "spectral_lines", "+", "'_'", "+", "suffix", "+", "'.reg'", "if", "abs", "(", "debugplot", ")", ">=", "10", ":", "print", "(", "'>>> Saving: '", ",", "filename", ")", "save_ds9", "(", "output", ",", "filename", ")" ]
Save expected location of arc and OH airglow to ds9 region files.

Parameters
----------
rectwv_coeff : RectWaveCoeff instance
    Rectification and wavelength calibration coefficients for the
    particular CSU configuration.
debugplot : int
    Debugging level for messages and plots. For details see
    'numina.array.display.pause_debugplot.py'.
[ "Save", "expected", "location", "of", "arc", "and", "OH", "airglow", "to", "ds9", "region", "files", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L239-L264
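The arc and OH line catalogues drawn by these helpers are plain text tables shipped inside the emirdrp package; spectral_lines_to_ds9 (next record) loads them with pkgutil, StringIO and numpy.genfromtxt. A generic sketch of that loading pattern, where the package and resource names in the comment are only an example taken from the code:

import pkgutil
from io import StringIO
import numpy as np

def load_packaged_table(package, resource):
    """Read a whitespace-separated table bundled inside an installed package."""
    raw = pkgutil.get_data(package, resource)  # returns bytes, or None if absent
    if raw is None:
        raise FileNotFoundError(resource)
    return np.genfromtxt(StringIO(raw.decode('utf8')))

# e.g. load_packaged_table('emirdrp.instrument.configs', 'Oliva_etal_2013.dat')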
guaix-ucm/pyemir
emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py
spectral_lines_to_ds9
def spectral_lines_to_ds9(rectwv_coeff, spectral_lines=None, rectified=False): """Generate ds9 region output with requested spectral lines Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. spectral_lines : str Spectral lines to be saved: the only two possibilities are 'arc' and 'oh'. rectified : bool If True, the regions correspond to the rectified image. Returns ------- output : str String containing the full output to be exported as a ds9 region file. """ # protections if spectral_lines not in ['arc', 'oh']: raise ValueError('Unexpected spectral lines=' + str(spectral_lines)) if spectral_lines == 'arc': grism_name = rectwv_coeff.tags['grism'] if grism_name == 'LR': catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat' else: catlines_file = 'lines_argon_neon_xenon_empirical.dat' dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file) arc_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(arc_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = catlines[:, 0] catlines_all_flux = catlines[:, 1] elif spectral_lines == 'oh': dumdata = pkgutil.get_data('emirdrp.instrument.configs', 'Oliva_etal_2013.dat') oh_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(oh_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0])) catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2])) else: raise ValueError('This should not happen!') # retrieve relevant wavelength calibration parameters grism_name = rectwv_coeff.tags['grism'] filter_name = rectwv_coeff.tags['filter'] wv_parameters = set_wv_parameters(filter_name, grism_name) naxis1_enlarged = wv_parameters['naxis1_enlarged'] crpix1_enlarged = wv_parameters['crpix1_enlarged'] crval1_enlarged = wv_parameters['crval1_enlarged'] cdelt1_enlarged = wv_parameters['cdelt1_enlarged'] ds9_output = '# Region file format: DS9 version 4.1\n' \ 'global color=#00ffff dashlist=0 0 width=2 ' \ 'font="helvetica 10 normal roman" select=1 ' \ 'highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 ' \ 'include=1 source=1\nphysical\n' ds9_output += '#\n# uuid (rectwv_coeff): {0}\n'.format( rectwv_coeff.uuid) ds9_output += \ '# grism...............: {0}\n'.format(rectwv_coeff.tags['grism']) ds9_output += \ '# filter..............: {0}\n'.format(rectwv_coeff.tags['filter']) global_integer_offset_x_pix = rectwv_coeff.global_integer_offset_x_pix global_integer_offset_y_pix = rectwv_coeff.global_integer_offset_y_pix for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in rectwv_coeff.missing_slitlets: dumdict = rectwv_coeff.contents[islitlet - 1] if islitlet % 2 == 0: colorbox = '#ff00ff' # '#ff77ff' else: colorbox = '#00ffff' # '#4444ff' ds9_output += '#\n# islitlet...........: {0}\n'.format( islitlet) ds9_output += '# csu_bar_slit_center: {0}\n'.format( dumdict['csu_bar_slit_center'] ) crpix1_linear = 1.0 crval1_linear = dumdict['crval1_linear'] cdelt1_linear = dumdict['cdelt1_linear'] wave_ini = crval1_linear + \ (0.5 - crpix1_linear) * cdelt1_linear wave_end = crval1_linear + \ (EMIR_NAXIS1 + 0.5 - crpix1_linear) * cdelt1_linear if rectified: ydum_lower = dumdict['y0_reference_lower_expected'] ydum_upper = dumdict['y0_reference_upper_expected'] # spectral lines for wave in catlines_all_wave: if wave_ini <= wave <= wave_end: xdum = (wave - crval1_enlarged) / cdelt1_enlarged xdum += crpix1_enlarged 
ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum, ydum_lower, xdum, ydum_upper ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label ydum_label = (ydum_lower + ydum_upper) / 2.0 xdum_label = EMIR_NAXIS1 / 2 + 0.5 wave_center = crval1_linear + \ (xdum_label - crpix1_linear) * cdelt1_linear xdum_label = (wave_center - crval1_enlarged) / cdelt1_enlarged xdum_label += crpix1_enlarged ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) else: bb_ns1_orig = dumdict['bb_ns1_orig'] ttd_order = dumdict['ttd_order'] aij = dumdict['ttd_aij'] bij = dumdict['ttd_bij'] min_row_rectified = float(dumdict['min_row_rectified']) max_row_rectified = float(dumdict['max_row_rectified']) mean_row_rectified = (min_row_rectified + max_row_rectified)/2 wpoly_coeff = dumdict['wpoly_coeff'] x0 = [] y0 = [] x1 = [] y1 = [] x2 = [] y2 = [] # spectral lines for wave in catlines_all_wave: if wave_ini <= wave <= wave_end: tmp_coeff = np.copy(wpoly_coeff) tmp_coeff[0] -= wave tmp_xroots = np.polynomial.Polynomial( tmp_coeff).roots() for dum in tmp_xroots: if np.isreal(dum): dum = dum.real if 1 <= dum <= EMIR_NAXIS1: x0.append(dum) y0.append(mean_row_rectified) x1.append(dum) y1.append(min_row_rectified) x2.append(dum) y2.append(max_row_rectified) pass xx0, yy0 = fmap(ttd_order, aij, bij, np.array(x0), np.array(y0)) xx0 -= global_integer_offset_x_pix yy0 += bb_ns1_orig yy0 -= global_integer_offset_y_pix xx1, yy1 = fmap(ttd_order, aij, bij, np.array(x1), np.array(y1)) xx1 -= global_integer_offset_x_pix yy1 += bb_ns1_orig yy1 -= global_integer_offset_y_pix xx2, yy2 = fmap(ttd_order, aij, bij, np.array(x2), np.array(y2)) xx2 -= global_integer_offset_x_pix yy2 += bb_ns1_orig yy2 -= global_integer_offset_y_pix for xx1_, xx2_, yy1_, yy2_ in zip(xx1, xx2, yy1, yy2): ds9_output += \ 'line {0} {1} {2} {3}'.format( xx1_, yy1_, xx2_, yy2_ ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label pol_lower = Polynomial(dumdict['spectrail']['poly_coef_lower']) pol_upper = Polynomial(dumdict['spectrail']['poly_coef_upper']) xdum_label = EMIR_NAXIS1 / 2 + 0.5 ydum_lower = pol_lower(xdum_label) ydum_upper = pol_upper(xdum_label) ydum_label = (ydum_lower + ydum_upper) / 2.0 ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) return ds9_output
python
def spectral_lines_to_ds9(rectwv_coeff, spectral_lines=None, rectified=False): """Generate ds9 region output with requested spectral lines Parameters ---------- rectwv_coeff : RectWaveCoeff instance Rectification and wavelength calibration coefficients for the particular CSU configuration. spectral_lines : str Spectral lines to be saved: the only two possibilities are 'arc' and 'oh'. rectified : bool If True, the regions correspond to the rectified image. Returns ------- output : str String containing the full output to be exported as a ds9 region file. """ # protections if spectral_lines not in ['arc', 'oh']: raise ValueError('Unexpected spectral lines=' + str(spectral_lines)) if spectral_lines == 'arc': grism_name = rectwv_coeff.tags['grism'] if grism_name == 'LR': catlines_file = 'lines_argon_neon_xenon_empirical_LR.dat' else: catlines_file = 'lines_argon_neon_xenon_empirical.dat' dumdata = pkgutil.get_data('emirdrp.instrument.configs', catlines_file) arc_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(arc_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = catlines[:, 0] catlines_all_flux = catlines[:, 1] elif spectral_lines == 'oh': dumdata = pkgutil.get_data('emirdrp.instrument.configs', 'Oliva_etal_2013.dat') oh_lines_tmpfile = StringIO(dumdata.decode('utf8')) catlines = np.genfromtxt(oh_lines_tmpfile) # define wavelength and flux as separate arrays catlines_all_wave = np.concatenate((catlines[:, 1], catlines[:, 0])) catlines_all_flux = np.concatenate((catlines[:, 2], catlines[:, 2])) else: raise ValueError('This should not happen!') # retrieve relevant wavelength calibration parameters grism_name = rectwv_coeff.tags['grism'] filter_name = rectwv_coeff.tags['filter'] wv_parameters = set_wv_parameters(filter_name, grism_name) naxis1_enlarged = wv_parameters['naxis1_enlarged'] crpix1_enlarged = wv_parameters['crpix1_enlarged'] crval1_enlarged = wv_parameters['crval1_enlarged'] cdelt1_enlarged = wv_parameters['cdelt1_enlarged'] ds9_output = '# Region file format: DS9 version 4.1\n' \ 'global color=#00ffff dashlist=0 0 width=2 ' \ 'font="helvetica 10 normal roman" select=1 ' \ 'highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 ' \ 'include=1 source=1\nphysical\n' ds9_output += '#\n# uuid (rectwv_coeff): {0}\n'.format( rectwv_coeff.uuid) ds9_output += \ '# grism...............: {0}\n'.format(rectwv_coeff.tags['grism']) ds9_output += \ '# filter..............: {0}\n'.format(rectwv_coeff.tags['filter']) global_integer_offset_x_pix = rectwv_coeff.global_integer_offset_x_pix global_integer_offset_y_pix = rectwv_coeff.global_integer_offset_y_pix for islitlet in range(1, EMIR_NBARS + 1): if islitlet not in rectwv_coeff.missing_slitlets: dumdict = rectwv_coeff.contents[islitlet - 1] if islitlet % 2 == 0: colorbox = '#ff00ff' # '#ff77ff' else: colorbox = '#00ffff' # '#4444ff' ds9_output += '#\n# islitlet...........: {0}\n'.format( islitlet) ds9_output += '# csu_bar_slit_center: {0}\n'.format( dumdict['csu_bar_slit_center'] ) crpix1_linear = 1.0 crval1_linear = dumdict['crval1_linear'] cdelt1_linear = dumdict['cdelt1_linear'] wave_ini = crval1_linear + \ (0.5 - crpix1_linear) * cdelt1_linear wave_end = crval1_linear + \ (EMIR_NAXIS1 + 0.5 - crpix1_linear) * cdelt1_linear if rectified: ydum_lower = dumdict['y0_reference_lower_expected'] ydum_upper = dumdict['y0_reference_upper_expected'] # spectral lines for wave in catlines_all_wave: if wave_ini <= wave <= wave_end: xdum = (wave - crval1_enlarged) / cdelt1_enlarged xdum += crpix1_enlarged 
ds9_output += \ 'line {0} {1} {2} {3}'.format( xdum, ydum_lower, xdum, ydum_upper ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label ydum_label = (ydum_lower + ydum_upper) / 2.0 xdum_label = EMIR_NAXIS1 / 2 + 0.5 wave_center = crval1_linear + \ (xdum_label - crpix1_linear) * cdelt1_linear xdum_label = (wave_center - crval1_enlarged) / cdelt1_enlarged xdum_label += crpix1_enlarged ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) else: bb_ns1_orig = dumdict['bb_ns1_orig'] ttd_order = dumdict['ttd_order'] aij = dumdict['ttd_aij'] bij = dumdict['ttd_bij'] min_row_rectified = float(dumdict['min_row_rectified']) max_row_rectified = float(dumdict['max_row_rectified']) mean_row_rectified = (min_row_rectified + max_row_rectified)/2 wpoly_coeff = dumdict['wpoly_coeff'] x0 = [] y0 = [] x1 = [] y1 = [] x2 = [] y2 = [] # spectral lines for wave in catlines_all_wave: if wave_ini <= wave <= wave_end: tmp_coeff = np.copy(wpoly_coeff) tmp_coeff[0] -= wave tmp_xroots = np.polynomial.Polynomial( tmp_coeff).roots() for dum in tmp_xroots: if np.isreal(dum): dum = dum.real if 1 <= dum <= EMIR_NAXIS1: x0.append(dum) y0.append(mean_row_rectified) x1.append(dum) y1.append(min_row_rectified) x2.append(dum) y2.append(max_row_rectified) pass xx0, yy0 = fmap(ttd_order, aij, bij, np.array(x0), np.array(y0)) xx0 -= global_integer_offset_x_pix yy0 += bb_ns1_orig yy0 -= global_integer_offset_y_pix xx1, yy1 = fmap(ttd_order, aij, bij, np.array(x1), np.array(y1)) xx1 -= global_integer_offset_x_pix yy1 += bb_ns1_orig yy1 -= global_integer_offset_y_pix xx2, yy2 = fmap(ttd_order, aij, bij, np.array(x2), np.array(y2)) xx2 -= global_integer_offset_x_pix yy2 += bb_ns1_orig yy2 -= global_integer_offset_y_pix for xx1_, xx2_, yy1_, yy2_ in zip(xx1, xx2, yy1, yy2): ds9_output += \ 'line {0} {1} {2} {3}'.format( xx1_, yy1_, xx2_, yy2_ ) ds9_output += ' # color={0}\n'.format(colorbox) # slitlet label pol_lower = Polynomial(dumdict['spectrail']['poly_coef_lower']) pol_upper = Polynomial(dumdict['spectrail']['poly_coef_upper']) xdum_label = EMIR_NAXIS1 / 2 + 0.5 ydum_lower = pol_lower(xdum_label) ydum_upper = pol_upper(xdum_label) ydum_label = (ydum_lower + ydum_upper) / 2.0 ds9_output += 'text {0} {1} {{{2}}} # color={3} ' \ 'font="helvetica 10 bold ' \ 'roman"\n'.format(xdum_label, ydum_label, islitlet, colorbox) return ds9_output
[ "def", "spectral_lines_to_ds9", "(", "rectwv_coeff", ",", "spectral_lines", "=", "None", ",", "rectified", "=", "False", ")", ":", "# protections", "if", "spectral_lines", "not", "in", "[", "'arc'", ",", "'oh'", "]", ":", "raise", "ValueError", "(", "'Unexpected spectral lines='", "+", "str", "(", "spectral_lines", ")", ")", "if", "spectral_lines", "==", "'arc'", ":", "grism_name", "=", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "if", "grism_name", "==", "'LR'", ":", "catlines_file", "=", "'lines_argon_neon_xenon_empirical_LR.dat'", "else", ":", "catlines_file", "=", "'lines_argon_neon_xenon_empirical.dat'", "dumdata", "=", "pkgutil", ".", "get_data", "(", "'emirdrp.instrument.configs'", ",", "catlines_file", ")", "arc_lines_tmpfile", "=", "StringIO", "(", "dumdata", ".", "decode", "(", "'utf8'", ")", ")", "catlines", "=", "np", ".", "genfromtxt", "(", "arc_lines_tmpfile", ")", "# define wavelength and flux as separate arrays", "catlines_all_wave", "=", "catlines", "[", ":", ",", "0", "]", "catlines_all_flux", "=", "catlines", "[", ":", ",", "1", "]", "elif", "spectral_lines", "==", "'oh'", ":", "dumdata", "=", "pkgutil", ".", "get_data", "(", "'emirdrp.instrument.configs'", ",", "'Oliva_etal_2013.dat'", ")", "oh_lines_tmpfile", "=", "StringIO", "(", "dumdata", ".", "decode", "(", "'utf8'", ")", ")", "catlines", "=", "np", ".", "genfromtxt", "(", "oh_lines_tmpfile", ")", "# define wavelength and flux as separate arrays", "catlines_all_wave", "=", "np", ".", "concatenate", "(", "(", "catlines", "[", ":", ",", "1", "]", ",", "catlines", "[", ":", ",", "0", "]", ")", ")", "catlines_all_flux", "=", "np", ".", "concatenate", "(", "(", "catlines", "[", ":", ",", "2", "]", ",", "catlines", "[", ":", ",", "2", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "'This should not happen!'", ")", "# retrieve relevant wavelength calibration parameters", "grism_name", "=", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", "filter_name", "=", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", "wv_parameters", "=", "set_wv_parameters", "(", "filter_name", ",", "grism_name", ")", "naxis1_enlarged", "=", "wv_parameters", "[", "'naxis1_enlarged'", "]", "crpix1_enlarged", "=", "wv_parameters", "[", "'crpix1_enlarged'", "]", "crval1_enlarged", "=", "wv_parameters", "[", "'crval1_enlarged'", "]", "cdelt1_enlarged", "=", "wv_parameters", "[", "'cdelt1_enlarged'", "]", "ds9_output", "=", "'# Region file format: DS9 version 4.1\\n'", "'global color=#00ffff dashlist=0 0 width=2 '", "'font=\"helvetica 10 normal roman\" select=1 '", "'highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 '", "'include=1 source=1\\nphysical\\n'", "ds9_output", "+=", "'#\\n# uuid (rectwv_coeff): {0}\\n'", ".", "format", "(", "rectwv_coeff", ".", "uuid", ")", "ds9_output", "+=", "'# grism...............: {0}\\n'", ".", "format", "(", "rectwv_coeff", ".", "tags", "[", "'grism'", "]", ")", "ds9_output", "+=", "'# filter..............: {0}\\n'", ".", "format", "(", "rectwv_coeff", ".", "tags", "[", "'filter'", "]", ")", "global_integer_offset_x_pix", "=", "rectwv_coeff", ".", "global_integer_offset_x_pix", "global_integer_offset_y_pix", "=", "rectwv_coeff", ".", "global_integer_offset_y_pix", "for", "islitlet", "in", "range", "(", "1", ",", "EMIR_NBARS", "+", "1", ")", ":", "if", "islitlet", "not", "in", "rectwv_coeff", ".", "missing_slitlets", ":", "dumdict", "=", "rectwv_coeff", ".", "contents", "[", "islitlet", "-", "1", "]", "if", "islitlet", "%", "2", "==", "0", ":", "colorbox", "=", "'#ff00ff'", "# '#ff77ff'", "else", ":", 
"colorbox", "=", "'#00ffff'", "# '#4444ff'", "ds9_output", "+=", "'#\\n# islitlet...........: {0}\\n'", ".", "format", "(", "islitlet", ")", "ds9_output", "+=", "'# csu_bar_slit_center: {0}\\n'", ".", "format", "(", "dumdict", "[", "'csu_bar_slit_center'", "]", ")", "crpix1_linear", "=", "1.0", "crval1_linear", "=", "dumdict", "[", "'crval1_linear'", "]", "cdelt1_linear", "=", "dumdict", "[", "'cdelt1_linear'", "]", "wave_ini", "=", "crval1_linear", "+", "(", "0.5", "-", "crpix1_linear", ")", "*", "cdelt1_linear", "wave_end", "=", "crval1_linear", "+", "(", "EMIR_NAXIS1", "+", "0.5", "-", "crpix1_linear", ")", "*", "cdelt1_linear", "if", "rectified", ":", "ydum_lower", "=", "dumdict", "[", "'y0_reference_lower_expected'", "]", "ydum_upper", "=", "dumdict", "[", "'y0_reference_upper_expected'", "]", "# spectral lines", "for", "wave", "in", "catlines_all_wave", ":", "if", "wave_ini", "<=", "wave", "<=", "wave_end", ":", "xdum", "=", "(", "wave", "-", "crval1_enlarged", ")", "/", "cdelt1_enlarged", "xdum", "+=", "crpix1_enlarged", "ds9_output", "+=", "'line {0} {1} {2} {3}'", ".", "format", "(", "xdum", ",", "ydum_lower", ",", "xdum", ",", "ydum_upper", ")", "ds9_output", "+=", "' # color={0}\\n'", ".", "format", "(", "colorbox", ")", "# slitlet label", "ydum_label", "=", "(", "ydum_lower", "+", "ydum_upper", ")", "/", "2.0", "xdum_label", "=", "EMIR_NAXIS1", "/", "2", "+", "0.5", "wave_center", "=", "crval1_linear", "+", "(", "xdum_label", "-", "crpix1_linear", ")", "*", "cdelt1_linear", "xdum_label", "=", "(", "wave_center", "-", "crval1_enlarged", ")", "/", "cdelt1_enlarged", "xdum_label", "+=", "crpix1_enlarged", "ds9_output", "+=", "'text {0} {1} {{{2}}} # color={3} '", "'font=\"helvetica 10 bold '", "'roman\"\\n'", ".", "format", "(", "xdum_label", ",", "ydum_label", ",", "islitlet", ",", "colorbox", ")", "else", ":", "bb_ns1_orig", "=", "dumdict", "[", "'bb_ns1_orig'", "]", "ttd_order", "=", "dumdict", "[", "'ttd_order'", "]", "aij", "=", "dumdict", "[", "'ttd_aij'", "]", "bij", "=", "dumdict", "[", "'ttd_bij'", "]", "min_row_rectified", "=", "float", "(", "dumdict", "[", "'min_row_rectified'", "]", ")", "max_row_rectified", "=", "float", "(", "dumdict", "[", "'max_row_rectified'", "]", ")", "mean_row_rectified", "=", "(", "min_row_rectified", "+", "max_row_rectified", ")", "/", "2", "wpoly_coeff", "=", "dumdict", "[", "'wpoly_coeff'", "]", "x0", "=", "[", "]", "y0", "=", "[", "]", "x1", "=", "[", "]", "y1", "=", "[", "]", "x2", "=", "[", "]", "y2", "=", "[", "]", "# spectral lines", "for", "wave", "in", "catlines_all_wave", ":", "if", "wave_ini", "<=", "wave", "<=", "wave_end", ":", "tmp_coeff", "=", "np", ".", "copy", "(", "wpoly_coeff", ")", "tmp_coeff", "[", "0", "]", "-=", "wave", "tmp_xroots", "=", "np", ".", "polynomial", ".", "Polynomial", "(", "tmp_coeff", ")", ".", "roots", "(", ")", "for", "dum", "in", "tmp_xroots", ":", "if", "np", ".", "isreal", "(", "dum", ")", ":", "dum", "=", "dum", ".", "real", "if", "1", "<=", "dum", "<=", "EMIR_NAXIS1", ":", "x0", ".", "append", "(", "dum", ")", "y0", ".", "append", "(", "mean_row_rectified", ")", "x1", ".", "append", "(", "dum", ")", "y1", ".", "append", "(", "min_row_rectified", ")", "x2", ".", "append", "(", "dum", ")", "y2", ".", "append", "(", "max_row_rectified", ")", "pass", "xx0", ",", "yy0", "=", "fmap", "(", "ttd_order", ",", "aij", ",", "bij", ",", "np", ".", "array", "(", "x0", ")", ",", "np", ".", "array", "(", "y0", ")", ")", "xx0", "-=", "global_integer_offset_x_pix", "yy0", "+=", "bb_ns1_orig", "yy0", "-=", 
"global_integer_offset_y_pix", "xx1", ",", "yy1", "=", "fmap", "(", "ttd_order", ",", "aij", ",", "bij", ",", "np", ".", "array", "(", "x1", ")", ",", "np", ".", "array", "(", "y1", ")", ")", "xx1", "-=", "global_integer_offset_x_pix", "yy1", "+=", "bb_ns1_orig", "yy1", "-=", "global_integer_offset_y_pix", "xx2", ",", "yy2", "=", "fmap", "(", "ttd_order", ",", "aij", ",", "bij", ",", "np", ".", "array", "(", "x2", ")", ",", "np", ".", "array", "(", "y2", ")", ")", "xx2", "-=", "global_integer_offset_x_pix", "yy2", "+=", "bb_ns1_orig", "yy2", "-=", "global_integer_offset_y_pix", "for", "xx1_", ",", "xx2_", ",", "yy1_", ",", "yy2_", "in", "zip", "(", "xx1", ",", "xx2", ",", "yy1", ",", "yy2", ")", ":", "ds9_output", "+=", "'line {0} {1} {2} {3}'", ".", "format", "(", "xx1_", ",", "yy1_", ",", "xx2_", ",", "yy2_", ")", "ds9_output", "+=", "' # color={0}\\n'", ".", "format", "(", "colorbox", ")", "# slitlet label", "pol_lower", "=", "Polynomial", "(", "dumdict", "[", "'spectrail'", "]", "[", "'poly_coef_lower'", "]", ")", "pol_upper", "=", "Polynomial", "(", "dumdict", "[", "'spectrail'", "]", "[", "'poly_coef_upper'", "]", ")", "xdum_label", "=", "EMIR_NAXIS1", "/", "2", "+", "0.5", "ydum_lower", "=", "pol_lower", "(", "xdum_label", ")", "ydum_upper", "=", "pol_upper", "(", "xdum_label", ")", "ydum_label", "=", "(", "ydum_lower", "+", "ydum_upper", ")", "/", "2.0", "ds9_output", "+=", "'text {0} {1} {{{2}}} # color={3} '", "'font=\"helvetica 10 bold '", "'roman\"\\n'", ".", "format", "(", "xdum_label", ",", "ydum_label", ",", "islitlet", ",", "colorbox", ")", "return", "ds9_output" ]
Generate ds9 region output with requested spectral lines

Parameters
----------
rectwv_coeff : RectWaveCoeff instance
    Rectification and wavelength calibration coefficients for the
    particular CSU configuration.
spectral_lines : str
    Spectral lines to be saved: the only two possibilities are 'arc'
    and 'oh'.
rectified : bool
    If True, the regions correspond to the rectified image.

Returns
-------
output : str
    String containing the full output to be exported as a ds9 region
    file.
[ "Generate", "ds9", "region", "output", "with", "requested", "spectral", "lines" ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/rectwv_coeff_to_ds9.py#L267-L456
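For the raw-image case above, each catalogue wavelength is converted to a detector column by subtracting it from the constant term of the wavelength-calibration polynomial and keeping the real roots that fall on the detector. A distilled sketch of that step, using toy polynomial coefficients rather than a real slitlet solution:

import numpy as np

def wave_to_pixel(wpoly_coeff, wave, naxis1):
    """Pixels where the polynomial wavelength solution equals `wave`."""
    coeff = np.array(wpoly_coeff, dtype=float)
    coeff[0] -= wave  # roots of p(x) - wave = 0
    roots = np.polynomial.Polynomial(coeff).roots()
    return [r.real for r in roots
            if np.isreal(r) and 1 <= r.real <= naxis1]

# toy solution: wave(x) = 11000 + 0.77*x + 1e-6*x**2  (illustrative only)
print(wave_to_pixel([11000.0, 0.77, 1e-6], 11800.0, 2048))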
IdentityPython/oidcendpoint
src/oidcendpoint/endpoint_context.py
EndpointContext.create_providerinfo
def create_providerinfo(self, capabilities): """ Dynamically create the provider info response :param capabilities: :return: """ _pinfo = self.package_capabilities() not_supported = {} for key, val in capabilities.items(): try: allowed = _pinfo[key] except KeyError: _pinfo[key] = val else: if isinstance(allowed, bool): if allowed is False: if val is True: not_supported[key] = True else: _pinfo[key] = val elif isinstance(allowed, str): if val != allowed: not_supported[key] = val elif isinstance(allowed, list): if isinstance(val, str): sv = {val} else: try: sv = set(val) except TypeError: if key == 'response_types_supported': sv = set() for v in val: v.sort() sv.add(' '.join(v)) else: raise else: sv = set() for v in val: vs = v.split(' ') vs.sort() sv.add(' '.join(vs)) sa = set(allowed) if (sv & sa) == sv: _pinfo[key] = list(sv) else: not_supported[key] = list(sv - sa) if not_supported: _msg = "Server doesn't support the following features: {}".format( not_supported) logger.error(_msg) raise ConfigurationError(_msg) if self.jwks_uri and self.keyjar: _pinfo["jwks_uri"] = self.jwks_uri for name, instance in self.endpoint.items(): if name not in ['webfinger', 'provider_info']: _pinfo['{}_endpoint'.format(name)] = instance.full_path return _pinfo
python
def create_providerinfo(self, capabilities): """ Dynamically create the provider info response :param capabilities: :return: """ _pinfo = self.package_capabilities() not_supported = {} for key, val in capabilities.items(): try: allowed = _pinfo[key] except KeyError: _pinfo[key] = val else: if isinstance(allowed, bool): if allowed is False: if val is True: not_supported[key] = True else: _pinfo[key] = val elif isinstance(allowed, str): if val != allowed: not_supported[key] = val elif isinstance(allowed, list): if isinstance(val, str): sv = {val} else: try: sv = set(val) except TypeError: if key == 'response_types_supported': sv = set() for v in val: v.sort() sv.add(' '.join(v)) else: raise else: sv = set() for v in val: vs = v.split(' ') vs.sort() sv.add(' '.join(vs)) sa = set(allowed) if (sv & sa) == sv: _pinfo[key] = list(sv) else: not_supported[key] = list(sv - sa) if not_supported: _msg = "Server doesn't support the following features: {}".format( not_supported) logger.error(_msg) raise ConfigurationError(_msg) if self.jwks_uri and self.keyjar: _pinfo["jwks_uri"] = self.jwks_uri for name, instance in self.endpoint.items(): if name not in ['webfinger', 'provider_info']: _pinfo['{}_endpoint'.format(name)] = instance.full_path return _pinfo
[ "def", "create_providerinfo", "(", "self", ",", "capabilities", ")", ":", "_pinfo", "=", "self", ".", "package_capabilities", "(", ")", "not_supported", "=", "{", "}", "for", "key", ",", "val", "in", "capabilities", ".", "items", "(", ")", ":", "try", ":", "allowed", "=", "_pinfo", "[", "key", "]", "except", "KeyError", ":", "_pinfo", "[", "key", "]", "=", "val", "else", ":", "if", "isinstance", "(", "allowed", ",", "bool", ")", ":", "if", "allowed", "is", "False", ":", "if", "val", "is", "True", ":", "not_supported", "[", "key", "]", "=", "True", "else", ":", "_pinfo", "[", "key", "]", "=", "val", "elif", "isinstance", "(", "allowed", ",", "str", ")", ":", "if", "val", "!=", "allowed", ":", "not_supported", "[", "key", "]", "=", "val", "elif", "isinstance", "(", "allowed", ",", "list", ")", ":", "if", "isinstance", "(", "val", ",", "str", ")", ":", "sv", "=", "{", "val", "}", "else", ":", "try", ":", "sv", "=", "set", "(", "val", ")", "except", "TypeError", ":", "if", "key", "==", "'response_types_supported'", ":", "sv", "=", "set", "(", ")", "for", "v", "in", "val", ":", "v", ".", "sort", "(", ")", "sv", ".", "add", "(", "' '", ".", "join", "(", "v", ")", ")", "else", ":", "raise", "else", ":", "sv", "=", "set", "(", ")", "for", "v", "in", "val", ":", "vs", "=", "v", ".", "split", "(", "' '", ")", "vs", ".", "sort", "(", ")", "sv", ".", "add", "(", "' '", ".", "join", "(", "vs", ")", ")", "sa", "=", "set", "(", "allowed", ")", "if", "(", "sv", "&", "sa", ")", "==", "sv", ":", "_pinfo", "[", "key", "]", "=", "list", "(", "sv", ")", "else", ":", "not_supported", "[", "key", "]", "=", "list", "(", "sv", "-", "sa", ")", "if", "not_supported", ":", "_msg", "=", "\"Server doesn't support the following features: {}\"", ".", "format", "(", "not_supported", ")", "logger", ".", "error", "(", "_msg", ")", "raise", "ConfigurationError", "(", "_msg", ")", "if", "self", ".", "jwks_uri", "and", "self", ".", "keyjar", ":", "_pinfo", "[", "\"jwks_uri\"", "]", "=", "self", ".", "jwks_uri", "for", "name", ",", "instance", "in", "self", ".", "endpoint", ".", "items", "(", ")", ":", "if", "name", "not", "in", "[", "'webfinger'", ",", "'provider_info'", "]", ":", "_pinfo", "[", "'{}_endpoint'", ".", "format", "(", "name", ")", "]", "=", "instance", ".", "full_path", "return", "_pinfo" ]
Dynamically create the provider info response :param capabilities: :return:
[ "Dynamically", "create", "the", "provider", "info", "response" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/endpoint_context.py#L324-L390
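A minimal standalone sketch of the normalization step in the record above for response_types_supported: each requested response type is split on spaces and sorted so that 'code id_token' and 'id_token code' compare as the same value before the intersection with the allowed set. The capability values used here are illustrative, not taken from the library's defaults.

def normalize_response_types(values):
    # sort the space-separated components so ordering does not matter
    return {' '.join(sorted(v.split())) for v in values}

allowed = normalize_response_types(['code', 'code id_token', 'id_token token'])
requested = normalize_response_types(['id_token code', 'code'])

print(sorted(requested & allowed))   # ['code', 'code id_token'] -> supported
print(sorted(requested - allowed))   # []                        -> nothing rejected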
TkTech/PyNBT
pynbt/nbt.py
BaseTag._write_utf8
def _write_utf8(write, value): """Writes a length-prefixed UTF-8 string.""" write('h', len(value)) write.io.write(value.encode('utf-8'))
python
def _write_utf8(write, value): """Writes a length-prefixed UTF-8 string.""" write('h', len(value)) write.io.write(value.encode('utf-8'))
[ "def", "_write_utf8", "(", "write", ",", "value", ")", ":", "write", "(", "'h'", ",", "len", "(", "value", ")", ")", "write", ".", "io", ".", "write", "(", "value", ".", "encode", "(", "'utf-8'", ")", ")" ]
Writes a length-prefixed UTF-8 string.
[ "Writes", "a", "length", "-", "prefixed", "UTF", "-", "8", "string", "." ]
train
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L29-L32
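A self-contained sketch of the writer convention used by the record above: write is a callable taking a struct format plus values, with the underlying stream exposed as write.io for raw payloads. In this sketch the length prefix is taken after encoding, i.e. it is a byte length; the helper names are illustrative.

from io import BytesIO
from struct import pack

def make_writer(io, little_endian=False):
    prefix = '<' if little_endian else '>'
    def write(fmt, *args):
        io.write(pack(prefix + fmt, *args))
    write.io = io  # expose the raw stream for pre-encoded data
    return write

def write_utf8(write, value):
    data = value.encode('utf-8')
    write('h', len(data))      # signed short length prefix
    write.io.write(data)       # then the UTF-8 bytes

buf = BytesIO()
write_utf8(make_writer(buf), 'hello')
print(buf.getvalue())  # b'\x00\x05hello'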
TkTech/PyNBT
pynbt/nbt.py
BaseTag.read
def read(cls, read, has_name=True): """ Read the tag in using the reader `rd`. If `has_name` is `False`, skip reading the tag name. """ name = cls._read_utf8(read) if has_name else None if cls is TAG_Compound: # A TAG_Compound is almost identical to Python's native dict() # object, or a Java HashMap. final = {} while True: # Find the type of each tag in a compound in turn. tag = read('b', 1)[0] if tag == 0: # A tag of 0 means we've reached TAG_End, used to terminate # a TAG_Compound. break # We read in each tag in turn, using its name as the key in # the dict (Since a compound cannot have repeating names, # this works fine). tmp = _tags[tag].read(read) final[tmp.name] = tmp return cls(final, name=name) elif cls is TAG_List: # A TAG_List is a very simple homogeneous array, similar to # Python's native list() object, but restricted to a single type. tag_type, length = read('bi', 5) tag_read = _tags[tag_type].read return cls( _tags[tag_type], [tag_read(read, has_name=False) for x in range(0, length)], name=name ) elif cls is TAG_String: # A simple length-prefixed UTF-8 string. value = cls._read_utf8(read) return cls(value, name=name) elif cls is TAG_Byte_Array: # A simple array of (signed) bytes. length = read('i', 4)[0] return cls(read('{0}b'.format(length), length), name=name) elif cls is TAG_Int_Array: # A simple array of (signed) 4-byte integers. length = read('i', 4)[0] return cls(read('{0}i'.format(length), length * 4), name=name) elif cls is TAG_Long_Array: # A simple array of (signed) 8-byte longs. length = read('i', 4)[0] return cls(read('{0}q'.format(length), length * 8), name=name) elif cls is TAG_Byte: # A single (signed) byte. return cls(read('b', 1)[0], name=name) elif cls is TAG_Short: # A single (signed) short. return cls(read('h', 2)[0], name=name) elif cls is TAG_Int: # A signed (signed) 4-byte int. return cls(read('i', 4)[0], name=name) elif cls is TAG_Long: # A single (signed) 8-byte long. return cls(read('q', 8)[0], name=name) elif cls is TAG_Float: # A single single-precision floating point value. return cls(read('f', 4)[0], name=name) elif cls is TAG_Double: # A single double-precision floating point value. return cls(read('d', 8)[0], name=name) elif cls is TAG_End: # A End of Compound Tag return cls(read('2b', 2)[0], name=name)
python
def read(cls, read, has_name=True): """ Read the tag in using the reader `rd`. If `has_name` is `False`, skip reading the tag name. """ name = cls._read_utf8(read) if has_name else None if cls is TAG_Compound: # A TAG_Compound is almost identical to Python's native dict() # object, or a Java HashMap. final = {} while True: # Find the type of each tag in a compound in turn. tag = read('b', 1)[0] if tag == 0: # A tag of 0 means we've reached TAG_End, used to terminate # a TAG_Compound. break # We read in each tag in turn, using its name as the key in # the dict (Since a compound cannot have repeating names, # this works fine). tmp = _tags[tag].read(read) final[tmp.name] = tmp return cls(final, name=name) elif cls is TAG_List: # A TAG_List is a very simple homogeneous array, similar to # Python's native list() object, but restricted to a single type. tag_type, length = read('bi', 5) tag_read = _tags[tag_type].read return cls( _tags[tag_type], [tag_read(read, has_name=False) for x in range(0, length)], name=name ) elif cls is TAG_String: # A simple length-prefixed UTF-8 string. value = cls._read_utf8(read) return cls(value, name=name) elif cls is TAG_Byte_Array: # A simple array of (signed) bytes. length = read('i', 4)[0] return cls(read('{0}b'.format(length), length), name=name) elif cls is TAG_Int_Array: # A simple array of (signed) 4-byte integers. length = read('i', 4)[0] return cls(read('{0}i'.format(length), length * 4), name=name) elif cls is TAG_Long_Array: # A simple array of (signed) 8-byte longs. length = read('i', 4)[0] return cls(read('{0}q'.format(length), length * 8), name=name) elif cls is TAG_Byte: # A single (signed) byte. return cls(read('b', 1)[0], name=name) elif cls is TAG_Short: # A single (signed) short. return cls(read('h', 2)[0], name=name) elif cls is TAG_Int: # A signed (signed) 4-byte int. return cls(read('i', 4)[0], name=name) elif cls is TAG_Long: # A single (signed) 8-byte long. return cls(read('q', 8)[0], name=name) elif cls is TAG_Float: # A single single-precision floating point value. return cls(read('f', 4)[0], name=name) elif cls is TAG_Double: # A single double-precision floating point value. return cls(read('d', 8)[0], name=name) elif cls is TAG_End: # A End of Compound Tag return cls(read('2b', 2)[0], name=name)
[ "def", "read", "(", "cls", ",", "read", ",", "has_name", "=", "True", ")", ":", "name", "=", "cls", ".", "_read_utf8", "(", "read", ")", "if", "has_name", "else", "None", "if", "cls", "is", "TAG_Compound", ":", "# A TAG_Compound is almost identical to Python's native dict()", "# object, or a Java HashMap.", "final", "=", "{", "}", "while", "True", ":", "# Find the type of each tag in a compound in turn.", "tag", "=", "read", "(", "'b'", ",", "1", ")", "[", "0", "]", "if", "tag", "==", "0", ":", "# A tag of 0 means we've reached TAG_End, used to terminate", "# a TAG_Compound.", "break", "# We read in each tag in turn, using its name as the key in", "# the dict (Since a compound cannot have repeating names,", "# this works fine).", "tmp", "=", "_tags", "[", "tag", "]", ".", "read", "(", "read", ")", "final", "[", "tmp", ".", "name", "]", "=", "tmp", "return", "cls", "(", "final", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_List", ":", "# A TAG_List is a very simple homogeneous array, similar to", "# Python's native list() object, but restricted to a single type.", "tag_type", ",", "length", "=", "read", "(", "'bi'", ",", "5", ")", "tag_read", "=", "_tags", "[", "tag_type", "]", ".", "read", "return", "cls", "(", "_tags", "[", "tag_type", "]", ",", "[", "tag_read", "(", "read", ",", "has_name", "=", "False", ")", "for", "x", "in", "range", "(", "0", ",", "length", ")", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_String", ":", "# A simple length-prefixed UTF-8 string.", "value", "=", "cls", ".", "_read_utf8", "(", "read", ")", "return", "cls", "(", "value", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Byte_Array", ":", "# A simple array of (signed) bytes.", "length", "=", "read", "(", "'i'", ",", "4", ")", "[", "0", "]", "return", "cls", "(", "read", "(", "'{0}b'", ".", "format", "(", "length", ")", ",", "length", ")", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Int_Array", ":", "# A simple array of (signed) 4-byte integers.", "length", "=", "read", "(", "'i'", ",", "4", ")", "[", "0", "]", "return", "cls", "(", "read", "(", "'{0}i'", ".", "format", "(", "length", ")", ",", "length", "*", "4", ")", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Long_Array", ":", "# A simple array of (signed) 8-byte longs.", "length", "=", "read", "(", "'i'", ",", "4", ")", "[", "0", "]", "return", "cls", "(", "read", "(", "'{0}q'", ".", "format", "(", "length", ")", ",", "length", "*", "8", ")", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Byte", ":", "# A single (signed) byte.", "return", "cls", "(", "read", "(", "'b'", ",", "1", ")", "[", "0", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Short", ":", "# A single (signed) short.", "return", "cls", "(", "read", "(", "'h'", ",", "2", ")", "[", "0", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Int", ":", "# A signed (signed) 4-byte int.", "return", "cls", "(", "read", "(", "'i'", ",", "4", ")", "[", "0", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Long", ":", "# A single (signed) 8-byte long.", "return", "cls", "(", "read", "(", "'q'", ",", "8", ")", "[", "0", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Float", ":", "# A single single-precision floating point value.", "return", "cls", "(", "read", "(", "'f'", ",", "4", ")", "[", "0", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_Double", ":", "# A single double-precision floating point value.", "return", "cls", "(", "read", "(", "'d'", ",", "8", 
")", "[", "0", "]", ",", "name", "=", "name", ")", "elif", "cls", "is", "TAG_End", ":", "# A End of Compound Tag", "return", "cls", "(", "read", "(", "'2b'", ",", "2", ")", "[", "0", "]", ",", "name", "=", "name", ")" ]
Read the tag in using the reader `read`. If `has_name` is `False`, skip reading the tag name.
[ "Read", "the", "tag", "in", "using", "the", "reader", "rd", ".", "If", "has_name", "is", "False", "skip", "reading", "the", "tag", "name", "." ]
train
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L35-L105
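The read callable that BaseTag.read receives in the record above is expected to take a struct format and a byte count and return the unpacked tuple; a minimal big-endian version could be built like this (the factory below is an assumption for illustration, not PyNBT's actual loader).

from io import BytesIO
from struct import unpack

def make_reader(io, little_endian=False):
    prefix = '<' if little_endian else '>'
    def read(fmt, size):
        return unpack(prefix + fmt, io.read(size))
    read.io = io
    return read

read = make_reader(BytesIO(b'\x00\x03\x00\x00\x00\x07'))
print(read('h', 2))  # (3,)  e.g. a TAG_Short payload
print(read('i', 4))  # (7,)  e.g. a TAG_Int payload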
TkTech/PyNBT
pynbt/nbt.py
BaseTag.pretty
def pretty(self, indent=0, indent_str=' '): """ Pretty-print a tag in the same general style as Markus's example output. """ return '{0}{1}({2!r}): {3!r}'.format( indent_str * indent, self.__class__.__name__, self.name, self.value )
python
def pretty(self, indent=0, indent_str=' '): """ Pretty-print a tag in the same general style as Markus's example output. """ return '{0}{1}({2!r}): {3!r}'.format( indent_str * indent, self.__class__.__name__, self.name, self.value )
[ "def", "pretty", "(", "self", ",", "indent", "=", "0", ",", "indent_str", "=", "' '", ")", ":", "return", "'{0}{1}({2!r}): {3!r}'", ".", "format", "(", "indent_str", "*", "indent", ",", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "name", ",", "self", ".", "value", ")" ]
Pretty-print a tag in the same general style as Markus's example output.
[ "Pretty", "-", "print", "a", "tag", "in", "the", "same", "general", "style", "as", "Markus", "s", "example", "output", "." ]
train
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L153-L163
TkTech/PyNBT
pynbt/nbt.py
TAG_Compound.update
def update(self, *args, **kwargs): """See `__setitem__`.""" super(TAG_Compound, self).update(*args, **kwargs) for key, item in self.items(): if item.name is None: item.name = key
python
def update(self, *args, **kwargs): """See `__setitem__`.""" super(TAG_Compound, self).update(*args, **kwargs) for key, item in self.items(): if item.name is None: item.name = key
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "TAG_Compound", ",", "self", ")", ".", "update", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "key", ",", "item", "in", "self", ".", "items", "(", ")", ":", "if", "item", ".", "name", "is", "None", ":", "item", ".", "name", "=", "key" ]
See `__setitem__`.
[ "See", "__setitem__", "." ]
train
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L272-L277
TkTech/PyNBT
pynbt/nbt.py
NBTFile.save
def save(self, io, little_endian=False): """ Saves the `NBTFile()` to `io`, which can be any file-like object providing `write()`. """ if little_endian: write = lambda fmt, *args: io.write(pack('<' + fmt, *args)) else: write = lambda fmt, *args: io.write(pack('>' + fmt, *args)) write.io = io self.write(write)
python
def save(self, io, little_endian=False): """ Saves the `NBTFile()` to `io`, which can be any file-like object providing `write()`. """ if little_endian: write = lambda fmt, *args: io.write(pack('<' + fmt, *args)) else: write = lambda fmt, *args: io.write(pack('>' + fmt, *args)) write.io = io self.write(write)
[ "def", "save", "(", "self", ",", "io", ",", "little_endian", "=", "False", ")", ":", "if", "little_endian", ":", "write", "=", "lambda", "fmt", ",", "*", "args", ":", "io", ".", "write", "(", "pack", "(", "'<'", "+", "fmt", ",", "*", "args", ")", ")", "else", ":", "write", "=", "lambda", "fmt", ",", "*", "args", ":", "io", ".", "write", "(", "pack", "(", "'>'", "+", "fmt", ",", "*", "args", ")", ")", "write", ".", "io", "=", "io", "self", ".", "write", "(", "write", ")" ]
Saves the `NBTFile()` to `io`, which can be any file-like object providing `write()`.
[ "Saves", "the", "NBTFile", "()", "to", "io", "which", "can", "be", "any", "file", "-", "like", "object", "providing", "write", "()", "." ]
train
https://github.com/TkTech/PyNBT/blob/060fb0a2b58fc464a637fc108f05bd7274da8e5f/pynbt/nbt.py#L345-L356
IdentityPython/oidcendpoint
src/oidcendpoint/token_handler.py
factory
def factory(ec, code=None, token=None, refresh=None, **kwargs): """ Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance """ TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'} args = {} if code: args['code_handler'] = init_token_handler(ec, code, TTYPE['code']) if token: args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token']) if refresh: args['refresh_token_handler'] = init_token_handler(ec, token, TTYPE['refresh']) return TokenHandler(**args)
python
def factory(ec, code=None, token=None, refresh=None, **kwargs): """ Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance """ TTYPE = {'code': 'A', 'token': 'T', 'refresh': 'R'} args = {} if code: args['code_handler'] = init_token_handler(ec, code, TTYPE['code']) if token: args['access_token_handler'] = init_token_handler(ec, token, TTYPE['token']) if refresh: args['refresh_token_handler'] = init_token_handler(ec, token, TTYPE['refresh']) return TokenHandler(**args)
[ "def", "factory", "(", "ec", ",", "code", "=", "None", ",", "token", "=", "None", ",", "refresh", "=", "None", ",", "*", "*", "kwargs", ")", ":", "TTYPE", "=", "{", "'code'", ":", "'A'", ",", "'token'", ":", "'T'", ",", "'refresh'", ":", "'R'", "}", "args", "=", "{", "}", "if", "code", ":", "args", "[", "'code_handler'", "]", "=", "init_token_handler", "(", "ec", ",", "code", ",", "TTYPE", "[", "'code'", "]", ")", "if", "token", ":", "args", "[", "'access_token_handler'", "]", "=", "init_token_handler", "(", "ec", ",", "token", ",", "TTYPE", "[", "'token'", "]", ")", "if", "refresh", ":", "args", "[", "'refresh_token_handler'", "]", "=", "init_token_handler", "(", "ec", ",", "token", ",", "TTYPE", "[", "'refresh'", "]", ")", "return", "TokenHandler", "(", "*", "*", "args", ")" ]
Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance
[ "Create", "a", "token", "handler" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L279-L300
IdentityPython/oidcendpoint
src/oidcendpoint/token_handler.py
DefaultToken.key
def key(self, user="", areq=None): """ Return a key (the session id) :param user: User id :param areq: The authorization request :return: An ID """ csum = hashlib.new('sha224') csum.update(rndstr(32).encode('utf-8')) return csum.hexdigest()
python
def key(self, user="", areq=None): """ Return a key (the session id) :param user: User id :param areq: The authorization request :return: An ID """ csum = hashlib.new('sha224') csum.update(rndstr(32).encode('utf-8')) return csum.hexdigest()
[ "def", "key", "(", "self", ",", "user", "=", "\"\"", ",", "areq", "=", "None", ")", ":", "csum", "=", "hashlib", ".", "new", "(", "'sha224'", ")", "csum", ".", "update", "(", "rndstr", "(", "32", ")", ".", "encode", "(", "'utf-8'", ")", ")", "return", "csum", ".", "hexdigest", "(", ")" ]
Return a key (the session id) :param user: User id :param areq: The authorization request :return: An ID
[ "Return", "a", "key", "(", "the", "session", "id", ")" ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L150-L160
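The key in the record above is effectively an opaque random identifier: the user and request arguments are accepted but not mixed in. A minimal equivalent, assuming rndstr is a random-string helper along these lines, is:

import hashlib
import random
import string

def rndstr(size=16):
    # assumption: rndstr returns `size` random ASCII letters and digits
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.SystemRandom().choice(alphabet) for _ in range(size))

def make_session_id():
    csum = hashlib.new('sha224')
    csum.update(rndstr(32).encode('utf-8'))
    return csum.hexdigest()

print(make_session_id())  # 56 hex characters, independent of user/areq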
IdentityPython/oidcendpoint
src/oidcendpoint/token_handler.py
DefaultToken.info
def info(self, token): """ Return token information. :param token: A token :return: dictionary with info about the token """ _res = dict(zip(['_id', 'type', 'sid', 'exp'], self.split_token(token))) if _res['type'] != self.type: raise WrongTokenType(_res['type']) else: _res['handler'] = self _res['black_listed'] = self.is_black_listed(token) return _res
python
def info(self, token): """ Return token information. :param token: A token :return: dictionary with info about the token """ _res = dict(zip(['_id', 'type', 'sid', 'exp'], self.split_token(token))) if _res['type'] != self.type: raise WrongTokenType(_res['type']) else: _res['handler'] = self _res['black_listed'] = self.is_black_listed(token) return _res
[ "def", "info", "(", "self", ",", "token", ")", ":", "_res", "=", "dict", "(", "zip", "(", "[", "'_id'", ",", "'type'", ",", "'sid'", ",", "'exp'", "]", ",", "self", ".", "split_token", "(", "token", ")", ")", ")", "if", "_res", "[", "'type'", "]", "!=", "self", ".", "type", ":", "raise", "WrongTokenType", "(", "_res", "[", "'type'", "]", ")", "else", ":", "_res", "[", "'handler'", "]", "=", "self", "_res", "[", "'black_listed'", "]", "=", "self", ".", "is_black_listed", "(", "token", ")", "return", "_res" ]
Return token information. :param token: A token :return: dictionary with info about the token
[ "Return", "token", "information", "." ]
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/token_handler.py#L170-L184
openmicroanalysis/pyxray
pyxray/composition.py
process_wildcard
def process_wildcard(fractions): """ Processes element with a wildcard ``?`` weight fraction and returns composition balanced to 1.0. """ wildcard_zs = set() total_fraction = 0.0 for z, fraction in fractions.items(): if fraction == '?': wildcard_zs.add(z) else: total_fraction += fraction if not wildcard_zs: return fractions balance_fraction = (1.0 - total_fraction) / len(wildcard_zs) for z in wildcard_zs: fractions[z] = balance_fraction return fractions
python
def process_wildcard(fractions): """ Processes element with a wildcard ``?`` weight fraction and returns composition balanced to 1.0. """ wildcard_zs = set() total_fraction = 0.0 for z, fraction in fractions.items(): if fraction == '?': wildcard_zs.add(z) else: total_fraction += fraction if not wildcard_zs: return fractions balance_fraction = (1.0 - total_fraction) / len(wildcard_zs) for z in wildcard_zs: fractions[z] = balance_fraction return fractions
[ "def", "process_wildcard", "(", "fractions", ")", ":", "wildcard_zs", "=", "set", "(", ")", "total_fraction", "=", "0.0", "for", "z", ",", "fraction", "in", "fractions", ".", "items", "(", ")", ":", "if", "fraction", "==", "'?'", ":", "wildcard_zs", ".", "add", "(", "z", ")", "else", ":", "total_fraction", "+=", "fraction", "if", "not", "wildcard_zs", ":", "return", "fractions", "balance_fraction", "=", "(", "1.0", "-", "total_fraction", ")", "/", "len", "(", "wildcard_zs", ")", "for", "z", "in", "wildcard_zs", ":", "fractions", "[", "z", "]", "=", "balance_fraction", "return", "fractions" ]
Processes elements with a wildcard ``?`` weight fraction and returns the composition balanced to 1.0.
[ "Processes", "element", "with", "a", "wildcard", "?", "weight", "fraction", "and", "returns", "composition", "balanced", "to", "1", ".", "0", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L20-L40
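A standalone restatement of the balancing rule above, useful as a worked example: with {5: '?', 25: 0.4}, boron (Z=5) receives the remaining 0.6. This is a sketch of the same logic, not an import from pyxray.

def balance_wildcards(fractions):
    # '?' entries split the remaining fraction equally, as in process_wildcard
    wildcard = [z for z, f in fractions.items() if f == '?']
    total = sum(f for f in fractions.values() if f != '?')
    if wildcard:
        share = (1.0 - total) / len(wildcard)
        for z in wildcard:
            fractions[z] = share
    return fractions

print(balance_wildcards({5: '?', 25: 0.4}))  # {5: 0.6, 25: 0.4} (up to float rounding)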
openmicroanalysis/pyxray
pyxray/composition.py
convert_mass_to_atomic_fractions
def convert_mass_to_atomic_fractions(mass_fractions): """ Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values weight fractions. No wildcard are accepted. """ atomic_fractions = {} for z, mass_fraction in mass_fractions.items(): atomic_fractions[z] = mass_fraction / pyxray.element_atomic_weight(z) total_fraction = sum(atomic_fractions.values()) for z, fraction in atomic_fractions.items(): try: atomic_fractions[z] = fraction / total_fraction except ZeroDivisionError: atomic_fractions[z] = 0.0 return atomic_fractions
python
def convert_mass_to_atomic_fractions(mass_fractions): """ Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values weight fractions. No wildcard are accepted. """ atomic_fractions = {} for z, mass_fraction in mass_fractions.items(): atomic_fractions[z] = mass_fraction / pyxray.element_atomic_weight(z) total_fraction = sum(atomic_fractions.values()) for z, fraction in atomic_fractions.items(): try: atomic_fractions[z] = fraction / total_fraction except ZeroDivisionError: atomic_fractions[z] = 0.0 return atomic_fractions
[ "def", "convert_mass_to_atomic_fractions", "(", "mass_fractions", ")", ":", "atomic_fractions", "=", "{", "}", "for", "z", ",", "mass_fraction", "in", "mass_fractions", ".", "items", "(", ")", ":", "atomic_fractions", "[", "z", "]", "=", "mass_fraction", "/", "pyxray", ".", "element_atomic_weight", "(", "z", ")", "total_fraction", "=", "sum", "(", "atomic_fractions", ".", "values", "(", ")", ")", "for", "z", ",", "fraction", "in", "atomic_fractions", ".", "items", "(", ")", ":", "try", ":", "atomic_fractions", "[", "z", "]", "=", "fraction", "/", "total_fraction", "except", "ZeroDivisionError", ":", "atomic_fractions", "[", "z", "]", "=", "0.0", "return", "atomic_fractions" ]
Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values are weight fractions. No wildcards are accepted.
[ "Converts", "a", "mass", "fraction", ":", "class", ":", "dict", "to", "an", "atomic", "fraction", ":", "class", ":", "dict", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L42-L65
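The conversion above is w_i / A_i followed by normalisation. A standalone sketch with hard-coded approximate atomic weights in place of pyxray.element_atomic_weight:

# approximate atomic weights (g/mol), for illustration only
ATOMIC_WEIGHT = {8: 15.999, 13: 26.982}

def mass_to_atomic(mass_fractions):
    moles = {z: w / ATOMIC_WEIGHT[z] for z, w in mass_fractions.items()}
    total = sum(moles.values())
    return {z: n / total if total else 0.0 for z, n in moles.items()}

# Al2O3 is roughly 52.9% Al / 47.1% O by mass -> about 0.4 / 0.6 atomic
print(mass_to_atomic({13: 0.529, 8: 0.471}))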
openmicroanalysis/pyxray
pyxray/composition.py
convert_atomic_to_mass_fractions
def convert_atomic_to_mass_fractions(atomic_fractions): """ Converts an atomic fraction :class:`dict` to a mass fraction :class:`dict`. Args: atomic_fractions (dict): atomic fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values atomic fractions. No wildcard are accepted. """ # Calculate total atomic mass atomic_masses = {} total_atomic_mass = 0.0 for z, atomic_fraction in atomic_fractions.items(): atomic_mass = pyxray.element_atomic_weight(z) atomic_masses[z] = atomic_mass total_atomic_mass += atomic_fraction * atomic_mass # Create mass fraction mass_fractions = {} for z, atomic_fraction in atomic_fractions.items(): mass_fractions[z] = atomic_fraction * atomic_masses[z] / total_atomic_mass return mass_fractions
python
def convert_atomic_to_mass_fractions(atomic_fractions): """ Converts an atomic fraction :class:`dict` to a mass fraction :class:`dict`. Args: atomic_fractions (dict): atomic fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values atomic fractions. No wildcard are accepted. """ # Calculate total atomic mass atomic_masses = {} total_atomic_mass = 0.0 for z, atomic_fraction in atomic_fractions.items(): atomic_mass = pyxray.element_atomic_weight(z) atomic_masses[z] = atomic_mass total_atomic_mass += atomic_fraction * atomic_mass # Create mass fraction mass_fractions = {} for z, atomic_fraction in atomic_fractions.items(): mass_fractions[z] = atomic_fraction * atomic_masses[z] / total_atomic_mass return mass_fractions
[ "def", "convert_atomic_to_mass_fractions", "(", "atomic_fractions", ")", ":", "# Calculate total atomic mass", "atomic_masses", "=", "{", "}", "total_atomic_mass", "=", "0.0", "for", "z", ",", "atomic_fraction", "in", "atomic_fractions", ".", "items", "(", ")", ":", "atomic_mass", "=", "pyxray", ".", "element_atomic_weight", "(", "z", ")", "atomic_masses", "[", "z", "]", "=", "atomic_mass", "total_atomic_mass", "+=", "atomic_fraction", "*", "atomic_mass", "# Create mass fraction", "mass_fractions", "=", "{", "}", "for", "z", ",", "atomic_fraction", "in", "atomic_fractions", ".", "items", "(", ")", ":", "mass_fractions", "[", "z", "]", "=", "atomic_fraction", "*", "atomic_masses", "[", "z", "]", "/", "total_atomic_mass", "return", "mass_fractions" ]
Converts an atomic fraction :class:`dict` to a mass fraction :class:`dict`. Args: atomic_fractions (dict): atomic fraction :class:`dict`. The composition is specified by a dictionary. The keys are atomic numbers and the values are atomic fractions. No wildcards are accepted.
[ "Converts", "an", "atomic", "fraction", ":", "class", ":", "dict", "to", "a", "mass", "fraction", ":", "class", ":", "dict", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L67-L90
openmicroanalysis/pyxray
pyxray/composition.py
convert_formula_to_atomic_fractions
def convert_formula_to_atomic_fractions(formula): """ Converts a chemical formula to an atomic fraction :class:`dict`. Args: formula (str): chemical formula, like Al2O3. No wildcard are accepted. """ mole_fractions = {} total_mole_fraction = 0.0 for match in CHEMICAL_FORMULA_PATTERN.finditer(formula): symbol, mole_fraction = match.groups() z = pyxray.element_atomic_number(symbol.strip()) if mole_fraction == '': mole_fraction = 1.0 mole_fraction = float(mole_fraction) mole_fraction = float(mole_fraction) mole_fractions[z] = mole_fraction total_mole_fraction += mole_fraction # Calculate atomic fractions atomic_fractions = {} for z, mole_fraction in mole_fractions.items(): atomic_fractions[z] = mole_fraction / total_mole_fraction return atomic_fractions
python
def convert_formula_to_atomic_fractions(formula): """ Converts a chemical formula to an atomic fraction :class:`dict`. Args: formula (str): chemical formula, like Al2O3. No wildcard are accepted. """ mole_fractions = {} total_mole_fraction = 0.0 for match in CHEMICAL_FORMULA_PATTERN.finditer(formula): symbol, mole_fraction = match.groups() z = pyxray.element_atomic_number(symbol.strip()) if mole_fraction == '': mole_fraction = 1.0 mole_fraction = float(mole_fraction) mole_fraction = float(mole_fraction) mole_fractions[z] = mole_fraction total_mole_fraction += mole_fraction # Calculate atomic fractions atomic_fractions = {} for z, mole_fraction in mole_fractions.items(): atomic_fractions[z] = mole_fraction / total_mole_fraction return atomic_fractions
[ "def", "convert_formula_to_atomic_fractions", "(", "formula", ")", ":", "mole_fractions", "=", "{", "}", "total_mole_fraction", "=", "0.0", "for", "match", "in", "CHEMICAL_FORMULA_PATTERN", ".", "finditer", "(", "formula", ")", ":", "symbol", ",", "mole_fraction", "=", "match", ".", "groups", "(", ")", "z", "=", "pyxray", ".", "element_atomic_number", "(", "symbol", ".", "strip", "(", ")", ")", "if", "mole_fraction", "==", "''", ":", "mole_fraction", "=", "1.0", "mole_fraction", "=", "float", "(", "mole_fraction", ")", "mole_fraction", "=", "float", "(", "mole_fraction", ")", "mole_fractions", "[", "z", "]", "=", "mole_fraction", "total_mole_fraction", "+=", "mole_fraction", "# Calculate atomic fractions", "atomic_fractions", "=", "{", "}", "for", "z", ",", "mole_fraction", "in", "mole_fractions", ".", "items", "(", ")", ":", "atomic_fractions", "[", "z", "]", "=", "mole_fraction", "/", "total_mole_fraction", "return", "atomic_fractions" ]
Converts a chemical formula to an atomic fraction :class:`dict`. Args: formula (str): chemical formula, like Al2O3. No wildcards are accepted.
[ "Converts", "a", "chemical", "formula", "to", "an", "atomic", "fraction", ":", "class", ":", "dict", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L92-L120
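CHEMICAL_FORMULA_PATTERN is referenced but not included in this record; a simplified stand-in that handles plain formulas such as Al2O3 (no parentheses, no wildcards) could look like this. The pattern below is an assumption, not the project's actual regex.

import re

SIMPLE_FORMULA = re.compile(r'([A-Z][a-z]?)(\d*\.?\d*)')

def formula_to_atomic_fractions(formula):
    counts = {}
    for symbol, amount in SIMPLE_FORMULA.findall(formula):
        counts[symbol] = counts.get(symbol, 0.0) + (float(amount) if amount else 1.0)
    total = sum(counts.values())
    return {symbol: n / total for symbol, n in counts.items()}

print(formula_to_atomic_fractions('Al2O3'))  # {'Al': 0.4, 'O': 0.6}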
openmicroanalysis/pyxray
pyxray/composition.py
generate_name
def generate_name(atomic_fractions): """ Generates a name from the composition. The name is generated on the basis of a classical chemical formula. """ if not atomic_fractions: return '' if len(atomic_fractions) == 1: z = list(atomic_fractions.keys())[0] return pyxray.element_symbol(z) symbols = [] fractions = [] for z in sorted(atomic_fractions.keys(), reverse=True): symbols.append(pyxray.element_symbol(z)) fractions.append(Fraction(atomic_fractions[z]).limit_denominator()) # Find gcd of the fractions gcds = [] for a, b in itertools.combinations(fractions, 2): gcds.append(math.gcd(a.denominator, b.denominator)) smallest_gcd = min(gcds) # Write formula name = '' for symbol, fraction in zip(symbols, fractions): mole_fraction = int(fraction * smallest_gcd) if mole_fraction == 0: continue elif mole_fraction == 1: name += "%s" % symbol else: name += '%s%i' % (symbol, mole_fraction) return name
python
def generate_name(atomic_fractions): """ Generates a name from the composition. The name is generated on the basis of a classical chemical formula. """ if not atomic_fractions: return '' if len(atomic_fractions) == 1: z = list(atomic_fractions.keys())[0] return pyxray.element_symbol(z) symbols = [] fractions = [] for z in sorted(atomic_fractions.keys(), reverse=True): symbols.append(pyxray.element_symbol(z)) fractions.append(Fraction(atomic_fractions[z]).limit_denominator()) # Find gcd of the fractions gcds = [] for a, b in itertools.combinations(fractions, 2): gcds.append(math.gcd(a.denominator, b.denominator)) smallest_gcd = min(gcds) # Write formula name = '' for symbol, fraction in zip(symbols, fractions): mole_fraction = int(fraction * smallest_gcd) if mole_fraction == 0: continue elif mole_fraction == 1: name += "%s" % symbol else: name += '%s%i' % (symbol, mole_fraction) return name
[ "def", "generate_name", "(", "atomic_fractions", ")", ":", "if", "not", "atomic_fractions", ":", "return", "''", "if", "len", "(", "atomic_fractions", ")", "==", "1", ":", "z", "=", "list", "(", "atomic_fractions", ".", "keys", "(", ")", ")", "[", "0", "]", "return", "pyxray", ".", "element_symbol", "(", "z", ")", "symbols", "=", "[", "]", "fractions", "=", "[", "]", "for", "z", "in", "sorted", "(", "atomic_fractions", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", ":", "symbols", ".", "append", "(", "pyxray", ".", "element_symbol", "(", "z", ")", ")", "fractions", ".", "append", "(", "Fraction", "(", "atomic_fractions", "[", "z", "]", ")", ".", "limit_denominator", "(", ")", ")", "# Find gcd of the fractions", "gcds", "=", "[", "]", "for", "a", ",", "b", "in", "itertools", ".", "combinations", "(", "fractions", ",", "2", ")", ":", "gcds", ".", "append", "(", "math", ".", "gcd", "(", "a", ".", "denominator", ",", "b", ".", "denominator", ")", ")", "smallest_gcd", "=", "min", "(", "gcds", ")", "# Write formula", "name", "=", "''", "for", "symbol", ",", "fraction", "in", "zip", "(", "symbols", ",", "fractions", ")", ":", "mole_fraction", "=", "int", "(", "fraction", "*", "smallest_gcd", ")", "if", "mole_fraction", "==", "0", ":", "continue", "elif", "mole_fraction", "==", "1", ":", "name", "+=", "\"%s\"", "%", "symbol", "else", ":", "name", "+=", "'%s%i'", "%", "(", "symbol", ",", "mole_fraction", ")", "return", "name" ]
Generates a name from the composition. The name is generated on the basis of a classical chemical formula.
[ "Generates", "a", "name", "from", "the", "composition", ".", "The", "name", "is", "generated", "on", "the", "basis", "of", "a", "classical", "chemical", "formula", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L122-L157
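The name generation above recovers small integer subscripts from atomic fractions via Fraction.limit_denominator and a gcd over denominators (the record combines pairwise gcds; this sketch takes one gcd over all denominators, which is the essential step).

from fractions import Fraction
import math

atomic_fractions = {8: 0.6, 13: 0.4}  # O and Al fractions of Al2O3
ratios = {z: Fraction(f).limit_denominator() for z, f in atomic_fractions.items()}
common = math.gcd(*(r.denominator for r in ratios.values()))
subscripts = {z: int(r * common) for z, r in ratios.items()}
print(subscripts)  # {8: 3, 13: 2}  ->  'Al2O3'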
openmicroanalysis/pyxray
pyxray/composition.py
Composition.from_pure
def from_pure(cls, z): """ Creates a pure composition. Args: z (int): atomic number """ return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
python
def from_pure(cls, z): """ Creates a pure composition. Args: z (int): atomic number """ return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
[ "def", "from_pure", "(", "cls", ",", "z", ")", ":", "return", "cls", "(", "cls", ".", "_key", ",", "{", "z", ":", "1.0", "}", ",", "{", "z", ":", "1.0", "}", ",", "pyxray", ".", "element_symbol", "(", "z", ")", ")" ]
Creates a pure composition. Args: z (int): atomic number
[ "Creates", "a", "pure", "composition", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L199-L206
openmicroanalysis/pyxray
pyxray/composition.py
Composition.from_mass_fractions
def from_mass_fractions(cls, mass_fractions, formula=None): """ Creates a composition from a mass fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The keys are atomic numbers and the values weight fractions. Wildcard are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron will get a mass fraction of 0.6. formula (str): optional chemical formula for the composition. If ``None``, a formula will be generated for the composition. """ mass_fractions = process_wildcard(mass_fractions) atomic_fractions = convert_mass_to_atomic_fractions(mass_fractions) if not formula: formula = generate_name(atomic_fractions) return cls(cls._key, mass_fractions, atomic_fractions, formula)
python
def from_mass_fractions(cls, mass_fractions, formula=None): """ Creates a composition from a mass fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The keys are atomic numbers and the values weight fractions. Wildcard are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron will get a mass fraction of 0.6. formula (str): optional chemical formula for the composition. If ``None``, a formula will be generated for the composition. """ mass_fractions = process_wildcard(mass_fractions) atomic_fractions = convert_mass_to_atomic_fractions(mass_fractions) if not formula: formula = generate_name(atomic_fractions) return cls(cls._key, mass_fractions, atomic_fractions, formula)
[ "def", "from_mass_fractions", "(", "cls", ",", "mass_fractions", ",", "formula", "=", "None", ")", ":", "mass_fractions", "=", "process_wildcard", "(", "mass_fractions", ")", "atomic_fractions", "=", "convert_mass_to_atomic_fractions", "(", "mass_fractions", ")", "if", "not", "formula", ":", "formula", "=", "generate_name", "(", "atomic_fractions", ")", "return", "cls", "(", "cls", ".", "_key", ",", "mass_fractions", ",", "atomic_fractions", ",", "formula", ")" ]
Creates a composition from a mass fraction :class:`dict`. Args: mass_fractions (dict): mass fraction :class:`dict`. The keys are atomic numbers and the values are weight fractions. Wildcards are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron will get a mass fraction of 0.6. formula (str): optional chemical formula for the composition. If ``None``, a formula will be generated for the composition.
[ "Creates", "a", "composition", "from", "a", "mass", "fraction", ":", "class", ":", "dict", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L220-L236
openmicroanalysis/pyxray
pyxray/composition.py
Composition.from_atomic_fractions
def from_atomic_fractions(cls, atomic_fractions, formula=None): """ Creates a composition from an atomic fraction :class:`dict`. Args: atomic_fractions (dict): atomic fraction :class:`dict`. The keys are atomic numbers and the values atomic fractions. Wildcard are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron will get a atomic fraction of 0.6. formula (str): optional chemical formula for the composition. If ``None``, a formula will be generated for the composition. """ atomic_fractions = process_wildcard(atomic_fractions) mass_fractions = convert_atomic_to_mass_fractions(atomic_fractions) if not formula: formula = generate_name(atomic_fractions) return cls(cls._key, mass_fractions, atomic_fractions, formula)
python
def from_atomic_fractions(cls, atomic_fractions, formula=None): """ Creates a composition from an atomic fraction :class:`dict`. Args: atomic_fractions (dict): atomic fraction :class:`dict`. The keys are atomic numbers and the values atomic fractions. Wildcard are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron will get a atomic fraction of 0.6. formula (str): optional chemical formula for the composition. If ``None``, a formula will be generated for the composition. """ atomic_fractions = process_wildcard(atomic_fractions) mass_fractions = convert_atomic_to_mass_fractions(atomic_fractions) if not formula: formula = generate_name(atomic_fractions) return cls(cls._key, mass_fractions, atomic_fractions, formula)
[ "def", "from_atomic_fractions", "(", "cls", ",", "atomic_fractions", ",", "formula", "=", "None", ")", ":", "atomic_fractions", "=", "process_wildcard", "(", "atomic_fractions", ")", "mass_fractions", "=", "convert_atomic_to_mass_fractions", "(", "atomic_fractions", ")", "if", "not", "formula", ":", "formula", "=", "generate_name", "(", "atomic_fractions", ")", "return", "cls", "(", "cls", ".", "_key", ",", "mass_fractions", ",", "atomic_fractions", ",", "formula", ")" ]
Creates a composition from an atomic fraction :class:`dict`. Args: atomic_fractions (dict): atomic fraction :class:`dict`. The keys are atomic numbers and the values are atomic fractions. Wildcards are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron will get an atomic fraction of 0.6. formula (str): optional chemical formula for the composition. If ``None``, a formula will be generated for the composition.
[ "Creates", "a", "composition", "from", "an", "atomic", "fraction", ":", "class", ":", "dict", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/composition.py#L239-L255
delvelabs/hammertime
hammertime/rules/status.py
DetectSoft404._collect_sample
async def _collect_sample(self, url, url_pattern): """ Sample collection is meant to be very tolerant to generic failures as failing to obtain the sample has important consequences on the results. - Multiple retries with longer delays - Larger than usual timeout """ samples = [] urls = [self.path_generator.generate_url(url, url_pattern) for _ in range(self.confirmation_factor)] iterator = asyncio.as_completed([self._fetch_sample(url) for url in urls]) for promise in iterator: try: sig = await promise if sig: samples.append(sig) except RejectRequest as e: pass if not samples: raise StopRequest("Impossible to obtain sample") else: return samples
python
async def _collect_sample(self, url, url_pattern): """ Sample collection is meant to be very tolerant to generic failures as failing to obtain the sample has important consequences on the results. - Multiple retries with longer delays - Larger than usual timeout """ samples = [] urls = [self.path_generator.generate_url(url, url_pattern) for _ in range(self.confirmation_factor)] iterator = asyncio.as_completed([self._fetch_sample(url) for url in urls]) for promise in iterator: try: sig = await promise if sig: samples.append(sig) except RejectRequest as e: pass if not samples: raise StopRequest("Impossible to obtain sample") else: return samples
[ "async", "def", "_collect_sample", "(", "self", ",", "url", ",", "url_pattern", ")", ":", "samples", "=", "[", "]", "urls", "=", "[", "self", ".", "path_generator", ".", "generate_url", "(", "url", ",", "url_pattern", ")", "for", "_", "in", "range", "(", "self", ".", "confirmation_factor", ")", "]", "iterator", "=", "asyncio", ".", "as_completed", "(", "[", "self", ".", "_fetch_sample", "(", "url", ")", "for", "url", "in", "urls", "]", ")", "for", "promise", "in", "iterator", ":", "try", ":", "sig", "=", "await", "promise", "if", "sig", ":", "samples", ".", "append", "(", "sig", ")", "except", "RejectRequest", "as", "e", ":", "pass", "if", "not", "samples", ":", "raise", "StopRequest", "(", "\"Impossible to obtain sample\"", ")", "else", ":", "return", "samples" ]
Sample collection is meant to be very tolerant of generic failures, as failing to obtain the sample has important consequences for the results. - Multiple retries with longer delays - Larger than usual timeout
[ "Sample", "collection", "is", "meant", "to", "be", "very", "tolerant", "to", "generic", "failures", "as", "failing", "to", "obtain", "the", "sample", "has", "important", "consequences", "on", "the", "results", "." ]
train
https://github.com/delvelabs/hammertime/blob/0b371499869881c7d12ed71f66e0d6ac9f9a3307/hammertime/rules/status.py#L182-L206
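Reduced to its asyncio skeleton, the tolerant gathering in the record above fires several probes, keeps whichever complete successfully, and fails only if none did. probe() and the exception type below are placeholders standing in for _fetch_sample and RejectRequest.

import asyncio
import random

async def probe(i):
    # placeholder for _fetch_sample: may fail or return a 'signature'
    await asyncio.sleep(0.01)
    if random.random() < 0.3:
        raise RuntimeError('rejected')
    return 'signature-%d' % i

async def collect_samples(count=5):
    samples = []
    for fut in asyncio.as_completed([probe(i) for i in range(count)]):
        try:
            samples.append(await fut)
        except RuntimeError:
            pass  # tolerate individual failures
    if not samples:
        raise RuntimeError('impossible to obtain sample')
    return samples

print(asyncio.run(collect_samples()))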
delvelabs/hammertime
hammertime/rules/status.py
SimilarPathGenerator.get_pattern
def get_pattern(self, url): """Return the path part of the URL with the last element replaced with its pattern in a regex-like format: \l -> lowercase letters, same as [a-z]+ \L -> uppercase letters, same as [A-Z]+ \i -> letters ignoring case, same as [a-zA-Z]+ \d -> digits, same as \d+ \w -> word characters (letters, digits, underscores), same as \w+ All other characters match themselves """ path = urlparse(url).path directories, filename = os.path.split(path) if len(filename) > 0: pattern = self.get_pattern_for_filename(filename) if directories[-1] != "/": directories += "/" return directories + pattern else: return self.get_pattern_for_directory(directories)
python
def get_pattern(self, url): """Return the path part of the URL with the last element replaced with its pattern in a regex-like format: \l -> lowercase letters, same as [a-z]+ \L -> uppercase letters, same as [A-Z]+ \i -> letters ignoring case, same as [a-zA-Z]+ \d -> digits, same as \d+ \w -> word characters (letters, digits, underscores), same as \w+ All other characters match themselves """ path = urlparse(url).path directories, filename = os.path.split(path) if len(filename) > 0: pattern = self.get_pattern_for_filename(filename) if directories[-1] != "/": directories += "/" return directories + pattern else: return self.get_pattern_for_directory(directories)
[ "def", "get_pattern", "(", "self", ",", "url", ")", ":", "path", "=", "urlparse", "(", "url", ")", ".", "path", "directories", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "len", "(", "filename", ")", ">", "0", ":", "pattern", "=", "self", ".", "get_pattern_for_filename", "(", "filename", ")", "if", "directories", "[", "-", "1", "]", "!=", "\"/\"", ":", "directories", "+=", "\"/\"", "return", "directories", "+", "pattern", "else", ":", "return", "self", ".", "get_pattern_for_directory", "(", "directories", ")" ]
Return the path part of the URL with the last element replaced with its pattern in a regex-like format: \l -> lowercase letters, same as [a-z]+ \L -> uppercase letters, same as [A-Z]+ \i -> letters ignoring case, same as [a-zA-Z]+ \d -> digits, same as \d+ \w -> word characters (letters, digits, underscores), same as \w+ All other characters match themselves
[ "Return", "the", "path", "part", "of", "the", "URL", "with", "the", "last", "element", "replaced", "with", "its", "pattern", "in", "a", "regex", "-", "like", "format", ":", "\\", "l", "-", ">", "lowercase", "letters", "same", "as", "[", "a", "-", "z", "]", "+", "\\", "L", "-", ">", "uppercase", "letters", "same", "as", "[", "A", "-", "Z", "]", "+", "\\", "i", "-", ">", "letters", "ignoring", "case", "same", "as", "[", "a", "-", "zA", "-", "Z", "]", "+", "\\", "d", "-", ">", "digits", "same", "as", "\\", "d", "+", "\\", "w", "-", ">", "word", "characters", "(", "letters", "digits", "underscores", ")", "same", "as", "\\", "w", "+", "All", "other", "characters", "match", "themselves" ]
train
https://github.com/delvelabs/hammertime/blob/0b371499869881c7d12ed71f66e0d6ac9f9a3307/hammertime/rules/status.py#L241-L258
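get_pattern_for_filename is referenced but not shown in this record; one plausible reading of the documented notation, which replaces each run of word characters with \l, \L, \i, \d or \w while keeping separators literal, is sketched below. This is an assumption for illustration, not the project's implementation.

import re

def classify_run(run):
    # pick the narrowest documented class that covers the whole run
    if run.isdigit():
        return r'\d'
    if run.isalpha():
        if run.islower():
            return r'\l'
        if run.isupper():
            return r'\L'
        return r'\i'
    return r'\w'

def pattern_for_filename(filename):
    parts = re.findall(r'[A-Za-z0-9_]+|[^A-Za-z0-9_]', filename)
    return ''.join(classify_run(p) if re.fullmatch(r'\w+', p) else p for p in parts)

print(pattern_for_filename('index.php'))     # \l.\l
print(pattern_for_filename('IMG_1234.JPG'))  # \w.\L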
guaix-ucm/pyemir
emirdrp/tools/overplot_bounddict.py
get_boundaries
def get_boundaries(bounddict_file, slitlet_number): """Read the bounddict json file and return the polynomial boundaries. Parameters ---------- bounddict_file : file handler File containing the bounddict JSON data. slitlet_number : int Number of slitlet. Returns ------- pol_lower_boundary : numpy polynomial Polynomial defining the lower boundary of the slitlet. pol_upper_boundary : numpy polynomial Polynomial defining the upper boundary of the slitlet. xmin_lower : float Minimum abscissae for the lower boundary. xmax_lower : float Maximum abscissae for the lower boundary. xmin_upper : float Minimum abscissae for the upper boundary. xmax_upper : float Maximum abscissae for the upper boundary. csu_bar_slit_center : float CSU bar slit center (in mm) """ bounddict = json.loads(open(bounddict_file.name).read()) # return values in case the requested slitlet number is not defined pol_lower_boundary = None pol_upper_boundary = None xmin_lower = None xmax_lower = None xmin_upper = None xmax_upper = None csu_bar_slit_center = None # search the slitlet number in bounddict slitlet_label = "slitlet" + str(slitlet_number).zfill(2) if slitlet_label in bounddict['contents'].keys(): list_date_obs = list(bounddict['contents'][slitlet_label].keys()) list_date_obs.sort() num_date_obs = len(list_date_obs) if num_date_obs == 1: date_obs = list_date_obs[0] tmp_dict = bounddict['contents'][slitlet_label][date_obs] pol_lower_boundary = Polynomial(tmp_dict['boundary_coef_lower']) pol_upper_boundary = Polynomial(tmp_dict['boundary_coef_upper']) xmin_lower = tmp_dict['boundary_xmin_lower'] xmax_lower = tmp_dict['boundary_xmax_lower'] xmin_upper = tmp_dict['boundary_xmin_upper'] xmax_upper = tmp_dict['boundary_xmax_upper'] csu_bar_slit_center = tmp_dict['csu_bar_slit_center'] else: raise ValueError("num_date_obs =", num_date_obs, " (must be 1)") else: print("WARNING: slitlet number " + str(slitlet_number) + " is not available in " + bounddict_file.name) # return result return pol_lower_boundary, pol_upper_boundary, \ xmin_lower, xmax_lower, xmin_upper, xmax_upper, \ csu_bar_slit_center
python
def get_boundaries(bounddict_file, slitlet_number): """Read the bounddict json file and return the polynomial boundaries. Parameters ---------- bounddict_file : file handler File containing the bounddict JSON data. slitlet_number : int Number of slitlet. Returns ------- pol_lower_boundary : numpy polynomial Polynomial defining the lower boundary of the slitlet. pol_upper_boundary : numpy polynomial Polynomial defining the upper boundary of the slitlet. xmin_lower : float Minimum abscissae for the lower boundary. xmax_lower : float Maximum abscissae for the lower boundary. xmin_upper : float Minimum abscissae for the upper boundary. xmax_upper : float Maximum abscissae for the upper boundary. csu_bar_slit_center : float CSU bar slit center (in mm) """ bounddict = json.loads(open(bounddict_file.name).read()) # return values in case the requested slitlet number is not defined pol_lower_boundary = None pol_upper_boundary = None xmin_lower = None xmax_lower = None xmin_upper = None xmax_upper = None csu_bar_slit_center = None # search the slitlet number in bounddict slitlet_label = "slitlet" + str(slitlet_number).zfill(2) if slitlet_label in bounddict['contents'].keys(): list_date_obs = list(bounddict['contents'][slitlet_label].keys()) list_date_obs.sort() num_date_obs = len(list_date_obs) if num_date_obs == 1: date_obs = list_date_obs[0] tmp_dict = bounddict['contents'][slitlet_label][date_obs] pol_lower_boundary = Polynomial(tmp_dict['boundary_coef_lower']) pol_upper_boundary = Polynomial(tmp_dict['boundary_coef_upper']) xmin_lower = tmp_dict['boundary_xmin_lower'] xmax_lower = tmp_dict['boundary_xmax_lower'] xmin_upper = tmp_dict['boundary_xmin_upper'] xmax_upper = tmp_dict['boundary_xmax_upper'] csu_bar_slit_center = tmp_dict['csu_bar_slit_center'] else: raise ValueError("num_date_obs =", num_date_obs, " (must be 1)") else: print("WARNING: slitlet number " + str(slitlet_number) + " is not available in " + bounddict_file.name) # return result return pol_lower_boundary, pol_upper_boundary, \ xmin_lower, xmax_lower, xmin_upper, xmax_upper, \ csu_bar_slit_center
[ "def", "get_boundaries", "(", "bounddict_file", ",", "slitlet_number", ")", ":", "bounddict", "=", "json", ".", "loads", "(", "open", "(", "bounddict_file", ".", "name", ")", ".", "read", "(", ")", ")", "# return values in case the requested slitlet number is not defined", "pol_lower_boundary", "=", "None", "pol_upper_boundary", "=", "None", "xmin_lower", "=", "None", "xmax_lower", "=", "None", "xmin_upper", "=", "None", "xmax_upper", "=", "None", "csu_bar_slit_center", "=", "None", "# search the slitlet number in bounddict", "slitlet_label", "=", "\"slitlet\"", "+", "str", "(", "slitlet_number", ")", ".", "zfill", "(", "2", ")", "if", "slitlet_label", "in", "bounddict", "[", "'contents'", "]", ".", "keys", "(", ")", ":", "list_date_obs", "=", "list", "(", "bounddict", "[", "'contents'", "]", "[", "slitlet_label", "]", ".", "keys", "(", ")", ")", "list_date_obs", ".", "sort", "(", ")", "num_date_obs", "=", "len", "(", "list_date_obs", ")", "if", "num_date_obs", "==", "1", ":", "date_obs", "=", "list_date_obs", "[", "0", "]", "tmp_dict", "=", "bounddict", "[", "'contents'", "]", "[", "slitlet_label", "]", "[", "date_obs", "]", "pol_lower_boundary", "=", "Polynomial", "(", "tmp_dict", "[", "'boundary_coef_lower'", "]", ")", "pol_upper_boundary", "=", "Polynomial", "(", "tmp_dict", "[", "'boundary_coef_upper'", "]", ")", "xmin_lower", "=", "tmp_dict", "[", "'boundary_xmin_lower'", "]", "xmax_lower", "=", "tmp_dict", "[", "'boundary_xmax_lower'", "]", "xmin_upper", "=", "tmp_dict", "[", "'boundary_xmin_upper'", "]", "xmax_upper", "=", "tmp_dict", "[", "'boundary_xmax_upper'", "]", "csu_bar_slit_center", "=", "tmp_dict", "[", "'csu_bar_slit_center'", "]", "else", ":", "raise", "ValueError", "(", "\"num_date_obs =\"", ",", "num_date_obs", ",", "\" (must be 1)\"", ")", "else", ":", "print", "(", "\"WARNING: slitlet number \"", "+", "str", "(", "slitlet_number", ")", "+", "\" is not available in \"", "+", "bounddict_file", ".", "name", ")", "# return result", "return", "pol_lower_boundary", ",", "pol_upper_boundary", ",", "xmin_lower", ",", "xmax_lower", ",", "xmin_upper", ",", "xmax_upper", ",", "csu_bar_slit_center" ]
Read the bounddict json file and return the polynomial boundaries. Parameters ---------- bounddict_file : file handler File containing the bounddict JSON data. slitlet_number : int Number of slitlet. Returns ------- pol_lower_boundary : numpy polynomial Polynomial defining the lower boundary of the slitlet. pol_upper_boundary : numpy polynomial Polynomial defining the upper boundary of the slitlet. xmin_lower : float Minimum abscissae for the lower boundary. xmax_lower : float Maximum abscissae for the lower boundary. xmin_upper : float Minimum abscissae for the upper boundary. xmax_upper : float Maximum abscissae for the upper boundary. csu_bar_slit_center : float CSU bar slit center (in mm)
[ "Read", "the", "bounddict", "json", "file", "and", "return", "the", "polynomial", "boundaries", "." ]
train
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/overplot_bounddict.py#L38-L104
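The boundary coefficients stored in the bounddict JSON are plain lists in ascending order, so rebuilding and evaluating a boundary only needs numpy's Polynomial; the coefficient values below are invented for illustration.

import numpy as np
from numpy.polynomial import Polynomial

pol_lower = Polynomial([10.0, 0.02, -1.0e-6])  # illustrative boundary_coef_lower
xmin, xmax = 1.0, 2048.0                       # illustrative boundary_xmin/xmax

x = np.linspace(xmin, xmax, 5)
print(pol_lower(x))  # boundary ordinates at the sampled abscissae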
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
_to_enos_roles
def _to_enos_roles(roles): """Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init` """ def to_host(h): extra = {} # create extra_vars for the nics # network_role = ethX for nic, roles in h["nics"]: for role in roles: extra[role] = nic return Host(h["host"], user="root", extra=extra) enos_roles = {} for role, hosts in roles.items(): enos_roles[role] = [to_host(h) for h in hosts] logger.debug(enos_roles) return enos_roles
python
def _to_enos_roles(roles): """Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init` """ def to_host(h): extra = {} # create extra_vars for the nics # network_role = ethX for nic, roles in h["nics"]: for role in roles: extra[role] = nic return Host(h["host"], user="root", extra=extra) enos_roles = {} for role, hosts in roles.items(): enos_roles[role] = [to_host(h) for h in hosts] logger.debug(enos_roles) return enos_roles
[ "def", "_to_enos_roles", "(", "roles", ")", ":", "def", "to_host", "(", "h", ")", ":", "extra", "=", "{", "}", "# create extra_vars for the nics", "# network_role = ethX", "for", "nic", ",", "roles", "in", "h", "[", "\"nics\"", "]", ":", "for", "role", "in", "roles", ":", "extra", "[", "role", "]", "=", "nic", "return", "Host", "(", "h", "[", "\"host\"", "]", ",", "user", "=", "\"root\"", ",", "extra", "=", "extra", ")", "enos_roles", "=", "{", "}", "for", "role", ",", "hosts", "in", "roles", ".", "items", "(", ")", ":", "enos_roles", "[", "role", "]", "=", "[", "to_host", "(", "h", ")", "for", "h", "in", "hosts", "]", "logger", ".", "debug", "(", "enos_roles", ")", "return", "enos_roles" ]
Transform the roles to use enoslib.host.Host hosts. Args: roles (dict): roles returned by :py:func:`enoslib.infra.provider.Provider.init`
[ "Transform", "the", "roles", "to", "use", "enoslib", ".", "host", ".", "Host", "hosts", "." ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L12-L34
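A small illustration of the data shape this transformation expects: each deploy5k host carries a nics list of (interface, roles) pairs, which becomes per-network-role extra vars on the resulting Host. The host name and role names below are invented.

def nics_to_extra(nics):
    # build the {network_role: interface} mapping, as in to_host()
    extra = {}
    for nic, roles in nics:
        for role in roles:
            extra[role] = nic
    return extra

g5k_host = {'host': 'node-1.rennes.grid5000.fr',
            'nics': [('eth0', ['n1', 'my_network']), ('eth1', ['n2'])]}
print(nics_to_extra(g5k_host['nics']))
# {'n1': 'eth0', 'my_network': 'eth0', 'n2': 'eth1'}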
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
_to_enos_networks
def _to_enos_networks(networks): """Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init` """ nets = [] for roles, network in networks: nets.append(network.to_enos(roles)) logger.debug(nets) return nets
python
def _to_enos_networks(networks): """Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init` """ nets = [] for roles, network in networks: nets.append(network.to_enos(roles)) logger.debug(nets) return nets
[ "def", "_to_enos_networks", "(", "networks", ")", ":", "nets", "=", "[", "]", "for", "roles", ",", "network", "in", "networks", ":", "nets", ".", "append", "(", "network", ".", "to_enos", "(", "roles", ")", ")", "logger", ".", "debug", "(", "nets", ")", "return", "nets" ]
Transform the networks returned by deploy5k. Args: networks (dict): networks returned by :py:func:`enoslib.infra.provider.Provider.init`
[ "Transform", "the", "networks", "returned", "by", "deploy5k", "." ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L37-L48
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
G5k.init
def init(self, force_deploy=False, client=None): """Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met. """ _force_deploy = self.provider_conf.force_deploy self.provider_conf.force_deploy = _force_deploy or force_deploy self._provider_conf = self.provider_conf.to_dict() r = api.Resources(self._provider_conf, client=client) r.launch() roles = r.get_roles() networks = r.get_networks() return (_to_enos_roles(roles), _to_enos_networks(networks))
python
def init(self, force_deploy=False, client=None): """Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met. """ _force_deploy = self.provider_conf.force_deploy self.provider_conf.force_deploy = _force_deploy or force_deploy self._provider_conf = self.provider_conf.to_dict() r = api.Resources(self._provider_conf, client=client) r.launch() roles = r.get_roles() networks = r.get_networks() return (_to_enos_roles(roles), _to_enos_networks(networks))
[ "def", "init", "(", "self", ",", "force_deploy", "=", "False", ",", "client", "=", "None", ")", ":", "_force_deploy", "=", "self", ".", "provider_conf", ".", "force_deploy", "self", ".", "provider_conf", ".", "force_deploy", "=", "_force_deploy", "or", "force_deploy", "self", ".", "_provider_conf", "=", "self", ".", "provider_conf", ".", "to_dict", "(", ")", "r", "=", "api", ".", "Resources", "(", "self", ".", "_provider_conf", ",", "client", "=", "client", ")", "r", ".", "launch", "(", ")", "roles", "=", "r", ".", "get_roles", "(", ")", "networks", "=", "r", ".", "get_networks", "(", ")", "return", "(", "_to_enos_roles", "(", "roles", ")", ",", "_to_enos_networks", "(", "networks", ")", ")" ]
Reserve and deploys the nodes according to the resources section In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met.
[ "Reserve", "and", "deploys", "the", "nodes", "according", "to", "the", "resources", "section" ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L54-L77
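One subtle point worth isolating is how the call-time force_deploy flag is merged with the configuration: it can only turn redeployment on, never off. A tiny self-contained sketch, with a hypothetical `_Conf` stand-in for the provider configuration object, demonstrates the behaviour.

class _Conf:
    """Hypothetical stand-in for the provider configuration object."""

    def __init__(self, force_deploy=False):
        self.force_deploy = force_deploy


def merge_force_deploy(conf, force_deploy=False):
    # Mirrors the first two statements of G5k.init: the call-time flag
    # never downgrades a force_deploy already set in the configuration.
    conf.force_deploy = conf.force_deploy or force_deploy
    return conf.force_deploy


assert merge_force_deploy(_Conf(force_deploy=True), force_deploy=False) is True
assert merge_force_deploy(_Conf(force_deploy=False), force_deploy=True) is True
assert merge_force_deploy(_Conf(force_deploy=False), force_deploy=False) is False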
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/provider.py
G5k.destroy
def destroy(self):
    """Destroys the jobs."""
    r = api.Resources(self.provider_conf.to_dict())
    # insert force_deploy
    r.destroy()
python
def destroy(self): """Destroys the jobs.""" r = api.Resources(self.provider_conf.to_dict()) # insert force_deploy r.destroy()
[ "def", "destroy", "(", "self", ")", ":", "r", "=", "api", ".", "Resources", "(", "self", ".", "provider_conf", ".", "to_dict", "(", ")", ")", "# insert force_deploy", "r", ".", "destroy", "(", ")" ]
Destroys the jobs.
[ "Destroys", "the", "jobs", "." ]
train
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/provider.py#L79-L83
openmicroanalysis/pyxray
pyxray/parser/wikipedia.py
WikipediaElementNameParser._find_wikipedia_names
def _find_wikipedia_names(self, name_en):
    """
    Finds all Wikipedia pages referring to the specified name in English
    and returns a dictionary where the keys are the language code and the
    values are the titles of the corresponding pages.
    """
    url = 'https://en.wikipedia.org/w/api.php'
    params = {'action': 'query',
              'titles': name_en,
              'prop': 'langlinks',
              'lllimit': 500,
              'format': 'json'}
    r = requests.get(url, params=params)
    if not r:
        raise ValueError('Could not find wikipedia page: {0}'.format(name_en))
    out = r.json()

    names = {}
    pages = out['query']['pages']
    for page in pages:
        for langlink in pages[page].get('langlinks', []):
            names[langlink['lang']] = langlink['*']

    return names
python
def _find_wikipedia_names(self, name_en): """ Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages. """ url = 'https://en.wikipedia.org/w/api.php' params = {'action': 'query', 'titles': name_en, 'prop': 'langlinks', 'lllimit': 500, 'format': 'json'} r = requests.get(url, params=params) if not r: raise ValueError('Could not find wikipedia page: {0}'.format(name_en)) out = r.json() names = {} pages = out['query']['pages'] for page in pages: for langlink in pages[page].get('langlinks', []): names[langlink['lang']] = langlink['*'] return names
[ "def", "_find_wikipedia_names", "(", "self", ",", "name_en", ")", ":", "url", "=", "'https://en.wikipedia.org/w/api.php'", "params", "=", "{", "'action'", ":", "'query'", ",", "'titles'", ":", "name_en", ",", "'prop'", ":", "'langlinks'", ",", "'lllimit'", ":", "500", ",", "'format'", ":", "'json'", "}", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ")", "if", "not", "r", ":", "raise", "ValueError", "(", "'Could not find wikipedia page: {0}'", ".", "format", "(", "name_en", ")", ")", "out", "=", "r", ".", "json", "(", ")", "names", "=", "{", "}", "pages", "=", "out", "[", "'query'", "]", "[", "'pages'", "]", "for", "page", "in", "pages", ":", "for", "langlink", "in", "pages", "[", "page", "]", ".", "get", "(", "'langlinks'", ",", "[", "]", ")", ":", "names", "[", "langlink", "[", "'lang'", "]", "]", "=", "langlink", "[", "'*'", "]", "return", "names" ]
Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages.
[ "Finds", "all", "Wikipedia", "pages", "referring", "to", "the", "specified", "name", "in", "English", "and", "returns", "a", "dictionary", "where", "the", "keys", "are", "the", "language", "code", "and", "the", "values", "are", "the", "titles", "of", "the", "corresponding", "pages", "." ]
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/parser/wikipedia.py#L73-L96
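For readers who want to try the same query outside the parser class, here is a self-contained sketch of the MediaWiki "langlinks" request the method builds. It needs network access and the `requests` package; the element name in the example is arbitrary.

import requests


def find_wikipedia_names(name_en):
    # Same MediaWiki "langlinks" query as in the parser above, written as a
    # standalone function.
    url = 'https://en.wikipedia.org/w/api.php'
    params = {'action': 'query',
              'titles': name_en,
              'prop': 'langlinks',
              'lllimit': 500,
              'format': 'json'}
    r = requests.get(url, params=params)
    r.raise_for_status()
    pages = r.json()['query']['pages']
    return {link['lang']: link['*']
            for page in pages.values()
            for link in page.get('langlinks', [])}


if __name__ == '__main__':
    print(find_wikipedia_names('Hydrogen'))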
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse.append_response
def append_response(self, response):
    """Append the response to the stack of responses.

    :param tornado.httpclient.HTTPResponse response: The HTTP response
    """
    self._responses.append(response)
    if 'Warning' in response.headers:
        LOGGER.warning('HTTP %s %s Warning (%s): %s (attempt %s)',
                       response.request.method, response.request.url,
                       response.code, response.headers['Warning'],
                       len(self._responses))
python
def append_response(self, response): """Append the response to the stack of responses. :param tornado.httpclient.HTTPResponse response: The HTTP response """ self._responses.append(response) if 'Warning' in response.headers: LOGGER.warning( 'HTTP %s %s Warning (%s): %s (attempt %s)', response.request.method, response.request.url, response.code, response.headers['Warning'], len(self._responses))
[ "def", "append_response", "(", "self", ",", "response", ")", ":", "self", ".", "_responses", ".", "append", "(", "response", ")", "if", "'Warning'", "in", "response", ".", "headers", ":", "LOGGER", ".", "warning", "(", "'HTTP %s %s Warning (%s): %s (attempt %s)'", ",", "response", ".", "request", ".", "method", ",", "response", ".", "request", ".", "url", ",", "response", ".", "code", ",", "response", ".", "headers", "[", "'Warning'", "]", ",", "len", "(", "self", ".", "_responses", ")", ")" ]
Append the response to the stack of responses. :param tornado.httpclient.HTTPResponse response: The HTTP response
[ "Append", "the", "response", "to", "the", "stack", "of", "responses", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L64-L76
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse.body
def body(self):
    """Returns the HTTP response body, deserialized if possible.

    :rtype: mixed
    """
    if not self._responses:
        return None
    if self._responses[-1].code >= 400:
        return self._error_message()
    return self._deserialize()
python
def body(self): """Returns the HTTP response body, deserialized if possible. :rtype: mixed """ if not self._responses: return None if self._responses[-1].code >= 400: return self._error_message() return self._deserialize()
[ "def", "body", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", ":", "return", "None", "if", "self", ".", "_responses", "[", "-", "1", "]", ".", "code", ">=", "400", ":", "return", "self", ".", "_error_message", "(", ")", "return", "self", ".", "_deserialize", "(", ")" ]
Returns the HTTP response body, deserialized if possible. :rtype: mixed
[ "Returns", "the", "HTTP", "response", "body", "deserialized", "if", "possible", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L94-L104
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse.links
def links(self):
    """Return the parsed link header if it was set, returning a list of
    the links as a dict.

    :rtype: list(dict()) or None
    """
    if not self._responses:
        return None
    if 'Link' in self._responses[-1].headers:
        links = []
        for l in headers.parse_link(self._responses[-1].headers['Link']):
            link = {'target': l.target}
            link.update({k: v for (k, v) in l.parameters})
            links.append(link)
        return links
python
def links(self): """Return the parsed link header if it was set, returning a list of the links as a dict. :rtype: list(dict()) or None """ if not self._responses: return None if 'Link' in self._responses[-1].headers: links = [] for l in headers.parse_link(self._responses[-1].headers['Link']): link = {'target': l.target} link.update({k: v for (k, v) in l.parameters}) links.append(link) return links
[ "def", "links", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", ":", "return", "None", "if", "'Link'", "in", "self", ".", "_responses", "[", "-", "1", "]", ".", "headers", ":", "links", "=", "[", "]", "for", "l", "in", "headers", ".", "parse_link", "(", "self", ".", "_responses", "[", "-", "1", "]", ".", "headers", "[", "'Link'", "]", ")", ":", "link", "=", "{", "'target'", ":", "l", ".", "target", "}", "link", ".", "update", "(", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "l", ".", "parameters", "}", ")", "links", ".", "append", "(", "link", ")", "return", "links" ]
Return the parsed link header if it was set, returning a list of the links as a dict. :rtype: list(dict()) or None
[ "Return", "the", "parsed", "link", "header", "if", "it", "was", "set", "returning", "a", "list", "of", "the", "links", "as", "a", "dict", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L155-L170
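Assuming the same `ietfparse` dependency the mixin uses, a standalone version of this parsing logic might look like the sketch below; the example header value and the shape of the printed result are illustrative, not taken from the project's tests.

from ietfparse import headers


def parse_links(link_header):
    # Mirrors HTTPResponse.links for a raw Link header value.
    links = []
    for parsed in headers.parse_link(link_header):
        link = {'target': parsed.target}
        link.update({k: v for (k, v) in parsed.parameters})
        links.append(link)
    return links


value = ('<https://example.com/items?page=2>; rel="next", '
         '<https://example.com/items?page=10>; rel="last"')
print(parse_links(value))
# Expected shape: [{'target': 'https://example.com/items?page=2', 'rel': 'next'},
#                  {'target': 'https://example.com/items?page=10', 'rel': 'last'}]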
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse._decode
def _decode(self, value):
    """Decode bytes to UTF-8 strings as a singe value, list, or dict.

    :param mixed value: The value to decode
    :rtype: mixed
    """
    if isinstance(value, list):
        return [self._decode(v) for v in value]
    elif isinstance(value, dict):
        return {self._decode(k): self._decode(v)
                for k, v in value.items()}
    elif isinstance(value, bytes):
        return value.decode('utf-8')
    return value
python
def _decode(self, value): """Decode bytes to UTF-8 strings as a singe value, list, or dict. :param mixed value: The value to decode :rtype: mixed """ if isinstance(value, list): return [self._decode(v) for v in value] elif isinstance(value, dict): return {self._decode(k): self._decode(v) for k, v in value.items()} elif isinstance(value, bytes): return value.decode('utf-8') return value
[ "def", "_decode", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "return", "[", "self", ".", "_decode", "(", "v", ")", "for", "v", "in", "value", "]", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "{", "self", ".", "_decode", "(", "k", ")", ":", "self", ".", "_decode", "(", "v", ")", "for", "k", ",", "v", "in", "value", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "value", ",", "bytes", ")", ":", "return", "value", ".", "decode", "(", "'utf-8'", ")", "return", "value" ]
Decode bytes to UTF-8 strings as a singe value, list, or dict. :param mixed value: The value to decode :rtype: mixed
[ "Decode", "bytes", "to", "UTF", "-", "8", "strings", "as", "a", "singe", "value", "list", "or", "dict", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L196-L210
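The recursive decoding logic stands well on its own; the following sketch is the same algorithm as a module-level function, with a small usage example.

def decode(value):
    # Recursively turn bytes into str, walking lists and dicts,
    # exactly like HTTPResponse._decode above.
    if isinstance(value, list):
        return [decode(v) for v in value]
    if isinstance(value, dict):
        return {decode(k): decode(v) for k, v in value.items()}
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value


print(decode({b'name': [b'alpha', b'beta'], 'count': 2}))
# {'name': ['alpha', 'beta'], 'count': 2}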
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse._deserialize
def _deserialize(self):
    """Try and deserialize a response body based upon the specified
    content type.

    :rtype: mixed
    """
    if not self._responses or not self._responses[-1].body:
        return None
    if 'Content-Type' not in self._responses[-1].headers:
        return self._responses[-1].body
    try:
        content_type = algorithms.select_content_type(
            [headers.parse_content_type(
                self._responses[-1].headers['Content-Type'])],
            AVAILABLE_CONTENT_TYPES)
    except errors.NoMatch:
        return self._responses[-1].body
    if content_type[0] == CONTENT_TYPE_JSON:
        return self._decode(
            self._json.loads(self._decode(self._responses[-1].body)))
    elif content_type[0] == CONTENT_TYPE_MSGPACK:  # pragma: nocover
        return self._decode(
            self._msgpack.unpackb(self._responses[-1].body))
python
def _deserialize(self): """Try and deserialize a response body based upon the specified content type. :rtype: mixed """ if not self._responses or not self._responses[-1].body: return None if 'Content-Type' not in self._responses[-1].headers: return self._responses[-1].body try: content_type = algorithms.select_content_type( [headers.parse_content_type( self._responses[-1].headers['Content-Type'])], AVAILABLE_CONTENT_TYPES) except errors.NoMatch: return self._responses[-1].body if content_type[0] == CONTENT_TYPE_JSON: return self._decode( self._json.loads(self._decode(self._responses[-1].body))) elif content_type[0] == CONTENT_TYPE_MSGPACK: # pragma: nocover return self._decode( self._msgpack.unpackb(self._responses[-1].body))
[ "def", "_deserialize", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", "or", "not", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", ":", "return", "None", "if", "'Content-Type'", "not", "in", "self", ".", "_responses", "[", "-", "1", "]", ".", "headers", ":", "return", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", "try", ":", "content_type", "=", "algorithms", ".", "select_content_type", "(", "[", "headers", ".", "parse_content_type", "(", "self", ".", "_responses", "[", "-", "1", "]", ".", "headers", "[", "'Content-Type'", "]", ")", "]", ",", "AVAILABLE_CONTENT_TYPES", ")", "except", "errors", ".", "NoMatch", ":", "return", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", "if", "content_type", "[", "0", "]", "==", "CONTENT_TYPE_JSON", ":", "return", "self", ".", "_decode", "(", "self", ".", "_json", ".", "loads", "(", "self", ".", "_decode", "(", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", ")", ")", ")", "elif", "content_type", "[", "0", "]", "==", "CONTENT_TYPE_MSGPACK", ":", "# pragma: nocover", "return", "self", ".", "_decode", "(", "self", ".", "_msgpack", ".", "unpackb", "(", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", ")", ")" ]
Try and deserialize a response body based upon the specified content type. :rtype: mixed
[ "Try", "and", "deserialize", "a", "response", "body", "based", "upon", "the", "specified", "content", "type", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L212-L236
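A deliberately simplified, standard-library-only sketch of the same idea: pick a deserializer from the Content-Type value and fall back to the raw body. The real method above also negotiates via ietfparse and supports msgpack; none of that is reproduced here.

import json


def deserialize(body, content_type):
    # Simplified stand-in for HTTPResponse._deserialize: JSON only,
    # anything else is returned untouched.
    if not body:
        return None
    if content_type and content_type.split(';')[0].strip() == 'application/json':
        if isinstance(body, bytes):
            body = body.decode('utf-8')
        return json.loads(body)
    return body


print(deserialize(b'{"id": 1}', 'application/json; charset=utf-8'))  # {'id': 1}
print(deserialize(b'plain text', 'text/plain'))                      # b'plain text'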
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPResponse._error_message
def _error_message(self):
    """Try and extract the error message from a HTTP error response.

    :rtype: str
    """
    body = self._deserialize()
    return body.get('message', body) if isinstance(body, dict) else body
python
def _error_message(self): """Try and extract the error message from a HTTP error response. :rtype: str """ body = self._deserialize() return body.get('message', body) if isinstance(body, dict) else body
[ "def", "_error_message", "(", "self", ")", ":", "body", "=", "self", ".", "_deserialize", "(", ")", "return", "body", ".", "get", "(", "'message'", ",", "body", ")", "if", "isinstance", "(", "body", ",", "dict", ")", "else", "body" ]
Try and extract the error message from a HTTP error response. :rtype: str
[ "Try", "and", "extract", "the", "error", "message", "from", "a", "HTTP", "error", "response", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L238-L245
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin.http_fetch
async def http_fetch(self, url,
                     method='GET',
                     request_headers=None,
                     body=None,
                     content_type=CONTENT_TYPE_MSGPACK,
                     follow_redirects=False,
                     max_redirects=MAX_REDIRECTS,
                     connect_timeout=DEFAULT_CONNECT_TIMEOUT,
                     request_timeout=DEFAULT_REQUEST_TIMEOUT,
                     max_http_attempts=MAX_HTTP_RETRIES,
                     auth_username=None,
                     auth_password=None,
                     user_agent=None,
                     validate_cert=True,
                     allow_nonstandard_methods=False,
                     dont_retry=None):
    """Perform a HTTP request

    Will retry up to ``self.MAX_HTTP_RETRIES`` times.

    :param str url: The URL for the request
    :param str method: The HTTP request method, defaults to ``GET``
    :param dict request_headers: Headers to include in the HTTP request
    :param mixed body: The HTTP request body to send with the request
    :param content_type: The mime type to use for requests & responses.
        Defaults to ``application/msgpack``
    :type content_type: :py:class:`~ietfparse.datastructures.ContentType`
        or str
    :param bool follow_redirects: Follow HTTP redirects when received
    :param int max_redirects: Maximum number of redirects to follow,
        default is 5
    :param float connect_timeout: Timeout for initial connection in
        seconds, default 20 seconds
    :param float request_timeout: Timeout for entire request in seconds,
        default 20 seconds
    :param int max_http_attempts: Maximum number of times to retry
        a request, default is 3 attempts
    :param str auth_username: Username for HTTP authentication
    :param str auth_password: Password for HTTP authentication
    :param str user_agent: The str used for the ``User-Agent`` header,
        default used if unspecified.
    :param bool validate_cert: For HTTPS requests, validate the server's
        certificate? Default is True
    :param bool allow_nonstandard_methods: Allow methods that don't adhere
        to the HTTP spec.
    :param set dont_retry: A list of status codes that will not be retried
        if an error is returned. Default: set({})
    :rtype: HTTPResponse
    """
    response = HTTPResponse()
    request_headers = self._http_req_apply_default_headers(
        request_headers, content_type, body)
    if body:
        body = self._http_req_body_serialize(
            body, request_headers['Content-Type'])

    if not dont_retry:
        dont_retry = set({})

    client = httpclient.AsyncHTTPClient()

    # Workaround for Tornado defect.
    if hasattr(client, 'max_clients') and os.getenv('HTTP_MAX_CLIENTS'):
        client.max_clients = int(os.getenv('HTTP_MAX_CLIENTS'))

    for attempt in range(0, max_http_attempts):
        LOGGER.debug('%s %s (Attempt %i of %i) %r',
                     method, url, attempt + 1, max_http_attempts,
                     request_headers)
        if attempt > 0:
            request_headers['X-Retry-Attempt'] = str(attempt + 1)
        try:
            resp = await client.fetch(
                url,
                method=method,
                headers=request_headers,
                body=body,
                auth_username=auth_username,
                auth_password=auth_password,
                connect_timeout=connect_timeout,
                request_timeout=request_timeout,
                user_agent=user_agent or self._http_req_user_agent(),
                follow_redirects=follow_redirects,
                max_redirects=max_redirects,
                raise_error=False,
                validate_cert=validate_cert,
                allow_nonstandard_methods=allow_nonstandard_methods)
        except (ConnectionError, CurlError, OSError,
                socket.gaierror) as error:
            response.append_exception(error)
            LOGGER.warning(
                'HTTP Request Error for %s to %s attempt %i of %i: %s',
                method, url, attempt + 1, max_http_attempts, error)
            continue

        # Keep track of each response
        response.append_response(resp)

        # If the response is ok, finish and exit
        if response.ok:
            response.finish()
            return response
        elif resp.code in dont_retry:
            break
        elif resp.code in {423, 429}:
            await self._http_resp_rate_limited(resp)
        elif resp.code < 500:
            LOGGER.debug('HTTP Response Error for %s to %s'
                         'attempt %i of %i (%s): %s',
                         method, url, resp.code, attempt + 1,
                         max_http_attempts, response.body)
            response.finish()
            return response

        LOGGER.warning(
            'HTTP Error for %s to %s, attempt %i of %i (%s): %s',
            method, url, attempt + 1, max_http_attempts, resp.code,
            response.body)

    LOGGER.warning('HTTP %s to %s failed after %i attempts',
                   method, url, max_http_attempts)
    response.finish()
    return response
python
async def http_fetch(self, url,
                     method='GET',
                     request_headers=None,
                     body=None,
                     content_type=CONTENT_TYPE_MSGPACK,
                     follow_redirects=False,
                     max_redirects=MAX_REDIRECTS,
                     connect_timeout=DEFAULT_CONNECT_TIMEOUT,
                     request_timeout=DEFAULT_REQUEST_TIMEOUT,
                     max_http_attempts=MAX_HTTP_RETRIES,
                     auth_username=None,
                     auth_password=None,
                     user_agent=None,
                     validate_cert=True,
                     allow_nonstandard_methods=False,
                     dont_retry=None):
    """Perform a HTTP request

    Will retry up to ``self.MAX_HTTP_RETRIES`` times.

    :param str url: The URL for the request
    :param str method: The HTTP request method, defaults to ``GET``
    :param dict request_headers: Headers to include in the HTTP request
    :param mixed body: The HTTP request body to send with the request
    :param content_type: The mime type to use for requests & responses.
        Defaults to ``application/msgpack``
    :type content_type: :py:class:`~ietfparse.datastructures.ContentType`
        or str
    :param bool follow_redirects: Follow HTTP redirects when received
    :param int max_redirects: Maximum number of redirects to follow,
        default is 5
    :param float connect_timeout: Timeout for initial connection in
        seconds, default 20 seconds
    :param float request_timeout: Timeout for entire request in seconds,
        default 20 seconds
    :param int max_http_attempts: Maximum number of times to retry
        a request, default is 3 attempts
    :param str auth_username: Username for HTTP authentication
    :param str auth_password: Password for HTTP authentication
    :param str user_agent: The str used for the ``User-Agent`` header,
        default used if unspecified.
    :param bool validate_cert: For HTTPS requests, validate the server's
        certificate? Default is True
    :param bool allow_nonstandard_methods: Allow methods that don't adhere
        to the HTTP spec.
    :param set dont_retry: A list of status codes that will not be retried
        if an error is returned. Default: set({})
    :rtype: HTTPResponse
    """
    response = HTTPResponse()
    request_headers = self._http_req_apply_default_headers(
        request_headers, content_type, body)
    if body:
        body = self._http_req_body_serialize(
            body, request_headers['Content-Type'])

    if not dont_retry:
        dont_retry = set({})

    client = httpclient.AsyncHTTPClient()

    # Workaround for Tornado defect.
    if hasattr(client, 'max_clients') and os.getenv('HTTP_MAX_CLIENTS'):
        client.max_clients = int(os.getenv('HTTP_MAX_CLIENTS'))

    for attempt in range(0, max_http_attempts):
        LOGGER.debug('%s %s (Attempt %i of %i) %r',
                     method, url, attempt + 1, max_http_attempts,
                     request_headers)
        if attempt > 0:
            request_headers['X-Retry-Attempt'] = str(attempt + 1)
        try:
            resp = await client.fetch(
                url,
                method=method,
                headers=request_headers,
                body=body,
                auth_username=auth_username,
                auth_password=auth_password,
                connect_timeout=connect_timeout,
                request_timeout=request_timeout,
                user_agent=user_agent or self._http_req_user_agent(),
                follow_redirects=follow_redirects,
                max_redirects=max_redirects,
                raise_error=False,
                validate_cert=validate_cert,
                allow_nonstandard_methods=allow_nonstandard_methods)
        except (ConnectionError, CurlError, OSError,
                socket.gaierror) as error:
            response.append_exception(error)
            LOGGER.warning(
                'HTTP Request Error for %s to %s attempt %i of %i: %s',
                method, url, attempt + 1, max_http_attempts, error)
            continue

        # Keep track of each response
        response.append_response(resp)

        # If the response is ok, finish and exit
        if response.ok:
            response.finish()
            return response
        elif resp.code in dont_retry:
            break
        elif resp.code in {423, 429}:
            await self._http_resp_rate_limited(resp)
        elif resp.code < 500:
            LOGGER.debug('HTTP Response Error for %s to %s'
                         'attempt %i of %i (%s): %s',
                         method, url, resp.code, attempt + 1,
                         max_http_attempts, response.body)
            response.finish()
            return response

        LOGGER.warning(
            'HTTP Error for %s to %s, attempt %i of %i (%s): %s',
            method, url, attempt + 1, max_http_attempts, resp.code,
            response.body)

    LOGGER.warning('HTTP %s to %s failed after %i attempts',
                   method, url, max_http_attempts)
    response.finish()
    return response
[ "async", "def", "http_fetch", "(", "self", ",", "url", ",", "method", "=", "'GET'", ",", "request_headers", "=", "None", ",", "body", "=", "None", ",", "content_type", "=", "CONTENT_TYPE_MSGPACK", ",", "follow_redirects", "=", "False", ",", "max_redirects", "=", "MAX_REDIRECTS", ",", "connect_timeout", "=", "DEFAULT_CONNECT_TIMEOUT", ",", "request_timeout", "=", "DEFAULT_REQUEST_TIMEOUT", ",", "max_http_attempts", "=", "MAX_HTTP_RETRIES", ",", "auth_username", "=", "None", ",", "auth_password", "=", "None", ",", "user_agent", "=", "None", ",", "validate_cert", "=", "True", ",", "allow_nonstandard_methods", "=", "False", ",", "dont_retry", "=", "None", ")", ":", "response", "=", "HTTPResponse", "(", ")", "request_headers", "=", "self", ".", "_http_req_apply_default_headers", "(", "request_headers", ",", "content_type", ",", "body", ")", "if", "body", ":", "body", "=", "self", ".", "_http_req_body_serialize", "(", "body", ",", "request_headers", "[", "'Content-Type'", "]", ")", "if", "not", "dont_retry", ":", "dont_retry", "=", "set", "(", "{", "}", ")", "client", "=", "httpclient", ".", "AsyncHTTPClient", "(", ")", "# Workaround for Tornado defect.", "if", "hasattr", "(", "client", ",", "'max_clients'", ")", "and", "os", ".", "getenv", "(", "'HTTP_MAX_CLIENTS'", ")", ":", "client", ".", "max_clients", "=", "int", "(", "os", ".", "getenv", "(", "'HTTP_MAX_CLIENTS'", ")", ")", "for", "attempt", "in", "range", "(", "0", ",", "max_http_attempts", ")", ":", "LOGGER", ".", "debug", "(", "'%s %s (Attempt %i of %i) %r'", ",", "method", ",", "url", ",", "attempt", "+", "1", ",", "max_http_attempts", ",", "request_headers", ")", "if", "attempt", ">", "0", ":", "request_headers", "[", "'X-Retry-Attempt'", "]", "=", "str", "(", "attempt", "+", "1", ")", "try", ":", "resp", "=", "await", "client", ".", "fetch", "(", "url", ",", "method", "=", "method", ",", "headers", "=", "request_headers", ",", "body", "=", "body", ",", "auth_username", "=", "auth_username", ",", "auth_password", "=", "auth_password", ",", "connect_timeout", "=", "connect_timeout", ",", "request_timeout", "=", "request_timeout", ",", "user_agent", "=", "user_agent", "or", "self", ".", "_http_req_user_agent", "(", ")", ",", "follow_redirects", "=", "follow_redirects", ",", "max_redirects", "=", "max_redirects", ",", "raise_error", "=", "False", ",", "validate_cert", "=", "validate_cert", ",", "allow_nonstandard_methods", "=", "allow_nonstandard_methods", ")", "except", "(", "ConnectionError", ",", "CurlError", ",", "OSError", ",", "socket", ".", "gaierror", ")", "as", "error", ":", "response", ".", "append_exception", "(", "error", ")", "LOGGER", ".", "warning", "(", "'HTTP Request Error for %s to %s attempt %i of %i: %s'", ",", "method", ",", "url", ",", "attempt", "+", "1", ",", "max_http_attempts", ",", "error", ")", "continue", "# Keep track of each response", "response", ".", "append_response", "(", "resp", ")", "# If the response is ok, finish and exit", "if", "response", ".", "ok", ":", "response", ".", "finish", "(", ")", "return", "response", "elif", "resp", ".", "code", "in", "dont_retry", ":", "break", "elif", "resp", ".", "code", "in", "{", "423", ",", "429", "}", ":", "await", "self", ".", "_http_resp_rate_limited", "(", "resp", ")", "elif", "resp", ".", "code", "<", "500", ":", "LOGGER", ".", "debug", "(", "'HTTP Response Error for %s to %s'", "'attempt %i of %i (%s): %s'", ",", "method", ",", "url", ",", "resp", ".", "code", ",", "attempt", "+", "1", ",", "max_http_attempts", ",", "response", ".", "body", ")", 
"response", ".", "finish", "(", ")", "return", "response", "LOGGER", ".", "warning", "(", "'HTTP Error for %s to %s, attempt %i of %i (%s): %s'", ",", "method", ",", "url", ",", "attempt", "+", "1", ",", "max_http_attempts", ",", "resp", ".", "code", ",", "response", ".", "body", ")", "LOGGER", ".", "warning", "(", "'HTTP %s to %s failed after %i attempts'", ",", "method", ",", "url", ",", "max_http_attempts", ")", "response", ".", "finish", "(", ")", "return", "response" ]
Perform a HTTP request Will retry up to ``self.MAX_HTTP_RETRIES`` times. :param str url: The URL for the request :param str method: The HTTP request method, defaults to ``GET`` :param dict request_headers: Headers to include in the HTTP request :param mixed body: The HTTP request body to send with the request :param content_type: The mime type to use for requests & responses. Defaults to ``application/msgpack`` :type content_type: :py:class:`~ietfparse.datastructures.ContentType` or str :param bool follow_redirects: Follow HTTP redirects when received :param int max_redirects: Maximum number of redirects to follow, default is 5 :param float connect_timeout: Timeout for initial connection in seconds, default 20 seconds :param float request_timeout: Timeout for entire request in seconds, default 20 seconds :param int max_http_attempts: Maximum number of times to retry a request, default is 3 attempts :param str auth_username: Username for HTTP authentication :param str auth_password: Password for HTTP authentication :param str user_agent: The str used for the ``User-Agent`` header, default used if unspecified. :param bool validate_cert: For HTTPS requests, validate the server's certificate? Default is True :param bool allow_nonstandard_methods: Allow methods that don't adhere to the HTTP spec. :param set dont_retry: A list of status codes that will not be retried if an error is returned. Default: set({}) :rtype: HTTPResponse
[ "Perform", "a", "HTTP", "request" ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L265-L392
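A hedged usage sketch: a Tornado handler mixing in HTTPClientMixin and awaiting http_fetch. The import path follows the module path shown in this record; the upstream URL and the error handling are made up for illustration.

from sprockets.mixins import http
from tornado import web


class ExampleHandler(http.HTTPClientMixin, web.RequestHandler):

    async def get(self):
        # One retried, content-negotiated request; `response` is the
        # HTTPResponse wrapper documented above, not a raw Tornado response.
        response = await self.http_fetch(
            'https://example.com/api/items',
            request_headers={'Accept': 'application/json'})
        if not response.ok:
            raise web.HTTPError(502, 'upstream request failed')
        self.write({'items': response.body})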
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_req_apply_default_headers
def _http_req_apply_default_headers(self, request_headers,
                                    content_type, body):
    """Set default values for common HTTP request headers

    :param dict request_headers: The HTTP request headers
    :param content_type: The mime-type used in the request/response
    :type content_type: :py:class:`ietfparse.datastructures.ContentType`
        or str
    :param mixed body: The request body
    :rtype: dict
    """
    if not request_headers:
        request_headers = {}
    request_headers.setdefault(
        'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES]))
    if body:
        request_headers.setdefault(
            'Content-Type', str(content_type) or str(CONTENT_TYPE_MSGPACK))
    if hasattr(self, 'correlation_id'):
        request_headers.setdefault(
            'Correlation-Id', self.correlation_id)
    elif hasattr(self, 'request') and \
            self.request.headers.get('Correlation-Id'):
        request_headers.setdefault(
            'Correlation-Id', self.request.headers['Correlation-Id'])
    return request_headers
python
def _http_req_apply_default_headers(self, request_headers, content_type, body): """Set default values for common HTTP request headers :param dict request_headers: The HTTP request headers :param content_type: The mime-type used in the request/response :type content_type: :py:class:`ietfparse.datastructures.ContentType` or str :param mixed body: The request body :rtype: dict """ if not request_headers: request_headers = {} request_headers.setdefault( 'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES])) if body: request_headers.setdefault( 'Content-Type', str(content_type) or str(CONTENT_TYPE_MSGPACK)) if hasattr(self, 'correlation_id'): request_headers.setdefault( 'Correlation-Id', self.correlation_id) elif hasattr(self, 'request') and \ self.request.headers.get('Correlation-Id'): request_headers.setdefault( 'Correlation-Id', self.request.headers['Correlation-Id']) return request_headers
[ "def", "_http_req_apply_default_headers", "(", "self", ",", "request_headers", ",", "content_type", ",", "body", ")", ":", "if", "not", "request_headers", ":", "request_headers", "=", "{", "}", "request_headers", ".", "setdefault", "(", "'Accept'", ",", "', '", ".", "join", "(", "[", "str", "(", "ct", ")", "for", "ct", "in", "AVAILABLE_CONTENT_TYPES", "]", ")", ")", "if", "body", ":", "request_headers", ".", "setdefault", "(", "'Content-Type'", ",", "str", "(", "content_type", ")", "or", "str", "(", "CONTENT_TYPE_MSGPACK", ")", ")", "if", "hasattr", "(", "self", ",", "'correlation_id'", ")", ":", "request_headers", ".", "setdefault", "(", "'Correlation-Id'", ",", "self", ".", "correlation_id", ")", "elif", "hasattr", "(", "self", ",", "'request'", ")", "and", "self", ".", "request", ".", "headers", ".", "get", "(", "'Correlation-Id'", ")", ":", "request_headers", ".", "setdefault", "(", "'Correlation-Id'", ",", "self", ".", "request", ".", "headers", "[", "'Correlation-Id'", "]", ")", "return", "request_headers" ]
Set default values for common HTTP request headers :param dict request_headers: The HTTP request headers :param content_type: The mime-type used in the request/response :type content_type: :py:class:`ietfparse.datastructures.ContentType` or str :param mixed body: The request body :rtype: dict
[ "Set", "default", "values", "for", "common", "HTTP", "request", "headers" ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L394-L420
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_req_body_serialize
def _http_req_body_serialize(self, body, content_type):
    """Conditionally serialize the request body value if mime_type is set
    and it's serializable.

    :param mixed body: The request body
    :param str content_type: The content type for the request body
    :raises: ValueError
    """
    if not body or not isinstance(body, (dict, list)):
        return body
    content_type = headers.parse_content_type(content_type)
    if content_type == CONTENT_TYPE_JSON:
        return self.__hcm_json.dumps(body)
    elif content_type == CONTENT_TYPE_MSGPACK:
        return self.__hcm_msgpack.packb(body)
    raise ValueError('Unsupported Content Type')
python
def _http_req_body_serialize(self, body, content_type): """Conditionally serialize the request body value if mime_type is set and it's serializable. :param mixed body: The request body :param str content_type: The content type for the request body :raises: ValueError """ if not body or not isinstance(body, (dict, list)): return body content_type = headers.parse_content_type(content_type) if content_type == CONTENT_TYPE_JSON: return self.__hcm_json.dumps(body) elif content_type == CONTENT_TYPE_MSGPACK: return self.__hcm_msgpack.packb(body) raise ValueError('Unsupported Content Type')
[ "def", "_http_req_body_serialize", "(", "self", ",", "body", ",", "content_type", ")", ":", "if", "not", "body", "or", "not", "isinstance", "(", "body", ",", "(", "dict", ",", "list", ")", ")", ":", "return", "body", "content_type", "=", "headers", ".", "parse_content_type", "(", "content_type", ")", "if", "content_type", "==", "CONTENT_TYPE_JSON", ":", "return", "self", ".", "__hcm_json", ".", "dumps", "(", "body", ")", "elif", "content_type", "==", "CONTENT_TYPE_MSGPACK", ":", "return", "self", ".", "__hcm_msgpack", ".", "packb", "(", "body", ")", "raise", "ValueError", "(", "'Unsupported Content Type'", ")" ]
Conditionally serialize the request body value if mime_type is set and it's serializable. :param mixed body: The request body :param str content_type: The content type for the request body :raises: ValueError
[ "Conditionally", "serialize", "the", "request", "body", "value", "if", "mime_type", "is", "set", "and", "it", "s", "serializable", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L422-L438
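As a rough, JSON-only illustration of the same contract (non-collections pass through untouched, unknown content types raise ValueError), consider this standard-library sketch; it does not reproduce the ietfparse parsing or msgpack branch of the real method.

import json


def serialize_body(body, content_type):
    # Simplified version of the serializer above: JSON only, with the same
    # "pass through anything that is not a dict or list" behaviour.
    if not body or not isinstance(body, (dict, list)):
        return body
    if content_type.split(';')[0].strip() == 'application/json':
        return json.dumps(body)
    raise ValueError('Unsupported Content Type')


print(serialize_body({'name': 'widget'}, 'application/json'))  # {"name": "widget"}
print(serialize_body('already a string', 'application/json'))  # already a string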
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_req_user_agent
def _http_req_user_agent(self):
    """Return the User-Agent value to specify in HTTP requests, defaulting
    to ``service/version`` if configured in the application settings,
    or if used in a consumer, it will attempt to obtain a user-agent from
    the consumer's process. If it can not auto-set the User-Agent, it
    defaults to ``sprockets.mixins.http/[VERSION]``.

    :rtype: str
    """
    # Tornado Request Handler
    try:
        return '{}/{}'.format(
            self.settings['service'], self.settings['version'])
    except (AttributeError, KeyError):
        pass

    # Rejected Consumer
    if hasattr(self, '_process'):
        try:
            return '{}/{}'.format(
                self._process.consumer_name,
                self._process.consumer_version)
        except AttributeError:
            pass
    return DEFAULT_USER_AGENT
python
def _http_req_user_agent(self): """Return the User-Agent value to specify in HTTP requests, defaulting to ``service/version`` if configured in the application settings, or if used in a consumer, it will attempt to obtain a user-agent from the consumer's process. If it can not auto-set the User-Agent, it defaults to ``sprockets.mixins.http/[VERSION]``. :rtype: str """ # Tornado Request Handler try: return '{}/{}'.format( self.settings['service'], self.settings['version']) except (AttributeError, KeyError): pass # Rejected Consumer if hasattr(self, '_process'): try: return '{}/{}'.format( self._process.consumer_name, self._process.consumer_version) except AttributeError: pass return DEFAULT_USER_AGENT
[ "def", "_http_req_user_agent", "(", "self", ")", ":", "# Tornado Request Handler", "try", ":", "return", "'{}/{}'", ".", "format", "(", "self", ".", "settings", "[", "'service'", "]", ",", "self", ".", "settings", "[", "'version'", "]", ")", "except", "(", "AttributeError", ",", "KeyError", ")", ":", "pass", "# Rejected Consumer", "if", "hasattr", "(", "self", ",", "'_process'", ")", ":", "try", ":", "return", "'{}/{}'", ".", "format", "(", "self", ".", "_process", ".", "consumer_name", ",", "self", ".", "_process", ".", "consumer_version", ")", "except", "AttributeError", ":", "pass", "return", "DEFAULT_USER_AGENT" ]
Return the User-Agent value to specify in HTTP requests, defaulting to ``service/version`` if configured in the application settings, or if used in a consumer, it will attempt to obtain a user-agent from the consumer's process. If it can not auto-set the User-Agent, it defaults to ``sprockets.mixins.http/[VERSION]``. :rtype: str
[ "Return", "the", "User", "-", "Agent", "value", "to", "specify", "in", "HTTP", "requests", "defaulting", "to", "service", "/", "version", "if", "configured", "in", "the", "application", "settings", "or", "if", "used", "in", "a", "consumer", "it", "will", "attempt", "to", "obtain", "a", "user", "-", "agent", "from", "the", "consumer", "s", "process", ".", "If", "it", "can", "not", "auto", "-", "set", "the", "User", "-", "Agent", "it", "defaults", "to", "sprockets", ".", "mixins", ".", "http", "/", "[", "VERSION", "]", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L440-L465
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
HTTPClientMixin._http_resp_rate_limited
def _http_resp_rate_limited(response):
    """Extract the ``Retry-After`` header value if the request was rate
    limited and return a future to sleep for the specified duration.

    :param tornado.httpclient.HTTPResponse response: The response
    :rtype: tornado.concurrent.Future
    """
    parsed = parse.urlparse(response.request.url)
    duration = int(response.headers.get('Retry-After', 3))
    LOGGER.warning('Rate Limited by %s, retrying in %i seconds',
                   parsed.netloc, duration)
    return asyncio.sleep(duration)
python
def _http_resp_rate_limited(response): """Extract the ``Retry-After`` header value if the request was rate limited and return a future to sleep for the specified duration. :param tornado.httpclient.HTTPResponse response: The response :rtype: tornado.concurrent.Future """ parsed = parse.urlparse(response.request.url) duration = int(response.headers.get('Retry-After', 3)) LOGGER.warning('Rate Limited by %s, retrying in %i seconds', parsed.netloc, duration) return asyncio.sleep(duration)
[ "def", "_http_resp_rate_limited", "(", "response", ")", ":", "parsed", "=", "parse", ".", "urlparse", "(", "response", ".", "request", ".", "url", ")", "duration", "=", "int", "(", "response", ".", "headers", ".", "get", "(", "'Retry-After'", ",", "3", ")", ")", "LOGGER", ".", "warning", "(", "'Rate Limited by %s, retrying in %i seconds'", ",", "parsed", ".", "netloc", ",", "duration", ")", "return", "asyncio", ".", "sleep", "(", "duration", ")" ]
Extract the ``Retry-After`` header value if the request was rate limited and return a future to sleep for the specified duration. :param tornado.httpclient.HTTPResponse response: The response :rtype: tornado.concurrent.Future
[ "Extract", "the", "Retry", "-", "After", "header", "value", "if", "the", "request", "was", "rate", "limited", "and", "return", "a", "future", "to", "sleep", "for", "the", "specified", "duration", "." ]
train
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L468-L480
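The same Retry-After handling can be shown as a small self-contained coroutine; the header name and the default of three seconds come from the record above, everything else is illustrative.

import asyncio


async def backoff_from_headers(headers, default=3):
    # Sleep for the server-suggested Retry-After period, as the mixin does
    # when it sees a 423 or 429 response.
    duration = int(headers.get('Retry-After', default))
    await asyncio.sleep(duration)
    return duration


print(asyncio.run(backoff_from_headers({'Retry-After': '1'})))  # sleeps 1s, prints 1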
thorgate/tg-utils
tg_utils/lock.py
acquires_lock
def acquires_lock(expires, should_fail=True, should_wait=False, resource=None, prefix=DEFAULT_PREFIX):
    """ Decorator to ensure function only runs when it is unique holder of the resource.

    Any invocations of the functions before the first is done will raise RuntimeError.

    Locks are stored in redis with prefix: `lock:acquires_lock`

    Arguments:
        expires(timedelta|int): Expiry time of lock, way more than expected time to run.
                                Intended as a failsafe clean-up mechanism.
        should_fail(bool): Should error be raised if failed to acquire lock.
        should_wait(bool): Should this task wait for lock to be released.
        resource(str): Resource identifier, by default taken from function name.
        prefix(str): Change prefix added to redis key (the 'lock:' part will always be added)

    Example:
        You have a celery task and you want to ensure it is never executed concurrently:

        @shared_task
        @acquire_lock(60, resource='foo')
        def foo():
            ...
    """
    # Seconds from now on
    if isinstance(expires, timedelta):
        expires = expires.total_seconds()

    # This is just a tiny wrapper around redis_lock
    #  1) acquire lock or fail
    #  2) run function
    #  3) release lock
    def decorator(f):
        nonlocal resource
        if resource is None:
            resource = f.__name__
        resource = '%s%s' % (prefix, resource)

        @wraps(f)
        def wrapper(*args, **kwargs):
            # The context manager is annoying and always blocking...
            lock = redis_lock.Lock(_redis_conn, resource, expire=expires)
            lock_acquired = False

            # Get default lock blocking mode
            # Copying to local variable so original variable would not be touched
            nonlocal should_wait
            is_blocking = should_wait

            should_execute_if_lock_fails = False
            if 'should_execute_if_lock_fails' in kwargs:
                should_execute_if_lock_fails = kwargs.pop("should_execute_if_lock_fails")

            # If decorated fn is called with should_wait kwarg
            #  Override lock blocking mode
            if 'should_wait' in kwargs:
                is_blocking = kwargs.pop('should_wait')

            if is_blocking:
                logger.debug('Waiting for resource "%s"', resource)

            if not lock.acquire(blocking=is_blocking):
                if should_fail:
                    raise RuntimeError("Failed to acquire lock: %s" % resource)

                logger.warning('Failed to acquire lock: %s', resource)

                if not should_execute_if_lock_fails:
                    return False
            else:
                lock_acquired = True

            try:
                return f(*args, **kwargs)
            finally:
                try:
                    if lock_acquired:
                        lock.release()
                except Exception as e:
                    logger.exception('Failed to release lock: %s', str(e), exc_info=False)

        return wrapper
    return decorator
python
def acquires_lock(expires, should_fail=True, should_wait=False, resource=None, prefix=DEFAULT_PREFIX): """ Decorator to ensure function only runs when it is unique holder of the resource. Any invocations of the functions before the first is done will raise RuntimeError. Locks are stored in redis with prefix: `lock:acquires_lock` Arguments: expires(timedelta|int): Expiry time of lock, way more than expected time to run. Intended as a failsafe clean-up mechanism. should_fail(bool): Should error be raised if failed to acquire lock. should_wait(bool): Should this task wait for lock to be released. resource(str): Resource identifier, by default taken from function name. prefix(str): Change prefix added to redis key (the 'lock:' part will always be added) Example: You have a celery task and you want to ensure it is never executed concurrently: @shared_task @acquire_lock(60, resource='foo') def foo(): ... """ # Seconds from now on if isinstance(expires, timedelta): expires = expires.total_seconds() # This is just a tiny wrapper around redis_lock # 1) acquire lock or fail # 2) run function # 3) release lock def decorator(f): nonlocal resource if resource is None: resource = f.__name__ resource = '%s%s' % (prefix, resource) @wraps(f) def wrapper(*args, **kwargs): # The context manager is annoying and always blocking... lock = redis_lock.Lock(_redis_conn, resource, expire=expires) lock_acquired = False # Get default lock blocking mode # Copying to local variable so original variable would not be touched nonlocal should_wait is_blocking = should_wait should_execute_if_lock_fails = False if 'should_execute_if_lock_fails' in kwargs: should_execute_if_lock_fails = kwargs.pop("should_execute_if_lock_fails") # If decorated fn is called with should_wait kwarg # Override lock blocking mode if 'should_wait' in kwargs: is_blocking = kwargs.pop('should_wait') if is_blocking: logger.debug('Waiting for resource "%s"', resource) if not lock.acquire(blocking=is_blocking): if should_fail: raise RuntimeError("Failed to acquire lock: %s" % resource) logger.warning('Failed to acquire lock: %s', resource) if not should_execute_if_lock_fails: return False else: lock_acquired = True try: return f(*args, **kwargs) finally: try: if lock_acquired: lock.release() except Exception as e: logger.exception('Failed to release lock: %s', str(e), exc_info=False) return wrapper return decorator
[ "def", "acquires_lock", "(", "expires", ",", "should_fail", "=", "True", ",", "should_wait", "=", "False", ",", "resource", "=", "None", ",", "prefix", "=", "DEFAULT_PREFIX", ")", ":", "# Seconds from now on", "if", "isinstance", "(", "expires", ",", "timedelta", ")", ":", "expires", "=", "expires", ".", "total_seconds", "(", ")", "# This is just a tiny wrapper around redis_lock", "# 1) acquire lock or fail", "# 2) run function", "# 3) release lock", "def", "decorator", "(", "f", ")", ":", "nonlocal", "resource", "if", "resource", "is", "None", ":", "resource", "=", "f", ".", "__name__", "resource", "=", "'%s%s'", "%", "(", "prefix", ",", "resource", ")", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# The context manager is annoying and always blocking...", "lock", "=", "redis_lock", ".", "Lock", "(", "_redis_conn", ",", "resource", ",", "expire", "=", "expires", ")", "lock_acquired", "=", "False", "# Get default lock blocking mode", "# Copying to local variable so original variable would not be touched", "nonlocal", "should_wait", "is_blocking", "=", "should_wait", "should_execute_if_lock_fails", "=", "False", "if", "'should_execute_if_lock_fails'", "in", "kwargs", ":", "should_execute_if_lock_fails", "=", "kwargs", ".", "pop", "(", "\"should_execute_if_lock_fails\"", ")", "# If decorated fn is called with should_wait kwarg", "# Override lock blocking mode", "if", "'should_wait'", "in", "kwargs", ":", "is_blocking", "=", "kwargs", ".", "pop", "(", "'should_wait'", ")", "if", "is_blocking", ":", "logger", ".", "debug", "(", "'Waiting for resource \"%s\"'", ",", "resource", ")", "if", "not", "lock", ".", "acquire", "(", "blocking", "=", "is_blocking", ")", ":", "if", "should_fail", ":", "raise", "RuntimeError", "(", "\"Failed to acquire lock: %s\"", "%", "resource", ")", "logger", ".", "warning", "(", "'Failed to acquire lock: %s'", ",", "resource", ")", "if", "not", "should_execute_if_lock_fails", ":", "return", "False", "else", ":", "lock_acquired", "=", "True", "try", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "try", ":", "if", "lock_acquired", ":", "lock", ".", "release", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "'Failed to release lock: %s'", ",", "str", "(", "e", ")", ",", "exc_info", "=", "False", ")", "return", "wrapper", "return", "decorator" ]
Decorator to ensure function only runs when it is unique holder of the resource. Any invocations of the functions before the first is done will raise RuntimeError. Locks are stored in redis with prefix: `lock:acquires_lock` Arguments: expires(timedelta|int): Expiry time of lock, way more than expected time to run. Intended as a failsafe clean-up mechanism. should_fail(bool): Should error be raised if failed to acquire lock. should_wait(bool): Should this task wait for lock to be released. resource(str): Resource identifier, by default taken from function name. prefix(str): Change prefix added to redis key (the 'lock:' part will always be added) Example: You have a celery task and you want to ensure it is never executed concurrently: @shared_task @acquire_lock(60, resource='foo') def foo(): ...
[ "Decorator", "to", "ensure", "function", "only", "runs", "when", "it", "is", "unique", "holder", "of", "the", "resource", "." ]
train
https://github.com/thorgate/tg-utils/blob/81e404e837334b241686d9159cc3eb44de509a88/tg_utils/lock.py#L36-L124
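A hedged usage sketch along the lines of the docstring example: a Celery task that refuses to run concurrently. It assumes a project where tg_utils' redis connection is already configured; the task name, resource identifier, and expiry are made up.

from datetime import timedelta

from celery import shared_task

from tg_utils.lock import acquires_lock


@shared_task
@acquires_lock(expires=timedelta(minutes=10), should_fail=False,
               resource='nightly-report')
def build_nightly_report():
    # Only one worker at a time may hold the redis lock for this resource;
    # concurrent invocations simply return False because should_fail=False.
    ...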