| Column | Type |
|---|---|
| repository_name | string (length 7–55) |
| func_path_in_repository | string (length 4–223) |
| func_name | string (length 1–134) |
| whole_func_string | string (length 75–104k) |
| language | string (1 class) |
| func_code_string | string (length 75–104k) |
| func_code_tokens | list (length 19–28.4k) |
| func_documentation_string | string (length 1–46.9k) |
| func_documentation_tokens | list (length 1–1.97k) |
| split_name | string (1 class) |
| func_code_url | string (length 87–315) |
volafiled/python-volapi
|
volapi/volapi.py
|
Room.__get_config
|
def __get_config(self):
""" Really connect """
if not self.name:
room_resp = self.conn.get(BASE_URL + "/new")
room_resp.raise_for_status()
url = room_resp.url
try:
self.name = re.search(r"r/(.+?)$", url).group(1)
except Exception:
raise IOError("Failed to create room")
params = {"room": self.name}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
config = self.conn.make_api_call("getRoomConfig", params)
if "error" in config:
raise RuntimeError(
f"Failed to get room config for {self.name}\n"
f"{config['error'].get('message') or config['error']}"
)
self.config.update(config)
self.__add_prop("private")
self.__add_prop("title")
self.__add_prop("motd")
self.__add_prop("adult")
self.__add_prop("disabled", True)
self.__add_prop("file_ttl", True)
return (self.config.room_id, self.config.owner, config["checksum2"])
|
python
|
def __get_config(self):
""" Really connect """
if not self.name:
room_resp = self.conn.get(BASE_URL + "/new")
room_resp.raise_for_status()
url = room_resp.url
try:
self.name = re.search(r"r/(.+?)$", url).group(1)
except Exception:
raise IOError("Failed to create room")
params = {"room": self.name}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
config = self.conn.make_api_call("getRoomConfig", params)
if "error" in config:
raise RuntimeError(
f"Failed to get room config for {self.name}\n"
f"{config['error'].get('message') or config['error']}"
)
self.config.update(config)
self.__add_prop("private")
self.__add_prop("title")
self.__add_prop("motd")
self.__add_prop("adult")
self.__add_prop("disabled", True)
self.__add_prop("file_ttl", True)
return (self.config.room_id, self.config.owner, config["checksum2"])
|
[
"def",
"__get_config",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"name",
":",
"room_resp",
"=",
"self",
".",
"conn",
".",
"get",
"(",
"BASE_URL",
"+",
"\"/new\"",
")",
"room_resp",
".",
"raise_for_status",
"(",
")",
"url",
"=",
"room_resp",
".",
"url",
"try",
":",
"self",
".",
"name",
"=",
"re",
".",
"search",
"(",
"r\"r/(.+?)$\"",
",",
"url",
")",
".",
"group",
"(",
"1",
")",
"except",
"Exception",
":",
"raise",
"IOError",
"(",
"\"Failed to create room\"",
")",
"params",
"=",
"{",
"\"room\"",
":",
"self",
".",
"name",
"}",
"if",
"self",
".",
"key",
":",
"params",
"[",
"\"roomKey\"",
"]",
"=",
"self",
".",
"key",
"if",
"self",
".",
"password",
":",
"params",
"[",
"\"password\"",
"]",
"=",
"self",
".",
"password",
"config",
"=",
"self",
".",
"conn",
".",
"make_api_call",
"(",
"\"getRoomConfig\"",
",",
"params",
")",
"if",
"\"error\"",
"in",
"config",
":",
"raise",
"RuntimeError",
"(",
"f\"Failed to get room config for {self.name}\\n\"",
"f\"{config['error'].get('message') or config['error']}\"",
")",
"self",
".",
"config",
".",
"update",
"(",
"config",
")",
"self",
".",
"__add_prop",
"(",
"\"private\"",
")",
"self",
".",
"__add_prop",
"(",
"\"title\"",
")",
"self",
".",
"__add_prop",
"(",
"\"motd\"",
")",
"self",
".",
"__add_prop",
"(",
"\"adult\"",
")",
"self",
".",
"__add_prop",
"(",
"\"disabled\"",
",",
"True",
")",
"self",
".",
"__add_prop",
"(",
"\"file_ttl\"",
",",
"True",
")",
"return",
"(",
"self",
".",
"config",
".",
"room_id",
",",
"self",
".",
"config",
".",
"owner",
",",
"config",
"[",
"\"checksum2\"",
"]",
")"
] |
Really connect
|
[
"Really",
"connect"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L442-L471
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.listen
|
def listen(self, once=False):
"""Listen for changes in all registered listeners.
    Use add_listener before calling this function to listen for desired
events or set `once` to True to listen for initial room information """
if once:
# we listen for time event and return false so our
# run_queues function will be also falsy and break the loop
self.add_listener("time", lambda _: False)
return self.conn.listen()
|
python
|
def listen(self, once=False):
"""Listen for changes in all registered listeners.
    Use add_listener before calling this function to listen for desired
events or set `once` to True to listen for initial room information """
if once:
# we listen for time event and return false so our
# run_queues function will be also falsy and break the loop
self.add_listener("time", lambda _: False)
return self.conn.listen()
|
[
"def",
"listen",
"(",
"self",
",",
"once",
"=",
"False",
")",
":",
"if",
"once",
":",
"# we listen for time event and return false so our",
"# run_queues function will be also falsy and break the loop",
"self",
".",
"add_listener",
"(",
"\"time\"",
",",
"lambda",
"_",
":",
"False",
")",
"return",
"self",
".",
"conn",
".",
"listen",
"(",
")"
] |
Listen for changes in all registered listeners.
Use add_listener before calling this function to listen for desired
events or set `once` to True to listen for initial room information
|
[
"Listen",
"for",
"changes",
"in",
"all",
"registered",
"listeners",
".",
"Use",
"add_listener",
"before",
"calling",
"this",
"funcion",
"to",
"listen",
"for",
"desired",
"events",
"or",
"set",
"once",
"to",
"True",
"to",
"listen",
"for",
"initial",
"room",
"information"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L497-L506
|
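A minimal consumption sketch for the `listen` row above, assuming volapi's `Room(name, nick)` entry point; the room name and nick are placeholders. Per the `once` comment in the code, a listener that returns a falsy value ends the loop, so the callback below returns True to keep listening.

```python
from volapi import Room

def on_chat(msg):
    print(msg)
    return True  # a falsy return would break the listen loop

# Placeholder room name and nick; substitute real ones.
with Room("examplerm", "demo-nick") as room:
    room.add_listener("chat", on_chat)
    room.listen()
```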
volafiled/python-volapi
|
volapi/volapi.py
|
Room.__expire_files
|
def __expire_files(self):
"""Because files are always unclean"""
self.__files = OrderedDict(
item for item in self.__files.items() if not item[1].expired
)
|
python
|
def __expire_files(self):
"""Because files are always unclean"""
self.__files = OrderedDict(
item for item in self.__files.items() if not item[1].expired
)
|
[
"def",
"__expire_files",
"(",
"self",
")",
":",
"self",
".",
"__files",
"=",
"OrderedDict",
"(",
"item",
"for",
"item",
"in",
"self",
".",
"__files",
".",
"items",
"(",
")",
"if",
"not",
"item",
"[",
"1",
"]",
".",
"expired",
")"
] |
Because files are always unclean
|
[
"Because",
"files",
"are",
"always",
"unclean"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L529-L534
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.filedict
|
def filedict(self, kv):
"""Updates filedict with single file entry or deletes given
    key if the value is None. Shouldn't be used by the user."""
k, v = kv
if v is not None:
self.__files.update({k: v})
else:
with suppress(KeyError):
del self.__files[k]
|
python
|
def filedict(self, kv):
"""Updates filedict with single file entry or deletes given
    key if the value is None. Shouldn't be used by the user."""
k, v = kv
if v is not None:
self.__files.update({k: v})
else:
with suppress(KeyError):
del self.__files[k]
|
[
"def",
"filedict",
"(",
"self",
",",
"kv",
")",
":",
"k",
",",
"v",
"=",
"kv",
"if",
"v",
"is",
"not",
"None",
":",
"self",
".",
"__files",
".",
"update",
"(",
"{",
"k",
":",
"v",
"}",
")",
"else",
":",
"with",
"suppress",
"(",
"KeyError",
")",
":",
"del",
"self",
".",
"__files",
"[",
"k",
"]"
] |
Updates filedict with single file entry or deletes given
key if the value is None. Shouldn't be used by the user.
|
[
"Updates",
"filedict",
"with",
"single",
"file",
"entry",
"or",
"deletes",
"given",
"key",
"if",
"the",
"value",
"is",
"False",
".",
"Shouldn",
"t",
"be",
"used",
"by",
"the",
"user",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L555-L564
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.get_user_stats
|
def get_user_stats(self, name):
"""Return data about the given user. Returns None if user
does not exist."""
req = self.conn.get(BASE_URL + "/user/" + name)
if req.status_code != 200 or not name:
return None
return self.conn.make_api_call("getUserInfo", {"name": name})
|
python
|
def get_user_stats(self, name):
"""Return data about the given user. Returns None if user
does not exist."""
req = self.conn.get(BASE_URL + "/user/" + name)
if req.status_code != 200 or not name:
return None
return self.conn.make_api_call("getUserInfo", {"name": name})
|
[
"def",
"get_user_stats",
"(",
"self",
",",
"name",
")",
":",
"req",
"=",
"self",
".",
"conn",
".",
"get",
"(",
"BASE_URL",
"+",
"\"/user/\"",
"+",
"name",
")",
"if",
"req",
".",
"status_code",
"!=",
"200",
"or",
"not",
"name",
":",
"return",
"None",
"return",
"self",
".",
"conn",
".",
"make_api_call",
"(",
"\"getUserInfo\"",
",",
"{",
"\"name\"",
":",
"name",
"}",
")"
] |
Return data about the given user. Returns None if user
does not exist.
|
[
"Return",
"data",
"about",
"the",
"given",
"user",
".",
"Returns",
"None",
"if",
"user",
"does",
"not",
"exist",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L566-L574
|
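A short usage sketch for `get_user_stats` (room, nick, and queried name are placeholders); the `None` return distinguishes unknown users from registered ones:

```python
from volapi import Room

with Room("examplerm", "demo-nick") as room:  # placeholder names
    stats = room.get_user_stats("someuser")
    if stats is None:
        print("no such registered user")
    else:
        print(stats)
```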
volafiled/python-volapi
|
volapi/volapi.py
|
Room.post_chat
|
def post_chat(self, msg, is_me=False, is_a=False):
"""Posts a msg to this room's chat. Set me=True if you want to /me"""
if len(msg) > self.config.max_message:
raise ValueError(
f"Chat message must be at most {self.config.max_message} characters."
)
while not self.user.nick:
with ARBITRATOR.condition:
ARBITRATOR.condition.wait()
if is_a:
if not self.admin and not self.staff:
raise RuntimeError("Can't modchat if you're not a mod or trusted")
self.conn.make_call("command", self.user.nick, "a", msg)
return
if is_me:
self.conn.make_call("command", self.user.nick, "me", msg)
return
self.conn.make_call("chat", self.user.nick, msg)
|
python
|
def post_chat(self, msg, is_me=False, is_a=False):
"""Posts a msg to this room's chat. Set me=True if you want to /me"""
if len(msg) > self.config.max_message:
raise ValueError(
f"Chat message must be at most {self.config.max_message} characters."
)
while not self.user.nick:
with ARBITRATOR.condition:
ARBITRATOR.condition.wait()
if is_a:
if not self.admin and not self.staff:
raise RuntimeError("Can't modchat if you're not a mod or trusted")
self.conn.make_call("command", self.user.nick, "a", msg)
return
if is_me:
self.conn.make_call("command", self.user.nick, "me", msg)
return
self.conn.make_call("chat", self.user.nick, msg)
|
[
"def",
"post_chat",
"(",
"self",
",",
"msg",
",",
"is_me",
"=",
"False",
",",
"is_a",
"=",
"False",
")",
":",
"if",
"len",
"(",
"msg",
")",
">",
"self",
".",
"config",
".",
"max_message",
":",
"raise",
"ValueError",
"(",
"f\"Chat message must be at most {self.config.max_message} characters.\"",
")",
"while",
"not",
"self",
".",
"user",
".",
"nick",
":",
"with",
"ARBITRATOR",
".",
"condition",
":",
"ARBITRATOR",
".",
"condition",
".",
"wait",
"(",
")",
"if",
"is_a",
":",
"if",
"not",
"self",
".",
"admin",
"and",
"not",
"self",
".",
"staff",
":",
"raise",
"RuntimeError",
"(",
"\"Can't modchat if you're not a mod or trusted\"",
")",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"command\"",
",",
"self",
".",
"user",
".",
"nick",
",",
"\"a\"",
",",
"msg",
")",
"return",
"if",
"is_me",
":",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"command\"",
",",
"self",
".",
"user",
".",
"nick",
",",
"\"me\"",
",",
"msg",
")",
"return",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"chat\"",
",",
"self",
".",
"user",
".",
"nick",
",",
"msg",
")"
] |
Posts a msg to this room's chat. Set is_me=True if you want to /me
|
[
"Posts",
"a",
"msg",
"to",
"this",
"room",
"s",
"chat",
".",
"Set",
"me",
"=",
"True",
"if",
"you",
"want",
"to",
"/",
"me"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L576-L595
|
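A hedged usage sketch for `post_chat`; the names are placeholders, and the `ValueError` branch corresponds to the `max_message` check in the code above:

```python
from volapi import Room

with Room("examplerm", "demo-nick") as room:  # placeholder names
    try:
        room.post_chat("hello")              # regular chat message
        room.post_chat("waves", is_me=True)  # rendered as /me
    except ValueError:
        # raised when the message exceeds config.max_message
        print("message too long for this room")
```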
volafiled/python-volapi
|
volapi/volapi.py
|
Room.upload_file
|
def upload_file(
self,
filename,
upload_as=None,
blocksize=None,
callback=None,
information_callback=None,
allow_timeout=False,
):
"""Uploads a file with given filename to this room.
You may specify upload_as to change the name it is uploaded as.
You can also specify a blocksize and a callback if you wish.
Returns the file's id on success and None on failure."""
with delayed_close(
filename if hasattr(filename, "read") else open(filename, "rb")
) as file:
filename = upload_as or os.path.split(filename)[1]
try:
file.seek(0, 2)
if file.tell() > self.config.max_file:
raise ValueError(
f"File must be at most {self.config.max_file >> 30} GB"
)
finally:
try:
file.seek(0)
except Exception:
pass
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
)
headers = {"Origin": BASE_URL}
headers.update(files.headers)
while True:
key, server, file_id = self._generate_upload_key(
allow_timeout=allow_timeout
)
info = dict(
key=key,
server=server,
file_id=file_id,
room=self.room_id,
filename=filename,
len=files.len,
resumecount=0,
)
if information_callback:
if information_callback(info) is False:
continue
break
params = {"room": self.room_id, "key": key, "filename": filename}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
while True:
try:
post = self.conn.post(
f"https://{server}/upload",
params=params,
data=files,
headers=headers,
)
post.raise_for_status()
break
except requests.exceptions.ConnectionError as ex:
if "aborted" not in repr(ex): # ye, that's nasty but "compatible"
raise
try:
resume = self.conn.get(
f"https://{server}/rest/uploadStatus",
params={"key": key, "c": 1},
).text
resume = from_json(resume)
resume = resume["receivedBytes"]
if resume <= 0:
raise ConnectionError("Cannot resume")
file.seek(resume)
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
logical_offset=resume,
)
headers.update(files.headers)
params["startAt"] = resume
info["resumecount"] += 1
if information_callback:
information_callback(info)
except requests.exceptions.ConnectionError as iex:
# ye, that's nasty but "compatible"
if "aborted" not in repr(iex):
raise
continue # another day, another try
return file_id
|
python
|
def upload_file(
self,
filename,
upload_as=None,
blocksize=None,
callback=None,
information_callback=None,
allow_timeout=False,
):
"""Uploads a file with given filename to this room.
You may specify upload_as to change the name it is uploaded as.
You can also specify a blocksize and a callback if you wish.
Returns the file's id on success and None on failure."""
with delayed_close(
filename if hasattr(filename, "read") else open(filename, "rb")
) as file:
filename = upload_as or os.path.split(filename)[1]
try:
file.seek(0, 2)
if file.tell() > self.config.max_file:
raise ValueError(
f"File must be at most {self.config.max_file >> 30} GB"
)
finally:
try:
file.seek(0)
except Exception:
pass
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
)
headers = {"Origin": BASE_URL}
headers.update(files.headers)
while True:
key, server, file_id = self._generate_upload_key(
allow_timeout=allow_timeout
)
info = dict(
key=key,
server=server,
file_id=file_id,
room=self.room_id,
filename=filename,
len=files.len,
resumecount=0,
)
if information_callback:
if information_callback(info) is False:
continue
break
params = {"room": self.room_id, "key": key, "filename": filename}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
while True:
try:
post = self.conn.post(
f"https://{server}/upload",
params=params,
data=files,
headers=headers,
)
post.raise_for_status()
break
except requests.exceptions.ConnectionError as ex:
if "aborted" not in repr(ex): # ye, that's nasty but "compatible"
raise
try:
resume = self.conn.get(
f"https://{server}/rest/uploadStatus",
params={"key": key, "c": 1},
).text
resume = from_json(resume)
resume = resume["receivedBytes"]
if resume <= 0:
raise ConnectionError("Cannot resume")
file.seek(resume)
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
logical_offset=resume,
)
headers.update(files.headers)
params["startAt"] = resume
info["resumecount"] += 1
if information_callback:
information_callback(info)
except requests.exceptions.ConnectionError as iex:
# ye, that's nasty but "compatible"
if "aborted" not in repr(iex):
raise
continue # another day, another try
return file_id
|
[
"def",
"upload_file",
"(",
"self",
",",
"filename",
",",
"upload_as",
"=",
"None",
",",
"blocksize",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"information_callback",
"=",
"None",
",",
"allow_timeout",
"=",
"False",
",",
")",
":",
"with",
"delayed_close",
"(",
"filename",
"if",
"hasattr",
"(",
"filename",
",",
"\"read\"",
")",
"else",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
")",
"as",
"file",
":",
"filename",
"=",
"upload_as",
"or",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"1",
"]",
"try",
":",
"file",
".",
"seek",
"(",
"0",
",",
"2",
")",
"if",
"file",
".",
"tell",
"(",
")",
">",
"self",
".",
"config",
".",
"max_file",
":",
"raise",
"ValueError",
"(",
"f\"File must be at most {self.config.max_file >> 30} GB\"",
")",
"finally",
":",
"try",
":",
"file",
".",
"seek",
"(",
"0",
")",
"except",
"Exception",
":",
"pass",
"files",
"=",
"Data",
"(",
"{",
"\"file\"",
":",
"{",
"\"name\"",
":",
"filename",
",",
"\"value\"",
":",
"file",
"}",
"}",
",",
"blocksize",
"=",
"blocksize",
",",
"callback",
"=",
"callback",
",",
")",
"headers",
"=",
"{",
"\"Origin\"",
":",
"BASE_URL",
"}",
"headers",
".",
"update",
"(",
"files",
".",
"headers",
")",
"while",
"True",
":",
"key",
",",
"server",
",",
"file_id",
"=",
"self",
".",
"_generate_upload_key",
"(",
"allow_timeout",
"=",
"allow_timeout",
")",
"info",
"=",
"dict",
"(",
"key",
"=",
"key",
",",
"server",
"=",
"server",
",",
"file_id",
"=",
"file_id",
",",
"room",
"=",
"self",
".",
"room_id",
",",
"filename",
"=",
"filename",
",",
"len",
"=",
"files",
".",
"len",
",",
"resumecount",
"=",
"0",
",",
")",
"if",
"information_callback",
":",
"if",
"information_callback",
"(",
"info",
")",
"is",
"False",
":",
"continue",
"break",
"params",
"=",
"{",
"\"room\"",
":",
"self",
".",
"room_id",
",",
"\"key\"",
":",
"key",
",",
"\"filename\"",
":",
"filename",
"}",
"if",
"self",
".",
"key",
":",
"params",
"[",
"\"roomKey\"",
"]",
"=",
"self",
".",
"key",
"if",
"self",
".",
"password",
":",
"params",
"[",
"\"password\"",
"]",
"=",
"self",
".",
"password",
"while",
"True",
":",
"try",
":",
"post",
"=",
"self",
".",
"conn",
".",
"post",
"(",
"f\"https://{server}/upload\"",
",",
"params",
"=",
"params",
",",
"data",
"=",
"files",
",",
"headers",
"=",
"headers",
",",
")",
"post",
".",
"raise_for_status",
"(",
")",
"break",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"ex",
":",
"if",
"\"aborted\"",
"not",
"in",
"repr",
"(",
"ex",
")",
":",
"# ye, that's nasty but \"compatible\"",
"raise",
"try",
":",
"resume",
"=",
"self",
".",
"conn",
".",
"get",
"(",
"f\"https://{server}/rest/uploadStatus\"",
",",
"params",
"=",
"{",
"\"key\"",
":",
"key",
",",
"\"c\"",
":",
"1",
"}",
",",
")",
".",
"text",
"resume",
"=",
"from_json",
"(",
"resume",
")",
"resume",
"=",
"resume",
"[",
"\"receivedBytes\"",
"]",
"if",
"resume",
"<=",
"0",
":",
"raise",
"ConnectionError",
"(",
"\"Cannot resume\"",
")",
"file",
".",
"seek",
"(",
"resume",
")",
"files",
"=",
"Data",
"(",
"{",
"\"file\"",
":",
"{",
"\"name\"",
":",
"filename",
",",
"\"value\"",
":",
"file",
"}",
"}",
",",
"blocksize",
"=",
"blocksize",
",",
"callback",
"=",
"callback",
",",
"logical_offset",
"=",
"resume",
",",
")",
"headers",
".",
"update",
"(",
"files",
".",
"headers",
")",
"params",
"[",
"\"startAt\"",
"]",
"=",
"resume",
"info",
"[",
"\"resumecount\"",
"]",
"+=",
"1",
"if",
"information_callback",
":",
"information_callback",
"(",
"info",
")",
"except",
"requests",
".",
"exceptions",
".",
"ConnectionError",
"as",
"iex",
":",
"# ye, that's nasty but \"compatible\"",
"if",
"\"aborted\"",
"not",
"in",
"repr",
"(",
"iex",
")",
":",
"raise",
"continue",
"# another day, another try",
"return",
"file_id"
] |
Uploads a file with given filename to this room.
You may specify upload_as to change the name it is uploaded as.
You can also specify a blocksize and a callback if you wish.
Returns the file's id on success and None on failure.
|
[
"Uploads",
"a",
"file",
"with",
"given",
"filename",
"to",
"this",
"room",
".",
"You",
"may",
"specify",
"upload_as",
"to",
"change",
"the",
"name",
"it",
"is",
"uploaded",
"as",
".",
"You",
"can",
"also",
"specify",
"a",
"blocksize",
"and",
"a",
"callback",
"if",
"you",
"wish",
".",
"Returns",
"the",
"file",
"s",
"id",
"on",
"success",
"and",
"None",
"on",
"failure",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L597-L701
|
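A happy-path sketch for `upload_file`, leaving the block size and callbacks at their defaults; the room, nick, and file path are placeholders. Per the signature above, `filename` may also be any object with a `read()` method:

```python
from volapi import Room

with Room("examplerm", "demo-nick") as room:  # placeholder names
    file_id = room.upload_file("/tmp/cat.png", upload_as="cat.png")
    print("uploaded with id", file_id)  # None would signal failure
```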
volafiled/python-volapi
|
volapi/volapi.py
|
Room.close
|
def close(self):
"""Close connection to this room"""
if hasattr(self, "conn"):
self.conn.close()
del self.conn
if hasattr(self, "user"):
del self.user
|
python
|
def close(self):
"""Close connection to this room"""
if hasattr(self, "conn"):
self.conn.close()
del self.conn
if hasattr(self, "user"):
del self.user
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"conn\"",
")",
":",
"self",
".",
"conn",
".",
"close",
"(",
")",
"del",
"self",
".",
"conn",
"if",
"hasattr",
"(",
"self",
",",
"\"user\"",
")",
":",
"del",
"self",
".",
"user"
] |
Close connection to this room
|
[
"Close",
"connection",
"to",
"this",
"room"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L703-L710
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.user_info
|
def user_info(self, kv):
"""Sets user_info dict entry through a tuple."""
key, value = kv
self.__user_info[key] = value
|
python
|
def user_info(self, kv):
"""Sets user_info dict entry through a tuple."""
key, value = kv
self.__user_info[key] = value
|
[
"def",
"user_info",
"(",
"self",
",",
"kv",
")",
":",
"key",
",",
"value",
"=",
"kv",
"self",
".",
"__user_info",
"[",
"key",
"]",
"=",
"value"
] |
Sets user_info dict entry through a tuple.
|
[
"Sets",
"user_info",
"dict",
"entry",
"through",
"a",
"tuple",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L724-L728
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.fileinfo
|
def fileinfo(self, fid):
"""Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated."""
if not isinstance(fid, str):
raise TypeError("Your file ID must be a string")
try:
info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
if not info:
warnings.warn(
f"Your query for file with ID: '{fid}' failed.", RuntimeWarning
)
elif fid in self.__files and not self.__files[fid].updated:
self.__files[fid].fileupdate(info)
except queue.Empty as ex:
raise ValueError(
"lain didn't produce a callback!\n"
"Are you sure your query wasn't malformed?"
) from ex
return info
|
python
|
def fileinfo(self, fid):
"""Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated."""
if not isinstance(fid, str):
raise TypeError("Your file ID must be a string")
try:
info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
if not info:
warnings.warn(
f"Your query for file with ID: '{fid}' failed.", RuntimeWarning
)
elif fid in self.__files and not self.__files[fid].updated:
self.__files[fid].fileupdate(info)
except queue.Empty as ex:
raise ValueError(
"lain didn't produce a callback!\n"
"Are you sure your query wasn't malformed?"
) from ex
return info
|
[
"def",
"fileinfo",
"(",
"self",
",",
"fid",
")",
":",
"if",
"not",
"isinstance",
"(",
"fid",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Your file ID must be a string\"",
")",
"try",
":",
"info",
"=",
"self",
".",
"conn",
".",
"make_call_with_cb",
"(",
"\"getFileinfo\"",
",",
"fid",
")",
".",
"get",
"(",
"timeout",
"=",
"5",
")",
"if",
"not",
"info",
":",
"warnings",
".",
"warn",
"(",
"f\"Your query for file with ID: '{fid}' failed.\"",
",",
"RuntimeWarning",
")",
"elif",
"fid",
"in",
"self",
".",
"__files",
"and",
"not",
"self",
".",
"__files",
"[",
"fid",
"]",
".",
"updated",
":",
"self",
".",
"__files",
"[",
"fid",
"]",
".",
"fileupdate",
"(",
"info",
")",
"except",
"queue",
".",
"Empty",
"as",
"ex",
":",
"raise",
"ValueError",
"(",
"\"lain didn't produce a callback!\\n\"",
"\"Are you sure your query wasn't malformed?\"",
")",
"from",
"ex",
"return",
"info"
] |
Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated.
|
[
"Ask",
"lain",
"about",
"what",
"he",
"knows",
"about",
"given",
"file",
".",
"If",
"the",
"given",
"file",
"exists",
"in",
"the",
"file",
"dict",
"it",
"will",
"get",
"updated",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L749-L768
|
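A usage sketch for `fileinfo` with a placeholder file ID; the `ValueError` branch matches the `queue.Empty` path above, while an empty result only emits a `RuntimeWarning`:

```python
from volapi import Room

with Room("examplerm", "demo-nick") as room:  # placeholder names
    try:
        info = room.fileinfo("someFileId0")  # placeholder ID
        print(info)
    except ValueError:
        print("no callback from the server; query may be malformed")
```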
volafiled/python-volapi
|
volapi/volapi.py
|
Room._generate_upload_key
|
def _generate_upload_key(self, allow_timeout=False):
"""Generates a new upload key"""
# Wait for server to set username if not set already.
while not self.user.nick:
with ARBITRATOR.condition:
ARBITRATOR.condition.wait()
while True:
params = {
"name": self.user.nick,
"room": self.room_id,
"c": self.__upload_count,
}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
info = self.conn.make_api_call("getUploadKey", params)
self.__upload_count += 1
try:
return info["key"], info["server"], info["file_id"]
except Exception:
to = int(info.get("error", {}).get("info", {}).get("timeout", 0))
if to <= 0 or not allow_timeout:
raise IOError(f"Failed to retrieve key {info}")
time.sleep(to / 10000)
|
python
|
def _generate_upload_key(self, allow_timeout=False):
"""Generates a new upload key"""
# Wait for server to set username if not set already.
while not self.user.nick:
with ARBITRATOR.condition:
ARBITRATOR.condition.wait()
while True:
params = {
"name": self.user.nick,
"room": self.room_id,
"c": self.__upload_count,
}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
info = self.conn.make_api_call("getUploadKey", params)
self.__upload_count += 1
try:
return info["key"], info["server"], info["file_id"]
except Exception:
to = int(info.get("error", {}).get("info", {}).get("timeout", 0))
if to <= 0 or not allow_timeout:
raise IOError(f"Failed to retrieve key {info}")
time.sleep(to / 10000)
|
[
"def",
"_generate_upload_key",
"(",
"self",
",",
"allow_timeout",
"=",
"False",
")",
":",
"# Wait for server to set username if not set already.",
"while",
"not",
"self",
".",
"user",
".",
"nick",
":",
"with",
"ARBITRATOR",
".",
"condition",
":",
"ARBITRATOR",
".",
"condition",
".",
"wait",
"(",
")",
"while",
"True",
":",
"params",
"=",
"{",
"\"name\"",
":",
"self",
".",
"user",
".",
"nick",
",",
"\"room\"",
":",
"self",
".",
"room_id",
",",
"\"c\"",
":",
"self",
".",
"__upload_count",
",",
"}",
"if",
"self",
".",
"key",
":",
"params",
"[",
"\"roomKey\"",
"]",
"=",
"self",
".",
"key",
"if",
"self",
".",
"password",
":",
"params",
"[",
"\"password\"",
"]",
"=",
"self",
".",
"password",
"info",
"=",
"self",
".",
"conn",
".",
"make_api_call",
"(",
"\"getUploadKey\"",
",",
"params",
")",
"self",
".",
"__upload_count",
"+=",
"1",
"try",
":",
"return",
"info",
"[",
"\"key\"",
"]",
",",
"info",
"[",
"\"server\"",
"]",
",",
"info",
"[",
"\"file_id\"",
"]",
"except",
"Exception",
":",
"to",
"=",
"int",
"(",
"info",
".",
"get",
"(",
"\"error\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"info\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"timeout\"",
",",
"0",
")",
")",
"if",
"to",
"<=",
"0",
"or",
"not",
"allow_timeout",
":",
"raise",
"IOError",
"(",
"f\"Failed to retrieve key {info}\"",
")",
"time",
".",
"sleep",
"(",
"to",
"/",
"10000",
")"
] |
Generates a new upload key
|
[
"Generates",
"a",
"new",
"upload",
"key"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L770-L795
|
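The interesting part of `_generate_upload_key` is its retry loop: on failure the server may return a timeout hint that the client sleeps on before retrying. A generic, self-contained sketch of that pattern (the `request_key` callable and its response shape are assumptions modeled on the method above):

```python
import time

def acquire_key(request_key, allow_timeout=False, max_tries=10):
    """Retry a key request, honoring a server-supplied timeout hint."""
    for _ in range(max_tries):
        info = request_key()  # dict with key/server/file_id or "error"
        try:
            return info["key"], info["server"], info["file_id"]
        except KeyError:
            to = int(info.get("error", {}).get("info", {}).get("timeout", 0))
            if to <= 0 or not allow_timeout:
                raise IOError(f"Failed to retrieve key {info}")
            time.sleep(to / 10000)  # same scaling as the method above
    raise IOError("gave up waiting for an upload key")
```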
volafiled/python-volapi
|
volapi/volapi.py
|
Room.delete_files
|
def delete_files(self, ids):
"""Remove one or more files"""
self.check_owner()
if not isinstance(ids, list):
raise TypeError("You must specify list of files to delete!")
self.conn.make_call("deleteFiles", ids)
|
python
|
def delete_files(self, ids):
"""Remove one or more files"""
self.check_owner()
if not isinstance(ids, list):
raise TypeError("You must specify list of files to delete!")
self.conn.make_call("deleteFiles", ids)
|
[
"def",
"delete_files",
"(",
"self",
",",
"ids",
")",
":",
"self",
".",
"check_owner",
"(",
")",
"if",
"not",
"isinstance",
"(",
"ids",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"You must specify list of files to delete!\"",
")",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"deleteFiles\"",
",",
"ids",
")"
] |
Remove one or more files
|
[
"Remove",
"one",
"or",
"more",
"files"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L797-L803
|
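A one-line usage sketch for `delete_files` (the IDs are placeholders); note the owner check and the list-only argument:

```python
from volapi import Room

with Room("examplerm", "demo-nick") as room:  # placeholder names
    # Requires ownership; a non-list argument raises TypeError.
    room.delete_files(["someFileId0", "someFileId1"])
```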
volafiled/python-volapi
|
volapi/volapi.py
|
Room.transfer_owner
|
def transfer_owner(self, new_owner):
"""You had good run at it, it's time for someone else to get dirty"""
if not self.owner and not self.admin:
raise RuntimeError("You need more street creed to do this")
new_owner = new_owner.strip().lower()
if not new_owner:
raise ValueError("Empty strings cannot be new owners")
self.__set_config_value("owner", new_owner)
|
python
|
def transfer_owner(self, new_owner):
"""You had good run at it, it's time for someone else to get dirty"""
if not self.owner and not self.admin:
raise RuntimeError("You need more street creed to do this")
new_owner = new_owner.strip().lower()
if not new_owner:
raise ValueError("Empty strings cannot be new owners")
self.__set_config_value("owner", new_owner)
|
[
"def",
"transfer_owner",
"(",
"self",
",",
"new_owner",
")",
":",
"if",
"not",
"self",
".",
"owner",
"and",
"not",
"self",
".",
"admin",
":",
"raise",
"RuntimeError",
"(",
"\"You need more street creed to do this\"",
")",
"new_owner",
"=",
"new_owner",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"not",
"new_owner",
":",
"raise",
"ValueError",
"(",
"\"Empty strings cannot be new owners\"",
")",
"self",
".",
"__set_config_value",
"(",
"\"owner\"",
",",
"new_owner",
")"
] |
You had good run at it, it's time for someone else to get dirty
|
[
"You",
"had",
"good",
"run",
"at",
"it",
"it",
"s",
"time",
"for",
"someone",
"else",
"to",
"get",
"dirty"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L805-L815
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.add_janitor
|
def add_janitor(self, janitor):
"""Add janitor to the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor in self.config.janitors:
return
self.config.janitors.append(janitor)
self.__set_config_value("janitors", self.config.janitors)
|
python
|
def add_janitor(self, janitor):
"""Add janitor to the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor in self.config.janitors:
return
self.config.janitors.append(janitor)
self.__set_config_value("janitors", self.config.janitors)
|
[
"def",
"add_janitor",
"(",
"self",
",",
"janitor",
")",
":",
"if",
"not",
"self",
".",
"owner",
"and",
"not",
"self",
".",
"admin",
":",
"raise",
"RuntimeError",
"(",
"\"Not enough street creed to do this\"",
")",
"janitor",
"=",
"janitor",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"not",
"janitor",
":",
"raise",
"ValueError",
"(",
"\"Empty strings cannot be janitors\"",
")",
"if",
"janitor",
"in",
"self",
".",
"config",
".",
"janitors",
":",
"return",
"self",
".",
"config",
".",
"janitors",
".",
"append",
"(",
"janitor",
")",
"self",
".",
"__set_config_value",
"(",
"\"janitors\"",
",",
"self",
".",
"config",
".",
"janitors",
")"
] |
Add janitor to the room
|
[
"Add",
"janitor",
"to",
"the",
"room"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L817-L831
|
volafiled/python-volapi
|
volapi/volapi.py
|
Room.remove_janitor
|
def remove_janitor(self, janitor):
"""Remove janitor from the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor not in self.config.janitors:
return
self.config.janitors.remove(janitor)
self.__set_config_value("janitors", self.config.janitors)
|
python
|
def remove_janitor(self, janitor):
"""Remove janitor from the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor not in self.config.janitors:
return
self.config.janitors.remove(janitor)
self.__set_config_value("janitors", self.config.janitors)
|
[
"def",
"remove_janitor",
"(",
"self",
",",
"janitor",
")",
":",
"if",
"not",
"self",
".",
"owner",
"and",
"not",
"self",
".",
"admin",
":",
"raise",
"RuntimeError",
"(",
"\"Not enough street creed to do this\"",
")",
"janitor",
"=",
"janitor",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"not",
"janitor",
":",
"raise",
"ValueError",
"(",
"\"Empty strings cannot be janitors\"",
")",
"if",
"janitor",
"not",
"in",
"self",
".",
"config",
".",
"janitors",
":",
"return",
"self",
".",
"config",
".",
"janitors",
".",
"remove",
"(",
"janitor",
")",
"self",
".",
"__set_config_value",
"(",
"\"janitors\"",
",",
"self",
".",
"config",
".",
"janitors",
")"
] |
Remove janitor from the room
|
[
"Remove",
"janitor",
"from",
"the",
"room"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L833-L847
|
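A combined sketch for the two janitor rows above (names are placeholders); both calls require owner or admin rights, and duplicate or missing entries are silently ignored:

```python
from volapi import Room

with Room("examplerm", "demo-nick") as room:  # placeholder names
    room.add_janitor("HelpfulUser")     # no-op if already a janitor
    room.remove_janitor("HelpfulUser")  # no-op if not a janitor
```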
guaix-ucm/pyemir
|
emirdrp/processing/wavecal/overplot_boundary_model.py
|
overplot_lines
|
def overplot_lines(ax,
catlines_all_wave,
list_valid_islitlets,
rectwv_coeff,
global_integer_offset_x_pix, global_integer_offset_y_pix,
ds9_file, debugplot):
"""Overplot lines (arc/OH).
Parameters
----------
ax : matplotlib axes
Current plot axes.
catlines_all_wave : numpy array
Array with wavelengths of the lines to be overplotted.
list_valid_islitlets : list of integers
List with numbers of valid slitlets.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
global_integer_offset_x_pix : int
Global offset in the X direction to be applied after computing
the expected location.
global_integer_offset_y_pix : int
Global offset in the Y direction to be applied after computing
the expected location.
ds9_file : file handler or None
File handler to ds9 region file where the location of lines
must be saved.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
"""
for islitlet in list_valid_islitlets:
crval1_linear = rectwv_coeff.contents[islitlet - 1]['crval1_linear']
cdelt1_linear = rectwv_coeff.contents[islitlet - 1]['cdelt1_linear']
crvaln_linear = crval1_linear + (EMIR_NAXIS1 - 1) * cdelt1_linear
bb_ns1_orig = rectwv_coeff.contents[islitlet - 1]['bb_ns1_orig']
ttd_order = rectwv_coeff.contents[islitlet - 1]['ttd_order']
aij = rectwv_coeff.contents[islitlet - 1]['ttd_aij']
bij = rectwv_coeff.contents[islitlet - 1]['ttd_bij']
min_row_rectified = float(
rectwv_coeff.contents[islitlet - 1]['min_row_rectified']
)
max_row_rectified = float(
rectwv_coeff.contents[islitlet - 1]['max_row_rectified']
)
mean_row_rectified = (min_row_rectified + max_row_rectified) / 2
wpoly_coeff = rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']
x0 = []
y0 = []
x1 = []
y1 = []
x2 = []
y2 = []
for line in catlines_all_wave:
if crval1_linear <= line <= crvaln_linear:
tmp_coeff = np.copy(wpoly_coeff)
tmp_coeff[0] -= line
tmp_xroots = np.polynomial.Polynomial(tmp_coeff).roots()
for dum in tmp_xroots:
if np.isreal(dum):
dum = dum.real
if 1 <= dum <= EMIR_NAXIS1:
x0.append(dum)
y0.append(mean_row_rectified)
x1.append(dum)
y1.append(min_row_rectified)
x2.append(dum)
y2.append(max_row_rectified)
xx0, yy0 = fmap(ttd_order, aij, bij, np.array(x0), np.array(y0))
xx0 -= global_integer_offset_x_pix
yy0 += bb_ns1_orig
yy0 -= global_integer_offset_y_pix
xx1, yy1 = fmap(ttd_order, aij, bij, np.array(x1), np.array(y1))
xx1 -= global_integer_offset_x_pix
yy1 += bb_ns1_orig
yy1 -= global_integer_offset_y_pix
xx2, yy2 = fmap(ttd_order, aij, bij, np.array(x2), np.array(y2))
xx2 -= global_integer_offset_x_pix
yy2 += bb_ns1_orig
yy2 -= global_integer_offset_y_pix
if abs(debugplot) % 10 != 0:
if abs(debugplot) == 22:
for xx1_, xx2_, yy1_, yy2_ in zip(xx1, xx2, yy1, yy2):
ax.plot([xx1_, xx2_], [yy1_, yy2_], 'c-', linewidth=2.0)
else:
ax.plot(xx0, yy0, 'c.')
if ds9_file is not None:
ds9_file.write(
'#\n# islitlet...........: {0}\n'.format(islitlet)
)
for xx0_, yy0_ in zip(xx0, yy0):
ds9_file.write(
'circle {0} {1} 2 # fill=1\n'.format(
xx0_, yy0_)
)
|
python
|
def overplot_lines(ax,
catlines_all_wave,
list_valid_islitlets,
rectwv_coeff,
global_integer_offset_x_pix, global_integer_offset_y_pix,
ds9_file, debugplot):
"""Overplot lines (arc/OH).
Parameters
----------
ax : matplotlib axes
Current plot axes.
catlines_all_wave : numpy array
Array with wavelengths of the lines to be overplotted.
list_valid_islitlets : list of integers
List with numbers of valid slitlets.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
global_integer_offset_x_pix : int
Global offset in the X direction to be applied after computing
the expected location.
global_integer_offset_y_pix : int
Global offset in the Y direction to be applied after computing
the expected location.
ds9_file : file handler or None
File handler to ds9 region file where the location of lines
must be saved.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
"""
for islitlet in list_valid_islitlets:
crval1_linear = rectwv_coeff.contents[islitlet - 1]['crval1_linear']
cdelt1_linear = rectwv_coeff.contents[islitlet - 1]['cdelt1_linear']
crvaln_linear = crval1_linear + (EMIR_NAXIS1 - 1) * cdelt1_linear
bb_ns1_orig = rectwv_coeff.contents[islitlet - 1]['bb_ns1_orig']
ttd_order = rectwv_coeff.contents[islitlet - 1]['ttd_order']
aij = rectwv_coeff.contents[islitlet - 1]['ttd_aij']
bij = rectwv_coeff.contents[islitlet - 1]['ttd_bij']
min_row_rectified = float(
rectwv_coeff.contents[islitlet - 1]['min_row_rectified']
)
max_row_rectified = float(
rectwv_coeff.contents[islitlet - 1]['max_row_rectified']
)
mean_row_rectified = (min_row_rectified + max_row_rectified) / 2
wpoly_coeff = rectwv_coeff.contents[islitlet - 1]['wpoly_coeff']
x0 = []
y0 = []
x1 = []
y1 = []
x2 = []
y2 = []
for line in catlines_all_wave:
if crval1_linear <= line <= crvaln_linear:
tmp_coeff = np.copy(wpoly_coeff)
tmp_coeff[0] -= line
tmp_xroots = np.polynomial.Polynomial(tmp_coeff).roots()
for dum in tmp_xroots:
if np.isreal(dum):
dum = dum.real
if 1 <= dum <= EMIR_NAXIS1:
x0.append(dum)
y0.append(mean_row_rectified)
x1.append(dum)
y1.append(min_row_rectified)
x2.append(dum)
y2.append(max_row_rectified)
xx0, yy0 = fmap(ttd_order, aij, bij, np.array(x0), np.array(y0))
xx0 -= global_integer_offset_x_pix
yy0 += bb_ns1_orig
yy0 -= global_integer_offset_y_pix
xx1, yy1 = fmap(ttd_order, aij, bij, np.array(x1), np.array(y1))
xx1 -= global_integer_offset_x_pix
yy1 += bb_ns1_orig
yy1 -= global_integer_offset_y_pix
xx2, yy2 = fmap(ttd_order, aij, bij, np.array(x2), np.array(y2))
xx2 -= global_integer_offset_x_pix
yy2 += bb_ns1_orig
yy2 -= global_integer_offset_y_pix
if abs(debugplot) % 10 != 0:
if abs(debugplot) == 22:
for xx1_, xx2_, yy1_, yy2_ in zip(xx1, xx2, yy1, yy2):
ax.plot([xx1_, xx2_], [yy1_, yy2_], 'c-', linewidth=2.0)
else:
ax.plot(xx0, yy0, 'c.')
if ds9_file is not None:
ds9_file.write(
'#\n# islitlet...........: {0}\n'.format(islitlet)
)
for xx0_, yy0_ in zip(xx0, yy0):
ds9_file.write(
'circle {0} {1} 2 # fill=1\n'.format(
xx0_, yy0_)
)
|
[
"def",
"overplot_lines",
"(",
"ax",
",",
"catlines_all_wave",
",",
"list_valid_islitlets",
",",
"rectwv_coeff",
",",
"global_integer_offset_x_pix",
",",
"global_integer_offset_y_pix",
",",
"ds9_file",
",",
"debugplot",
")",
":",
"for",
"islitlet",
"in",
"list_valid_islitlets",
":",
"crval1_linear",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'crval1_linear'",
"]",
"cdelt1_linear",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'cdelt1_linear'",
"]",
"crvaln_linear",
"=",
"crval1_linear",
"+",
"(",
"EMIR_NAXIS1",
"-",
"1",
")",
"*",
"cdelt1_linear",
"bb_ns1_orig",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'bb_ns1_orig'",
"]",
"ttd_order",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'ttd_order'",
"]",
"aij",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'ttd_aij'",
"]",
"bij",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'ttd_bij'",
"]",
"min_row_rectified",
"=",
"float",
"(",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'min_row_rectified'",
"]",
")",
"max_row_rectified",
"=",
"float",
"(",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'max_row_rectified'",
"]",
")",
"mean_row_rectified",
"=",
"(",
"min_row_rectified",
"+",
"max_row_rectified",
")",
"/",
"2",
"wpoly_coeff",
"=",
"rectwv_coeff",
".",
"contents",
"[",
"islitlet",
"-",
"1",
"]",
"[",
"'wpoly_coeff'",
"]",
"x0",
"=",
"[",
"]",
"y0",
"=",
"[",
"]",
"x1",
"=",
"[",
"]",
"y1",
"=",
"[",
"]",
"x2",
"=",
"[",
"]",
"y2",
"=",
"[",
"]",
"for",
"line",
"in",
"catlines_all_wave",
":",
"if",
"crval1_linear",
"<=",
"line",
"<=",
"crvaln_linear",
":",
"tmp_coeff",
"=",
"np",
".",
"copy",
"(",
"wpoly_coeff",
")",
"tmp_coeff",
"[",
"0",
"]",
"-=",
"line",
"tmp_xroots",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"tmp_coeff",
")",
".",
"roots",
"(",
")",
"for",
"dum",
"in",
"tmp_xroots",
":",
"if",
"np",
".",
"isreal",
"(",
"dum",
")",
":",
"dum",
"=",
"dum",
".",
"real",
"if",
"1",
"<=",
"dum",
"<=",
"EMIR_NAXIS1",
":",
"x0",
".",
"append",
"(",
"dum",
")",
"y0",
".",
"append",
"(",
"mean_row_rectified",
")",
"x1",
".",
"append",
"(",
"dum",
")",
"y1",
".",
"append",
"(",
"min_row_rectified",
")",
"x2",
".",
"append",
"(",
"dum",
")",
"y2",
".",
"append",
"(",
"max_row_rectified",
")",
"xx0",
",",
"yy0",
"=",
"fmap",
"(",
"ttd_order",
",",
"aij",
",",
"bij",
",",
"np",
".",
"array",
"(",
"x0",
")",
",",
"np",
".",
"array",
"(",
"y0",
")",
")",
"xx0",
"-=",
"global_integer_offset_x_pix",
"yy0",
"+=",
"bb_ns1_orig",
"yy0",
"-=",
"global_integer_offset_y_pix",
"xx1",
",",
"yy1",
"=",
"fmap",
"(",
"ttd_order",
",",
"aij",
",",
"bij",
",",
"np",
".",
"array",
"(",
"x1",
")",
",",
"np",
".",
"array",
"(",
"y1",
")",
")",
"xx1",
"-=",
"global_integer_offset_x_pix",
"yy1",
"+=",
"bb_ns1_orig",
"yy1",
"-=",
"global_integer_offset_y_pix",
"xx2",
",",
"yy2",
"=",
"fmap",
"(",
"ttd_order",
",",
"aij",
",",
"bij",
",",
"np",
".",
"array",
"(",
"x2",
")",
",",
"np",
".",
"array",
"(",
"y2",
")",
")",
"xx2",
"-=",
"global_integer_offset_x_pix",
"yy2",
"+=",
"bb_ns1_orig",
"yy2",
"-=",
"global_integer_offset_y_pix",
"if",
"abs",
"(",
"debugplot",
")",
"%",
"10",
"!=",
"0",
":",
"if",
"abs",
"(",
"debugplot",
")",
"==",
"22",
":",
"for",
"xx1_",
",",
"xx2_",
",",
"yy1_",
",",
"yy2_",
"in",
"zip",
"(",
"xx1",
",",
"xx2",
",",
"yy1",
",",
"yy2",
")",
":",
"ax",
".",
"plot",
"(",
"[",
"xx1_",
",",
"xx2_",
"]",
",",
"[",
"yy1_",
",",
"yy2_",
"]",
",",
"'c-'",
",",
"linewidth",
"=",
"2.0",
")",
"else",
":",
"ax",
".",
"plot",
"(",
"xx0",
",",
"yy0",
",",
"'c.'",
")",
"if",
"ds9_file",
"is",
"not",
"None",
":",
"ds9_file",
".",
"write",
"(",
"'#\\n# islitlet...........: {0}\\n'",
".",
"format",
"(",
"islitlet",
")",
")",
"for",
"xx0_",
",",
"yy0_",
"in",
"zip",
"(",
"xx0",
",",
"yy0",
")",
":",
"ds9_file",
".",
"write",
"(",
"'circle {0} {1} 2 # fill=1\\n'",
".",
"format",
"(",
"xx0_",
",",
"yy0_",
")",
")"
] |
Overplot lines (arc/OH).
Parameters
----------
ax : matplotlib axes
Current plot axes.
catlines_all_wave : numpy array
Array with wavelengths of the lines to be overplotted.
list_valid_islitlets : list of integers
List with numbers of valid slitlets.
rectwv_coeff : RectWaveCoeff instance
Rectification and wavelength calibration coefficients for the
particular CSU configuration.
global_integer_offset_x_pix : int
Global offset in the X direction to be applied after computing
the expected location.
global_integer_offset_y_pix : int
Global offset in the Y direction to be applied after computing
the expected location.
ds9_file : file handler or None
File handler to ds9 region file where the location of lines
must be saved.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
|
[
"Overplot",
"lines",
"(",
"arc",
"/",
"OH",
")",
"."
] |
train
|
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/overplot_boundary_model.py#L54-L153
|
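The numeric core of `overplot_lines` is inverting the wavelength-calibration polynomial: subtract the catalogue wavelength from the constant coefficient and keep the real roots that land on the detector. A standalone sketch with made-up coefficients (the `EMIR_NAXIS1` value here is an assumption):

```python
import numpy as np

EMIR_NAXIS1 = 2048  # assumed detector width in pixels

def pixels_for_line(wpoly_coeff, line):
    """Real roots of p(x) - line on the detector, mirroring the
    root-finding loop in overplot_lines."""
    tmp_coeff = np.copy(wpoly_coeff)
    tmp_coeff[0] -= line
    roots = np.polynomial.Polynomial(tmp_coeff).roots()
    return [r.real for r in roots
            if np.isreal(r) and 1 <= r.real <= EMIR_NAXIS1]

# Hypothetical calibration p(x) = 7000 + 2x + 1e-4 x**2, line at 7500:
print(pixels_for_line(np.array([7000.0, 2.0, 1e-4]), 7500.0))
```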
BeyondTheClouds/enoslib
|
enoslib/task.py
|
enostask
|
def enostask(new=False):
"""Decorator for an Enos Task.
This decorator lets you define a new Enos task and helps you manage the
environment. It injects the environment in the function called.
Args:
new (bool): indicates if a new environment must be created.
Usually this is set on the first task of the workflow.
Examples:
.. code-block:: python
@enostask(new=True)
def foo(env=None, **kwargs):
# The key resultdir is available once the env is created
print(env["resultdir"])
@enostask()
def myfunction(env=None, **kwargs):
# saving a new key
env['foo'] = 'bar'
"""
def decorator(fn):
@wraps(fn)
def decorated(*args, **kwargs):
# Constructs the environment
# --env or env are reserved keyword to reuse existing env
k_env = kwargs.get("--env") or kwargs.get("env")
if new:
kwargs["env"] = _make_env(k_env)
kwargs["env"]["resultdir"] = _set_resultdir(k_env)
# the previous SYMLINK to the created result_dir
else:
kwargs["env"] = _make_env(k_env or SYMLINK_NAME)
try:
# Proceeds with the function execution
logger.info("- Task %s started -" % fn.__name__)
r = fn(*args, **kwargs)
logger.info("- Task %s finished -" % fn.__name__)
return r
# Save the environment
finally:
_save_env(kwargs["env"])
return decorated
return decorator
|
python
|
def enostask(new=False):
"""Decorator for an Enos Task.
This decorator lets you define a new Enos task and helps you manage the
environment. It injects the environment in the function called.
Args:
new (bool): indicates if a new environment must be created.
Usually this is set on the first task of the workflow.
Examples:
.. code-block:: python
@enostask(new=True)
def foo(env=None, **kwargs):
# The key resultdir is available once the env is created
print(env["resultdir"])
@enostask()
def myfunction(env=None, **kwargs):
# saving a new key
env['foo'] = 'bar'
"""
def decorator(fn):
@wraps(fn)
def decorated(*args, **kwargs):
# Constructs the environment
# --env or env are reserved keyword to reuse existing env
k_env = kwargs.get("--env") or kwargs.get("env")
if new:
kwargs["env"] = _make_env(k_env)
kwargs["env"]["resultdir"] = _set_resultdir(k_env)
# the previous SYMLINK to the created result_dir
else:
kwargs["env"] = _make_env(k_env or SYMLINK_NAME)
try:
# Proceeds with the function execution
logger.info("- Task %s started -" % fn.__name__)
r = fn(*args, **kwargs)
logger.info("- Task %s finished -" % fn.__name__)
return r
# Save the environment
finally:
_save_env(kwargs["env"])
return decorated
return decorator
|
[
"def",
"enostask",
"(",
"new",
"=",
"False",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"decorated",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Constructs the environment",
"# --env or env are reserved keyword to reuse existing env",
"k_env",
"=",
"kwargs",
".",
"get",
"(",
"\"--env\"",
")",
"or",
"kwargs",
".",
"get",
"(",
"\"env\"",
")",
"if",
"new",
":",
"kwargs",
"[",
"\"env\"",
"]",
"=",
"_make_env",
"(",
"k_env",
")",
"kwargs",
"[",
"\"env\"",
"]",
"[",
"\"resultdir\"",
"]",
"=",
"_set_resultdir",
"(",
"k_env",
")",
"# the previous SYMLINK to the created result_dir",
"else",
":",
"kwargs",
"[",
"\"env\"",
"]",
"=",
"_make_env",
"(",
"k_env",
"or",
"SYMLINK_NAME",
")",
"try",
":",
"# Proceeds with the function execution",
"logger",
".",
"info",
"(",
"\"- Task %s started -\"",
"%",
"fn",
".",
"__name__",
")",
"r",
"=",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"logger",
".",
"info",
"(",
"\"- Task %s finished -\"",
"%",
"fn",
".",
"__name__",
")",
"return",
"r",
"# Save the environment",
"finally",
":",
"_save_env",
"(",
"kwargs",
"[",
"\"env\"",
"]",
")",
"return",
"decorated",
"return",
"decorator"
] |
Decorator for an Enos Task.
This decorator lets you define a new Enos task and helps you manage the
environment. It injects the environment in the function called.
Args:
new (bool): indicates if a new environment must be created.
Usually this is set on the first task of the workflow.
Examples:
.. code-block:: python
@enostask(new=True)
def foo(env=None, **kwargs):
# The key resultdir is available once the env is created
print(env["resultdir"])
@enostask()
def myfunction(env=None, **kwargs):
# saving a new key
env['foo'] = 'bar'
|
[
"Decorator",
"for",
"an",
"Enos",
"Task",
"."
] |
train
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L20-L66
|
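A runnable sketch of chaining two decorated tasks, following the docstring's own examples (the task bodies are illustrative):

```python
from enoslib.task import enostask

@enostask(new=True)
def up(env=None, **kwargs):
    env["foo"] = "bar"        # persisted by _save_env on exit
    print(env["resultdir"])   # created by _set_resultdir

@enostask()
def bench(env=None, **kwargs):
    print(env["foo"])         # reloaded from the previous task's env

up()
bench()
```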
BeyondTheClouds/enoslib
|
enoslib/task.py
|
_make_env
|
def _make_env(resultdir=None):
"""Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
    this function makes a new environment and returns it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
"""
env = {
"config": {}, # The config
"resultdir": "", # Path to the result directory
"config_file": "", # The initial config file
"nodes": {}, # Roles with nodes
"phase": "", # Last phase that have been run
"user": "", # User id for this job
"cwd": os.getcwd() # Current Working Directory
}
if resultdir:
env_path = os.path.join(resultdir, "env")
if os.path.isfile(env_path):
with open(env_path, "r") as f:
env.update(yaml.load(f))
logger.debug("Loaded environment %s", env_path)
if "config_file" in env and env["config_file"] is not None:
# Resets the configuration of the environment
if os.path.isfile(env["config_file"]):
with open(env["config_file"], "r") as f:
env["config"].update(yaml.load(f))
logger.debug("Reloaded config %s", env["config"])
return env
|
python
|
def _make_env(resultdir=None):
"""Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
    this function makes a new environment and returns it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
"""
env = {
"config": {}, # The config
"resultdir": "", # Path to the result directory
"config_file": "", # The initial config file
"nodes": {}, # Roles with nodes
"phase": "", # Last phase that have been run
"user": "", # User id for this job
"cwd": os.getcwd() # Current Working Directory
}
if resultdir:
env_path = os.path.join(resultdir, "env")
if os.path.isfile(env_path):
with open(env_path, "r") as f:
env.update(yaml.load(f))
logger.debug("Loaded environment %s", env_path)
if "config_file" in env and env["config_file"] is not None:
# Resets the configuration of the environment
if os.path.isfile(env["config_file"]):
with open(env["config_file"], "r") as f:
env["config"].update(yaml.load(f))
logger.debug("Reloaded config %s", env["config"])
return env
|
[
"def",
"_make_env",
"(",
"resultdir",
"=",
"None",
")",
":",
"env",
"=",
"{",
"\"config\"",
":",
"{",
"}",
",",
"# The config",
"\"resultdir\"",
":",
"\"\"",
",",
"# Path to the result directory",
"\"config_file\"",
":",
"\"\"",
",",
"# The initial config file",
"\"nodes\"",
":",
"{",
"}",
",",
"# Roles with nodes",
"\"phase\"",
":",
"\"\"",
",",
"# Last phase that have been run",
"\"user\"",
":",
"\"\"",
",",
"# User id for this job",
"\"cwd\"",
":",
"os",
".",
"getcwd",
"(",
")",
"# Current Working Directory",
"}",
"if",
"resultdir",
":",
"env_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"resultdir",
",",
"\"env\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"env_path",
")",
":",
"with",
"open",
"(",
"env_path",
",",
"\"r\"",
")",
"as",
"f",
":",
"env",
".",
"update",
"(",
"yaml",
".",
"load",
"(",
"f",
")",
")",
"logger",
".",
"debug",
"(",
"\"Loaded environment %s\"",
",",
"env_path",
")",
"if",
"\"config_file\"",
"in",
"env",
"and",
"env",
"[",
"\"config_file\"",
"]",
"is",
"not",
"None",
":",
"# Resets the configuration of the environment",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"env",
"[",
"\"config_file\"",
"]",
")",
":",
"with",
"open",
"(",
"env",
"[",
"\"config_file\"",
"]",
",",
"\"r\"",
")",
"as",
"f",
":",
"env",
"[",
"\"config\"",
"]",
".",
"update",
"(",
"yaml",
".",
"load",
"(",
"f",
")",
")",
"logger",
".",
"debug",
"(",
"\"Reloaded config %s\"",
",",
"env",
"[",
"\"config\"",
"]",
")",
"return",
"env"
] |
Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and returns it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
|
[
"Loads",
"the",
"env",
"from",
"resultdir",
"if",
"not",
"None",
"or",
"makes",
"a",
"new",
"one",
"."
] |
train
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L69-L110
|
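A standalone sketch of the load path in `_make_env`, assuming a result directory whose `env` file is a YAML mapping merged over the defaults. The bare `yaml.load(f)` in the original needs an explicit `Loader` on PyYAML >= 5.1, so this sketch substitutes `yaml.safe_load`:

```python
import os
import yaml

resultdir = "enos_demo"  # placeholder result directory
os.makedirs(resultdir, exist_ok=True)
with open(os.path.join(resultdir, "env"), "w") as f:
    yaml.dump({"phase": "deploy", "user": "demo"}, f)

env = {"config": {}, "resultdir": "", "config_file": "",
       "nodes": {}, "phase": "", "user": "", "cwd": os.getcwd()}
with open(os.path.join(resultdir, "env")) as f:
    env.update(yaml.safe_load(f))
print(env["phase"])  # -> deploy
```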
BeyondTheClouds/enoslib
|
enoslib/task.py
|
_save_env
|
def _save_env(env):
"""Saves one environment.
Args:
env (dict): the env dict to save.
"""
env_path = os.path.join(env["resultdir"], "env")
if os.path.isdir(env["resultdir"]):
with open(env_path, "w") as f:
yaml.dump(env, f)
|
python
|
def _save_env(env):
"""Saves one environment.
Args:
env (dict): the env dict to save.
"""
env_path = os.path.join(env["resultdir"], "env")
if os.path.isdir(env["resultdir"]):
with open(env_path, "w") as f:
yaml.dump(env, f)
|
[
"def",
"_save_env",
"(",
"env",
")",
":",
"env_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
"[",
"\"resultdir\"",
"]",
",",
"\"env\"",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"env",
"[",
"\"resultdir\"",
"]",
")",
":",
"with",
"open",
"(",
"env_path",
",",
"\"w\"",
")",
"as",
"f",
":",
"yaml",
".",
"dump",
"(",
"env",
",",
"f",
")"
] |
Saves one environment.
Args:
env (dict): the env dict to save.
|
[
"Saves",
"one",
"environment",
"."
] |
train
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L113-L123
|
BeyondTheClouds/enoslib
|
enoslib/task.py
|
_check_env
|
def _check_env(fn):
"""Decorator for an Enos Task.
This decorator checks if an environment file exists.
"""
def decorator(*args, **kwargs):
# If no directory is provided, set the default one
resultdir = kwargs.get("--env", SYMLINK_NAME)
# Check if the env file exists
env_path = os.path.join(resultdir, "env")
if not os.path.isfile(env_path):
raise Exception("The file %s does not exist." % env_path)
# Proceeds with the function execution
return fn(*args, **kwargs)
return decorator
|
python
|
def _check_env(fn):
"""Decorator for an Enos Task.
This decorator checks if an environment file exists.
"""
def decorator(*args, **kwargs):
# If no directory is provided, set the default one
resultdir = kwargs.get("--env", SYMLINK_NAME)
# Check if the env file exists
env_path = os.path.join(resultdir, "env")
if not os.path.isfile(env_path):
raise Exception("The file %s does not exist." % env_path)
# Proceeds with the function execution
return fn(*args, **kwargs)
return decorator
|
[
"def",
"_check_env",
"(",
"fn",
")",
":",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# If no directory is provided, set the default one",
"resultdir",
"=",
"kwargs",
".",
"get",
"(",
"\"--env\"",
",",
"SYMLINK_NAME",
")",
"# Check if the env file exists",
"env_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"resultdir",
",",
"\"env\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"env_path",
")",
":",
"raise",
"Exception",
"(",
"\"The file %s does not exist.\"",
"%",
"env_path",
")",
"# Proceeds with the function execution",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"decorator"
] |
Decorator for an Enos Task.
This decorator checks if an environment file exists.
|
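A usage sketch for the decorator; the task function and its "--env" keyword are hypothetical:

# hypothetical task guarded by the decorator; "--env" may point at a
# result directory, otherwise SYMLINK_NAME is checked
@_check_env
def backup(*args, **kwargs):
    ...

backup(**{"--env": "current"})   # raises unless current/env exists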
[
"Decorator",
"for",
"an",
"Enos",
"Task",
"."
] |
train
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L126-L141
|
BeyondTheClouds/enoslib
|
enoslib/task.py
|
_set_resultdir
|
def _set_resultdir(name=None):
"""Set or get the directory to store experiment results.
Looks at `name` and creates the directory if it doesn't exist,
or returns it otherwise. If `name` is `None`, then the
function generates a unique name for the results directory.
Finally, it links the directory to `SYMLINK_NAME`.
Args:
name (str): file path to an existing directory. It can be
either absolute or relative to the current working
directory.
Returns:
the file path of the results directory.
"""
# Compute file path of results directory
resultdir_name = name or "enos_" + datetime.today().isoformat()
resultdir_path = os.path.abspath(resultdir_name)
# Raise error if a related file exists
if os.path.isfile(resultdir_path):
raise EnosFilePathError(resultdir_path,
"Result directory cannot be created due "
"to existing file %s" % resultdir_path)
# Create the result directory if it does not exist
if not os.path.isdir(resultdir_path):
os.mkdir(resultdir_path)
logger.info("Generate results directory %s" % resultdir_path)
# Symlink the result directory with the "cwd/current" directory
link_path = SYMLINK_NAME
if os.path.lexists(link_path):
os.remove(link_path)
try:
os.symlink(resultdir_path, link_path)
logger.info("Symlink %s to %s" % (resultdir_path, link_path))
except OSError:
# A harmless error can occur due to a race condition when
# multiple regions are simultaneously deployed
logger.warning("Symlink %s to %s failed" %
(resultdir_path, link_path))
return resultdir_path
|
python
|
def _set_resultdir(name=None):
"""Set or get the directory to store experiment results.
Looks at `name` and creates the directory if it doesn't exist,
or returns it otherwise. If `name` is `None`, then the
function generates a unique name for the results directory.
Finally, it links the directory to `SYMLINK_NAME`.
Args:
name (str): file path to an existing directory. It can be
either absolute or relative to the current working
directory.
Returns:
the file path of the results directory.
"""
# Compute file path of results directory
resultdir_name = name or "enos_" + datetime.today().isoformat()
resultdir_path = os.path.abspath(resultdir_name)
# Raise error if a related file exists
if os.path.isfile(resultdir_path):
raise EnosFilePathError(resultdir_path,
"Result directory cannot be created due "
"to existing file %s" % resultdir_path)
# Create the result directory if it does not exist
if not os.path.isdir(resultdir_path):
os.mkdir(resultdir_path)
logger.info("Generate results directory %s" % resultdir_path)
# Symlink the result directory with the "cwd/current" directory
link_path = SYMLINK_NAME
if os.path.lexists(link_path):
os.remove(link_path)
try:
os.symlink(resultdir_path, link_path)
logger.info("Symlink %s to %s" % (resultdir_path, link_path))
except OSError:
# A harmless error can occur due to a race condition when
# multiple regions are simultaneously deployed
logger.warning("Symlink %s to %s failed" %
(resultdir_path, link_path))
return resultdir_path
|
[
"def",
"_set_resultdir",
"(",
"name",
"=",
"None",
")",
":",
"# Compute file path of results directory",
"resultdir_name",
"=",
"name",
"or",
"\"enos_\"",
"+",
"datetime",
".",
"today",
"(",
")",
".",
"isoformat",
"(",
")",
"resultdir_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"resultdir_name",
")",
"# Raise error if a related file exists",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"resultdir_path",
")",
":",
"raise",
"EnosFilePathError",
"(",
"resultdir_path",
",",
"\"Result directory cannot be created due \"",
"\"to existing file %s\"",
"%",
"resultdir_path",
")",
"# Create the result directory if it does not exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"resultdir_path",
")",
":",
"os",
".",
"mkdir",
"(",
"resultdir_path",
")",
"logger",
".",
"info",
"(",
"\"Generate results directory %s\"",
"%",
"resultdir_path",
")",
"# Symlink the result directory with the \"cwd/current\" directory",
"link_path",
"=",
"SYMLINK_NAME",
"if",
"os",
".",
"path",
".",
"lexists",
"(",
"link_path",
")",
":",
"os",
".",
"remove",
"(",
"link_path",
")",
"try",
":",
"os",
".",
"symlink",
"(",
"resultdir_path",
",",
"link_path",
")",
"logger",
".",
"info",
"(",
"\"Symlink %s to %s\"",
"%",
"(",
"resultdir_path",
",",
"link_path",
")",
")",
"except",
"OSError",
":",
"# An harmless error can occur due to a race condition when",
"# multiple regions are simultaneously deployed",
"logger",
".",
"warning",
"(",
"\"Symlink %s to %s failed\"",
"%",
"(",
"resultdir_path",
",",
"link_path",
")",
")",
"return",
"resultdir_path"
] |
Set or get the directory to store experiment results.
Looks at `name` and creates the directory if it doesn't exist,
or returns it otherwise. If `name` is `None`, then the
function generates a unique name for the results directory.
Finally, it links the directory to `SYMLINK_NAME`.
Args:
name (str): file path to an existing directory. It can be
either absolute or relative to the current working
directory.
Returns:
the file path of the results directory.
|
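Two hypothetical calls illustrating both branches of the docstring:

resultdir = _set_resultdir()          # auto name "enos_<ISO timestamp>", symlinked
resultdir = _set_resultdir("run-42")  # explicit (hypothetical) name, created if absent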
[
"Set",
"or",
"get",
"the",
"directory",
"to",
"store",
"experiment",
"results",
"."
] |
train
|
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L144-L189
|
BreakingBytes/simkit
|
simkit/core/data_sources.py
|
DataRegistry.register
|
def register(self, newdata, *args, **kwargs):
"""
Register data in registry. Meta for each data is specified by positional
or keyword arguments after the new data and consists of the following:
* ``uncertainty`` - Map of uncertainties in percent corresponding to new
keys. The uncertainty keys must be a subset of the new data keys.
* ``variance`` - Square of the uncertainty (no units).
* ``isconstant``: Map corresponding to new keys whose values are ``True``
if constant or ``False`` if periodic. These keys must be a subset of
the new data keys.
* ``timeseries``: Name of corresponding time series data, ``None`` if no
time series. _EG_: DNI data ``timeseries`` attribute might be set to a
date/time data that it corresponds to. More than one data can have the
same ``timeseries`` data.
* ``data_source``: the
:class:`~simkit.core.data_sources.DataSource` superclass that
was used to acquire this data. This can be used to group data from a
specific source together.
:param newdata: New data to add to registry. When registering new data,
keys are not allowed to override existing keys in the data
registry.
:type newdata: mapping
:raises:
:exc:`~simkit.core.exceptions.UncertaintyPercentUnitsError`
"""
kwargs.update(zip(self.meta_names, args))
# check uncertainty has units of percent
uncertainty = kwargs['uncertainty']
variance = kwargs['variance']
isconstant = kwargs['isconstant']
# check uncertainty is percent
if uncertainty:
for k0, d in uncertainty.iteritems():
for k1, v01 in d.iteritems():
units = v01.units
if units != UREG('percent'):
keys = '%s-%s' % (k0, k1)
raise UncertaintyPercentUnitsError(keys, units)
# check variance is square of uncertainty
if variance and uncertainty:
for k0, d in variance.iteritems():
for k1, v01 in d.iteritems():
keys = '%s-%s' % (k0, k1)
missing = k1 not in uncertainty[k0]
v2 = np.asarray(uncertainty[k0][k1].to('fraction').m) ** 2.0
if missing or not np.allclose(np.asarray(v01), v2):
raise UncertaintyVarianceError(keys, v01)
# check that isconstant is boolean
if isconstant:
for k, v in isconstant.iteritems():
if not isinstance(v, bool):
classname = self.__class__.__name__
error_msg = ['%s meta "isconstant" should be' % classname,
'boolean, but it was "%s" for "%s".' % (v, k)]
raise TypeError(' '.join(error_msg))
# call super method, meta must be passed as kwargs!
super(DataRegistry, self).register(newdata, **kwargs)
|
python
|
def register(self, newdata, *args, **kwargs):
"""
Register data in registry. Meta for each data is specified by positional
or keyword arguments after the new data and consists of the following:
* ``uncertainty`` - Map of uncertainties in percent corresponding to new
keys. The uncertainty keys must be a subset of the new data keys.
* ``variance`` - Square of the uncertainty (no units).
* ``isconstant``: Map corresponding to new keys whose values are ``True``
if constant or ``False`` if periodic. These keys must be a subset of
the new data keys.
* ``timeseries``: Name of corresponding time series data, ``None`` if no
time series. _EG_: DNI data ``timeseries`` attribute might be set to a
date/time data that it corresponds to. More than one data can have the
same ``timeseries`` data.
* ``data_source``: the
:class:`~simkit.core.data_sources.DataSource` superclass that
was used to acquire this data. This can be used to group data from a
specific source together.
:param newdata: New data to add to registry. When registering new data,
keys are not allowed to override existing keys in the data
registry.
:type newdata: mapping
:raises:
:exc:`~simkit.core.exceptions.UncertaintyPercentUnitsError`
"""
kwargs.update(zip(self.meta_names, args))
# check uncertainty has units of percent
uncertainty = kwargs['uncertainty']
variance = kwargs['variance']
isconstant = kwargs['isconstant']
# check uncertainty is percent
if uncertainty:
for k0, d in uncertainty.iteritems():
for k1, v01 in d.iteritems():
units = v01.units
if units != UREG('percent'):
keys = '%s-%s' % (k0, k1)
raise UncertaintyPercentUnitsError(keys, units)
# check variance is square of uncertainty
if variance and uncertainty:
for k0, d in variance.iteritems():
for k1, v01 in d.iteritems():
keys = '%s-%s' % (k0, k1)
missing = k1 not in uncertainty[k0]
v2 = np.asarray(uncertainty[k0][k1].to('fraction').m) ** 2.0
if missing or not np.allclose(np.asarray(v01), v2):
raise UncertaintyVarianceError(keys, v01)
# check that isconstant is boolean
if isconstant:
for k, v in isconstant.iteritems():
if not isinstance(v, bool):
classname = self.__class__.__name__
error_msg = ['%s meta "isconstant" should be' % classname,
'boolean, but it was "%s" for "%s".' % (v, k)]
raise TypeError(' '.join(error_msg))
# call super method, meta must be passed as kwargs!
super(DataRegistry, self).register(newdata, **kwargs)
|
[
"def",
"register",
"(",
"self",
",",
"newdata",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"update",
"(",
"zip",
"(",
"self",
".",
"meta_names",
",",
"args",
")",
")",
"# check uncertainty has units of percent",
"uncertainty",
"=",
"kwargs",
"[",
"'uncertainty'",
"]",
"variance",
"=",
"kwargs",
"[",
"'variance'",
"]",
"isconstant",
"=",
"kwargs",
"[",
"'isconstant'",
"]",
"# check uncertainty is percent",
"if",
"uncertainty",
":",
"for",
"k0",
",",
"d",
"in",
"uncertainty",
".",
"iteritems",
"(",
")",
":",
"for",
"k1",
",",
"v01",
"in",
"d",
".",
"iteritems",
"(",
")",
":",
"units",
"=",
"v01",
".",
"units",
"if",
"units",
"!=",
"UREG",
"(",
"'percent'",
")",
":",
"keys",
"=",
"'%s-%s'",
"%",
"(",
"k0",
",",
"k1",
")",
"raise",
"UncertaintyPercentUnitsError",
"(",
"keys",
",",
"units",
")",
"# check variance is square of uncertainty",
"if",
"variance",
"and",
"uncertainty",
":",
"for",
"k0",
",",
"d",
"in",
"variance",
".",
"iteritems",
"(",
")",
":",
"for",
"k1",
",",
"v01",
"in",
"d",
".",
"iteritems",
"(",
")",
":",
"keys",
"=",
"'%s-%s'",
"%",
"(",
"k0",
",",
"k1",
")",
"missing",
"=",
"k1",
"not",
"in",
"uncertainty",
"[",
"k0",
"]",
"v2",
"=",
"np",
".",
"asarray",
"(",
"uncertainty",
"[",
"k0",
"]",
"[",
"k1",
"]",
".",
"to",
"(",
"'fraction'",
")",
".",
"m",
")",
"**",
"2.0",
"if",
"missing",
"or",
"not",
"np",
".",
"allclose",
"(",
"np",
".",
"asarray",
"(",
"v01",
")",
",",
"v2",
")",
":",
"raise",
"UncertaintyVarianceError",
"(",
"keys",
",",
"v01",
")",
"# check that isconstant is boolean",
"if",
"isconstant",
":",
"for",
"k",
",",
"v",
"in",
"isconstant",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"bool",
")",
":",
"classname",
"=",
"self",
".",
"__class__",
".",
"__name__",
"error_msg",
"=",
"[",
"'%s meta \"isconstant\" should be'",
"%",
"classname",
",",
"'boolean, but it was \"%s\" for \"%s\".'",
"%",
"(",
"v",
",",
"k",
")",
"]",
"raise",
"TypeError",
"(",
"' '",
".",
"join",
"(",
"error_msg",
")",
")",
"# call super method, meta must be passed as kwargs!",
"super",
"(",
"DataRegistry",
",",
"self",
")",
".",
"register",
"(",
"newdata",
",",
"*",
"*",
"kwargs",
")"
] |
Register data in registry. Meta for each data is specified by positional
or keyword arguments after the new data and consists of the following:
* ``uncertainty`` - Map of uncertainties in percent corresponding to new
keys. The uncertainty keys must be a subset of the new data keys.
* ``variance`` - Square of the uncertainty (no units).
* ``isconstant``: Map corresponding to new keys whose values are ``True``
if constant or ``False`` if periodic. These keys must be a subset of
the new data keys.
* ``timeseries``: Name of corresponding time series data, ``None`` if no
time series. _EG_: DNI data ``timeseries`` attribute might be set to a
date/time data that it corresponds to. More than one data can have the
same ``timeseries`` data.
* ``data_source``: the
:class:`~simkit.core.data_sources.DataSource` superclass that
was used to acquire this data. This can be used to group data from a
specific source together.
:param newdata: New data to add to registry. When registering new data,
keys are not allowed to override existing keys in the data
registry.
:type newdata: mapping
:raises:
:exc:`~simkit.core.exceptions.UncertaintyPercentUnitsError`
|
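A hedged sketch of a call consistent with the checks above (note the source is Python 2 style, e.g. ``iteritems``). `registry` is assumed to be a DataRegistry instance, UREG the pint unit registry the checks use, and the data key 'dni' is hypothetical. For a 1% uncertainty the variance must be the squared fraction, 1e-4:

newdata = {'dni': 1000.0}
registry.register(
    newdata,
    uncertainty={'dni': {'dni': 1.0 * UREG('percent')}},  # must carry percent units
    variance={'dni': {'dni': 1e-4}},   # (1% -> fraction 0.01)**2, np.allclose-checked
    isconstant={'dni': False},         # must be a real bool, not merely truthy
    timeseries=None,
    data_source=None,
)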
[
"Register",
"data",
"in",
"registry",
".",
"Meta",
"for",
"each",
"data",
"is",
"specified",
"by",
"positional",
"or",
"keyword",
"arguments",
"after",
"the",
"new",
"data",
"and",
"consists",
"of",
"the",
"following",
":"
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_sources.py#L49-L107
|
BreakingBytes/simkit
|
simkit/core/data_sources.py
|
DataSource._is_cached
|
def _is_cached(self, ext='.json'):
"""
Determine if ``filename`` is cached using extension ``ext``, a string.
:param ext: extension used to cache ``filename``, default is '.json'
:type ext: str
:return: True if ``filename`` is cached using extension ``ext``
:rtype: bool
"""
# extension must start with a dot
if not ext.startswith('.'):
# prepend extension with a dot
ext = '.%s' % ext
# cache file is filename with extension
cache_file = '%s%s' % (self.filename, ext)
# if filename already ends with extension or there's a file with the
# extension, then assume the data is cached
return self.filename.endswith(ext) or os.path.exists(cache_file)
|
python
|
def _is_cached(self, ext='.json'):
"""
Determine if ``filename`` is cached using extension ``ext``, a string.
:param ext: extension used to cache ``filename``, default is '.json'
:type ext: str
:return: True if ``filename`` is cached using extension ``ext``
:rtype: bool
"""
# extension must start with a dot
if not ext.startswith('.'):
# prepend extension with a dot
ext = '.%s' % ext
# cache file is filename with extension
cache_file = '%s%s' % (self.filename, ext)
# if filename already ends with extension or there's a file with the
# extension, then assume the data is cached
return self.filename.endswith(ext) or os.path.exists(cache_file)
|
[
"def",
"_is_cached",
"(",
"self",
",",
"ext",
"=",
"'.json'",
")",
":",
"# extension must start with a dot",
"if",
"not",
"ext",
".",
"startswith",
"(",
"'.'",
")",
":",
"# prepend extension with a dot",
"ext",
"=",
"'.%s'",
"%",
"ext",
"# cache file is filename with extension",
"cache_file",
"=",
"'%s%s'",
"%",
"(",
"self",
".",
"filename",
",",
"ext",
")",
"# if filename already ends with extension or there's a file with the",
"# extension, then assume the data is cached",
"return",
"self",
".",
"filename",
".",
"endswith",
"(",
"ext",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"cache_file",
")"
] |
Determine if ``filename`` is cached using extension ``ext``, a string.
:param ext: extension used to cache ``filename``, default is '.json'
:type ext: str
:return: True if ``filename`` is cached using extension ``ext``
:rtype: bool
|
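A quick sketch of the three outcomes, assuming a DataSource instance `ds` with a hypothetical filename:

ds.filename = 'pvdata.xlsx'
ds._is_cached()        # True only if 'pvdata.xlsx.json' exists on disk
ds._is_cached('json')  # same check: the missing dot is prepended
ds.filename = 'pvdata.json'
ds._is_cached()        # True: the filename already ends with '.json'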
[
"Determine",
"if",
"filename",
"is",
"cached",
"using",
"extension",
"ex",
"a",
"string",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_sources.py#L237-L254
|
BreakingBytes/simkit
|
simkit/core/data_sources.py
|
DataSource.saveas_json
|
def saveas_json(self, save_name):
"""
Save :attr:`data`, :attr:`param_file`, original :attr:`data_reader`
and UTC modification time as keys in JSON file. If data is edited then
it should be saved using this method. Non-JSON data files are also
saved using this method.
:param save_name: Name to save JSON file as, ".json" is appended.
:type save_name: str
"""
# make PyCharm happy by defining inferred objects
meta = getattr(self, DataSourceBase._meta_attr)
param_file = getattr(self, DataSourceBase._param_file)
# JSONEncoder removes units and converts arrays to lists
# save last time file was modified
utc_mod_time = list(time.gmtime(os.path.getmtime(save_name)))
json_data = {'data': self.data, 'utc_mod_time': utc_mod_time,
'param_file': param_file,
'data_reader': meta.data_reader.__name__,
'data_source': self.__class__.__name__}
if not save_name.endswith('.json'):
save_name += '.json'
with open(save_name, 'w') as fp:
json.dump(json_data, fp, cls=SimKitJSONEncoder)
# TODO: test file save successful
# TODO: need to update model
self._is_saved = True
|
python
|
def saveas_json(self, save_name):
"""
Save :attr:`data`, :attr:`param_file`, original :attr:`data_reader`
and UTC modification time as keys in JSON file. If data is edited then
it should be saved using this method. Non-JSON data files are also
saved using this method.
:param save_name: Name to save JSON file as, ".json" is appended.
:type save_name: str
"""
# make PyCharm happy by defining inferred objects
meta = getattr(self, DataSourceBase._meta_attr)
param_file = getattr(self, DataSourceBase._param_file)
# JSONEncoder removes units and converts arrays to lists
# save last time file was modified
utc_mod_time = list(time.gmtime(os.path.getmtime(save_name)))
json_data = {'data': self.data, 'utc_mod_time': utc_mod_time,
'param_file': param_file,
'data_reader': meta.data_reader.__name__,
'data_source': self.__class__.__name__}
if not save_name.endswith('.json'):
save_name += '.json'
with open(save_name, 'w') as fp:
json.dump(json_data, fp, cls=SimKitJSONEncoder)
# TODO: test file save successful
# TODO: need to update model
self._is_saved = True
|
[
"def",
"saveas_json",
"(",
"self",
",",
"save_name",
")",
":",
"# make pycharm by defining inferred objects",
"meta",
"=",
"getattr",
"(",
"self",
",",
"DataSourceBase",
".",
"_meta_attr",
")",
"param_file",
"=",
"getattr",
"(",
"self",
",",
"DataSourceBase",
".",
"_param_file",
")",
"# JSONEncoder removes units and converts arrays to lists",
"# save last time file was modified",
"utc_mod_time",
"=",
"list",
"(",
"time",
".",
"gmtime",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"save_name",
")",
")",
")",
"json_data",
"=",
"{",
"'data'",
":",
"self",
".",
"data",
",",
"'utc_mod_time'",
":",
"utc_mod_time",
",",
"'param_file'",
":",
"param_file",
",",
"'data_reader'",
":",
"meta",
".",
"data_reader",
".",
"__name__",
",",
"'data_source'",
":",
"self",
".",
"__class__",
".",
"__name__",
"}",
"if",
"not",
"save_name",
".",
"endswith",
"(",
"'.json'",
")",
":",
"save_name",
"+=",
"'.json'",
"with",
"open",
"(",
"save_name",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"json_data",
",",
"fp",
",",
"cls",
"=",
"SimKitJSONEncoder",
")",
"# TODO: test file save successful",
"# TODO: need to update model",
"self",
".",
"_is_saved",
"=",
"True"
] |
Save :attr:`data`, :attr:`param_file`, original :attr:`data_reader`
and UTC modification time as keys in JSON file. If data is edited then
it should be saved using this method. Non-JSON data files are also
saved using this method.
:param save_name: Name to save JSON file as, ".json" is appended.
:type save_name: str
|
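For reference, the payload written by this method has the shape sketched below (the values are illustrative, not from the source). Note that ``os.path.getmtime(save_name)`` is called before the '.json' suffix is appended, so ``save_name`` must already exist on disk at that point:

json_data = {
    'data': {},                  # registry data; encoder strips units,
                                 # converts arrays to lists
    'utc_mod_time': [2019, 1, 1, 0, 0, 0, 1, 1, 0],  # time.gmtime(...) as a list
    'param_file': 'pvpower.json',
    'data_reader': 'JSONReader',
    'data_source': 'PVPowerData',
}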
[
"Save",
":",
"attr",
":",
"data",
":",
"attr",
":",
"param_file",
"original",
":",
"attr",
":",
"data_reader",
"and",
"UTC",
"modification",
"time",
"as",
"keys",
"in",
"JSON",
"file",
".",
"If",
"data",
"is",
"edited",
"then",
"it",
"should",
"be",
"saved",
"using",
"this",
"method",
".",
"Non",
"-",
"JSON",
"data",
"files",
"are",
"also",
"saved",
"using",
"this",
"method",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_sources.py#L260-L286
|
BreakingBytes/simkit
|
simkit/core/data_sources.py
|
DataSource.edit
|
def edit(self, edits, data_reg):
"""
Edit data in :class:`Data_Source`. Sets :attr:`issaved` to ``False``.
"""
data_reg.update(edits)
self._is_saved = False
|
python
|
def edit(self, edits, data_reg):
"""
Edit data in :class:`Data_Source`. Sets :attr:`issaved` to ``False``.
"""
data_reg.update(edits)
self._is_saved = False
|
[
"def",
"edit",
"(",
"self",
",",
"edits",
",",
"data_reg",
")",
":",
"data_reg",
".",
"update",
"(",
"edits",
")",
"self",
".",
"_is_saved",
"=",
"False"
] |
Edit data in :class:`Data_Source`. Sets :attr:`issaved` to ``False``.
|
[
"Edit",
"data",
"in",
":",
"class",
":",
"Data_Source",
".",
"Sets",
":",
"attr",
":",
"issaved",
"to",
"False",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_sources.py#L288-L293
|
guaix-ucm/pyemir
|
emirdrp/tools/list_slitlets_from_string.py
|
list_slitlets_from_string
|
def list_slitlets_from_string(s, islitlet_min, islitlet_max):
"""Return list of slitlets from string specification.
Parameters
----------
s : string
String defining the slitlets. The slitlets must be specified
as a set of n1[,n2[,step]] tuples. If only n1 is provided,
the slitlet number n1 is considered. When n1 and n2 are given
but step is missing, step=1 is assumed. Finally, when all
n1, n2 and step are given, slitlets considered are those
returned by range(n1, n2 + 1, step) function.
islitlet_min : int
Minimum slitlet number allowed.
islitlet_max : int
Maximum slitlet number allowed.
Returns
-------
list_slitlets : Python list
List of slitlets.
"""
# protection
if not isinstance(s, str):
print('type(s): ', type(s))
print('ERROR: function expected a string parameter')
# initialize empty output
set_slitlets = set()
# remove leading blank spaces
s = re.sub('^ *', '', s)
# remove trailing blank spaces
s = re.sub(' *$', '', s)
# remove blank space before ','
s = re.sub(' *,', ',', s)
# remove blank spaces after ','
s = re.sub(', *', ',', s)
# collapse runs of blank spaces into a single blank space
s = re.sub(' +', ' ', s)
stuples = s.split()
for item in stuples:
subitems = item.split(',')
nsubitems = len(subitems)
if nsubitems == 1:
n1 = int(subitems[0])
n2 = n1
step = 1
elif nsubitems == 2:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = 1
elif nsubitems == 3:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = int(subitems[2])
else:
raise ValueError('Unexpected slitlet range:', s)
for i in range(n1, n2 + 1, step):
if islitlet_min <= i <= islitlet_max:
set_slitlets.add(i)
else:
print('islitlet_min: ', islitlet_min)
print('islitlet_max: ', islitlet_max)
print('i...........: ', i)
raise ValueError("Slitlet number out of range!")
list_slitlets = list(set_slitlets)
list_slitlets.sort()
# return result
return list_slitlets
|
python
|
def list_slitlets_from_string(s, islitlet_min, islitlet_max):
"""Return list of slitlets from string specification.
Parameters
----------
s : string
String defining the slitlets. The slitlets must be specified
as a set of n1[,n2[,step]] tuples. If only n1 is provided,
the slitlet number n1 is considered. When n1 and n2 are given
but step is missing, step=1 is assumed. Finally, when all
n1, n2 and step are given, slitlets considered are those
returned by range(n1, n2 + 1, step) function.
islitlet_min : int
Minimum slitlet number allowed.
islitlet_max : int
Maximum slitlet number allowed.
Returns
-------
list_slitlets : Python list
List of slitlets.
"""
# protection
if not isinstance(s, str):
print('type(s): ', type(s))
print('ERROR: function expected a string parameter')
# initialize empty output
set_slitlets = set()
# remove leading blank spaces
s = re.sub('^ *', '', s)
# remove trailing blank spaces
s = re.sub(' *$', '', s)
# remove blank space before ','
s = re.sub(' *,', ',', s)
# remove blank spaces after ','
s = re.sub(', *', ',', s)
# collapse runs of blank spaces into a single blank space
s = re.sub(' +', ' ', s)
stuples = s.split()
for item in stuples:
subitems = item.split(',')
nsubitems = len(subitems)
if nsubitems == 1:
n1 = int(subitems[0])
n2 = n1
step = 1
elif nsubitems == 2:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = 1
elif nsubitems == 3:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = int(subitems[2])
else:
raise ValueError('Unexpected slitlet range:', s)
for i in range(n1, n2 + 1, step):
if islitlet_min <= i <= islitlet_max:
set_slitlets.add(i)
else:
print('islitlet_min: ', islitlet_min)
print('islitlet_max: ', islitlet_max)
print('i...........: ', i)
raise ValueError("Slitlet number out of range!")
list_slitlets = list(set_slitlets)
list_slitlets.sort()
# return result
return list_slitlets
|
[
"def",
"list_slitlets_from_string",
"(",
"s",
",",
"islitlet_min",
",",
"islitlet_max",
")",
":",
"# protection",
"if",
"not",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"print",
"(",
"'type(s): '",
",",
"type",
"(",
"s",
")",
")",
"print",
"(",
"'ERROR: function expected a string parameter'",
")",
"# initialize empty output",
"set_slitlets",
"=",
"set",
"(",
")",
"# remove leading blank spaces",
"s",
"=",
"re",
".",
"sub",
"(",
"'^ *'",
",",
"''",
",",
"s",
")",
"# remove trailing blank spaces",
"s",
"=",
"re",
".",
"sub",
"(",
"' *$'",
",",
"''",
",",
"s",
")",
"# remove blank space before ','",
"s",
"=",
"re",
".",
"sub",
"(",
"' *,'",
",",
"','",
",",
"s",
")",
"# remove blank spaces after ','",
"s",
"=",
"re",
".",
"sub",
"(",
"', *'",
",",
"','",
",",
"s",
")",
"# remove many blank spaces by a single blank space",
"s",
"=",
"re",
".",
"sub",
"(",
"' +'",
",",
"' '",
",",
"s",
")",
"stuples",
"=",
"s",
".",
"split",
"(",
")",
"for",
"item",
"in",
"stuples",
":",
"subitems",
"=",
"item",
".",
"split",
"(",
"','",
")",
"nsubitems",
"=",
"len",
"(",
"subitems",
")",
"if",
"nsubitems",
"==",
"1",
":",
"n1",
"=",
"int",
"(",
"subitems",
"[",
"0",
"]",
")",
"n2",
"=",
"n1",
"step",
"=",
"1",
"elif",
"nsubitems",
"==",
"2",
":",
"n1",
"=",
"int",
"(",
"subitems",
"[",
"0",
"]",
")",
"n2",
"=",
"int",
"(",
"subitems",
"[",
"1",
"]",
")",
"step",
"=",
"1",
"elif",
"nsubitems",
"==",
"3",
":",
"n1",
"=",
"int",
"(",
"subitems",
"[",
"0",
"]",
")",
"n2",
"=",
"int",
"(",
"subitems",
"[",
"1",
"]",
")",
"step",
"=",
"int",
"(",
"subitems",
"[",
"2",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unexpected slitlet range:'",
",",
"s",
")",
"for",
"i",
"in",
"range",
"(",
"n1",
",",
"n2",
"+",
"1",
",",
"step",
")",
":",
"if",
"islitlet_min",
"<=",
"i",
"<=",
"islitlet_max",
":",
"set_slitlets",
".",
"add",
"(",
"i",
")",
"else",
":",
"print",
"(",
"'islitlet_min: '",
",",
"islitlet_min",
")",
"print",
"(",
"'islitlet_max: '",
",",
"islitlet_max",
")",
"print",
"(",
"'i...........: '",
",",
"i",
")",
"raise",
"ValueError",
"(",
"\"Slitlet number out of range!\"",
")",
"list_slitlets",
"=",
"list",
"(",
"set_slitlets",
")",
"list_slitlets",
".",
"sort",
"(",
")",
"# return result",
"return",
"list_slitlets"
] |
Return list of slitlets from string specification.
Parameters
----------
s : string
String defining the slitlets. The slitlets must be specified
as a set of n1[,n2[,step]] tuples. If only n1 is provided,
the slitlet number n1 is considered. When n1 and n2 are given
but step is missing, step=1 is assumed. Finally, when all
n1, n2 and step are given, slitlets considered are those
returned by range(n1, n2 + 1, step) function.
islitlet_min : int
Minimum slitlet number allowed.
islitlet_max : int
Maximum slitlet number allowed.
Returns
-------
list_slitlets : Python list
List of slitlets.
|
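A worked example of the tuple grammar, verified against the parsing rules above:

list_slitlets_from_string("2 4,6 10,20,5", islitlet_min=1, islitlet_max=55)
# '2'       -> [2]
# '4,6'     -> range(4, 7)      -> [4, 5, 6]
# '10,20,5' -> range(10, 21, 5) -> [10, 15, 20]
# returns [2, 4, 5, 6, 10, 15, 20]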
[
"Return",
"list",
"of",
"slitlets",
"from",
"string",
"specification",
"."
] |
train
|
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/list_slitlets_from_string.py#L26-L99
|
Jaymon/prom
|
prom/query.py
|
Query.ref
|
def ref(self, orm_classpath, cls_pk=None):
"""
takes a classpath to allow querying from another Orm class.
It takes string paths to avoid infinite-recursion import problems:
an orm class in module A might have a ref in module B, and it is
sometimes handy for module B to fetch the module A objects that
correspond to an object in module B, but module A can't be imported
into module B because module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query()
"""
# split orm from module path
orm_module, orm_class = get_objects(orm_classpath)
# if orm_classpath.startswith("."):
# # we handle relative classpaths by using the orm_class and its parents
# # to find the relative import
# if self.orm_class:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# self.orm_class.__module__
# )
# except ImportError:
# parents = inspect.getmro(self.orm_class)
# if parents:
# for pc in parents[1:-1]:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# pc.__module__
# )
# except ImportError:
# pass
#
# if not orm_module or not orm_class:
# raise ImportError(
# "Unable to resolve relative ref using {}".format(
# self.orm_class.__module__
# )
# )
#
# else:
# raise ImportError("trying relative ref without orm_class")
#
# else:
# orm_module, orm_class = get_objects(orm_classpath)
# if isinstance(orm_classpath, basestring):
# orm_module, orm_class = get_objects(orm_classpath)
# else:
# orm_module, orm_class = get_objects(orm_classpath[0], orm_classpath[1])
q = orm_class.query
if cls_pk:
found = False
for fn, f in orm_class.schema.fields.items():
cls_ref_s = f.schema
if cls_ref_s and self.schema == cls_ref_s:
q.is_field(fn, cls_pk)
found = True
break
if not found:
raise ValueError("Did not find a foreign key field for [{}] in [{}]".format(
self.orm_class.table_name,
orm_class.table_name,
))
return q
|
python
|
def ref(self, orm_classpath, cls_pk=None):
"""
takes a classpath to allow querying from another Orm class.
It takes string paths to avoid infinite-recursion import problems:
an orm class in module A might have a ref in module B, and it is
sometimes handy for module B to fetch the module A objects that
correspond to an object in module B, but module A can't be imported
into module B because module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query()
"""
# split orm from module path
orm_module, orm_class = get_objects(orm_classpath)
# if orm_classpath.startswith("."):
# # we handle relative classpaths by using the orm_class and its parents
# # to find the relative import
# if self.orm_class:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# self.orm_class.__module__
# )
# except ImportError:
# parents = inspect.getmro(self.orm_class)
# if parents:
# for pc in parents[1:-1]:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# pc.__module__
# )
# except ImportError:
# pass
#
# if not orm_module or not orm_class:
# raise ImportError(
# "Unable to resolve relative ref using {}".format(
# self.orm_class.__module__
# )
# )
#
# else:
# raise ImportError("trying relative ref without orm_class")
#
# else:
# orm_module, orm_class = get_objects(orm_classpath)
# if isinstance(orm_classpath, basestring):
# orm_module, orm_class = get_objects(orm_classpath)
# else:
# orm_module, orm_class = get_objects(orm_classpath[0], orm_classpath[1])
q = orm_class.query
if cls_pk:
found = False
for fn, f in orm_class.schema.fields.items():
cls_ref_s = f.schema
if cls_ref_s and self.schema == cls_ref_s:
q.is_field(fn, cls_pk)
found = True
break
if not found:
raise ValueError("Did not find a foreign key field for [{}] in [{}]".format(
self.orm_class.table_name,
orm_class.table_name,
))
return q
|
[
"def",
"ref",
"(",
"self",
",",
"orm_classpath",
",",
"cls_pk",
"=",
"None",
")",
":",
"# split orm from module path",
"orm_module",
",",
"orm_class",
"=",
"get_objects",
"(",
"orm_classpath",
")",
"# if orm_classpath.startswith(\".\"):",
"# # we handle relative classpaths by using the orm_class and its parents",
"# # to find the relative import",
"# if self.orm_class:",
"# try:",
"# orm_module, orm_class = get_objects(",
"# orm_classpath,",
"# self.orm_class.__module__",
"# )",
"# except ImportError:",
"# parents = inspect.getmro(self.orm_class)",
"# if parents:",
"# for pc in parents[1:-1]:",
"# try:",
"# orm_module, orm_class = get_objects(",
"# orm_classpath,",
"# pc.__module__",
"# )",
"# except ImportError:",
"# pass",
"# ",
"# if not orm_module or not orm_class:",
"# raise ImportError(",
"# \"Unable to resolve relative ref using {}\".format(",
"# self.orm_class.__module__",
"# )",
"# )",
"# ",
"# else:",
"# raise ImportError(\"trying relative ref without orm_class\")",
"# ",
"# else:",
"# orm_module, orm_class = get_objects(orm_classpath)",
"# if isinstance(orm_classpath, basestring):",
"# orm_module, orm_class = get_objects(orm_classpath)",
"# else:",
"# orm_module, orm_class = get_objects(orm_classpath[0], orm_classpath[1])",
"q",
"=",
"orm_class",
".",
"query",
"if",
"cls_pk",
":",
"found",
"=",
"False",
"for",
"fn",
",",
"f",
"in",
"orm_class",
".",
"schema",
".",
"fields",
".",
"items",
"(",
")",
":",
"cls_ref_s",
"=",
"f",
".",
"schema",
"if",
"cls_ref_s",
"and",
"self",
".",
"schema",
"==",
"cls_ref_s",
":",
"q",
".",
"is_field",
"(",
"fn",
",",
"cls_pk",
")",
"found",
"=",
"True",
"break",
"if",
"not",
"found",
":",
"raise",
"ValueError",
"(",
"\"Did not find a foreign key field for [{}] in [{}]\"",
".",
"format",
"(",
"self",
".",
"orm_class",
".",
"table_name",
",",
"orm_class",
".",
"table_name",
",",
")",
")",
"return",
"q"
] |
takes a classpath to allow querying from another Orm class.
It takes string paths to avoid infinite-recursion import problems:
an orm class in module A might have a ref in module B, and it is
sometimes handy for module B to fetch the module A objects that
correspond to an object in module B, but module A can't be imported
into module B because module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query()
|
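A hedged usage sketch; the orm classes, module path, and variable names are hypothetical:

# from inside a query on orm module A, fetch the module B rows that
# point back at this orm, without importing B at module level
comments = user_query.ref("blog.models.Comment", cls_pk=user.pk).get()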
[
"takes",
"a",
"classpath",
"to",
"allow",
"query",
"-",
"ing",
"from",
"another",
"Orm",
"class"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L638-L711
|
Jaymon/prom
|
prom/query.py
|
Query.unique_field
|
def unique_field(self, field_name):
"""set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...)"""
self.fields_set.options["unique"] = True
return self.select_field(field_name)
|
python
|
def unique_field(self, field_name):
"""set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...)"""
self.fields_set.options["unique"] = True
return self.select_field(field_name)
|
[
"def",
"unique_field",
"(",
"self",
",",
"field_name",
")",
":",
"self",
".",
"fields_set",
".",
"options",
"[",
"\"unique\"",
"]",
"=",
"True",
"return",
"self",
".",
"select_field",
"(",
"field_name",
")"
] |
set a unique field to be selected, this is automatically called when you do unique_FIELDNAME(...)
|
[
"set",
"a",
"unique",
"field",
"to",
"be",
"selected",
"this",
"is",
"automatically",
"called",
"when",
"you",
"do",
"unique_FIELDNAME",
"(",
"...",
")"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L721-L724
|
Jaymon/prom
|
prom/query.py
|
Query.select_fields
|
def select_fields(self, *fields):
"""set multiple fields to be selected"""
if fields:
if not isinstance(fields[0], basestring):
fields = list(fields[0]) + list(fields)[1:]
for field_name in fields:
field_name = self._normalize_field_name(field_name)
self.select_field(field_name)
return self
|
python
|
def select_fields(self, *fields):
"""set multiple fields to be selected"""
if fields:
if not isinstance(fields[0], basestring):
fields = list(fields[0]) + list(fields)[1:]
for field_name in fields:
field_name = self._normalize_field_name(field_name)
self.select_field(field_name)
return self
|
[
"def",
"select_fields",
"(",
"self",
",",
"*",
"fields",
")",
":",
"if",
"fields",
":",
"if",
"not",
"isinstance",
"(",
"fields",
"[",
"0",
"]",
",",
"basestring",
")",
":",
"fields",
"=",
"list",
"(",
"fields",
"[",
"0",
"]",
")",
"+",
"list",
"(",
"fields",
")",
"[",
"1",
":",
"]",
"for",
"field_name",
"in",
"fields",
":",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"self",
".",
"select_field",
"(",
"field_name",
")",
"return",
"self"
] |
set multiple fields to be selected
|
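Both calling conventions the isinstance check supports, on a hypothetical query `q`:

q.select_fields("foo", "bar")    # varargs of names
q.select_fields(["foo", "bar"])  # a single iterable is flattened first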
[
"set",
"multiple",
"fields",
"to",
"be",
"selected"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L737-L746
|
Jaymon/prom
|
prom/query.py
|
Query.set_field
|
def set_field(self, field_name, field_val=None):
"""
set a field into .fields attribute
In insert/update queries, these are the fields that will be inserted/updated into the db
"""
field_name = self._normalize_field_name(field_name)
self.fields_set.append(field_name, [field_name, field_val])
return self
|
python
|
def set_field(self, field_name, field_val=None):
"""
set a field into .fields attribute
In insert/update queries, these are the fields that will be inserted/updated into the db
"""
field_name = self._normalize_field_name(field_name)
self.fields_set.append(field_name, [field_name, field_val])
return self
|
[
"def",
"set_field",
"(",
"self",
",",
"field_name",
",",
"field_val",
"=",
"None",
")",
":",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"self",
".",
"fields_set",
".",
"append",
"(",
"field_name",
",",
"[",
"field_name",
",",
"field_val",
"]",
")",
"return",
"self"
] |
set a field into .fields attribute
In insert/update queries, these are the fields that will be inserted/updated into the db
|
[
"set",
"a",
"field",
"into",
".",
"fields",
"attribute"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L748-L756
|
Jaymon/prom
|
prom/query.py
|
Query.set_fields
|
def set_fields(self, fields=None, *fields_args, **fields_kwargs):
"""
completely replaces the current .fields with fields and fields_kwargs combined
"""
if fields_args:
fields = [fields]
fields.extend(fields_args)
for field_name in fields:
self.set_field(field_name)
elif fields_kwargs:
fields = make_dict(fields, fields_kwargs)
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
if isinstance(fields, Mapping):
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
for field_name in fields:
self.set_field(field_name)
return self
|
python
|
def set_fields(self, fields=None, *fields_args, **fields_kwargs):
"""
completely replaces the current .fields with fields and fields_kwargs combined
"""
if fields_args:
fields = [fields]
fields.extend(fields_args)
for field_name in fields:
self.set_field(field_name)
elif fields_kwargs:
fields = make_dict(fields, fields_kwargs)
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
if isinstance(fields, Mapping):
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
for field_name in fields:
self.set_field(field_name)
return self
|
[
"def",
"set_fields",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"*",
"fields_args",
",",
"*",
"*",
"fields_kwargs",
")",
":",
"if",
"fields_args",
":",
"fields",
"=",
"[",
"fields",
"]",
"fields",
".",
"extend",
"(",
"fields_args",
")",
"for",
"field_name",
"in",
"fields",
":",
"self",
".",
"set_field",
"(",
"field_name",
")",
"elif",
"fields_kwargs",
":",
"fields",
"=",
"make_dict",
"(",
"fields",
",",
"fields_kwargs",
")",
"for",
"field_name",
",",
"field_val",
"in",
"fields",
".",
"items",
"(",
")",
":",
"self",
".",
"set_field",
"(",
"field_name",
",",
"field_val",
")",
"else",
":",
"if",
"isinstance",
"(",
"fields",
",",
"Mapping",
")",
":",
"for",
"field_name",
",",
"field_val",
"in",
"fields",
".",
"items",
"(",
")",
":",
"self",
".",
"set_field",
"(",
"field_name",
",",
"field_val",
")",
"else",
":",
"for",
"field_name",
"in",
"fields",
":",
"self",
".",
"set_field",
"(",
"field_name",
")",
"return",
"self"
] |
completely replaces the current .fields with fields and fields_kwargs combined
|
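The three call shapes the branches above accept; the query object `q` and field names are hypothetical:

q.set_fields("foo", "bar")          # names only, values default to None
q.set_fields(foo=1, bar=2)          # keyword form, merged via make_dict
q.set_fields({"foo": 1, "bar": 2})  # a Mapping sets name/value pairs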
[
"completely",
"replaces",
"the",
"current",
".",
"fields",
"with",
"fields",
"and",
"fields_kwargs",
"combined"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L762-L786
|
Jaymon/prom
|
prom/query.py
|
Query.in_field
|
def in_field(self, field_name, *field_vals, **field_kwargs):
"""
field_vals -- list -- a list of field_val values
"""
field_name = self._normalize_field_name(field_name)
fv = make_list(field_vals[0]) if field_vals else None
if field_kwargs:
for k in field_kwargs:
if not field_kwargs[k]:
raise ValueError("Cannot IN an empty list")
field_kwargs[k] = make_list(field_kwargs[k])
else:
if not fv: self.can_get = False
self.fields_where.append(field_name, ["in", field_name, fv, field_kwargs])
return self
|
python
|
def in_field(self, field_name, *field_vals, **field_kwargs):
"""
field_vals -- list -- a list of field_val values
"""
field_name = self._normalize_field_name(field_name)
fv = make_list(field_vals[0]) if field_vals else None
if field_kwargs:
for k in field_kwargs:
if not field_kwargs[k]:
raise ValueError("Cannot IN an empty list")
field_kwargs[k] = make_list(field_kwargs[k])
else:
if not fv: self.can_get = False
self.fields_where.append(field_name, ["in", field_name, fv, field_kwargs])
return self
|
[
"def",
"in_field",
"(",
"self",
",",
"field_name",
",",
"*",
"field_vals",
",",
"*",
"*",
"field_kwargs",
")",
":",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"fv",
"=",
"make_list",
"(",
"field_vals",
"[",
"0",
"]",
")",
"if",
"field_vals",
"else",
"None",
"if",
"field_kwargs",
":",
"for",
"k",
"in",
"field_kwargs",
":",
"if",
"not",
"field_kwargs",
"[",
"k",
"]",
":",
"raise",
"ValueError",
"(",
"\"Cannot IN an empty list\"",
")",
"field_kwargs",
"[",
"k",
"]",
"=",
"make_list",
"(",
"field_kwargs",
"[",
"k",
"]",
")",
"else",
":",
"if",
"not",
"fv",
":",
"self",
".",
"can_get",
"=",
"False",
"self",
".",
"fields_where",
".",
"append",
"(",
"field_name",
",",
"[",
"\"in\"",
",",
"field_name",
",",
"fv",
",",
"field_kwargs",
"]",
")",
"return",
"self"
] |
field_vals -- list -- a list of field_val values
|
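A typical call on a hypothetical query `q`; note that an empty list passed positionally disables fetching rather than raising:

q.in_field("status", ["new", "open"])  # WHERE status IN ('new', 'open')
q.in_field("status", [])               # sets q.can_get = False instead of raising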
[
"field_vals",
"--",
"list",
"--",
"a",
"list",
"of",
"field_val",
"values"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L829-L846
|
Jaymon/prom
|
prom/query.py
|
Query.nlike_field
|
def nlike_field(self, field_name, *field_val, **field_kwargs):
"""Perform a field_name NOT LIKE field_val query
:param field_name: string, the field we are filtering on
:param field_val: string, the like query: %val, %val%, val%
:returns: self, for fluid interface
"""
if not field_val:
raise ValueError("Cannot NOT LIKE nothing")
field_name = self._normalize_field_name(field_name)
fv = field_val[0]
self.fields_where.append(field_name, ["nlike", field_name, fv, field_kwargs])
return self
|
python
|
def nlike_field(self, field_name, *field_val, **field_kwargs):
"""Perform a field_name NOT LIKE field_val query
:param field_name: string, the field we are filtering on
:param field_val: string, the like query: %val, %val%, val%
:returns: self, for fluid interface
"""
if not field_val:
raise ValueError("Cannot NOT LIKE nothing")
field_name = self._normalize_field_name(field_name)
fv = field_val[0]
self.fields_where.append(field_name, ["nlike", field_name, fv, field_kwargs])
return self
|
[
"def",
"nlike_field",
"(",
"self",
",",
"field_name",
",",
"*",
"field_val",
",",
"*",
"*",
"field_kwargs",
")",
":",
"if",
"not",
"field_val",
":",
"raise",
"ValueError",
"(",
"\"Cannot NOT LIKE nothing\"",
")",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"fv",
"=",
"field_val",
"[",
"0",
"]",
"self",
".",
"fields_where",
".",
"append",
"(",
"field_name",
",",
"[",
"\"nlike\"",
",",
"field_name",
",",
"fv",
",",
"field_kwargs",
"]",
")",
"return",
"self"
] |
Perform a field_name NOT LIKE field_val query
:param field_name: string, the field we are filtering on
:param field_val: string, the like query: %val, %val%, val%
:returns: self, for fluid interface
|
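A sketch of the three wildcard placements the docstring mentions, on a hypothetical query `q`:

q.nlike_field("email", "%@example.com")  # NOT LIKE suffix match
q.nlike_field("email", "%spam%")         # NOT LIKE substring match
q.nlike_field("email", "admin%")         # NOT LIKE prefix match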
[
"Perform",
"a",
"field_name",
"NOT",
"LIKE",
"field_val",
"query"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L890-L902
|
Jaymon/prom
|
prom/query.py
|
Query.sort_field
|
def sort_field(self, field_name, direction, field_vals=None):
"""
sort this query by field_name in direction
field_name -- string -- the field to sort on
direction -- integer -- negative for DESC, positive for ASC
field_vals -- list -- the order the rows should be returned in
"""
field_name = self._normalize_field_name(field_name)
if direction > 0:
direction = 1
elif direction < 0:
direction = -1
else:
raise ValueError("direction {} is undefined".format(direction))
self.fields_sort.append(field_name, [direction, field_name, list(field_vals) if field_vals else field_vals])
return self
|
python
|
def sort_field(self, field_name, direction, field_vals=None):
"""
sort this query by field_name in direction
field_name -- string -- the field to sort on
direction -- integer -- negative for DESC, positive for ASC
field_vals -- list -- the order the rows should be returned in
"""
field_name = self._normalize_field_name(field_name)
if direction > 0:
direction = 1
elif direction < 0:
direction = -1
else:
raise ValueError("direction {} is undefined".format(direction))
self.fields_sort.append(field_name, [direction, field_name, list(field_vals) if field_vals else field_vals])
return self
|
[
"def",
"sort_field",
"(",
"self",
",",
"field_name",
",",
"direction",
",",
"field_vals",
"=",
"None",
")",
":",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"if",
"direction",
">",
"0",
":",
"direction",
"=",
"1",
"elif",
"direction",
"<",
"0",
":",
"direction",
"=",
"-",
"1",
"else",
":",
"raise",
"ValueError",
"(",
"\"direction {} is undefined\"",
".",
"format",
"(",
"direction",
")",
")",
"self",
".",
"fields_sort",
".",
"append",
"(",
"field_name",
",",
"[",
"direction",
",",
"field_name",
",",
"list",
"(",
"field_vals",
")",
"if",
"field_vals",
"else",
"field_vals",
"]",
")",
"return",
"self"
] |
sort this query by field_name in direction
field_name -- string -- the field to sort on
direction -- integer -- negative for DESC, positive for ASC
field_vals -- list -- the order the rows should be returned in
|
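Only the sign of `direction` matters; any positive or negative integer is normalized to 1 or -1. Field names on the hypothetical query `q` are illustrative:

q.sort_field("created", -1)                      # ORDER BY created DESC
q.sort_field("priority", 10)                     # normalized to ASC
q.sort_field("state", 1, field_vals=["a", "b"])  # explicit row ordering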
[
"sort",
"this",
"query",
"by",
"field_name",
"in",
"directrion"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L904-L921
|
Jaymon/prom
|
prom/query.py
|
Query.get
|
def get(self, limit=None, page=None):
"""
get results from the db
return -- Iterator()
"""
has_more = False
self.bounds.paginate = True
limit_paginate, offset = self.bounds.get(limit, page)
self.default_val = []
results = self._query('get')
if limit_paginate:
self.bounds.paginate = False
if len(results) == limit_paginate:
has_more = True
results.pop(-1)
it = ResultsIterator(results, orm_class=self.orm_class, has_more=has_more, query=self)
return self.iterator_class(it)
|
python
|
def get(self, limit=None, page=None):
"""
get results from the db
return -- Iterator()
"""
has_more = False
self.bounds.paginate = True
limit_paginate, offset = self.bounds.get(limit, page)
self.default_val = []
results = self._query('get')
if limit_paginate:
self.bounds.paginate = False
if len(results) == limit_paginate:
has_more = True
results.pop(-1)
it = ResultsIterator(results, orm_class=self.orm_class, has_more=has_more, query=self)
return self.iterator_class(it)
|
[
"def",
"get",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"has_more",
"=",
"False",
"self",
".",
"bounds",
".",
"paginate",
"=",
"True",
"limit_paginate",
",",
"offset",
"=",
"self",
".",
"bounds",
".",
"get",
"(",
"limit",
",",
"page",
")",
"self",
".",
"default_val",
"=",
"[",
"]",
"results",
"=",
"self",
".",
"_query",
"(",
"'get'",
")",
"if",
"limit_paginate",
":",
"self",
".",
"bounds",
".",
"paginate",
"=",
"False",
"if",
"len",
"(",
"results",
")",
"==",
"limit_paginate",
":",
"has_more",
"=",
"True",
"results",
".",
"pop",
"(",
"-",
"1",
")",
"it",
"=",
"ResultsIterator",
"(",
"results",
",",
"orm_class",
"=",
"self",
".",
"orm_class",
",",
"has_more",
"=",
"has_more",
",",
"query",
"=",
"self",
")",
"return",
"self",
".",
"iterator_class",
"(",
"it",
")"
] |
get results from the db
return -- Iterator()
|
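A hedged sketch of the limit-plus-one pagination used above; whether the wrapping iterator exposes ``has_more`` directly is an assumption here:

it = q.get(limit=10, page=2)  # bounds fetch limit + 1 = 11 rows internally
if it.has_more:               # an 11th row existed and was popped off
    it_next = q.get(limit=10, page=3)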
[
"get",
"results",
"from",
"the",
"db"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1013-L1032
|
Jaymon/prom
|
prom/query.py
|
Query.get_one
|
def get_one(self):
"""get one row from the db"""
self.default_val = None
o = self.default_val
d = self._query('get_one')
if d:
o = self.orm_class(d, hydrate=True)
return o
|
python
|
def get_one(self):
"""get one row from the db"""
self.default_val = None
o = self.default_val
d = self._query('get_one')
if d:
o = self.orm_class(d, hydrate=True)
return o
|
[
"def",
"get_one",
"(",
"self",
")",
":",
"self",
".",
"default_val",
"=",
"None",
"o",
"=",
"self",
".",
"default_val",
"d",
"=",
"self",
".",
"_query",
"(",
"'get_one'",
")",
"if",
"d",
":",
"o",
"=",
"self",
".",
"orm_class",
"(",
"d",
",",
"hydrate",
"=",
"True",
")",
"return",
"o"
] |
get one row from the db
|
[
"get",
"one",
"row",
"from",
"the",
"db"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1048-L1055
|
Jaymon/prom
|
prom/query.py
|
Query.values
|
def values(self, limit=None, page=None):
"""
convenience method to get just the values from the query (same as get().values())
if you want to get all values, you can use: self.all().values()
"""
return self.get(limit=limit, page=page).values()
|
python
|
def values(self, limit=None, page=None):
"""
convenience method to get just the values from the query (same as get().values())
if you want to get all values, you can use: self.all().values()
"""
return self.get(limit=limit, page=page).values()
|
[
"def",
"values",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"return",
"self",
".",
"get",
"(",
"limit",
"=",
"limit",
",",
"page",
"=",
"page",
")",
".",
"values",
"(",
")"
] |
convenience method to get just the values from the query (same as get().values())
if you want to get all values, you can use: self.all().values()
|
[
"convenience",
"method",
"to",
"get",
"just",
"the",
"values",
"from",
"the",
"query",
"(",
"same",
"as",
"get",
"()",
".",
"values",
"()",
")"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1057-L1063
|
Jaymon/prom
|
prom/query.py
|
Query.value
|
def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals
|
python
|
def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals
|
[
"def",
"value",
"(",
"self",
")",
":",
"field_vals",
"=",
"None",
"field_names",
"=",
"self",
".",
"fields_select",
".",
"names",
"(",
")",
"fcount",
"=",
"len",
"(",
"field_names",
")",
"if",
"fcount",
":",
"d",
"=",
"self",
".",
"_query",
"(",
"'get_one'",
")",
"if",
"d",
":",
"field_vals",
"=",
"[",
"d",
".",
"get",
"(",
"fn",
",",
"None",
")",
"for",
"fn",
"in",
"field_names",
"]",
"if",
"fcount",
"==",
"1",
":",
"field_vals",
"=",
"field_vals",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"no select fields were set, so cannot return value\"",
")",
"return",
"field_vals"
] |
convenience method to just get one value or tuple of values for the query
|
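The return shape depends on how many select fields were set; the values on the hypothetical query `q` are illustrative:

q.select_field("email").value()          # -> 'foo@bar.com' (single value)
q.select_fields("_id", "email").value()  # -> [1, 'foo@bar.com'] (list of values)
q.value()                                # -> ValueError: no select fields set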
[
"convenience",
"method",
"to",
"just",
"get",
"one",
"value",
"or",
"tuple",
"of",
"values",
"for",
"the",
"query"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1065-L1080
|
Jaymon/prom
|
prom/query.py
|
Query.pks
|
def pks(self, limit=None, page=None):
"""convenience method for setting select_pk().values() since this is so common"""
self.fields_set.reset()
return self.select_pk().values(limit, page)
|
python
|
def pks(self, limit=None, page=None):
"""convenience method for setting select_pk().values() since this is so common"""
self.fields_set.reset()
return self.select_pk().values(limit, page)
|
[
"def",
"pks",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"self",
".",
"fields_set",
".",
"reset",
"(",
")",
"return",
"self",
".",
"select_pk",
"(",
")",
".",
"values",
"(",
"limit",
",",
"page",
")"
] |
convenience method for setting select_pk().values() since this is so common
|
[
"convenience",
"method",
"for",
"setting",
"select_pk",
"()",
".",
"values",
"()",
"since",
"this",
"is",
"so",
"common"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1082-L1085
|
Jaymon/prom
|
prom/query.py
|
Query.get_pks
|
def get_pks(self, field_vals):
"""convenience method for running in__id([...]).get() since this is so common"""
field_name = self.schema.pk.name
return self.in_field(field_name, field_vals).get()
|
python
|
def get_pks(self, field_vals):
"""convenience method for running in__id([...]).get() since this is so common"""
field_name = self.schema.pk.name
return self.in_field(field_name, field_vals).get()
|
[
"def",
"get_pks",
"(",
"self",
",",
"field_vals",
")",
":",
"field_name",
"=",
"self",
".",
"schema",
".",
"pk",
".",
"name",
"return",
"self",
".",
"in_field",
"(",
"field_name",
",",
"field_vals",
")",
".",
"get",
"(",
")"
] |
convenience method for running in__id([...]).get() since this is so common
|
[
"convenience",
"method",
"for",
"running",
"in__id",
"(",
"[",
"...",
"]",
")",
".",
"get",
"()",
"since",
"this",
"is",
"so",
"common"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1092-L1095
|
Jaymon/prom
|
prom/query.py
|
Query.get_pk
|
def get_pk(self, field_val):
"""convenience method for running is_pk(_id).get_one() since this is so common"""
field_name = self.schema.pk.name
return self.is_field(field_name, field_val).get_one()
|
python
|
def get_pk(self, field_val):
"""convenience method for running is_pk(_id).get_one() since this is so common"""
field_name = self.schema.pk.name
return self.is_field(field_name, field_val).get_one()
|
[
"def",
"get_pk",
"(",
"self",
",",
"field_val",
")",
":",
"field_name",
"=",
"self",
".",
"schema",
".",
"pk",
".",
"name",
"return",
"self",
".",
"is_field",
"(",
"field_name",
",",
"field_val",
")",
".",
"get_one",
"(",
")"
] |
convenience method for running is_pk(_id).get_one() since this is so common
|
[
"convenience",
"method",
"for",
"running",
"is_pk",
"(",
"_id",
")",
".",
"get_one",
"()",
"since",
"this",
"is",
"so",
"common"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1097-L1100
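A combined sketch of the two pk lookup helpers defined above, with hypothetical pk values:

# hypothetical sketch for the pk convenience methods
row = q.get_pk(42)           # is_field(pk_name, 42).get_one()
rows = q.get_pks([1, 2, 3])  # in_field(pk_name, [1, 2, 3]).get()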
|
Jaymon/prom
|
prom/query.py
|
Query.count
|
def count(self):
"""return the count of the criteria"""
# count queries shouldn't care about sorting
fields_sort = self.fields_sort
self.fields_sort = self.fields_sort_class()
self.default_val = 0
ret = self._query('count')
# restore previous values now that count is done
self.fields_sort = fields_sort
return ret
|
python
|
def count(self):
"""return the count of the criteria"""
# count queries shouldn't care about sorting
fields_sort = self.fields_sort
self.fields_sort = self.fields_sort_class()
self.default_val = 0
ret = self._query('count')
# restore previous values now that count is done
self.fields_sort = fields_sort
return ret
|
[
"def",
"count",
"(",
"self",
")",
":",
"# count queries shouldn't care about sorting",
"fields_sort",
"=",
"self",
".",
"fields_sort",
"self",
".",
"fields_sort",
"=",
"self",
".",
"fields_sort_class",
"(",
")",
"self",
".",
"default_val",
"=",
"0",
"ret",
"=",
"self",
".",
"_query",
"(",
"'count'",
")",
"# restore previous values now that count is done",
"self",
".",
"fields_sort",
"=",
"fields_sort",
"return",
"ret"
] |
return the count of the criteria
|
[
"return",
"the",
"count",
"of",
"the",
"criteria"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1110-L1123
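A short sketch of the sort-stripping behavior above; the `asc_bar` magic method is an assumption:

# hypothetical sketch: sorting is ignored while counting, then restored
n = q.asc_bar().count()  # the COUNT query runs without the ORDER BY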
|
Jaymon/prom
|
prom/query.py
|
Query.insert
|
def insert(self):
"""persist the .fields"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=False)
#self.set_fields(fields)
return self.interface.insert(
self.schema,
self.fields
)
|
python
|
def insert(self):
"""persist the .fields"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=False)
#self.set_fields(fields)
return self.interface.insert(
self.schema,
self.fields
)
|
[
"def",
"insert",
"(",
"self",
")",
":",
"self",
".",
"default_val",
"=",
"0",
"#fields = self.fields",
"#fields = self.orm_class.depart(self.fields, is_update=False)",
"#self.set_fields(fields)",
"return",
"self",
".",
"interface",
".",
"insert",
"(",
"self",
".",
"schema",
",",
"self",
".",
"fields",
")",
"return",
"self",
".",
"interface",
".",
"insert",
"(",
"self",
".",
"schema",
",",
"self",
".",
"fields",
")"
] |
persist the .fields
|
[
"persist",
"the",
".",
"fields"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1130-L1141
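A sketch of inserting through the query, using the set_fields() call referenced in the commented-out lines above; treating the return value as the new primary key is an assumption about the backend interface:

# hypothetical sketch for Query.insert()
pk = q.set_fields({"bar": 1, "che": "two"}).insert()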
|
Jaymon/prom
|
prom/query.py
|
Query.update
|
def update(self):
"""persist the .fields using .fields_where"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=True)
#self.set_fields(fields)
return self.interface.update(
self.schema,
self.fields,
self
)
|
python
|
def update(self):
"""persist the .fields using .fields_where"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=True)
#self.set_fields(fields)
return self.interface.update(
self.schema,
self.fields,
self
)
|
[
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"default_val",
"=",
"0",
"#fields = self.fields",
"#fields = self.orm_class.depart(self.fields, is_update=True)",
"#self.set_fields(fields)",
"return",
"self",
".",
"interface",
".",
"update",
"(",
"self",
".",
"schema",
",",
"self",
".",
"fields",
",",
"self",
")"
] |
persist the .fields using .fields_where
|
[
"persist",
"the",
".",
"fields",
"using",
".",
"fields_where"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1143-L1153
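A matching sketch for update(), which applies the set fields to rows matched by the where criteria (the docstring's .fields_where); field names are hypothetical:

# hypothetical sketch for Query.update()
updated = q.is_field("bar", 1).set_fields({"che": "three"}).update()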
|
Jaymon/prom
|
prom/query.py
|
Query.raw
|
def raw(self, query_str, *query_args, **query_options):
"""
use the interface.query() method to pass in your own raw query without
any processing
NOTE -- This will allow you to make any raw query and will usually return
raw results, it won't wrap those results in a prom.Orm iterator instance
like other methods like .all() and .get()
query_str -- string -- the raw query for whatever the backend interface is
query_args -- list -- if you have named parameters, you can pass in the values
**query_options -- dict -- key:val options for the backend, these are very backend specific
return -- mixed -- depends on the backend and the type of query
"""
i = self.interface
return i.query(query_str, *query_args, **query_options)
|
python
|
def raw(self, query_str, *query_args, **query_options):
"""
use the interface.query() method to pass in your own raw query without
any processing
NOTE -- This will allow you to make any raw query and will usually return
raw results, it won't wrap those results in a prom.Orm iterator instance
like other methods like .all() and .get()
query_str -- string -- the raw query for whatever the backend interface is
query_args -- list -- if you have named parameters, you can pass in the values
**query_options -- dict -- key:val options for the backend, these are very backend specific
return -- mixed -- depends on the backend and the type of query
"""
i = self.interface
return i.query(query_str, *query_args, **query_options)
|
[
"def",
"raw",
"(",
"self",
",",
"query_str",
",",
"*",
"query_args",
",",
"*",
"*",
"query_options",
")",
":",
"i",
"=",
"self",
".",
"interface",
"return",
"i",
".",
"query",
"(",
"query_str",
",",
"*",
"query_args",
",",
"*",
"*",
"query_options",
")"
] |
use the interface.query() method to pass in your own raw query without
any processing
NOTE -- This will allow you to make any raw query and will usually return
raw results, it won't wrap those results in a prom.Orm iterator instance
like other methods like .all() and .get()
query_str -- string -- the raw query for whatever the backend interface is
query_args -- list -- if you have named parameters, you can pass in the values
**query_options -- dict -- key:val options for the backend, these are very backend specific
return -- mixed -- depends on the backend and the type of query
|
[
"use",
"the",
"interface",
".",
"query",
"()",
"method",
"to",
"pass",
"in",
"your",
"own",
"raw",
"query",
"without",
"any",
"processing"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1161-L1176
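A sketch of a raw call; the table name and the %s placeholder style are assumptions, since per the docstring the query string and its options are entirely backend specific:

# hypothetical raw query, bypassing all query building
rows = q.raw("SELECT * FROM bar_table WHERE che > %s LIMIT 10", 5)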
|
Jaymon/prom
|
prom/query.py
|
Query.reduce
|
def reduce(self, target_map, target_reduce, threads=0):
"""map/reduce this query among a bunch of processes
:param target_map: callable, this function will be called once for each
row this query pulls out of the db, if you want something about the row
to be seen by the target_reduce function return that value from this function
and it will be queued for the target_reduce function to process it
:param target_reduce: callable, this function will be called for any non
None value that the target_map function returns
:param threads: integer, if not passed in this will be pegged to how many
cpus python detects, which is almost always what you want
"""
if not threads:
threads = multiprocessing.cpu_count()
# we subtract one for the main process
map_threads = threads - 1 if threads > 1 else 1
q = self.copy()
limit = q.bounds.limit
offset = q.bounds.offset
total_count = limit if limit else q.count()
limit_count = int(math.ceil(float(total_count) / float(map_threads)))
logger.info("{} processes will handle {} rows each for a total of {}".format(
map_threads,
limit_count,
total_count
))
queue = multiprocessing.JoinableQueue()
# close all open db global connections just in case, because we can't be sure
# what the target_map methods are going to do, we want them to re-open connections
# that they need
interfaces = get_interfaces()
for name, inter in interfaces.items():
inter.close()
# just in case we also close the query connection since it can in theory
# be non-global
q.interface.close()
ts = []
for page in range(map_threads):
q = self.copy()
q.limit(limit_count).offset(offset + (limit_count * page))
t = ReduceThread(
target=target_map,
query=q,
queue=queue,
)
t.start()
ts.append(t)
while ts or not queue.empty():
try:
val = queue.get(True, 1.0)
target_reduce(val)
except queues.Empty:
pass
else:
queue.task_done()
            # faster than using any((t.is_alive() for t in ts))
ts = [t for t in ts if t.is_alive()]
|
python
|
def reduce(self, target_map, target_reduce, threads=0):
"""map/reduce this query among a bunch of processes
:param target_map: callable, this function will be called once for each
row this query pulls out of the db, if you want something about the row
to be seen by the target_reduce function return that value from this function
and it will be queued for the target_reduce function to process it
:param target_reduce: callable, this function will be called for any non
None value that the target_map function returns
:param threads: integer, if not passed in this will be pegged to how many
cpus python detects, which is almost always what you want
"""
if not threads:
threads = multiprocessing.cpu_count()
# we subtract one for the main process
map_threads = threads - 1 if threads > 1 else 1
q = self.copy()
limit = q.bounds.limit
offset = q.bounds.offset
total_count = limit if limit else q.count()
limit_count = int(math.ceil(float(total_count) / float(map_threads)))
logger.info("{} processes will handle {} rows each for a total of {}".format(
map_threads,
limit_count,
total_count
))
queue = multiprocessing.JoinableQueue()
# close all open db global connections just in case, because we can't be sure
# what the target_map methods are going to do, we want them to re-open connections
# that they need
interfaces = get_interfaces()
for name, inter in interfaces.items():
inter.close()
# just in case we also close the query connection since it can in theory
# be non-global
q.interface.close()
ts = []
for page in range(map_threads):
q = self.copy()
q.limit(limit_count).offset(offset + (limit_count * page))
t = ReduceThread(
target=target_map,
query=q,
queue=queue,
)
t.start()
ts.append(t)
while ts or not queue.empty():
try:
val = queue.get(True, 1.0)
target_reduce(val)
except queues.Empty:
pass
else:
queue.task_done()
            # faster than using any((t.is_alive() for t in ts))
ts = [t for t in ts if t.is_alive()]
|
[
"def",
"reduce",
"(",
"self",
",",
"target_map",
",",
"target_reduce",
",",
"threads",
"=",
"0",
")",
":",
"if",
"not",
"threads",
":",
"threads",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"# we subtract one for the main process",
"map_threads",
"=",
"threads",
"-",
"1",
"if",
"threads",
">",
"1",
"else",
"1",
"q",
"=",
"self",
".",
"copy",
"(",
")",
"limit",
"=",
"q",
".",
"bounds",
".",
"limit",
"offset",
"=",
"q",
".",
"bounds",
".",
"offset",
"total_count",
"=",
"limit",
"if",
"limit",
"else",
"q",
".",
"count",
"(",
")",
"limit_count",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"total_count",
")",
"/",
"float",
"(",
"map_threads",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"{} processes will handle {} rows each for a total of {}\"",
".",
"format",
"(",
"map_threads",
",",
"limit_count",
",",
"total_count",
")",
")",
"queue",
"=",
"multiprocessing",
".",
"JoinableQueue",
"(",
")",
"# close all open db global connections just in case, because we can't be sure",
"# what the target_map methods are going to do, we want them to re-open connections",
"# that they need",
"interfaces",
"=",
"get_interfaces",
"(",
")",
"for",
"name",
",",
"inter",
"in",
"interfaces",
".",
"items",
"(",
")",
":",
"inter",
".",
"close",
"(",
")",
"# just in case we also close the query connection since it can in theory",
"# be non-global",
"q",
".",
"interface",
".",
"close",
"(",
")",
"ts",
"=",
"[",
"]",
"for",
"page",
"in",
"range",
"(",
"map_threads",
")",
":",
"q",
"=",
"self",
".",
"copy",
"(",
")",
"q",
".",
"limit",
"(",
"limit_count",
")",
".",
"offset",
"(",
"offset",
"+",
"(",
"limit_count",
"*",
"page",
")",
")",
"t",
"=",
"ReduceThread",
"(",
"target",
"=",
"target_map",
",",
"query",
"=",
"q",
",",
"queue",
"=",
"queue",
",",
")",
"t",
".",
"start",
"(",
")",
"ts",
".",
"append",
"(",
"t",
")",
"while",
"ts",
"or",
"not",
"queue",
".",
"empty",
"(",
")",
":",
"try",
":",
"val",
"=",
"queue",
".",
"get",
"(",
"True",
",",
"1.0",
")",
"target_reduce",
"(",
"val",
")",
"except",
"queues",
".",
"Empty",
":",
"pass",
"else",
":",
"queue",
".",
"task_done",
"(",
")",
"# faster than using any((t.is_alive() for t in mts))",
"ts",
"=",
"[",
"t",
"for",
"t",
"in",
"ts",
"if",
"t",
".",
"is_alive",
"(",
")",
"]"
] |
map/reduce this query among a bunch of processes
:param target_map: callable, this function will be called once for each
row this query pulls out of the db, if you want something about the row
to be seen by the target_reduce function return that value from this function
and it will be queued for the target_reduce function to process it
:param target_reduce: callable, this function will be called for any non
None value that the target_map function returns
:param threads: integer, if not passed in this will be pegged to how many
cpus python detects, which is almost always what you want
|
[
"map",
"/",
"reduce",
"this",
"query",
"among",
"a",
"bunch",
"of",
"processes"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1178-L1245
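A sketch of driving reduce(); the exact shape of the row handed to target_map depends on ReduceThread, which is not shown here, so the dict-style access is an assumption:

# hypothetical map/reduce sketch: tally rows with an even "bar" value
totals = {"even": 0}

def target_map(row):
    # return a value to queue it for the reducer, or None to skip the row
    return 1 if row["bar"] % 2 == 0 else None

def target_reduce(val):
    totals["even"] += val

q.reduce(target_map, target_reduce, threads=4)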
|
Jaymon/prom
|
prom/query.py
|
BaseCacheQuery.cache_key
|
def cache_key(self, method_name):
"""decides if this query is cacheable, returns a key if it is, otherwise empty"""
key = ""
method = getattr(self, "cache_key_{}".format(method_name), None)
if method:
key = method()
return key
|
python
|
def cache_key(self, method_name):
"""decides if this query is cacheable, returns a key if it is, otherwise empty"""
key = ""
method = getattr(self, "cache_key_{}".format(method_name), None)
if method:
key = method()
return key
|
[
"def",
"cache_key",
"(",
"self",
",",
"method_name",
")",
":",
"key",
"=",
"\"\"",
"method",
"=",
"getattr",
"(",
"self",
",",
"\"cache_key_{}\"",
".",
"format",
"(",
"method_name",
")",
",",
"None",
")",
"if",
"method",
":",
"key",
"=",
"method",
"(",
")",
"return",
"key"
] |
decides if this query is cacheable, returns a key if it is, otherwise empty
|
[
"decides",
"if",
"this",
"query",
"is",
"cacheable",
"returns",
"a",
"key",
"if",
"it",
"is",
"otherwise",
"empty"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1356-L1363
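Given the getattr dispatch above, a subclass opts a query method into caching by defining a matching cache_key_<method_name> hook; this sketch assumes fields_where has a usable str() form:

# hypothetical subclass: only get_one() results are cacheable
class MyCacheQuery(BaseCacheQuery):
    def cache_key_get_one(self):
        # any stable string derived from the query state works as a key
        return "get_one-{}".format(self.fields_where)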
|
Jaymon/prom
|
prom/query.py
|
CacheNamespace.ttl
|
def ttl(self):
"""how long you should cache results for cacheable queries"""
ret = 3600
cn = self.get_process()
if "ttl" in cn:
ret = cn["ttl"]
return ret
|
python
|
def ttl(self):
"""how long you should cache results for cacheable queries"""
ret = 3600
cn = self.get_process()
if "ttl" in cn:
ret = cn["ttl"]
return ret
|
[
"def",
"ttl",
"(",
"self",
")",
":",
"ret",
"=",
"3600",
"cn",
"=",
"self",
".",
"get_process",
"(",
")",
"if",
"\"ttl\"",
"in",
"cn",
":",
"ret",
"=",
"cn",
"[",
"\"ttl\"",
"]",
"return",
"ret"
] |
how long you should cache results for cacheable queries
|
[
"how",
"long",
"you",
"should",
"cache",
"results",
"for",
"cacheable",
"queries"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1440-L1446
|
thombashi/tabledata
|
tabledata/normalizer.py
|
AbstractTableDataNormalizer.normalize
|
def normalize(self):
"""
:return: Sanitized table data.
:rtype: tabledata.TableData
"""
logger.debug("normalize: {}".format(type(self).__name__))
normalize_headers = self._normalize_headers()
return TableData(
self.__normalize_table_name(),
normalize_headers,
self._normalize_rows(normalize_headers),
dp_extractor=self._tabledata.dp_extractor,
type_hints=self._type_hints,
)
|
python
|
def normalize(self):
"""
:return: Sanitized table data.
:rtype: tabledata.TableData
"""
logger.debug("normalize: {}".format(type(self).__name__))
normalize_headers = self._normalize_headers()
return TableData(
self.__normalize_table_name(),
normalize_headers,
self._normalize_rows(normalize_headers),
dp_extractor=self._tabledata.dp_extractor,
type_hints=self._type_hints,
)
|
[
"def",
"normalize",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"normalize: {}\"",
".",
"format",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")",
"normalize_headers",
"=",
"self",
".",
"_normalize_headers",
"(",
")",
"return",
"TableData",
"(",
"self",
".",
"__normalize_table_name",
"(",
")",
",",
"normalize_headers",
",",
"self",
".",
"_normalize_rows",
"(",
"normalize_headers",
")",
",",
"dp_extractor",
"=",
"self",
".",
"_tabledata",
".",
"dp_extractor",
",",
"type_hints",
"=",
"self",
".",
"_type_hints",
",",
")"
] |
:return: Sanitized table data.
:rtype: tabledata.TableData
|
[
":",
"return",
":",
"Sanitized",
"table",
"data",
".",
":",
"rtype",
":",
"tabledata",
".",
"TableData"
] |
train
|
https://github.com/thombashi/tabledata/blob/03d623be30fc62381f1b7fb2aa0e17a0e26ad473/tabledata/normalizer.py#L51-L67
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
_apply_units_to_numpy_data_readers
|
def _apply_units_to_numpy_data_readers(parameters, data):
"""
Apply units to data originally loaded by :class:`NumPyLoadTxtReader` or
:class:`NumPyGenFromTxtReader`.
:param parameters: Dictionary of data source parameters read from JSON
file.
:type parameters: dict
:param data: Dictionary of data read
"""
# apply header units
header_param = parameters.get('header') # default is None
# check for headers
if header_param:
fields = header_param['fields'] # header fields
# dictionary of header field parameters
header_fields = {field[0]: field[1:] for field in fields}
# loop over fieldnames
for k, val in header_fields.iteritems():
# check for units in header field parameters
if len(val) > 1:
data[k] *= UREG(str(val[1])) # apply units
# apply other data units
data_units = parameters['data'].get('units') # default is None
if data_units:
for k, val in data_units.iteritems():
data[k] *= UREG(str(val)) # apply units
return data
|
python
|
def _apply_units_to_numpy_data_readers(parameters, data):
"""
Apply units to data originally loaded by :class:`NumPyLoadTxtReader` or
:class:`NumPyGenFromTxtReader`.
:param parameters: Dictionary of data source parameters read from JSON
file.
:type parameters: dict
:param data: Dictionary of data read
"""
# apply header units
header_param = parameters.get('header') # default is None
# check for headers
if header_param:
fields = header_param['fields'] # header fields
# dictionary of header field parameters
header_fields = {field[0]: field[1:] for field in fields}
# loop over fieldnames
for k, val in header_fields.iteritems():
# check for units in header field parameters
if len(val) > 1:
data[k] *= UREG(str(val[1])) # apply units
# apply other data units
data_units = parameters['data'].get('units') # default is None
if data_units:
for k, val in data_units.iteritems():
data[k] *= UREG(str(val)) # apply units
return data
|
[
"def",
"_apply_units_to_numpy_data_readers",
"(",
"parameters",
",",
"data",
")",
":",
"# apply header units",
"header_param",
"=",
"parameters",
".",
"get",
"(",
"'header'",
")",
"# default is None",
"# check for headers",
"if",
"header_param",
":",
"fields",
"=",
"header_param",
"[",
"'fields'",
"]",
"# header fields",
"# dictionary of header field parameters",
"header_fields",
"=",
"{",
"field",
"[",
"0",
"]",
":",
"field",
"[",
"1",
":",
"]",
"for",
"field",
"in",
"fields",
"}",
"# loop over fieldnames",
"for",
"k",
",",
"val",
"in",
"header_fields",
".",
"iteritems",
"(",
")",
":",
"# check for units in header field parameters",
"if",
"len",
"(",
"val",
")",
">",
"1",
":",
"data",
"[",
"k",
"]",
"*=",
"UREG",
"(",
"str",
"(",
"val",
"[",
"1",
"]",
")",
")",
"# apply units",
"# apply other data units",
"data_units",
"=",
"parameters",
"[",
"'data'",
"]",
".",
"get",
"(",
"'units'",
")",
"# default is None",
"if",
"data_units",
":",
"for",
"k",
",",
"val",
"in",
"data_units",
".",
"iteritems",
"(",
")",
":",
"data",
"[",
"k",
"]",
"*=",
"UREG",
"(",
"str",
"(",
"val",
")",
")",
"# apply units",
"return",
"data"
] |
Apply units to data originally loaded by :class:`NumPyLoadTxtReader` or
:class:`NumPyGenFromTxtReader`.
:param parameters: Dictionary of data source parameters read from JSON
file.
:type parameters: dict
:param data: Dictionary of data read
|
[
"Apply",
"units",
"to",
"data",
"originally",
"loaded",
"by",
":",
"class",
":",
"NumPyLoadTxtReader",
"or",
":",
"class",
":",
"NumPyGenFromTxtReader",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L506-L533
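A sketch of the shapes this function consumes, inferred from the lookups above: header fields are (name, type[, units]) lists and the data units are a plain name-to-units mapping for the module's pint UREG registry (parameter names are hypothetical):

# hypothetical parameters and data for _apply_units_to_numpy_data_readers
parameters = {
    'header': {'fields': [['latitude', 'float', 'degrees'], ['station', 'str']]},
    'data': {'units': {'irradiance': 'W/m**2'}},
}
# after the call, data['latitude'] and data['irradiance'] carry pint units;
# data['station'] has no units entry, so it is left untouched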
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
_read_header
|
def _read_header(f, header_param):
"""
Read and parse data from 1st line of a file.
:param f: :func:`file` or :class:`~StringIO.StringIO` object from which to
read 1st line.
:type f: file
:param header_param: Parameters used to parse the data from the header.
Contains "delimiter" and "fields".
:type header_param: dict
:returns: Dictionary of data read from header.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
The **header_param** argument contains keys to read the 1st line of **f**.
If "delimiter" is ``None`` or missing, the default delimiter is a comma,
otherwise "delimiter" can be any single character, integer or sequence of
``int``.
* single character -- a delimiter
* single integer -- uniform fixed width
* sequence of ``int`` -- fixed widths, the number of fields should \
correspond to the length of the sequence.
The "fields" key is a list of (parameter-name, parameter-type[, parameter-
units]) lists.
"""
# default delimiter is a comma, can't be None
header_delim = str(header_param.get('delimiter', ','))
# don't allow unnamed fields
if 'fields' not in header_param:
raise UnnamedDataError(f.name)
header_fields = {field[0]: field[1:] for field in header_param['fields']}
# header_names can't be generator b/c DictReader needs list, and can't be
    # dictionary b/c must be same order as 'fields' to match data read by csv
header_names = [field[0] for field in header_param['fields']]
# read header
header_str = StringIO(f.readline()) # read the 1st line
# use csv because it will preserve quoted fields with commas
# make a csv.DictReader from header string, use header names for
# fieldnames and set delimiter to header delimiter
header_reader = csv.DictReader(header_str, header_names,
delimiter=header_delim,
skipinitialspace=True)
data = header_reader.next() # parse the header dictionary
# iterate over items in data
for k, v in data.iteritems():
header_type = header_fields[k][0] # spec'd type
# whitelist header types
if isinstance(header_type, basestring):
if header_type.lower().startswith('int'):
header_type = int # coerce to integer
elif header_type.lower().startswith('long'):
header_type = long # coerce to long integer
elif header_type.lower().startswith('float'):
header_type = float # to floating decimal point
elif header_type.lower().startswith('str'):
header_type = str # coerce to string
elif header_type.lower().startswith('bool'):
header_type = bool # coerce to boolean
else:
raise TypeError('"%s" is not a supported type.' % header_type)
# WARNING! Use of `eval` considered harmful. `header_type` is read
# from JSON file, not secure input, could be used to exploit system
data[k] = header_type(v) # cast v to type
# check for units in 3rd element
if len(header_fields[k]) > 1:
units = UREG(str(header_fields[k][1])) # spec'd units
data[k] = data[k] * units # apply units
return data
|
python
|
def _read_header(f, header_param):
"""
Read and parse data from 1st line of a file.
:param f: :func:`file` or :class:`~StringIO.StringIO` object from which to
read 1st line.
:type f: file
:param header_param: Parameters used to parse the data from the header.
Contains "delimiter" and "fields".
:type header_param: dict
:returns: Dictionary of data read from header.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
The **header_param** argument contains keys to read the 1st line of **f**.
If "delimiter" is ``None`` or missing, the default delimiter is a comma,
otherwise "delimiter" can be any single character, integer or sequence of
``int``.
* single character -- a delimiter
* single integer -- uniform fixed width
* sequence of ``int`` -- fixed widths, the number of fields should \
correspond to the length of the sequence.
The "fields" key is a list of (parameter-name, parameter-type[, parameter-
units]) lists.
"""
# default delimiter is a comma, can't be None
header_delim = str(header_param.get('delimiter', ','))
# don't allow unnamed fields
if 'fields' not in header_param:
raise UnnamedDataError(f.name)
header_fields = {field[0]: field[1:] for field in header_param['fields']}
# header_names can't be generator b/c DictReader needs list, and can't be
    # dictionary b/c must be same order as 'fields' to match data read by csv
header_names = [field[0] for field in header_param['fields']]
# read header
header_str = StringIO(f.readline()) # read the 1st line
# use csv because it will preserve quoted fields with commas
# make a csv.DictReader from header string, use header names for
# fieldnames and set delimiter to header delimiter
header_reader = csv.DictReader(header_str, header_names,
delimiter=header_delim,
skipinitialspace=True)
data = header_reader.next() # parse the header dictionary
# iterate over items in data
for k, v in data.iteritems():
header_type = header_fields[k][0] # spec'd type
# whitelist header types
if isinstance(header_type, basestring):
if header_type.lower().startswith('int'):
header_type = int # coerce to integer
elif header_type.lower().startswith('long'):
header_type = long # coerce to long integer
elif header_type.lower().startswith('float'):
header_type = float # to floating decimal point
elif header_type.lower().startswith('str'):
header_type = str # coerce to string
elif header_type.lower().startswith('bool'):
header_type = bool # coerce to boolean
else:
raise TypeError('"%s" is not a supported type.' % header_type)
# WARNING! Use of `eval` considered harmful. `header_type` is read
# from JSON file, not secure input, could be used to exploit system
data[k] = header_type(v) # cast v to type
# check for units in 3rd element
if len(header_fields[k]) > 1:
units = UREG(str(header_fields[k][1])) # spec'd units
data[k] = data[k] * units # apply units
return data
|
[
"def",
"_read_header",
"(",
"f",
",",
"header_param",
")",
":",
"# default delimiter is a comma, can't be None",
"header_delim",
"=",
"str",
"(",
"header_param",
".",
"get",
"(",
"'delimiter'",
",",
"','",
")",
")",
"# don't allow unnamed fields",
"if",
"'fields'",
"not",
"in",
"header_param",
":",
"raise",
"UnnamedDataError",
"(",
"f",
".",
"name",
")",
"header_fields",
"=",
"{",
"field",
"[",
"0",
"]",
":",
"field",
"[",
"1",
":",
"]",
"for",
"field",
"in",
"header_param",
"[",
"'fields'",
"]",
"}",
"# header_names can't be generator b/c DictReader needs list, and can't be",
"# dictionary b/c must be same order as 'fields' to match data readby csv",
"header_names",
"=",
"[",
"field",
"[",
"0",
"]",
"for",
"field",
"in",
"header_param",
"[",
"'fields'",
"]",
"]",
"# read header",
"header_str",
"=",
"StringIO",
"(",
"f",
".",
"readline",
"(",
")",
")",
"# read the 1st line",
"# use csv because it will preserve quoted fields with commas",
"# make a csv.DictReader from header string, use header names for",
"# fieldnames and set delimiter to header delimiter",
"header_reader",
"=",
"csv",
".",
"DictReader",
"(",
"header_str",
",",
"header_names",
",",
"delimiter",
"=",
"header_delim",
",",
"skipinitialspace",
"=",
"True",
")",
"data",
"=",
"header_reader",
".",
"next",
"(",
")",
"# parse the header dictionary",
"# iterate over items in data",
"for",
"k",
",",
"v",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"header_type",
"=",
"header_fields",
"[",
"k",
"]",
"[",
"0",
"]",
"# spec'd type",
"# whitelist header types",
"if",
"isinstance",
"(",
"header_type",
",",
"basestring",
")",
":",
"if",
"header_type",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'int'",
")",
":",
"header_type",
"=",
"int",
"# coerce to integer",
"elif",
"header_type",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'long'",
")",
":",
"header_type",
"=",
"long",
"# coerce to long integer",
"elif",
"header_type",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'float'",
")",
":",
"header_type",
"=",
"float",
"# to floating decimal point",
"elif",
"header_type",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'str'",
")",
":",
"header_type",
"=",
"str",
"# coerce to string",
"elif",
"header_type",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'bool'",
")",
":",
"header_type",
"=",
"bool",
"# coerce to boolean",
"else",
":",
"raise",
"TypeError",
"(",
"'\"%s\" is not a supported type.'",
"%",
"header_type",
")",
"# WARNING! Use of `eval` considered harmful. `header_type` is read",
"# from JSON file, not secure input, could be used to exploit system",
"data",
"[",
"k",
"]",
"=",
"header_type",
"(",
"v",
")",
"# cast v to type",
"# check for units in 3rd element",
"if",
"len",
"(",
"header_fields",
"[",
"k",
"]",
")",
">",
"1",
":",
"units",
"=",
"UREG",
"(",
"str",
"(",
"header_fields",
"[",
"k",
"]",
"[",
"1",
"]",
")",
")",
"# spec'd units",
"data",
"[",
"k",
"]",
"=",
"data",
"[",
"k",
"]",
"*",
"units",
"# apply units",
"return",
"data"
] |
Read and parse data from 1st line of a file.
:param f: :func:`file` or :class:`~StringIO.StringIO` object from which to
read 1st line.
:type f: file
:param header_param: Parameters used to parse the data from the header.
Contains "delimiter" and "fields".
:type header_param: dict
:returns: Dictionary of data read from header.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
The **header_param** argument contains keys to read the 1st line of **f**.
If "delimiter" is ``None`` or missing, the default delimiter is a comma,
otherwise "delimiter" can be any single character, integer or sequence of
``int``.
* single character -- a delimiter
* single integer -- uniform fixed width
* sequence of ``int`` -- fixed widths, the number of fields should \
correspond to the length of the sequence.
The "fields" key is a list of (parameter-name, parameter-type[, parameter-
units]) lists.
|
[
"Read",
"and",
"parse",
"data",
"from",
"1st",
"line",
"of",
"a",
"file",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L536-L605
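A worked sketch of a header spec and a matching first line, following the (parameter-name, parameter-type[, parameter-units]) convention in the docstring above (the names and values are hypothetical):

# hypothetical header spec for _read_header
header_param = {
    'delimiter': ',',
    'fields': [['station', 'str'], ['latitude', 'float', 'degrees']],
}
# if the first line of f is: KAUS,30.3
# the returned dictionary is roughly:
# {'station': 'KAUS', 'latitude': 30.3 * UREG('degrees')}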
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
_apply_units
|
def _apply_units(data_data, data_units, fname):
"""
Apply units to data.
:param data_data: NumPy structured array with data from fname.
:type data_data: :class:`numpy.ndarray`
:param data_units: Units of fields in data_data.
:type data_units: dict
:param fname: Name of file from which data_data was read.
:type fname: str
:returns: Dictionary of data with units applied.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
data_names = data_data.dtype.names
# raise error if NumPy data doesn't have names
if not data_names:
raise UnnamedDataError(fname)
data = dict.fromkeys(data_names) # dictionary of data read by NumPy
# iterate over data read by NumPy
for data_name in data_names:
if data_name in data_units:
# if units specified in parameters, then convert to string
units = str(data_units[data_name])
data[data_name] = data_data[data_name] * UREG(units)
elif np.issubdtype(data_data[data_name].dtype, str):
# if no units specified and is string
data[data_name] = data_data[data_name].tolist()
else:
data[data_name] = data_data[data_name]
return data
|
python
|
def _apply_units(data_data, data_units, fname):
"""
Apply units to data.
:param data_data: NumPy structured array with data from fname.
:type data_data: :class:`numpy.ndarray`
:param data_units: Units of fields in data_data.
:type data_units: dict
:param fname: Name of file from which data_data was read.
:type fname: str
:returns: Dictionary of data with units applied.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
data_names = data_data.dtype.names
# raise error if NumPy data doesn't have names
if not data_names:
raise UnnamedDataError(fname)
data = dict.fromkeys(data_names) # dictionary of data read by NumPy
# iterate over data read by NumPy
for data_name in data_names:
if data_name in data_units:
# if units specified in parameters, then convert to string
units = str(data_units[data_name])
data[data_name] = data_data[data_name] * UREG(units)
elif np.issubdtype(data_data[data_name].dtype, str):
# if no units specified and is string
data[data_name] = data_data[data_name].tolist()
else:
data[data_name] = data_data[data_name]
return data
|
[
"def",
"_apply_units",
"(",
"data_data",
",",
"data_units",
",",
"fname",
")",
":",
"data_names",
"=",
"data_data",
".",
"dtype",
".",
"names",
"# raise error if NumPy data doesn't have names",
"if",
"not",
"data_names",
":",
"raise",
"UnnamedDataError",
"(",
"fname",
")",
"data",
"=",
"dict",
".",
"fromkeys",
"(",
"data_names",
")",
"# dictionary of data read by NumPy",
"# iterate over data read by NumPy",
"for",
"data_name",
"in",
"data_names",
":",
"if",
"data_name",
"in",
"data_units",
":",
"# if units specified in parameters, then convert to string",
"units",
"=",
"str",
"(",
"data_units",
"[",
"data_name",
"]",
")",
"data",
"[",
"data_name",
"]",
"=",
"data_data",
"[",
"data_name",
"]",
"*",
"UREG",
"(",
"units",
")",
"elif",
"np",
".",
"issubdtype",
"(",
"data_data",
"[",
"data_name",
"]",
".",
"dtype",
",",
"str",
")",
":",
"# if no units specified and is string",
"data",
"[",
"data_name",
"]",
"=",
"data_data",
"[",
"data_name",
"]",
".",
"tolist",
"(",
")",
"else",
":",
"data",
"[",
"data_name",
"]",
"=",
"data_data",
"[",
"data_name",
"]",
"return",
"data"
] |
Apply units to data.
:param data_data: NumPy structured array with data from fname.
:type data_data: :class:`numpy.ndarray`
:param data_units: Units of fields in data_data.
:type data_units: dict
:param fname: Name of file from which data_data was read.
:type fname: str
:returns: Dictionary of data with units applied.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
|
[
"Apply",
"units",
"to",
"data",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L608-L638
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
_utf8_list_to_ascii_tuple
|
def _utf8_list_to_ascii_tuple(utf8_list):
"""
Convert unicode strings in a list of lists to ascii in a list of tuples.
:param utf8_list: A nested list of unicode strings.
:type utf8_list: list
"""
for n, utf8 in enumerate(utf8_list):
utf8_list[n][0] = str(utf8[0])
utf8_list[n][1] = str(utf8[1])
utf8_list[n] = tuple(utf8)
|
python
|
def _utf8_list_to_ascii_tuple(utf8_list):
"""
Convert unicode strings in a list of lists to ascii in a list of tuples.
:param utf8_list: A nested list of unicode strings.
:type utf8_list: list
"""
for n, utf8 in enumerate(utf8_list):
utf8_list[n][0] = str(utf8[0])
utf8_list[n][1] = str(utf8[1])
utf8_list[n] = tuple(utf8)
|
[
"def",
"_utf8_list_to_ascii_tuple",
"(",
"utf8_list",
")",
":",
"for",
"n",
",",
"utf8",
"in",
"enumerate",
"(",
"utf8_list",
")",
":",
"utf8_list",
"[",
"n",
"]",
"[",
"0",
"]",
"=",
"str",
"(",
"utf8",
"[",
"0",
"]",
")",
"utf8_list",
"[",
"n",
"]",
"[",
"1",
"]",
"=",
"str",
"(",
"utf8",
"[",
"1",
"]",
")",
"utf8_list",
"[",
"n",
"]",
"=",
"tuple",
"(",
"utf8",
")"
] |
Convert unicode strings in a list of lists to ascii in a list of tuples.
:param utf8_list: A nested list of unicode strings.
:type utf8_list: list
|
[
"Convert",
"unicode",
"strings",
"in",
"a",
"list",
"of",
"lists",
"to",
"ascii",
"in",
"a",
"list",
"of",
"tuples",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L641-L651
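A before-and-after sketch; note the function mutates its argument in place, returns None, and only converts the first two elements of each inner list before tupling it:

# hypothetical in-place conversion
dtype = [[u'time', u'float'], [u'station', u'S10']]
_utf8_list_to_ascii_tuple(dtype)
# dtype is now [('time', 'float'), ('station', 'S10')]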
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
JSONReader.load_data
|
def load_data(self, filename, *args, **kwargs):
"""
Load JSON data.
:param filename: name of JSON file with data
:type filename: str
:return: data
:rtype: dict
"""
# append .json extension if needed
if not filename.endswith('.json'):
filename += '.json' # append "json" to filename
# open file and load JSON data
with open(filename, 'r') as fid:
json_data = json.load(fid)
# if JSONReader is the original reader then apply units and return
if (not self.orig_data_reader or
isinstance(self, self.orig_data_reader)):
return self.apply_units_to_cache(json_data['data'])
        # last modification time of the original file, recorded when the JSON file was saved
utc_mod_time = json_data.get('utc_mod_time')
# instance of original data reader with original parameters
orig_data_reader_obj = self.orig_data_reader(self.parameters, self.meta)
# check if file has been modified since saved as JSON file
if utc_mod_time:
# convert to ordered tuple
utc_mod_time = time.struct_time(utc_mod_time)
orig_filename = filename[:-5] # original filename
# use original file if it's been modified since JSON file saved
if utc_mod_time < time.gmtime(os.path.getmtime(orig_filename)):
os.remove(filename) # delete JSON file
return orig_data_reader_obj.load_data(orig_filename)
# use JSON file if original file hasn't been modified
return orig_data_reader_obj.apply_units_to_cache(json_data['data'])
|
python
|
def load_data(self, filename, *args, **kwargs):
"""
Load JSON data.
:param filename: name of JSON file with data
:type filename: str
:return: data
:rtype: dict
"""
# append .json extension if needed
if not filename.endswith('.json'):
filename += '.json' # append "json" to filename
# open file and load JSON data
with open(filename, 'r') as fid:
json_data = json.load(fid)
# if JSONReader is the original reader then apply units and return
if (not self.orig_data_reader or
isinstance(self, self.orig_data_reader)):
return self.apply_units_to_cache(json_data['data'])
        # last modification time of the original file, recorded when the JSON file was saved
utc_mod_time = json_data.get('utc_mod_time')
# instance of original data reader with original parameters
orig_data_reader_obj = self.orig_data_reader(self.parameters, self.meta)
# check if file has been modified since saved as JSON file
if utc_mod_time:
# convert to ordered tuple
utc_mod_time = time.struct_time(utc_mod_time)
orig_filename = filename[:-5] # original filename
# use original file if it's been modified since JSON file saved
if utc_mod_time < time.gmtime(os.path.getmtime(orig_filename)):
os.remove(filename) # delete JSON file
return orig_data_reader_obj.load_data(orig_filename)
# use JSON file if original file hasn't been modified
return orig_data_reader_obj.apply_units_to_cache(json_data['data'])
|
[
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# append .json extension if needed",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.json'",
")",
":",
"filename",
"+=",
"'.json'",
"# append \"json\" to filename",
"# open file and load JSON data",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fid",
":",
"json_data",
"=",
"json",
".",
"load",
"(",
"fid",
")",
"# if JSONReader is the original reader then apply units and return",
"if",
"(",
"not",
"self",
".",
"orig_data_reader",
"or",
"isinstance",
"(",
"self",
",",
"self",
".",
"orig_data_reader",
")",
")",
":",
"return",
"self",
".",
"apply_units_to_cache",
"(",
"json_data",
"[",
"'data'",
"]",
")",
"# last modification since JSON file was saved",
"utc_mod_time",
"=",
"json_data",
".",
"get",
"(",
"'utc_mod_time'",
")",
"# instance of original data reader with original parameters",
"orig_data_reader_obj",
"=",
"self",
".",
"orig_data_reader",
"(",
"self",
".",
"parameters",
",",
"self",
".",
"meta",
")",
"# check if file has been modified since saved as JSON file",
"if",
"utc_mod_time",
":",
"# convert to ordered tuple",
"utc_mod_time",
"=",
"time",
".",
"struct_time",
"(",
"utc_mod_time",
")",
"orig_filename",
"=",
"filename",
"[",
":",
"-",
"5",
"]",
"# original filename",
"# use original file if it's been modified since JSON file saved",
"if",
"utc_mod_time",
"<",
"time",
".",
"gmtime",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"orig_filename",
")",
")",
":",
"os",
".",
"remove",
"(",
"filename",
")",
"# delete JSON file",
"return",
"orig_data_reader_obj",
".",
"load_data",
"(",
"orig_filename",
")",
"# use JSON file if original file hasn't been modified",
"return",
"orig_data_reader_obj",
".",
"apply_units_to_cache",
"(",
"json_data",
"[",
"'data'",
"]",
")"
] |
Load JSON data.
:param filename: name of JSON file with data
:type filename: str
:return: data
:rtype: dict
|
[
"Load",
"JSON",
"data",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L130-L163
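The cache file layout can be inferred from the two keys read above, 'utc_mod_time' (a time.struct_time stored as a 9-item sequence) and 'data'; the field names and values below are hypothetical:

# inferred layout of a cached *.json file consumed by JSONReader.load_data
# {
#     "utc_mod_time": [2019, 1, 31, 12, 0, 0, 3, 31, 0],
#     "data": {"latitude": 30.3, "irradiance": [800.0, 810.0]}
# }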
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
JSONReader.apply_units_to_cache
|
def apply_units_to_cache(self, data):
"""
Apply units to data read using :class:`JSONReader`.
:param data: cached data
:return: data with units applied
:rtype: :class:`~pint.unit.Quantity`
"""
for k, val in self.parameters.iteritems():
if 'units' in val:
data[k] = Q_(data[k], val.get('units'))
return data
|
python
|
def apply_units_to_cache(self, data):
"""
Apply units to data read using :class:`JSONReader`.
:param data: cached data
:return: data with units applied
:rtype: :class:`~pint.unit.Quantity`
"""
for k, val in self.parameters.iteritems():
if 'units' in val:
data[k] = Q_(data[k], val.get('units'))
return data
|
[
"def",
"apply_units_to_cache",
"(",
"self",
",",
"data",
")",
":",
"for",
"k",
",",
"val",
"in",
"self",
".",
"parameters",
".",
"iteritems",
"(",
")",
":",
"if",
"'units'",
"in",
"val",
":",
"data",
"[",
"k",
"]",
"=",
"Q_",
"(",
"data",
"[",
"k",
"]",
",",
"val",
".",
"get",
"(",
"'units'",
")",
")",
"return",
"data"
] |
Apply units to data read using :class:`JSONReader`.
:param data: cached data
:return: data with units applied
:rtype: :class:`~pint.unit.Quantity`
|
[
"Apply",
"units",
"to",
"data",
"read",
"using",
":",
"class",
":",
"JSONReader",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L165-L176
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
XLRDReader.load_data
|
def load_data(self, filename, *args, **kwargs):
"""
Load parameters from Excel spreadsheet.
:param filename: Name of Excel workbook with data.
:type filename: str
:returns: Data read from Excel workbook.
:rtype: dict
"""
# workbook read from file
workbook = open_workbook(filename, verbosity=True)
data = {} # an empty dictionary to store data
# iterate through sheets in parameters
# iterate through the parameters on each sheet
for param, pval in self.parameters.iteritems():
sheet = pval['extras']['sheet']
# get each worksheet from the workbook
worksheet = workbook.sheet_by_name(sheet)
# split the parameter's range elements
prng0, prng1 = pval['extras']['range']
# missing "units", json ``null`` and Python ``None`` all OK!
# convert to str from unicode, None to '' (dimensionless)
punits = str(pval.get('units') or '')
# replace None with empty list
if prng0 is None:
prng0 = []
if prng1 is None:
prng1 = []
# FIXME: Use duck-typing here instead of type-checking!
# if both elements in range are `int` then parameter is a cell
if isinstance(prng0, int) and isinstance(prng1, int):
datum = worksheet.cell_value(prng0, prng1)
            # if either element is a `list` then parameter is a slice
elif isinstance(prng0, list) and isinstance(prng1, int):
datum = worksheet.col_values(prng1, *prng0)
elif isinstance(prng0, int) and isinstance(prng1, list):
datum = worksheet.row_values(prng0, *prng1)
# if both elements are `list` then parameter is 2-D
else:
datum = []
for col in xrange(prng0[1], prng1[1]):
datum.append(worksheet.col_values(col, prng0[0],
prng1[0]))
# duck typing that datum is real
try:
npdatum = np.array(datum, dtype=np.float)
except ValueError as err:
# check for iterable:
# if `datum` can't be coerced to float, then it must be
# *string* & strings *are* iterables, so don't check!
# check for strings:
# data must be real or *all* strings!
# empty string, None or JSON null also OK
# all([]) == True but any([]) == False
if not datum:
data[param] = None # convert empty to None
elif all(isinstance(_, basestring) for _ in datum):
data[param] = datum # all str is OK (EG all 'TMY')
elif all(not _ for _ in datum):
data[param] = None # convert list of empty to None
else:
raise err # raise ValueError if not all real or str
else:
data[param] = npdatum * UREG(punits)
# FYI: only put one statement into try-except test otherwise
# might catch different error than expected. use ``else`` as
# option to execute only if exception *not* raised.
return data
|
python
|
def load_data(self, filename, *args, **kwargs):
"""
Load parameters from Excel spreadsheet.
:param filename: Name of Excel workbook with data.
:type filename: str
:returns: Data read from Excel workbook.
:rtype: dict
"""
# workbook read from file
workbook = open_workbook(filename, verbosity=True)
data = {} # an empty dictionary to store data
# iterate through sheets in parameters
# iterate through the parameters on each sheet
for param, pval in self.parameters.iteritems():
sheet = pval['extras']['sheet']
# get each worksheet from the workbook
worksheet = workbook.sheet_by_name(sheet)
# split the parameter's range elements
prng0, prng1 = pval['extras']['range']
# missing "units", json ``null`` and Python ``None`` all OK!
# convert to str from unicode, None to '' (dimensionless)
punits = str(pval.get('units') or '')
# replace None with empty list
if prng0 is None:
prng0 = []
if prng1 is None:
prng1 = []
# FIXME: Use duck-typing here instead of type-checking!
# if both elements in range are `int` then parameter is a cell
if isinstance(prng0, int) and isinstance(prng1, int):
datum = worksheet.cell_value(prng0, prng1)
            # if either element is a `list` then parameter is a slice
elif isinstance(prng0, list) and isinstance(prng1, int):
datum = worksheet.col_values(prng1, *prng0)
elif isinstance(prng0, int) and isinstance(prng1, list):
datum = worksheet.row_values(prng0, *prng1)
# if both elements are `list` then parameter is 2-D
else:
datum = []
for col in xrange(prng0[1], prng1[1]):
datum.append(worksheet.col_values(col, prng0[0],
prng1[0]))
# duck typing that datum is real
try:
npdatum = np.array(datum, dtype=np.float)
except ValueError as err:
# check for iterable:
# if `datum` can't be coerced to float, then it must be
# *string* & strings *are* iterables, so don't check!
# check for strings:
# data must be real or *all* strings!
# empty string, None or JSON null also OK
# all([]) == True but any([]) == False
if not datum:
data[param] = None # convert empty to None
elif all(isinstance(_, basestring) for _ in datum):
data[param] = datum # all str is OK (EG all 'TMY')
elif all(not _ for _ in datum):
data[param] = None # convert list of empty to None
else:
raise err # raise ValueError if not all real or str
else:
data[param] = npdatum * UREG(punits)
# FYI: only put one statement into try-except test otherwise
# might catch different error than expected. use ``else`` as
# option to execute only if exception *not* raised.
return data
|
[
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# workbook read from file",
"workbook",
"=",
"open_workbook",
"(",
"filename",
",",
"verbosity",
"=",
"True",
")",
"data",
"=",
"{",
"}",
"# an empty dictionary to store data",
"# iterate through sheets in parameters",
"# iterate through the parameters on each sheet",
"for",
"param",
",",
"pval",
"in",
"self",
".",
"parameters",
".",
"iteritems",
"(",
")",
":",
"sheet",
"=",
"pval",
"[",
"'extras'",
"]",
"[",
"'sheet'",
"]",
"# get each worksheet from the workbook",
"worksheet",
"=",
"workbook",
".",
"sheet_by_name",
"(",
"sheet",
")",
"# split the parameter's range elements",
"prng0",
",",
"prng1",
"=",
"pval",
"[",
"'extras'",
"]",
"[",
"'range'",
"]",
"# missing \"units\", json ``null`` and Python ``None`` all OK!",
"# convert to str from unicode, None to '' (dimensionless)",
"punits",
"=",
"str",
"(",
"pval",
".",
"get",
"(",
"'units'",
")",
"or",
"''",
")",
"# replace None with empty list",
"if",
"prng0",
"is",
"None",
":",
"prng0",
"=",
"[",
"]",
"if",
"prng1",
"is",
"None",
":",
"prng1",
"=",
"[",
"]",
"# FIXME: Use duck-typing here instead of type-checking!",
"# if both elements in range are `int` then parameter is a cell",
"if",
"isinstance",
"(",
"prng0",
",",
"int",
")",
"and",
"isinstance",
"(",
"prng1",
",",
"int",
")",
":",
"datum",
"=",
"worksheet",
".",
"cell_value",
"(",
"prng0",
",",
"prng1",
")",
"# if the either element is a `list` then parameter is a slice",
"elif",
"isinstance",
"(",
"prng0",
",",
"list",
")",
"and",
"isinstance",
"(",
"prng1",
",",
"int",
")",
":",
"datum",
"=",
"worksheet",
".",
"col_values",
"(",
"prng1",
",",
"*",
"prng0",
")",
"elif",
"isinstance",
"(",
"prng0",
",",
"int",
")",
"and",
"isinstance",
"(",
"prng1",
",",
"list",
")",
":",
"datum",
"=",
"worksheet",
".",
"row_values",
"(",
"prng0",
",",
"*",
"prng1",
")",
"# if both elements are `list` then parameter is 2-D",
"else",
":",
"datum",
"=",
"[",
"]",
"for",
"col",
"in",
"xrange",
"(",
"prng0",
"[",
"1",
"]",
",",
"prng1",
"[",
"1",
"]",
")",
":",
"datum",
".",
"append",
"(",
"worksheet",
".",
"col_values",
"(",
"col",
",",
"prng0",
"[",
"0",
"]",
",",
"prng1",
"[",
"0",
"]",
")",
")",
"# duck typing that datum is real",
"try",
":",
"npdatum",
"=",
"np",
".",
"array",
"(",
"datum",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"except",
"ValueError",
"as",
"err",
":",
"# check for iterable:",
"# if `datum` can't be coerced to float, then it must be",
"# *string* & strings *are* iterables, so don't check!",
"# check for strings:",
"# data must be real or *all* strings!",
"# empty string, None or JSON null also OK",
"# all([]) == True but any([]) == False",
"if",
"not",
"datum",
":",
"data",
"[",
"param",
"]",
"=",
"None",
"# convert empty to None",
"elif",
"all",
"(",
"isinstance",
"(",
"_",
",",
"basestring",
")",
"for",
"_",
"in",
"datum",
")",
":",
"data",
"[",
"param",
"]",
"=",
"datum",
"# all str is OK (EG all 'TMY')",
"elif",
"all",
"(",
"not",
"_",
"for",
"_",
"in",
"datum",
")",
":",
"data",
"[",
"param",
"]",
"=",
"None",
"# convert list of empty to None",
"else",
":",
"raise",
"err",
"# raise ValueError if not all real or str",
"else",
":",
"data",
"[",
"param",
"]",
"=",
"npdatum",
"*",
"UREG",
"(",
"punits",
")",
"# FYI: only put one statement into try-except test otherwise",
"# might catch different error than expected. use ``else`` as",
"# option to execute only if exception *not* raised.",
"return",
"data"
] |
Load parameters from Excel spreadsheet.
:param filename: Name of Excel workbook with data.
:type filename: str
:returns: Data read from Excel workbook.
:rtype: dict
|
[
"Load",
"parameters",
"from",
"Excel",
"spreadsheet",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L243-L310
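A sketch of the four 'range' shapes the branches above accept; the sheet and parameter names are hypothetical, and xlrd's end indices are exclusive:

# hypothetical parameter shapes for XLRDReader.load_data
parameters = {
    'lat': {'extras': {'sheet': 'Sheet1', 'range': [0, 1]}},  # single cell (row 0, col 1)
    'ghi': {'extras': {'sheet': 'Sheet1', 'range': [[1, 25], 2]},  # column 2, rows 1-24
            'units': 'W/m**2'},
    'hdr': {'extras': {'sheet': 'Sheet1', 'range': [0, [0, 5]]}},  # row 0, cols 0-4
    'blk': {'extras': {'sheet': 'Sheet1', 'range': [[1, 0], [25, 3]]}},  # cols 0-2, rows 1-24
}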
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
XLRDReader.apply_units_to_cache
|
def apply_units_to_cache(self, data):
"""
Apply units to cached data read using :class:`JSONReader`.
:param data: Cached data.
:type data: dict
:return: data with units
"""
# iterate through sheets in parameters
# iterate through the parameters on each sheet
for param, pval in self.parameters.iteritems():
# try to apply units
try:
data[param] *= UREG(str(pval.get('units') or ''))
except TypeError:
continue
return data
|
python
|
def apply_units_to_cache(self, data):
"""
Apply units to cached data read using :class:`JSONReader`.
:param data: Cached data.
:type data: dict
:return: data with units
"""
# iterate through sheets in parameters
# iterate through the parameters on each sheet
for param, pval in self.parameters.iteritems():
# try to apply units
try:
data[param] *= UREG(str(pval.get('units') or ''))
except TypeError:
continue
return data
|
[
"def",
"apply_units_to_cache",
"(",
"self",
",",
"data",
")",
":",
"# iterate through sheets in parameters",
"# iterate through the parameters on each sheet",
"for",
"param",
",",
"pval",
"in",
"self",
".",
"parameters",
".",
"iteritems",
"(",
")",
":",
"# try to apply units",
"try",
":",
"data",
"[",
"param",
"]",
"*=",
"UREG",
"(",
"str",
"(",
"pval",
".",
"get",
"(",
"'units'",
")",
"or",
"''",
")",
")",
"except",
"TypeError",
":",
"continue",
"return",
"data"
] |
Apply units to cached data read using :class:`JSONReader`.
:param data: Cached data.
:type data: dict
:return: data with units
|
[
"Apply",
"units",
"to",
"cached",
"data",
"read",
"using",
":",
"class",
":",
"JSONReader",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L312-L328
|
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
NumPyLoadTxtReader.load_data
|
def load_data(self, filename, *args, **kwargs):
"""
load data from text file.
:param filename: name of text file to read
:type filename: str
:returns: data read from file using :func:`numpy.loadtxt`
:rtype: dict
"""
# header keys
header_param = self.parameters.get('header') # default is None
# data keys
data_param = self.parameters['data'] # raises KeyError if no 'data'
dtype = data_param['dtype'] # raises KeyError if no 'dtype'
# convert to tuple and normal ASCII
        _utf8_list_to_ascii_tuple(dtype) if dtype else None # converts entries to str tuples in place
delimiter = data_param.get('delimiter') # default is None
skiprows = data_param.get('skiprows') # default is None
data_units = data_param.get('units', {}) # default is an empty dict
data = {} # a dictionary for data
# open file for reading
with open(filename, 'r') as fid:
# read header
if header_param:
data.update(_read_header(fid, header_param))
fid.seek(0) # move cursor back to beginning
# read data
data_data = np.loadtxt(fid, dtype, delimiter=delimiter,
skiprows=skiprows)
# apply units
data.update(_apply_units(data_data, data_units, fid.name))
return data
|
python
|
def load_data(self, filename, *args, **kwargs):
"""
load data from text file.
:param filename: name of text file to read
:type filename: str
:returns: data read from file using :func:`numpy.loadtxt`
:rtype: dict
"""
# header keys
header_param = self.parameters.get('header') # default is None
# data keys
data_param = self.parameters['data'] # raises KeyError if no 'data'
dtype = data_param['dtype'] # raises KeyError if no 'dtype'
# convert to tuple and normal ASCII
        _utf8_list_to_ascii_tuple(dtype) if dtype else None # converts entries to str tuples in place
delimiter = data_param.get('delimiter') # default is None
skiprows = data_param.get('skiprows') # default is None
data_units = data_param.get('units', {}) # default is an empty dict
data = {} # a dictionary for data
# open file for reading
with open(filename, 'r') as fid:
# read header
if header_param:
data.update(_read_header(fid, header_param))
fid.seek(0) # move cursor back to beginning
# read data
data_data = np.loadtxt(fid, dtype, delimiter=delimiter,
skiprows=skiprows)
# apply units
data.update(_apply_units(data_data, data_units, fid.name))
return data
|
[
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# header keys",
"header_param",
"=",
"self",
".",
"parameters",
".",
"get",
"(",
"'header'",
")",
"# default is None",
"# data keys",
"data_param",
"=",
"self",
".",
"parameters",
"[",
"'data'",
"]",
"# raises KeyError if no 'data'",
"dtype",
"=",
"data_param",
"[",
"'dtype'",
"]",
"# raises KeyError if no 'dtype'",
"# convert to tuple and normal ASCII",
"_utf8_list_to_ascii_tuple",
"(",
"dtype",
")",
"if",
"dtype",
"else",
"None",
"# -> tuple of str",
"delimiter",
"=",
"data_param",
".",
"get",
"(",
"'delimiter'",
")",
"# default is None",
"skiprows",
"=",
"data_param",
".",
"get",
"(",
"'skiprows'",
")",
"# default is None",
"data_units",
"=",
"data_param",
".",
"get",
"(",
"'units'",
",",
"{",
"}",
")",
"# default is an empty dict",
"data",
"=",
"{",
"}",
"# a dictionary for data",
"# open file for reading",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fid",
":",
"# read header",
"if",
"header_param",
":",
"data",
".",
"update",
"(",
"_read_header",
"(",
"fid",
",",
"header_param",
")",
")",
"fid",
".",
"seek",
"(",
"0",
")",
"# move cursor back to beginning",
"# read data",
"data_data",
"=",
"np",
".",
"loadtxt",
"(",
"fid",
",",
"dtype",
",",
"delimiter",
"=",
"delimiter",
",",
"skiprows",
"=",
"skiprows",
")",
"# apply units",
"data",
".",
"update",
"(",
"_apply_units",
"(",
"data_data",
",",
"data_units",
",",
"fid",
".",
"name",
")",
")",
"return",
"data"
] |
load data from text file.
:param filename: name of text file to read
:type filename: str
:returns: data read from file using :func:`numpy.loadtxt`
:rtype: dict
|
[
"load",
"data",
"from",
"text",
"file",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L371-L402
|
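NumPyLoadTxtReader.load_data drives numpy.loadtxt entirely from self.parameters. The mapping below is a hypothetical illustration of the keys the method reads; the exact dtype spelling and whatever _read_header consumes are assumptions:

parameters = {
    'header': None,  # or a spec consumed by _read_header (not shown here)
    'data': {
        'dtype': [['time', 'float'], ['power', 'float']],  # -> loadtxt dtype
        'delimiter': ',',     # forwarded to numpy.loadtxt
        'skiprows': 1,        # forwarded to numpy.loadtxt
        'units': {'time': 's', 'power': 'W'},  # consumed by _apply_units
    },
}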
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
NumPyGenFromTxtReader.load_data
|
def load_data(self, filename, *args, **kwargs):
"""
load data from text file.
:param filename: name of file to read
:type filename: str
:returns: data read from file using :func:`numpy.genfromtxt`
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
# header keys
header_param = self.parameters.get('header') # default is None
# data keys
data_param = self.parameters['data'] # raises KeyError if no 'data'
dtype = data_param.get('dtype') # default is None
# if not None convert to tuple and normal ASCII
_utf8_list_to_ascii_tuple(dtype) if dtype else None # -> tuple of str
delimiter = data_param.get('delimiter') # default is None
skip_header = data_param.get('skip_header') # default is None
usecols = data_param.get('usecols') # default is None
names = data_param.get('names') # default is None
names = [str(_) for _ in names] if names else None # -> str
excludelist = data_param.get('excludelist') # default is None
deletechars = data_param.get('deletechars') # default is None
data_units = data_param.get('units', {}) # default is an empty dict
# either dtype or names must be specified
if not (dtype or names):
raise UnnamedDataError(filename)
data = {} # a dictionary for data
# open file for reading
with open(filename, 'r') as fid:
# read header
if header_param:
data.update(_read_header(fid, header_param))
fid.seek(0) # move cursor back to beginning
# data
data_data = np.genfromtxt(fid, dtype, delimiter=delimiter,
skip_header=skip_header, usecols=usecols,
names=names, excludelist=excludelist,
deletechars=deletechars)
# apply units
data.update(_apply_units(data_data, data_units, fid.name))
return data
|
python
|
def load_data(self, filename, *args, **kwargs):
"""
load data from text file.
:param filename: name of file to read
:type filename: str
:returns: data read from file using :func:`numpy.genfromtxt`
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
# header keys
header_param = self.parameters.get('header') # default is None
# data keys
data_param = self.parameters['data'] # raises KeyError if no 'data'
dtype = data_param.get('dtype') # default is None
# if not None convert to tuple and normal ASCII
_utf8_list_to_ascii_tuple(dtype) if dtype else None # -> tuple of str
delimiter = data_param.get('delimiter') # default is None
skip_header = data_param.get('skip_header') # default is None
usecols = data_param.get('usecols') # default is None
names = data_param.get('names') # default is None
names = [str(_) for _ in names] if names else None # -> str
excludelist = data_param.get('excludelist') # default is None
deletechars = data_param.get('deletechars') # default is None
data_units = data_param.get('units', {}) # default is an empty dict
# either dtype or names must be specified
if not (dtype or names):
raise UnnamedDataError(filename)
data = {} # a dictionary for data
# open file for reading
with open(filename, 'r') as fid:
# read header
if header_param:
data.update(_read_header(fid, header_param))
fid.seek(0) # move cursor back to beginning
# data
data_data = np.genfromtxt(fid, dtype, delimiter=delimiter,
skip_header=skip_header, usecols=usecols,
names=names, excludelist=excludelist,
deletechars=deletechars)
# apply units
data.update(_apply_units(data_data, data_units, fid.name))
return data
|
[
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# header keys",
"header_param",
"=",
"self",
".",
"parameters",
".",
"get",
"(",
"'header'",
")",
"# default is None",
"# data keys",
"data_param",
"=",
"self",
".",
"parameters",
"[",
"'data'",
"]",
"# raises KeyError if no 'data'",
"dtype",
"=",
"data_param",
".",
"get",
"(",
"'dtype'",
")",
"# default is None",
"# if not None convert to tuple and normal ASCII",
"_utf8_list_to_ascii_tuple",
"(",
"dtype",
")",
"if",
"dtype",
"else",
"None",
"# -> tuple of str",
"delimiter",
"=",
"data_param",
".",
"get",
"(",
"'delimiter'",
")",
"# default is None",
"skip_header",
"=",
"data_param",
".",
"get",
"(",
"'skip_header'",
")",
"# default is None",
"usecols",
"=",
"data_param",
".",
"get",
"(",
"'usecols'",
")",
"# default is None",
"names",
"=",
"data_param",
".",
"get",
"(",
"'names'",
")",
"# default is None",
"names",
"=",
"[",
"str",
"(",
"_",
")",
"for",
"_",
"in",
"names",
"]",
"if",
"names",
"else",
"None",
"# -> str",
"excludelist",
"=",
"data_param",
".",
"get",
"(",
"'excludelist'",
")",
"# default is None",
"deletechars",
"=",
"data_param",
".",
"get",
"(",
"'deletechars'",
")",
"# default is None",
"data_units",
"=",
"data_param",
".",
"get",
"(",
"'units'",
",",
"{",
"}",
")",
"# default is an empty dict",
"# either dtype or names must be specified",
"if",
"not",
"(",
"dtype",
"or",
"names",
")",
":",
"raise",
"UnnamedDataError",
"(",
"filename",
")",
"data",
"=",
"{",
"}",
"# a dictionary for data",
"# open file for reading",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fid",
":",
"# read header",
"if",
"header_param",
":",
"data",
".",
"update",
"(",
"_read_header",
"(",
"fid",
",",
"header_param",
")",
")",
"fid",
".",
"seek",
"(",
"0",
")",
"# move cursor back to beginning",
"# data",
"data_data",
"=",
"np",
".",
"genfromtxt",
"(",
"fid",
",",
"dtype",
",",
"delimiter",
"=",
"delimiter",
",",
"skip_header",
"=",
"skip_header",
",",
"usecols",
"=",
"usecols",
",",
"names",
"=",
"names",
",",
"excludelist",
"=",
"excludelist",
",",
"deletechars",
"=",
"deletechars",
")",
"# apply units",
"data",
".",
"update",
"(",
"_apply_units",
"(",
"data_data",
",",
"data_units",
",",
"fid",
".",
"name",
")",
")",
"return",
"data"
] |
load data from text file.
:param filename: name of file to read
:type filename: str
:returns: data read from file using :func:`numpy.genfromtxt`
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
|
[
"load",
"data",
"from",
"text",
"file",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L455-L497
|
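The genfromtxt variant adds one invariant: at least one of dtype or names must be present, otherwise UnnamedDataError is raised before the file is opened. A minimal sketch of the names-only path, with a made-up CSV filename:

import numpy as np

names = ['time', 'power']                      # column names, no dtype
arr = np.genfromtxt('timeseries.csv', delimiter=',', skip_header=1,
                    names=names)               # structured array, float fields
power = arr['power']                           # columns accessed by name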
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
ParameterizedXLS.load_data
|
def load_data(self, filename, *args, **kwargs):
"""
Load parameterized data from different sheets.
"""
# load parameterized data
data = super(ParameterizedXLS, self).load_data(filename)
# add parameter to data
parameter_name = self.parameterization['parameter']['name']
parameter_values = self.parameterization['parameter']['values']
parameter_units = str(self.parameterization['parameter']['units'])
data[parameter_name] = parameter_values * UREG(parameter_units)
# number of sheets
num_sheets = len(self.parameterization['parameter']['sheets'])
# parse and concatenate parameterized data
for key in self.parameterization['data']:
            units = str(self.parameterization['data'][key].get('units') or '')
datalist = []
for n in xrange(num_sheets):
k = key + '_' + str(n)
datalist.append(data[k].reshape((1, -1)))
data.pop(k) # remove unused data keys
data[key] = np.concatenate(datalist, axis=0) * UREG(units)
return data
|
python
|
def load_data(self, filename, *args, **kwargs):
"""
Load parameterized data from different sheets.
"""
# load parameterized data
data = super(ParameterizedXLS, self).load_data(filename)
# add parameter to data
parameter_name = self.parameterization['parameter']['name']
parameter_values = self.parameterization['parameter']['values']
parameter_units = str(self.parameterization['parameter']['units'])
data[parameter_name] = parameter_values * UREG(parameter_units)
# number of sheets
num_sheets = len(self.parameterization['parameter']['sheets'])
# parse and concatenate parameterized data
for key in self.parameterization['data']:
            units = str(self.parameterization['data'][key].get('units') or '')
datalist = []
for n in xrange(num_sheets):
k = key + '_' + str(n)
datalist.append(data[k].reshape((1, -1)))
data.pop(k) # remove unused data keys
data[key] = np.concatenate(datalist, axis=0) * UREG(units)
return data
|
[
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# load parameterized data",
"data",
"=",
"super",
"(",
"ParameterizedXLS",
",",
"self",
")",
".",
"load_data",
"(",
"filename",
")",
"# add parameter to data",
"parameter_name",
"=",
"self",
".",
"parameterization",
"[",
"'parameter'",
"]",
"[",
"'name'",
"]",
"parameter_values",
"=",
"self",
".",
"parameterization",
"[",
"'parameter'",
"]",
"[",
"'values'",
"]",
"parameter_units",
"=",
"str",
"(",
"self",
".",
"parameterization",
"[",
"'parameter'",
"]",
"[",
"'units'",
"]",
")",
"data",
"[",
"parameter_name",
"]",
"=",
"parameter_values",
"*",
"UREG",
"(",
"parameter_units",
")",
"# number of sheets",
"num_sheets",
"=",
"len",
"(",
"self",
".",
"parameterization",
"[",
"'parameter'",
"]",
"[",
"'sheets'",
"]",
")",
"# parse and concatenate parameterized data",
"for",
"key",
"in",
"self",
".",
"parameterization",
"[",
"'data'",
"]",
":",
"units",
"=",
"str",
"(",
"self",
".",
"parameterization",
"[",
"'data'",
"]",
"[",
"key",
"]",
".",
"get",
"(",
"'units'",
")",
")",
"or",
"''",
"datalist",
"=",
"[",
"]",
"for",
"n",
"in",
"xrange",
"(",
"num_sheets",
")",
":",
"k",
"=",
"key",
"+",
"'_'",
"+",
"str",
"(",
"n",
")",
"datalist",
".",
"append",
"(",
"data",
"[",
"k",
"]",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
")",
"data",
".",
"pop",
"(",
"k",
")",
"# remove unused data keys",
"data",
"[",
"key",
"]",
"=",
"np",
".",
"concatenate",
"(",
"datalist",
",",
"axis",
"=",
"0",
")",
"*",
"UREG",
"(",
"units",
")",
"return",
"data"
] |
Load parameterized data from different sheets.
|
[
"Load",
"parameterized",
"data",
"from",
"different",
"sheets",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L674-L696
|
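ParameterizedXLS.load_data relies on the base reader producing one entry per sheet, suffixed _0, _1, and so on, and then stacks them into a single 2-D quantity. The concatenation step in isolation, with made-up key names:

import numpy as np

data = {'irradiance_0': np.arange(3.0), 'irradiance_1': np.arange(3.0) + 10}
num_sheets = 2
datalist = [data.pop('irradiance_%d' % n).reshape((1, -1))
            for n in range(num_sheets)]
data['irradiance'] = np.concatenate(datalist, axis=0)  # shape (2, 3)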
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
ParameterizedXLS.apply_units_to_cache
|
def apply_units_to_cache(self, data):
"""
Apply units to :class:`ParameterizedXLS` data reader.
"""
# parameter
parameter_name = self.parameters['parameter']['name']
parameter_units = str(self.parameters['parameter']['units'])
data[parameter_name] *= UREG(parameter_units)
# data
self.parameters.pop('parameter')
return super(ParameterizedXLS, self).apply_units_to_cache(data)
|
python
|
def apply_units_to_cache(self, data):
"""
Apply units to :class:`ParameterizedXLS` data reader.
"""
# parameter
parameter_name = self.parameters['parameter']['name']
parameter_units = str(self.parameters['parameter']['units'])
data[parameter_name] *= UREG(parameter_units)
# data
self.parameters.pop('parameter')
return super(ParameterizedXLS, self).apply_units_to_cache(data)
|
[
"def",
"apply_units_to_cache",
"(",
"self",
",",
"data",
")",
":",
"# parameter",
"parameter_name",
"=",
"self",
".",
"parameters",
"[",
"'parameter'",
"]",
"[",
"'name'",
"]",
"parameter_units",
"=",
"str",
"(",
"self",
".",
"parameters",
"[",
"'parameter'",
"]",
"[",
"'units'",
"]",
")",
"data",
"[",
"parameter_name",
"]",
"*=",
"UREG",
"(",
"parameter_units",
")",
"# data",
"self",
".",
"parameters",
".",
"pop",
"(",
"'parameter'",
")",
"return",
"super",
"(",
"ParameterizedXLS",
",",
"self",
")",
".",
"apply_units_to_cache",
"(",
"data",
")"
] |
Apply units to :class:`ParameterizedXLS` data reader.
|
[
"Apply",
"units",
"to",
":",
"class",
":",
"ParameterizedXLS",
"data",
"reader",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L698-L708
|
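Note that the override pops 'parameter' out of self.parameters before delegating, so the inherited loop only ever sees per-sheet entries; the pop permanently mutates the reader's parameters. The same delegation pattern, sketched in Python 3 spelling with UREG as in the earlier sketch:

def apply_units_to_cache(self, data):
    # attach units to the parameterization axis first
    pname = self.parameters['parameter']['name']
    data[pname] *= UREG(str(self.parameters['parameter']['units']))
    self.parameters.pop('parameter')            # mutates self.parameters
    return super().apply_units_to_cache(data)   # parent handles sheet params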
BreakingBytes/simkit
|
simkit/core/data_readers.py
|
MixedTextXLS.load_data
|
def load_data(self, filename, *args, **kwargs):
"""
Load text data from different sheets.
"""
# load text data
data = super(MixedTextXLS, self).load_data(filename)
# iterate through sheets in parameters
for sheet_params in self.parameters.itervalues():
# iterate through the parameters on each sheet
for param, pval in sheet_params.iteritems():
pattern = pval.get('pattern', EFG_PATTERN) # get pattern
re_meth = pval.get('method', 'search') # get re method
# whitelist re methods, getattr could be considered harmful
if re_meth in RE_METH:
re_meth = getattr(re, pval.get('method', 'search'))
else:
                msg = 'Only ' + '"%s", ' * len(RE_METH) % tuple(RE_METH)
msg += 'regex methods are allowed.'
raise AttributeError(msg)
# if not isinstance(data[param], basestring):
# re_meth = lambda p, dp: [re_meth(p, d) for d in dp]
match = re_meth(pattern, data[param]) # get matches
if match:
try:
match = match.groups()
except AttributeError:
match = [m.groups() for m in match]
npdata = np.array(match, dtype=float).squeeze()
data[param] = npdata * UREG(str(pval.get('units') or ''))
else:
raise MixedTextNoMatchError(re_meth, pattern, data[param])
return data
|
python
|
def load_data(self, filename, *args, **kwargs):
"""
Load text data from different sheets.
"""
# load text data
data = super(MixedTextXLS, self).load_data(filename)
# iterate through sheets in parameters
for sheet_params in self.parameters.itervalues():
# iterate through the parameters on each sheet
for param, pval in sheet_params.iteritems():
pattern = pval.get('pattern', EFG_PATTERN) # get pattern
re_meth = pval.get('method', 'search') # get re method
# whitelist re methods, getattr could be considered harmful
if re_meth in RE_METH:
re_meth = getattr(re, pval.get('method', 'search'))
else:
                msg = 'Only ' + '"%s", ' * len(RE_METH) % tuple(RE_METH)
msg += 'regex methods are allowed.'
raise AttributeError(msg)
# if not isinstance(data[param], basestring):
# re_meth = lambda p, dp: [re_meth(p, d) for d in dp]
match = re_meth(pattern, data[param]) # get matches
if match:
try:
match = match.groups()
except AttributeError:
match = [m.groups() for m in match]
npdata = np.array(match, dtype=float).squeeze()
data[param] = npdata * UREG(str(pval.get('units') or ''))
else:
raise MixedTextNoMatchError(re_meth, pattern, data[param])
return data
|
[
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# load text data",
"data",
"=",
"super",
"(",
"MixedTextXLS",
",",
"self",
")",
".",
"load_data",
"(",
"filename",
")",
"# iterate through sheets in parameters",
"for",
"sheet_params",
"in",
"self",
".",
"parameters",
".",
"itervalues",
"(",
")",
":",
"# iterate through the parameters on each sheet",
"for",
"param",
",",
"pval",
"in",
"sheet_params",
".",
"iteritems",
"(",
")",
":",
"pattern",
"=",
"pval",
".",
"get",
"(",
"'pattern'",
",",
"EFG_PATTERN",
")",
"# get pattern",
"re_meth",
"=",
"pval",
".",
"get",
"(",
"'method'",
",",
"'search'",
")",
"# get re method",
"# whitelist re methods, getattr could be considered harmful",
"if",
"re_meth",
"in",
"RE_METH",
":",
"re_meth",
"=",
"getattr",
"(",
"re",
",",
"pval",
".",
"get",
"(",
"'method'",
",",
"'search'",
")",
")",
"else",
":",
"msg",
"=",
"'Only'",
",",
"'\"%s\", '",
"*",
"len",
"(",
"RE_METH",
")",
"%",
"tuple",
"(",
"RE_METH",
")",
"msg",
"+=",
"'regex methods are allowed.'",
"raise",
"AttributeError",
"(",
"msg",
")",
"# if not isinstance(data[param], basestring):",
"# re_meth = lambda p, dp: [re_meth(p, d) for d in dp]",
"match",
"=",
"re_meth",
"(",
"pattern",
",",
"data",
"[",
"param",
"]",
")",
"# get matches",
"if",
"match",
":",
"try",
":",
"match",
"=",
"match",
".",
"groups",
"(",
")",
"except",
"AttributeError",
":",
"match",
"=",
"[",
"m",
".",
"groups",
"(",
")",
"for",
"m",
"in",
"match",
"]",
"npdata",
"=",
"np",
".",
"array",
"(",
"match",
",",
"dtype",
"=",
"float",
")",
".",
"squeeze",
"(",
")",
"data",
"[",
"param",
"]",
"=",
"npdata",
"*",
"UREG",
"(",
"str",
"(",
"pval",
".",
"get",
"(",
"'units'",
")",
"or",
"''",
")",
")",
"else",
":",
"raise",
"MixedTextNoMatchError",
"(",
"re_meth",
",",
"pattern",
",",
"data",
"[",
"param",
"]",
")",
"return",
"data"
] |
Load text data from different sheets.
|
[
"Load",
"text",
"data",
"from",
"different",
"sheets",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L774-L805
|
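Two details are easy to miss above: re methods are whitelisted by name before getattr ever touches the re module, and a single-group pattern fed to findall yields plain strings, so the match.groups() fallback only matters for search/match results. A standalone sketch of the extraction step, with an assumed numeric pattern standing in for EFG_PATTERN:

import re
import numpy as np

RE_METH = ('search', 'match', 'findall')     # assumed whitelist contents
PATTERN = r'[-+]?\d+\.?\d*'                  # stand-in for EFG_PATTERN

text = 'Imp = 8.42, Vmp = 30.1'
re_meth = getattr(re, 'findall')             # only whitelisted names get here
matches = re_meth(PATTERN, text)             # -> ['8.42', '30.1']
npdata = np.array(matches, dtype=float).squeeze()  # -> array([ 8.42, 30.1])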
Yelp/uwsgi_metrics
|
uwsgi_metrics/meter.py
|
Meter.mark
|
def mark(self, n=1):
"""Mark the occurrence of a given number of events."""
self.tick_if_necessary()
self.count += n
self.m1_rate.update(n)
self.m5_rate.update(n)
self.m15_rate.update(n)
|
python
|
def mark(self, n=1):
"""Mark the occurrence of a given number of events."""
self.tick_if_necessary()
self.count += n
self.m1_rate.update(n)
self.m5_rate.update(n)
self.m15_rate.update(n)
|
[
"def",
"mark",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"self",
".",
"tick_if_necessary",
"(",
")",
"self",
".",
"count",
"+=",
"n",
"self",
".",
"m1_rate",
".",
"update",
"(",
"n",
")",
"self",
".",
"m5_rate",
".",
"update",
"(",
"n",
")",
"self",
".",
"m15_rate",
".",
"update",
"(",
"n",
")"
] |
Mark the occurrence of a given number of events.
|
[
"Mark",
"the",
"occurrence",
"of",
"a",
"given",
"number",
"of",
"events",
"."
] |
train
|
https://github.com/Yelp/uwsgi_metrics/blob/534966fd461ff711aecd1e3d4caaafdc23ac33f0/uwsgi_metrics/meter.py#L32-L38
|
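Meter.mark is the hot-path entry point: it lazily advances the EWMA clock (tick_if_necessary), bumps the raw count, then feeds all three moving averages. Hypothetical usage, assuming the Meter constructor defined elsewhere in the module takes no required arguments:

requests = Meter()       # assumption: no required constructor arguments
for _ in range(5):
    requests.mark()      # one event each
requests.mark(3)         # a batch counts as three events
# requests.count == 8; the 1/5/15-minute rates decay between ticks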
BreakingBytes/simkit
|
simkit/core/models.py
|
Model._load
|
def _load(self, layer=None):
"""
Load or update all or part of :attr:`model`.
:param layer: Optionally load only specified layer.
:type layer: str
"""
# open model file for reading and convert JSON object to dictionary
# read and load JSON parameter map file as "parameters"
with open(self.param_file, 'r') as param_file:
file_params = json.load(param_file)
for layer, params in file_params.iteritems():
# update parameters from file
self.parameters[layer] = ModelParameter(**params)
# if layer argument spec'd then only update/load spec'd layer
if not layer or not self.model:
# update/load model if layer not spec'd or if no model exists yet
# TODO: separate model and parameters according to comments in #78
self.model = copy.deepcopy(self.parameters)
else:
# convert non-sequence to tuple
layers = _listify(layer)
# update/load layers
for layer in layers:
self.model[layer] = copy.deepcopy(self.parameters[layer])
|
python
|
def _load(self, layer=None):
"""
Load or update all or part of :attr:`model`.
:param layer: Optionally load only specified layer.
:type layer: str
"""
# open model file for reading and convert JSON object to dictionary
# read and load JSON parameter map file as "parameters"
with open(self.param_file, 'r') as param_file:
file_params = json.load(param_file)
for layer, params in file_params.iteritems():
# update parameters from file
self.parameters[layer] = ModelParameter(**params)
# if layer argument spec'd then only update/load spec'd layer
if not layer or not self.model:
# update/load model if layer not spec'd or if no model exists yet
# TODO: separate model and parameters according to comments in #78
self.model = copy.deepcopy(self.parameters)
else:
# convert non-sequence to tuple
layers = _listify(layer)
# update/load layers
for layer in layers:
self.model[layer] = copy.deepcopy(self.parameters[layer])
|
[
"def",
"_load",
"(",
"self",
",",
"layer",
"=",
"None",
")",
":",
"# open model file for reading and convert JSON object to dictionary",
"# read and load JSON parameter map file as \"parameters\"",
"with",
"open",
"(",
"self",
".",
"param_file",
",",
"'r'",
")",
"as",
"param_file",
":",
"file_params",
"=",
"json",
".",
"load",
"(",
"param_file",
")",
"for",
"layer",
",",
"params",
"in",
"file_params",
".",
"iteritems",
"(",
")",
":",
"# update parameters from file",
"self",
".",
"parameters",
"[",
"layer",
"]",
"=",
"ModelParameter",
"(",
"*",
"*",
"params",
")",
"# if layer argument spec'd then only update/load spec'd layer",
"if",
"not",
"layer",
"or",
"not",
"self",
".",
"model",
":",
"# update/load model if layer not spec'd or if no model exists yet",
"# TODO: separate model and parameters according to comments in #78",
"self",
".",
"model",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"parameters",
")",
"else",
":",
"# convert non-sequence to tuple",
"layers",
"=",
"_listify",
"(",
"layer",
")",
"# update/load layers",
"for",
"layer",
"in",
"layers",
":",
"self",
".",
"model",
"[",
"layer",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"parameters",
"[",
"layer",
"]",
")"
] |
Load or update all or part of :attr:`model`.
:param layer: Optionally load only specified layer.
:type layer: str
|
[
"Load",
"or",
"update",
"all",
"or",
"part",
"of",
":",
"attr",
":",
"model",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L125-L149
|
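_load reads the whole JSON parameter file on every call; the layer argument only narrows which parameters are copied into self.model. A hypothetical parameter-file shape, inferred from the keys _load and _initialize read ('sources' is required, 'module' and 'package' are optional per layer):

file_params = {
    'data': {'module': '.data', 'package': 'mymodel',
             'sources': ['PVData']},
    'outputs': {'sources': ['PerformanceOutputs']},
}
# each top-level key becomes self.parameters[layer] = ModelParameter(**params)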
BreakingBytes/simkit
|
simkit/core/models.py
|
Model._update
|
def _update(self, layer=None):
"""
Update layers in model.
"""
meta = getattr(self, ModelBase._meta_attr)
if not layer:
layers = self.layers
else:
# convert non-sequence to tuple
layers = _listify(layer)
for layer in layers:
# relative path to layer files from model file
path = os.path.abspath(os.path.join(meta.modelpath, layer))
getattr(self, layer).load(path)
|
python
|
def _update(self, layer=None):
"""
Update layers in model.
"""
meta = getattr(self, ModelBase._meta_attr)
if not layer:
layers = self.layers
else:
# convert non-sequence to tuple
layers = _listify(layer)
for layer in layers:
# relative path to layer files from model file
path = os.path.abspath(os.path.join(meta.modelpath, layer))
getattr(self, layer).load(path)
|
[
"def",
"_update",
"(",
"self",
",",
"layer",
"=",
"None",
")",
":",
"meta",
"=",
"getattr",
"(",
"self",
",",
"ModelBase",
".",
"_meta_attr",
")",
"if",
"not",
"layer",
":",
"layers",
"=",
"self",
".",
"layers",
"else",
":",
"# convert non-sequence to tuple",
"layers",
"=",
"_listify",
"(",
"layer",
")",
"for",
"layer",
"in",
"layers",
":",
"# relative path to layer files from model file",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"meta",
".",
"modelpath",
",",
"layer",
")",
")",
"getattr",
"(",
"self",
",",
"layer",
")",
".",
"load",
"(",
"path",
")"
] |
Update layers in model.
|
[
"Update",
"layers",
"in",
"model",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L151-L164
|
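_update resolves each layer to a path next to the model file and asks the layer object to load it. The path arithmetic in isolation, assuming meta.modelpath is the directory holding the model file:

import os

modelpath = '/home/user/mymodel'             # assumed meta.modelpath
for layer in ('data', 'outputs'):
    path = os.path.abspath(os.path.join(modelpath, layer))
    # -> /home/user/mymodel/data, /home/user/mymodel/outputs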
BreakingBytes/simkit
|
simkit/core/models.py
|
Model._initialize
|
def _initialize(self):
"""
Initialize model and layers.
"""
meta = getattr(self, ModelBase._meta_attr)
# read modelfile, convert JSON and load/update model
if self.param_file is not None:
self._load()
LOGGER.debug('model:\n%r', self.model)
# initialize layers
# FIXME: move import inside loop for custom layers in different modules
mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
src_model = {}
for layer, value in self.model.iteritems():
# from layers module get the layer's class definition
layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def
self.layers[layer] = layer_cls # add layer class def to model
# check if model layers are classes
src_value = {} # layer value generated from source classes
for src in value['sources']:
# check if source has keyword arguments
try:
src, kwargs = src
except (TypeError, ValueError):
                kwargs = {}  # no keyword arguments
# skip if not a source class
if isinstance(src, basestring):
continue
# generate layer value from source class
src_value[src.__name__] = {'module': src.__module__,
'package': None}
# update layer keyword arguments
src_value[src.__name__].update(kwargs)
# use layer values generated from source class
if src_value:
value = src_model[layer] = src_value
else:
srcmod, srcpkg = value.get('module'), value.get('package')
try:
value = dict(value['sources'])
except ValueError:
value = dict.fromkeys(value['sources'], {})
for src in value.viewkeys():
if srcmod is not None:
value[src]['module'] = srcmod
if srcpkg is not None:
value[src]['package'] = srcpkg
# set layer attribute with model data
setattr(self, layer, layer_cls(value))
# update model with layer values generated from source classes
if src_model:
self.model.update(src_model)
self._update()
self._state = 'initialized'
|
python
|
def _initialize(self):
"""
Initialize model and layers.
"""
meta = getattr(self, ModelBase._meta_attr)
# read modelfile, convert JSON and load/update model
if self.param_file is not None:
self._load()
LOGGER.debug('model:\n%r', self.model)
# initialize layers
# FIXME: move import inside loop for custom layers in different modules
mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
src_model = {}
for layer, value in self.model.iteritems():
# from layers module get the layer's class definition
layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def
self.layers[layer] = layer_cls # add layer class def to model
# check if model layers are classes
src_value = {} # layer value generated from source classes
for src in value['sources']:
# check if source has keyword arguments
try:
src, kwargs = src
except (TypeError, ValueError):
                kwargs = {}  # no keyword arguments
# skip if not a source class
if isinstance(src, basestring):
continue
# generate layer value from source class
src_value[src.__name__] = {'module': src.__module__,
'package': None}
# update layer keyword arguments
src_value[src.__name__].update(kwargs)
# use layer values generated from source class
if src_value:
value = src_model[layer] = src_value
else:
srcmod, srcpkg = value.get('module'), value.get('package')
try:
value = dict(value['sources'])
except ValueError:
value = dict.fromkeys(value['sources'], {})
for src in value.viewkeys():
if srcmod is not None:
value[src]['module'] = srcmod
if srcpkg is not None:
value[src]['package'] = srcpkg
# set layer attribute with model data
setattr(self, layer, layer_cls(value))
# update model with layer values generated from source classes
if src_model:
self.model.update(src_model)
self._update()
self._state = 'initialized'
|
[
"def",
"_initialize",
"(",
"self",
")",
":",
"meta",
"=",
"getattr",
"(",
"self",
",",
"ModelBase",
".",
"_meta_attr",
")",
"# read modelfile, convert JSON and load/update model",
"if",
"self",
".",
"param_file",
"is",
"not",
"None",
":",
"self",
".",
"_load",
"(",
")",
"LOGGER",
".",
"debug",
"(",
"'model:\\n%r'",
",",
"self",
".",
"model",
")",
"# initialize layers",
"# FIXME: move import inside loop for custom layers in different modules",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"meta",
".",
"layers_mod",
",",
"meta",
".",
"layers_pkg",
")",
"src_model",
"=",
"{",
"}",
"for",
"layer",
",",
"value",
"in",
"self",
".",
"model",
".",
"iteritems",
"(",
")",
":",
"# from layers module get the layer's class definition",
"layer_cls",
"=",
"getattr",
"(",
"mod",
",",
"meta",
".",
"layer_cls_names",
"[",
"layer",
"]",
")",
"# class def",
"self",
".",
"layers",
"[",
"layer",
"]",
"=",
"layer_cls",
"# add layer class def to model",
"# check if model layers are classes",
"src_value",
"=",
"{",
"}",
"# layer value generated from source classes",
"for",
"src",
"in",
"value",
"[",
"'sources'",
"]",
":",
"# check if source has keyword arguments",
"try",
":",
"src",
",",
"kwargs",
"=",
"src",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"kwargs",
"=",
"{",
"}",
"# no key work arguments",
"# skip if not a source class",
"if",
"isinstance",
"(",
"src",
",",
"basestring",
")",
":",
"continue",
"# generate layer value from source class",
"src_value",
"[",
"src",
".",
"__name__",
"]",
"=",
"{",
"'module'",
":",
"src",
".",
"__module__",
",",
"'package'",
":",
"None",
"}",
"# update layer keyword arguments",
"src_value",
"[",
"src",
".",
"__name__",
"]",
".",
"update",
"(",
"kwargs",
")",
"# use layer values generated from source class",
"if",
"src_value",
":",
"value",
"=",
"src_model",
"[",
"layer",
"]",
"=",
"src_value",
"else",
":",
"srcmod",
",",
"srcpkg",
"=",
"value",
".",
"get",
"(",
"'module'",
")",
",",
"value",
".",
"get",
"(",
"'package'",
")",
"try",
":",
"value",
"=",
"dict",
"(",
"value",
"[",
"'sources'",
"]",
")",
"except",
"ValueError",
":",
"value",
"=",
"dict",
".",
"fromkeys",
"(",
"value",
"[",
"'sources'",
"]",
",",
"{",
"}",
")",
"for",
"src",
"in",
"value",
".",
"viewkeys",
"(",
")",
":",
"if",
"srcmod",
"is",
"not",
"None",
":",
"value",
"[",
"src",
"]",
"[",
"'module'",
"]",
"=",
"srcmod",
"if",
"srcpkg",
"is",
"not",
"None",
":",
"value",
"[",
"src",
"]",
"[",
"'package'",
"]",
"=",
"srcpkg",
"# set layer attribute with model data",
"setattr",
"(",
"self",
",",
"layer",
",",
"layer_cls",
"(",
"value",
")",
")",
"# update model with layer values generated from source classes",
"if",
"src_model",
":",
"self",
".",
"model",
".",
"update",
"(",
"src_model",
")",
"self",
".",
"_update",
"(",
")",
"self",
".",
"_state",
"=",
"'initialized'"
] |
Initialize model and layers.
|
[
"Initialize",
"model",
"and",
"layers",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L166-L219
|
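The subtlest part of _initialize is the source unpacking: each entry in value['sources'] is either a bare source (a class, or a string that gets skipped) or a (source, kwargs) pair. The try/except unpacking on its own:

class PVData(object):            # hypothetical source class
    pass

def unpack(src):
    # a source entry is either `src` itself or a `(src, kwargs)` pair
    try:
        src, kwargs = src
    except (TypeError, ValueError):
        kwargs = {}
    return src, kwargs

unpack(PVData)                   # -> (PVData, {})
unpack((PVData, {'freq': 1}))    # -> (PVData, {'freq': 1})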
BreakingBytes/simkit
|
simkit/core/models.py
|
Model.load
|
def load(self, modelfile, layer=None):
"""
Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str
"""
# read modelfile, convert JSON and load/update model
self.param_file = modelfile
self._load(layer)
self._update(layer)
|
python
|
def load(self, modelfile, layer=None):
"""
Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str
"""
# read modelfile, convert JSON and load/update model
self.param_file = modelfile
self._load(layer)
self._update(layer)
|
[
"def",
"load",
"(",
"self",
",",
"modelfile",
",",
"layer",
"=",
"None",
")",
":",
"# read modelfile, convert JSON and load/update model",
"self",
".",
"param_file",
"=",
"modelfile",
"self",
".",
"_load",
"(",
"layer",
")",
"self",
".",
"_update",
"(",
"layer",
")"
] |
Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str
|
[
"Load",
"or",
"update",
"a",
"model",
"or",
"layers",
"in",
"a",
"model",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L221-L233
|
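load is the public face of the two private steps above: it records the file, then runs _load and _update with the same optional layer filter. Hypothetical usage, assuming a concrete Model subclass:

m = MyModel()                   # hypothetical subclass of Model
m.load('mymodel.json')          # load or refresh every layer
m.load('mymodel.json', 'data')  # refresh only the 'data' layer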
BreakingBytes/simkit
|
simkit/core/models.py
|
Model.edit
|
def edit(self, layer, item, delete=False):
"""
Edit model.
:param layer: Layer of model to edit
:type layer: str
:param item: Items to edit.
:type item: dict
:param delete: Flag to return
:class:`~simkit.core.layers.Layer` to delete item.
:type delete: bool
"""
# get layer attribute with model data
if hasattr(self, layer):
layer_obj = getattr(self, layer)
else:
raise AttributeError('missing layer: %s', layer)
if delete:
return layer_obj
# iterate over items and edit layer
for k, v in item.iteritems():
if k in layer_obj.layer:
layer_obj.edit(k, v) # edit layer
else:
raise AttributeError('missing layer item: %s', k)
# update model data
if k in self.model[layer]:
self.model[layer][k].update(v)
else:
raise AttributeError('missing model layer item: %s', k)
|
python
|
def edit(self, layer, item, delete=False):
"""
Edit model.
:param layer: Layer of model to edit
:type layer: str
:param item: Items to edit.
:type item: dict
:param delete: Flag to return
:class:`~simkit.core.layers.Layer` to delete item.
:type delete: bool
"""
# get layer attribute with model data
if hasattr(self, layer):
layer_obj = getattr(self, layer)
else:
raise AttributeError('missing layer: %s', layer)
if delete:
return layer_obj
# iterate over items and edit layer
for k, v in item.iteritems():
if k in layer_obj.layer:
layer_obj.edit(k, v) # edit layer
else:
raise AttributeError('missing layer item: %s', k)
# update model data
if k in self.model[layer]:
self.model[layer][k].update(v)
else:
raise AttributeError('missing model layer item: %s', k)
|
[
"def",
"edit",
"(",
"self",
",",
"layer",
",",
"item",
",",
"delete",
"=",
"False",
")",
":",
"# get layer attribute with model data",
"if",
"hasattr",
"(",
"self",
",",
"layer",
")",
":",
"layer_obj",
"=",
"getattr",
"(",
"self",
",",
"layer",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"'missing layer: %s'",
",",
"layer",
")",
"if",
"delete",
":",
"return",
"layer_obj",
"# iterate over items and edit layer",
"for",
"k",
",",
"v",
"in",
"item",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
"in",
"layer_obj",
".",
"layer",
":",
"layer_obj",
".",
"edit",
"(",
"k",
",",
"v",
")",
"# edit layer",
"else",
":",
"raise",
"AttributeError",
"(",
"'missing layer item: %s'",
",",
"k",
")",
"# update model data",
"if",
"k",
"in",
"self",
".",
"model",
"[",
"layer",
"]",
":",
"self",
".",
"model",
"[",
"layer",
"]",
"[",
"k",
"]",
".",
"update",
"(",
"v",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"'missing model layer item: %s'",
",",
"k",
")"
] |
Edit model.
:param layer: Layer of model to edit
:type layer: str
:param item: Items to edit.
:type item: dict
:param delete: Flag to return
:class:`~simkit.core.layers.Layer` to delete item.
:type delete: bool
|
[
"Edit",
"model",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L235-L264
|
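Two behaviors are worth noting: with delete=True the method returns the layer object without editing anything (delete below relies on this), and edits must target items that already exist in both the layer and the model, otherwise AttributeError is raised. Continuing the hypothetical m from the load sketch:

m.edit('data', {'PVData': {'module': '.pvdata'}})  # update an existing item
layer_obj = m.edit('data', {}, delete=True)        # fetch layer, edit nothing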
BreakingBytes/simkit
|
simkit/core/models.py
|
Model.add
|
def add(self, layer, items):
"""
Add items in model.
"""
for k in items.iterkeys():
if k in self.model[layer]:
raise Exception('item %s is already in layer %s' % (k, layer))
self.model[layer].update(items)
# this should also update Layer.layer, the layer data
# same as calling layer constructor
# so now just need to add items to the layer
for k, v in items.iteritems():
getattr(self, layer).add(k, v['module'], v.get('package'))
|
python
|
def add(self, layer, items):
"""
Add items in model.
"""
for k in items.iterkeys():
if k in self.model[layer]:
raise Exception('item %s is already in layer %s' % (k, layer))
self.model[layer].update(items)
# this should also update Layer.layer, the layer data
# same as calling layer constructor
# so now just need to add items to the layer
for k, v in items.iteritems():
getattr(self, layer).add(k, v['module'], v.get('package'))
|
[
"def",
"add",
"(",
"self",
",",
"layer",
",",
"items",
")",
":",
"for",
"k",
"in",
"items",
".",
"iterkeys",
"(",
")",
":",
"if",
"k",
"in",
"self",
".",
"model",
"[",
"layer",
"]",
":",
"raise",
"Exception",
"(",
"'item %s is already in layer %s'",
"%",
"(",
"k",
",",
"layer",
")",
")",
"self",
".",
"model",
"[",
"layer",
"]",
".",
"update",
"(",
"items",
")",
"# this should also update Layer.layer, the layer data",
"# same as calling layer constructor",
"# so now just need to add items to the layer",
"for",
"k",
",",
"v",
"in",
"items",
".",
"iteritems",
"(",
")",
":",
"getattr",
"(",
"self",
",",
"layer",
")",
".",
"add",
"(",
"k",
",",
"v",
"[",
"'module'",
"]",
",",
"v",
".",
"get",
"(",
"'package'",
")",
")"
] |
Add items in model.
|
[
"Add",
"items",
"in",
"model",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L266-L278
|
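add is strict about duplicates and expects each item to carry at least a 'module' key ('package' is optional via .get). A sketch with the same hypothetical model:

m.add('data', {'WeatherData': {'module': '.weather', 'package': None}})
# adding 'WeatherData' a second time raises:
# Exception: item WeatherData is already in layer data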
BreakingBytes/simkit
|
simkit/core/models.py
|
Model.delete
|
def delete(self, layer, items):
"""
Delete items in model.
"""
# Use edit to get the layer obj containing item
items = _listify(items) # make items a list if it's not
layer_obj = self.edit(layer, dict.fromkeys(items), delete=True)
for k in items:
if k in layer_obj.layer:
layer_obj.delete(k)
else:
raise AttributeError('item %s missing from layer %s' %
(k, layer))
|
python
|
def delete(self, layer, items):
"""
Delete items in model.
"""
# Use edit to get the layer obj containing item
items = _listify(items) # make items a list if it's not
layer_obj = self.edit(layer, dict.fromkeys(items), delete=True)
for k in items:
if k in layer_obj.layer:
layer_obj.delete(k)
else:
raise AttributeError('item %s missing from layer %s' %
(k, layer))
|
[
"def",
"delete",
"(",
"self",
",",
"layer",
",",
"items",
")",
":",
"# Use edit to get the layer obj containing item",
"items",
"=",
"_listify",
"(",
"items",
")",
"# make items a list if it's not",
"layer_obj",
"=",
"self",
".",
"edit",
"(",
"layer",
",",
"dict",
".",
"fromkeys",
"(",
"items",
")",
",",
"delete",
"=",
"True",
")",
"for",
"k",
"in",
"items",
":",
"if",
"k",
"in",
"layer_obj",
".",
"layer",
":",
"layer_obj",
".",
"delete",
"(",
"k",
")",
"else",
":",
"raise",
"AttributeError",
"(",
"'item %s missing from layer %s'",
"%",
"(",
"k",
",",
"layer",
")",
")"
] |
Delete items in model.
|
[
"Delete",
"items",
"in",
"model",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L280-L292
|
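delete leans on edit(..., delete=True) purely to fetch the layer object, then removes each item from it; note that it does not also prune self.model. A sketch:

m.delete('data', 'WeatherData')               # _listify wraps the single name
m.delete('data', ['PVData', 'WeatherData'])   # or pass several at once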
BreakingBytes/simkit
|
simkit/core/models.py
|
Model.save
|
def save(self, modelfile, layer=None):
"""
Save a model file.
:param modelfile: The name of the json file to save.
:type modelfile: str
:param layer: Optionally save only specified layer.
:type layer: str
"""
if layer:
obj = {layer: self.model[layer]}
else:
obj = self.model
with open(modelfile, 'w') as fp:
json.dump(obj, fp, indent=2, sort_keys=True)
|
python
|
def save(self, modelfile, layer=None):
"""
Save a model file.
:param modelfile: The name of the json file to save.
:type modelfile: str
:param layer: Optionally save only specified layer.
:type layer: str
"""
if layer:
obj = {layer: self.model[layer]}
else:
obj = self.model
with open(modelfile, 'w') as fp:
json.dump(obj, fp, indent=2, sort_keys=True)
|
[
"def",
"save",
"(",
"self",
",",
"modelfile",
",",
"layer",
"=",
"None",
")",
":",
"if",
"layer",
":",
"obj",
"=",
"{",
"layer",
":",
"self",
".",
"model",
"[",
"layer",
"]",
"}",
"else",
":",
"obj",
"=",
"self",
".",
"model",
"with",
"open",
"(",
"modelfile",
",",
"'w'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"obj",
",",
"fp",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")"
] |
Save a model file.
:param modelfile: The name of the json file to save.
:type modelfile: str
:param layer: Optionally save only specified layer.
:type layer: str
|
[
"Save",
"a",
"model",
"file",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L301-L315
|
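save serializes either the whole model or a single layer; a single layer stays nested under its layer key, so the file round-trips through load. A sketch:

m.save('mymodel.json')            # every layer, sorted keys, 2-space indent
m.save('data-only.json', 'data')  # just {'data': {...}}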
BreakingBytes/simkit
|
simkit/core/models.py
|
Model.command
|
def command(self, cmd, progress_hook=None, *args, **kwargs):
"""
Execute a model command.
:param cmd: Name of the command.
:param progress_hook: A function to which progress updates are passed.
"""
cmds = cmd.split(None, 1) # split commands and simulations
sim_names = cmds[1:] # simulations
if not sim_names:
sim_names = self.cmd_layer.reg.iterkeys()
for sim_name in sim_names:
sim_cmd = getattr(self.cmd_layer.reg[sim_name], cmd)
sim_cmd(self, progress_hook=progress_hook, *args, **kwargs)
|
python
|
def command(self, cmd, progress_hook=None, *args, **kwargs):
"""
Execute a model command.
:param cmd: Name of the command.
:param progress_hook: A function to which progress updates are passed.
"""
cmds = cmd.split(None, 1) # split commands and simulations
sim_names = cmds[1:] # simulations
if not sim_names:
sim_names = self.cmd_layer.reg.iterkeys()
for sim_name in sim_names:
sim_cmd = getattr(self.cmd_layer.reg[sim_name], cmd)
sim_cmd(self, progress_hook=progress_hook, *args, **kwargs)
|
[
"def",
"command",
"(",
"self",
",",
"cmd",
",",
"progress_hook",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cmds",
"=",
"cmd",
".",
"split",
"(",
"None",
",",
"1",
")",
"# split commands and simulations",
"sim_names",
"=",
"cmds",
"[",
"1",
":",
"]",
"# simulations",
"if",
"not",
"sim_names",
":",
"sim_names",
"=",
"self",
".",
"cmd_layer",
".",
"reg",
".",
"iterkeys",
"(",
")",
"for",
"sim_name",
"in",
"sim_names",
":",
"sim_cmd",
"=",
"getattr",
"(",
"self",
".",
"cmd_layer",
".",
"reg",
"[",
"sim_name",
"]",
",",
"cmd",
")",
"sim_cmd",
"(",
"self",
",",
"progress_hook",
"=",
"progress_hook",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Execute a model command.
:param cmd: Name of the command.
:param progress_hook: A function to which progress updates are passed.
|
[
"Execute",
"a",
"model",
"command",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L331-L344
|
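command parses at most one simulation name out of the command string: cmd.split(None, 1) keeps everything after the first run of whitespace as a single token, and an empty remainder means every registered simulation. The parsing step in isolation:

cmd = 'run pvsim'
cmds = cmd.split(None, 1)   # ['run', 'pvsim']
sim_names = cmds[1:]        # ['pvsim']; [] would mean all registered sims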
volafiled/python-volapi
|
volapi/user.py
|
User.login
|
def login(self, password):
"""Attempts to log in as the current user with given password"""
if self.logged_in:
raise RuntimeError("User already logged in!")
params = {"name": self.nick, "password": password}
resp = self.conn.make_api_call("login", params)
if "error" in resp:
raise RuntimeError(
f"Login failed: {resp['error'].get('message') or resp['error']}"
)
self.session = resp["session"]
self.conn.make_call("useSession", self.session)
self.conn.cookies.update({"session": self.session})
self.logged_in = True
return True
|
python
|
def login(self, password):
"""Attempts to log in as the current user with given password"""
if self.logged_in:
raise RuntimeError("User already logged in!")
params = {"name": self.nick, "password": password}
resp = self.conn.make_api_call("login", params)
if "error" in resp:
raise RuntimeError(
f"Login failed: {resp['error'].get('message') or resp['error']}"
)
self.session = resp["session"]
self.conn.make_call("useSession", self.session)
self.conn.cookies.update({"session": self.session})
self.logged_in = True
return True
|
[
"def",
"login",
"(",
"self",
",",
"password",
")",
":",
"if",
"self",
".",
"logged_in",
":",
"raise",
"RuntimeError",
"(",
"\"User already logged in!\"",
")",
"params",
"=",
"{",
"\"name\"",
":",
"self",
".",
"nick",
",",
"\"password\"",
":",
"password",
"}",
"resp",
"=",
"self",
".",
"conn",
".",
"make_api_call",
"(",
"\"login\"",
",",
"params",
")",
"if",
"\"error\"",
"in",
"resp",
":",
"raise",
"RuntimeError",
"(",
"f\"Login failed: {resp['error'].get('message') or resp['error']}\"",
")",
"self",
".",
"session",
"=",
"resp",
"[",
"\"session\"",
"]",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"useSession\"",
",",
"self",
".",
"session",
")",
"self",
".",
"conn",
".",
"cookies",
".",
"update",
"(",
"{",
"\"session\"",
":",
"self",
".",
"session",
"}",
")",
"self",
".",
"logged_in",
"=",
"True",
"return",
"True"
] |
Attempts to log in as the current user with given password
|
[
"Attempts",
"to",
"log",
"in",
"as",
"the",
"current",
"user",
"with",
"given",
"password"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L18-L34
|
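The login flow touches three places: the HTTP API (which issues the session), the websocket (useSession), and the cookie jar (so later HTTP calls are authenticated). Hypothetical usage, assuming the package's Room context manager exposes this class as room.user:

with Room('SOMEROOM', 'mynick') as room:    # room name and nick are made up
    room.user.login('correct horse battery')  # RuntimeError on failure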
volafiled/python-volapi
|
volapi/user.py
|
User.login_transplant
|
def login_transplant(self, other):
"""Attempts to carry over the login state from another room"""
if not other.logged_in:
raise ValueError("Other room is not logged in")
cookie = other.session
if not cookie:
raise ValueError("Other room has no cookie")
self.conn.cookies.update({"session": cookie})
self.session = cookie
self.logged_in = True
return True
|
python
|
def login_transplant(self, other):
"""Attempts to carry over the login state from another room"""
if not other.logged_in:
raise ValueError("Other room is not logged in")
cookie = other.session
if not cookie:
raise ValueError("Other room has no cookie")
self.conn.cookies.update({"session": cookie})
self.session = cookie
self.logged_in = True
return True
|
[
"def",
"login_transplant",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"other",
".",
"logged_in",
":",
"raise",
"ValueError",
"(",
"\"Other room is not logged in\"",
")",
"cookie",
"=",
"other",
".",
"session",
"if",
"not",
"cookie",
":",
"raise",
"ValueError",
"(",
"\"Other room has no cookie\"",
")",
"self",
".",
"conn",
".",
"cookies",
".",
"update",
"(",
"{",
"\"session\"",
":",
"cookie",
"}",
")",
"self",
".",
"session",
"=",
"cookie",
"self",
".",
"logged_in",
"=",
"True",
"return",
"True"
] |
Attempts to carry over the login state from another room
|
[
"Attempts",
"to",
"carry",
"over",
"the",
"login",
"state",
"from",
"another",
"room"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L36-L47
|
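login_transplant copies only the session cookie from another room's user, so one password login can authenticate several connections. A sketch with two hypothetical rooms:

room_b.user.login_transplant(room_a.user)  # room_a.user must be logged in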
volafiled/python-volapi
|
volapi/user.py
|
User.logout
|
def logout(self):
"""Logs your user out"""
if not self.logged_in:
raise RuntimeError("User is not logged in")
if self.conn.connected:
params = {"room": self.conn.room.room_id}
resp = self.conn.make_api_call("logout", params)
if not resp.get("success", False):
raise RuntimeError(
f"Logout unsuccessful: "
f"{resp['error'].get('message') or resp['error']}"
)
self.conn.make_call("logout", params)
self.conn.cookies.pop("session")
self.logged_in = False
|
python
|
def logout(self):
"""Logs your user out"""
if not self.logged_in:
raise RuntimeError("User is not logged in")
if self.conn.connected:
params = {"room": self.conn.room.room_id}
resp = self.conn.make_api_call("logout", params)
if not resp.get("success", False):
raise RuntimeError(
f"Logout unsuccessful: "
f"{resp['error'].get('message') or resp['error']}"
)
self.conn.make_call("logout", params)
self.conn.cookies.pop("session")
self.logged_in = False
|
[
"def",
"logout",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"logged_in",
":",
"raise",
"RuntimeError",
"(",
"\"User is not logged in\"",
")",
"if",
"self",
".",
"conn",
".",
"connected",
":",
"params",
"=",
"{",
"\"room\"",
":",
"self",
".",
"conn",
".",
"room",
".",
"room_id",
"}",
"resp",
"=",
"self",
".",
"conn",
".",
"make_api_call",
"(",
"\"logout\"",
",",
"params",
")",
"if",
"not",
"resp",
".",
"get",
"(",
"\"success\"",
",",
"False",
")",
":",
"raise",
"RuntimeError",
"(",
"f\"Logout unsuccessful: \"",
"f\"{resp['error'].get('message') or resp['error']}\"",
")",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"logout\"",
",",
"params",
")",
"self",
".",
"conn",
".",
"cookies",
".",
"pop",
"(",
"\"session\"",
")",
"self",
".",
"logged_in",
"=",
"False"
] |
Logs your user out
|
[
"Logs",
"your",
"user",
"out"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L49-L64
|
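logout only notifies the server while the connection is still up, but it always drops the session cookie and clears the flag; note that cookies.pop("session") is called without a default, so a missing cookie would raise KeyError. Sketch:

room.user.logout()   # server-side logout only if room.conn.connected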
volafiled/python-volapi
|
volapi/user.py
|
User.change_nick
|
def change_nick(self, new_nick):
"""Change the name of your user
Note: Must be logged out to change nick"""
if self.logged_in:
raise RuntimeError("User must be logged out")
self.__verify_username(new_nick)
self.conn.make_call("command", self.nick, "nick", new_nick)
self.nick = new_nick
|
python
|
def change_nick(self, new_nick):
"""Change the name of your user
Note: Must be logged out to change nick"""
if self.logged_in:
raise RuntimeError("User must be logged out")
self.__verify_username(new_nick)
self.conn.make_call("command", self.nick, "nick", new_nick)
self.nick = new_nick
|
[
"def",
"change_nick",
"(",
"self",
",",
"new_nick",
")",
":",
"if",
"self",
".",
"logged_in",
":",
"raise",
"RuntimeError",
"(",
"\"User must be logged out\"",
")",
"self",
".",
"__verify_username",
"(",
"new_nick",
")",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"command\"",
",",
"self",
".",
"nick",
",",
"\"nick\"",
",",
"new_nick",
")",
"self",
".",
"nick",
"=",
"new_nick"
] |
Change the name of your user
Note: Must be logged out to change nick
|
[
"Change",
"the",
"name",
"of",
"your",
"user",
"Note",
":",
"Must",
"be",
"logged",
"out",
"to",
"change",
"nick"
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L66-L75
|
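change_nick validates the new name client-side and must be called while logged out; the rename is sent as a chat-style command over the websocket. Sketch:

room.user.change_nick('NewNick')  # RuntimeError if currently logged in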
volafiled/python-volapi
|
volapi/user.py
|
User.register
|
def register(self, password):
"""Registers the current user with the given password."""
if len(password) < 8:
raise ValueError("Password must be at least 8 characters.")
params = {"name": self.nick, "password": password}
resp = self.conn.make_api_call("register", params)
if "error" in resp:
raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
self.conn.make_call("useSession", resp["session"])
self.conn.cookies.update({"session": resp["session"]})
self.logged_in = True
|
python
|
def register(self, password):
"""Registers the current user with the given password."""
if len(password) < 8:
raise ValueError("Password must be at least 8 characters.")
params = {"name": self.nick, "password": password}
resp = self.conn.make_api_call("register", params)
if "error" in resp:
raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
self.conn.make_call("useSession", resp["session"])
self.conn.cookies.update({"session": resp["session"]})
self.logged_in = True
|
[
"def",
"register",
"(",
"self",
",",
"password",
")",
":",
"if",
"len",
"(",
"password",
")",
"<",
"8",
":",
"raise",
"ValueError",
"(",
"\"Password must be at least 8 characters.\"",
")",
"params",
"=",
"{",
"\"name\"",
":",
"self",
".",
"nick",
",",
"\"password\"",
":",
"password",
"}",
"resp",
"=",
"self",
".",
"conn",
".",
"make_api_call",
"(",
"\"register\"",
",",
"params",
")",
"if",
"\"error\"",
"in",
"resp",
":",
"raise",
"RuntimeError",
"(",
"f\"{resp['error'].get('message') or resp['error']}\"",
")",
"self",
".",
"conn",
".",
"make_call",
"(",
"\"useSession\"",
",",
"resp",
"[",
"\"session\"",
"]",
")",
"self",
".",
"conn",
".",
"cookies",
".",
"update",
"(",
"{",
"\"session\"",
":",
"resp",
"[",
"\"session\"",
"]",
"}",
")",
"self",
".",
"logged_in",
"=",
"True"
] |
Registers the current user with the given password.
|
[
"Registers",
"the",
"current",
"user",
"with",
"the",
"given",
"password",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L77-L91
|
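register enforces the 8-character minimum client-side and, unlike login above, adopts the new session through the websocket and cookie jar without also storing it on self.session. Sketch:

room.user.register('atleast8chars')  # ValueError if len(password) < 8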
volafiled/python-volapi
|
volapi/user.py
|
User.__verify_username
|
def __verify_username(self, username):
"""Raises an exception if the given username is not valid."""
if len(username) > self.__max_length or len(username) < 3:
raise ValueError(
f"Username must be between 3 and {self.__max_length} characters."
)
if any(c not in string.ascii_letters + string.digits for c in username):
raise ValueError("Usernames can only contain alphanumeric characters.")
|
python
|
def __verify_username(self, username):
"""Raises an exception if the given username is not valid."""
if len(username) > self.__max_length or len(username) < 3:
raise ValueError(
f"Username must be between 3 and {self.__max_length} characters."
)
if any(c not in string.ascii_letters + string.digits for c in username):
raise ValueError("Usernames can only contain alphanumeric characters.")
|
[
"def",
"__verify_username",
"(",
"self",
",",
"username",
")",
":",
"if",
"len",
"(",
"username",
")",
">",
"self",
".",
"__max_length",
"or",
"len",
"(",
"username",
")",
"<",
"3",
":",
"raise",
"ValueError",
"(",
"f\"Username must be between 3 and {self.__max_length} characters.\"",
")",
"if",
"any",
"(",
"c",
"not",
"in",
"string",
".",
"ascii_letters",
"+",
"string",
".",
"digits",
"for",
"c",
"in",
"username",
")",
":",
"raise",
"ValueError",
"(",
"\"Usernames can only contain alphanumeric characters.\"",
")"
] |
Raises an exception if the given username is not valid.
|
[
"Raises",
"an",
"exception",
"if",
"the",
"given",
"username",
"is",
"not",
"valid",
"."
] |
train
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/user.py#L93-L101
|
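The username rule is ASCII alphanumerics only, with length between 3 and the instance's maximum. The same check as a standalone function; the maximum of 12 is an assumption, since __max_length is defined elsewhere in the class:

import string

ALLOWED = set(string.ascii_letters + string.digits)

def verify_username(username, max_length=12):   # 12 is an assumed maximum
    if len(username) > max_length or len(username) < 3:
        raise ValueError('Username must be between 3 and %d characters.'
                         % max_length)
    if any(c not in ALLOWED for c in username):
        raise ValueError('Usernames can only contain alphanumeric characters.')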
Yelp/uwsgi_metrics
|
uwsgi_metrics/reservoir.py
|
Reservoir.update
|
def update(self, value, timestamp=None):
"""Add a value to the reservoir.
:param value: the value to be added
:param timestamp: the epoch timestamp of the value in seconds, defaults
to the current timestamp if not specified.
"""
if timestamp is None:
timestamp = self.current_time_in_fractional_seconds()
self.rescale_if_needed()
priority = self.weight(timestamp - self.start_time) / random.random()
self.values[priority] = value
if len(self.values) > self.size:
self.values.remove_min()
|
python
|
def update(self, value, timestamp=None):
"""Add a value to the reservoir.
:param value: the value to be added
:param timestamp: the epoch timestamp of the value in seconds, defaults
to the current timestamp if not specified.
"""
if timestamp is None:
timestamp = self.current_time_in_fractional_seconds()
self.rescale_if_needed()
priority = self.weight(timestamp - self.start_time) / random.random()
self.values[priority] = value
if len(self.values) > self.size:
self.values.remove_min()
|
[
"def",
"update",
"(",
"self",
",",
"value",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"timestamp",
"=",
"self",
".",
"current_time_in_fractional_seconds",
"(",
")",
"self",
".",
"rescale_if_needed",
"(",
")",
"priority",
"=",
"self",
".",
"weight",
"(",
"timestamp",
"-",
"self",
".",
"start_time",
")",
"/",
"random",
".",
"random",
"(",
")",
"self",
".",
"values",
"[",
"priority",
"]",
"=",
"value",
"if",
"len",
"(",
"self",
".",
"values",
")",
">",
"self",
".",
"size",
":",
"self",
".",
"values",
".",
"remove_min",
"(",
")"
] |
Add a value to the reservoir.
:param value: the value to be added
:param timestamp: the epoch timestamp of the value in seconds, defaults
to the current timestamp if not specified.
|
[
"Add",
"a",
"value",
"to",
"the",
"reservoir",
"."
] |
train
|
https://github.com/Yelp/uwsgi_metrics/blob/534966fd461ff711aecd1e3d4caaafdc23ac33f0/uwsgi_metrics/reservoir.py#L66-L80
|
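update implements priority sampling for a forward-decayed reservoir: each value gets priority weight(age) / uniform, the treap keys values by priority, and anything beyond size is evicted from the minimum end. The exp(-alpha * (L' - L)) factor used by rescale below implies the weight is exponential; a sketch of the priority under that assumption:

import math
import random

def priority(alpha, elapsed_s):
    # exponential forward-decay weight divided by a uniform variate;
    # larger priorities are more likely to survive the size cutoff
    return math.exp(alpha * elapsed_s) / random.random()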
Yelp/uwsgi_metrics
|
uwsgi_metrics/reservoir.py
|
Reservoir.rescale
|
def rescale(self, now):
""" "A common feature of the above techniques—indeed, the key technique
that allows us to track the decayed weights efficiently—is that they
maintain counts and other quantities based on g(ti − L), and only scale
by g(t − L) at query time. But while g(ti −L)/g(t−L) is guaranteed to
lie between zero and one, the intermediate values of g(ti − L) could
become very large. For polynomial functions, these values should not
grow too large, and should be effectively represented in practice by
floating point values without loss of precision. For exponential
functions, these values could grow quite large as new values of
(ti − L) become large, and potentially exceed the capacity of common
floating point types. However, since the values stored by the
algorithms are linear combinations of g values (scaled sums), they
can be rescaled relative to a new landmark. That is, by the analysis
of exponential decay in Section VI-A, the choice of L does not affect
the final result. We can therefore multiply each value based on L by a
factor of exp(−α(L′ − L)), and obtain the correct value as if we had
instead computed relative to a new landmark L′ (and then use this new
L′ at query time). This can be done with a linear pass over whatever
data structure is being used."
"""
old_start_time = self.start_time
self.start_time = self.current_time_in_fractional_seconds()
self.next_scale_time = now + RESCALE_THRESHOLD_S
new_values = treap.treap()
for key in self.values.keys():
new_key = key * math.exp(
-self.alpha * (self.start_time - old_start_time))
new_values[new_key] = self.values[key]
self.values = new_values
|
python
|
def rescale(self, now):
""" "A common feature of the above techniques—indeed, the key technique
that allows us to track the decayed weights efficiently—is that they
maintain counts and other quantities based on g(ti − L), and only scale
by g(t − L) at query time. But while g(ti −L)/g(t−L) is guaranteed to
lie between zero and one, the intermediate values of g(ti − L) could
become very large. For polynomial functions, these values should not
grow too large, and should be effectively represented in practice by
floating point values without loss of precision. For exponential
functions, these values could grow quite large as new values of
(ti − L) become large, and potentially exceed the capacity of common
floating point types. However, since the values stored by the
algorithms are linear combinations of g values (scaled sums), they
can be rescaled relative to a new landmark. That is, by the analysis
of exponential decay in Section VI-A, the choice of L does not affect
the final result. We can therefore multiply each value based on L by a
factor of exp(−α(L′ − L)), and obtain the correct value as if we had
instead computed relative to a new landmark L′ (and then use this new
L′ at query time). This can be done with a linear pass over whatever
data structure is being used."
"""
old_start_time = self.start_time
self.start_time = self.current_time_in_fractional_seconds()
self.next_scale_time = now + RESCALE_THRESHOLD_S
new_values = treap.treap()
for key in self.values.keys():
new_key = key * math.exp(
-self.alpha * (self.start_time - old_start_time))
new_values[new_key] = self.values[key]
self.values = new_values
|
[
"def",
"rescale",
"(",
"self",
",",
"now",
")",
":",
"old_start_time",
"=",
"self",
".",
"start_time",
"self",
".",
"start_time",
"=",
"self",
".",
"current_time_in_fractional_seconds",
"(",
")",
"self",
".",
"next_scale_time",
"=",
"now",
"+",
"RESCALE_THRESHOLD_S",
"new_values",
"=",
"treap",
".",
"treap",
"(",
")",
"for",
"key",
"in",
"self",
".",
"values",
".",
"keys",
"(",
")",
":",
"new_key",
"=",
"key",
"*",
"math",
".",
"exp",
"(",
"-",
"self",
".",
"alpha",
"*",
"(",
"self",
".",
"start_time",
"-",
"old_start_time",
")",
")",
"new_values",
"[",
"new_key",
"]",
"=",
"self",
".",
"values",
"[",
"key",
"]",
"self",
".",
"values",
"=",
"new_values"
] |
"A common feature of the above techniques—indeed, the key technique
that allows us to track the decayed weights efficiently—is that they
maintain counts and other quantities based on g(ti − L), and only scale
by g(t − L) at query time. But while g(ti −L)/g(t−L) is guaranteed to
lie between zero and one, the intermediate values of g(ti − L) could
become very large. For polynomial functions, these values should not
grow too large, and should be effectively represented in practice by
floating point values without loss of precision. For exponential
functions, these values could grow quite large as new values of
(ti − L) become large, and potentially exceed the capacity of common
floating point types. However, since the values stored by the
algorithms are linear combinations of g values (scaled sums), they
can be rescaled relative to a new landmark. That is, by the analysis
of exponential decay in Section VI-A, the choice of L does not affect
the final result. We can therefore multiply each value based on L by a
factor of exp(−α(L′ − L)), and obtain the correct value as if we had
instead computed relative to a new landmark L′ (and then use this new
L′ at query time). This can be done with a linear pass over whatever
data structure is being used."
|
[
"A",
"common",
"feature",
"of",
"the",
"above",
"techniques—indeed",
"the",
"key",
"technique",
"that",
"allows",
"us",
"to",
"track",
"the",
"decayed",
"weights",
"efficiently—is",
"that",
"they",
"maintain",
"counts",
"and",
"other",
"quantities",
"based",
"on",
"g",
"(",
"ti",
"−",
"L",
")",
"and",
"only",
"scale",
"by",
"g",
"(",
"t",
"−",
"L",
")",
"at",
"query",
"time",
".",
"But",
"while",
"g",
"(",
"ti",
"−L",
")",
"/",
"g",
"(",
"t−L",
")",
"is",
"guaranteed",
"to",
"lie",
"between",
"zero",
"and",
"one",
"the",
"intermediate",
"values",
"of",
"g",
"(",
"ti",
"−",
"L",
")",
"could",
"become",
"very",
"large",
".",
"For",
"polynomial",
"functions",
"these",
"values",
"should",
"not",
"grow",
"too",
"large",
"and",
"should",
"be",
"effectively",
"represented",
"in",
"practice",
"by",
"floating",
"point",
"values",
"without",
"loss",
"of",
"precision",
".",
"For",
"exponential",
"functions",
"these",
"values",
"could",
"grow",
"quite",
"large",
"as",
"new",
"values",
"of",
"(",
"ti",
"−",
"L",
")",
"become",
"large",
"and",
"potentially",
"exceed",
"the",
"capacity",
"of",
"common",
"floating",
"point",
"types",
".",
"However",
"since",
"the",
"values",
"stored",
"by",
"the",
"algorithms",
"are",
"linear",
"combinations",
"of",
"g",
"values",
"(",
"scaled",
"sums",
")",
"they",
"can",
"be",
"rescaled",
"relative",
"to",
"a",
"new",
"landmark",
".",
"That",
"is",
"by",
"the",
"analysis",
"of",
"exponential",
"decay",
"in",
"Section",
"VI",
"-",
"A",
"the",
"choice",
"of",
"L",
"does",
"not",
"affect",
"the",
"final",
"result",
".",
"We",
"can",
"therefore",
"multiply",
"each",
"value",
"based",
"on",
"L",
"by",
"a",
"factor",
"of",
"exp",
"(",
"−α",
"(",
"L′",
"−",
"L",
"))",
"and",
"obtain",
"the",
"correct",
"value",
"as",
"if",
"we",
"had",
"instead",
"computed",
"relative",
"to",
"a",
"new",
"landmark",
"L′",
"(",
"and",
"then",
"use",
"this",
"new",
"L′",
"at",
"query",
"time",
")",
".",
"This",
"can",
"be",
"done",
"with",
"a",
"linear",
"pass",
"over",
"whatever",
"data",
"structure",
"is",
"being",
"used",
"."
] |
train
|
https://github.com/Yelp/uwsgi_metrics/blob/534966fd461ff711aecd1e3d4caaafdc23ac33f0/uwsgi_metrics/reservoir.py#L87-L117
|
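The landmark-rescaling identity quoted in the docstring can be checked numerically: multiplying a stored key g(ti - L) = exp(alpha * (ti - L)) by exp(-alpha * (L' - L)) gives exactly g(ti - L'), so ratios between keys, and hence the sample itself, are unchanged. The alpha and timestamps below are arbitrary:

import math

alpha = 0.015
L, L_new = 0.0, 3600.0          # old and new landmarks (arbitrary)
timestamps = [100.0, 2000.0, 3500.0]

# Keys as stored relative to the old landmark L: g(ti - L).
old_keys = [math.exp(alpha * (ti - L)) for ti in timestamps]

# Rescale each key by exp(-alpha * (L' - L)) ...
rescaled = [k * math.exp(-alpha * (L_new - L)) for k in old_keys]

# ... and it equals what we would have computed against L' directly.
direct = [math.exp(alpha * (ti - L_new)) for ti in timestamps]
assert all(math.isclose(a, b) for a, b in zip(rescaled, direct))

# Relative ordering and ratios are preserved, so the sample is unaffected.
print(rescaled[1] / rescaled[0], old_keys[1] / old_keys[0])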
Jaymon/prom
|
prom/utils.py
|
get_objects
|
def get_objects(classpath, calling_classpath=""):
"""
given a classpath like foo.bar.Baz return module foo.bar and class Baz
objects
.. seealso::
https://docs.python.org/2.5/whatsnew/pep-328.html
https://www.python.org/dev/peps/pep-0328/
:param classpath: string, the full python class path (includes modules), a classpath
        is something like foo.bar.Che where Che is the class defined in the foo.bar
module
:param calling_classpath: string, if classpath is relative (eg, ..foo.Bar) then
this is needed to resolve the relative classpath, it is usually the path of
the module that is calling get_objects()
:returns: tuple, (module, class)
"""
# if classpath.startswith("."):
# rel_count = len(re.match("^\.+", classpath).group(0))
# if calling_classpath:
# calling_count = calling_classpath.count(".")
# if rel_count > calling_count:
# raise ValueError(
# "Attempting relative import passed calling_classpath {}".format(
# calling_classpath
# )
# )
#
# bits = calling_classpath.rsplit('.', rel_count)
# parent_classpath = bits[0]
# classpath = ".".join([parent_classpath, classpath[rel_count:]])
#
# else:
# raise ValueError("Attempting relative import without calling_classpath")
#
module_name, class_name = classpath.rsplit('.', 1)
module = importlib.import_module(module_name, calling_classpath)
try:
klass = getattr(module, class_name)
except AttributeError:
raise AttributeError("module {} has no attribute {} parsing {}".format(
module.__name__,
class_name,
classpath
))
return module, klass
|
python
|
def get_objects(classpath, calling_classpath=""):
"""
given a classpath like foo.bar.Baz return module foo.bar and class Baz
objects
.. seealso::
https://docs.python.org/2.5/whatsnew/pep-328.html
https://www.python.org/dev/peps/pep-0328/
:param classpath: string, the full python class path (includes modules), a classpath
        is something like foo.bar.Che where Che is the class defined in the foo.bar
module
:param calling_classpath: string, if classpath is relative (eg, ..foo.Bar) then
this is needed to resolve the relative classpath, it is usually the path of
the module that is calling get_objects()
:returns: tuple, (module, class)
"""
# if classpath.startswith("."):
# rel_count = len(re.match("^\.+", classpath).group(0))
# if calling_classpath:
# calling_count = calling_classpath.count(".")
# if rel_count > calling_count:
# raise ValueError(
# "Attempting relative import passed calling_classpath {}".format(
# calling_classpath
# )
# )
#
# bits = calling_classpath.rsplit('.', rel_count)
# parent_classpath = bits[0]
# classpath = ".".join([parent_classpath, classpath[rel_count:]])
#
# else:
# raise ValueError("Attempting relative import without calling_classpath")
#
module_name, class_name = classpath.rsplit('.', 1)
module = importlib.import_module(module_name, calling_classpath)
try:
klass = getattr(module, class_name)
except AttributeError:
raise AttributeError("module {} has no attribute {} parsing {}".format(
module.__name__,
class_name,
classpath
))
return module, klass
|
[
"def",
"get_objects",
"(",
"classpath",
",",
"calling_classpath",
"=",
"\"\"",
")",
":",
"# if classpath.startswith(\".\"):",
"# rel_count = len(re.match(\"^\\.+\", classpath).group(0))",
"# if calling_classpath:",
"# calling_count = calling_classpath.count(\".\")",
"# if rel_count > calling_count:",
"# raise ValueError(",
"# \"Attempting relative import passed calling_classpath {}\".format(",
"# calling_classpath",
"# )",
"# )",
"# ",
"# bits = calling_classpath.rsplit('.', rel_count)",
"# parent_classpath = bits[0]",
"# classpath = \".\".join([parent_classpath, classpath[rel_count:]])",
"# ",
"# else:",
"# raise ValueError(\"Attempting relative import without calling_classpath\")",
"# ",
"module_name",
",",
"class_name",
"=",
"classpath",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
",",
"calling_classpath",
")",
"try",
":",
"klass",
"=",
"getattr",
"(",
"module",
",",
"class_name",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"module {} has no attribute {} parsing {}\"",
".",
"format",
"(",
"module",
".",
"__name__",
",",
"class_name",
",",
"classpath",
")",
")",
"return",
"module",
",",
"klass"
] |
given a classpath like foo.bar.Baz return module foo.bar and class Baz
objects
.. seealso::
https://docs.python.org/2.5/whatsnew/pep-328.html
https://www.python.org/dev/peps/pep-0328/
:param classpath: string, the full python class path (includes modules), a classpath
    is something like foo.bar.Che where Che is the class defined in the foo.bar
module
:param calling_classpath: string, if classpath is relative (eg, ..foo.Bar) then
this is needed to resolve the relative classpath, it is usually the path of
the module that is calling get_objects()
:returns: tuple, (module, class)
|
[
"given",
"a",
"classpath",
"like",
"foo",
".",
"bar",
".",
"Baz",
"return",
"module",
"foo",
".",
"bar",
"and",
"class",
"Baz",
"objects"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L178-L224
|
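A short usage sketch for the get_objects() logic above; the relative-import branch is commented out in the source, so only the absolute-path case is exercised, against a stdlib class to keep the example self-contained:

import importlib

def get_objects_sketch(classpath):
    # Core of get_objects() above, minus the commented-out relative-import
    # handling: split "pkg.module.Class" into a module path and class name.
    module_name, class_name = classpath.rsplit('.', 1)
    module = importlib.import_module(module_name)
    try:
        klass = getattr(module, class_name)
    except AttributeError:
        raise AttributeError("module {} has no attribute {}".format(
            module.__name__, class_name))
    return module, klass

module, klass = get_objects_sketch("collections.OrderedDict")
print(module.__name__, klass.__name__)  # collections OrderedDict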
Jaymon/prom
|
prom/utils.py
|
make_dict
|
def make_dict(fields, fields_kwargs):
"""lot's of methods take a dict or kwargs, this combines those
Basically, we do a lot of def method(fields, **kwargs) and we want to merge
those into one super dict with kwargs taking precedence, this does that
fields -- dict -- a passed in dict
fields_kwargs -- dict -- usually a **kwargs dict from another function
return -- dict -- a merged fields and fields_kwargs
"""
ret = {}
if fields:
ret.update(fields)
if fields_kwargs:
ret.update(fields_kwargs)
return ret
|
python
|
def make_dict(fields, fields_kwargs):
"""lot's of methods take a dict or kwargs, this combines those
Basically, we do a lot of def method(fields, **kwargs) and we want to merge
those into one super dict with kwargs taking precedence, this does that
fields -- dict -- a passed in dict
fields_kwargs -- dict -- usually a **kwargs dict from another function
return -- dict -- a merged fields and fields_kwargs
"""
ret = {}
if fields:
ret.update(fields)
if fields_kwargs:
ret.update(fields_kwargs)
return ret
|
[
"def",
"make_dict",
"(",
"fields",
",",
"fields_kwargs",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"fields",
":",
"ret",
".",
"update",
"(",
"fields",
")",
"if",
"fields_kwargs",
":",
"ret",
".",
"update",
"(",
"fields_kwargs",
")",
"return",
"ret"
] |
lots of methods take a dict or kwargs, this combines those
Basically, we do a lot of def method(fields, **kwargs) and we want to merge
those into one super dict with kwargs taking precedence, this does that
fields -- dict -- a passed in dict
fields_kwargs -- dict -- usually a **kwargs dict from another function
return -- dict -- a merged fields and fields_kwargs
|
[
"lot",
"s",
"of",
"methods",
"take",
"a",
"dict",
"or",
"kwargs",
"this",
"combines",
"those"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L237-L255
|
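A quick illustration of the merge order in make_dict(): kwargs are applied last, so they win on key collisions. set_fields() below is a hypothetical caller in the def method(fields, **kwargs) style the docstring describes:

def make_dict(fields, fields_kwargs):
    ret = {}
    if fields:
        ret.update(fields)
    if fields_kwargs:
        ret.update(fields_kwargs)
    return ret

# Hypothetical caller in the def method(fields, **kwargs) style described.
def set_fields(fields=None, **kwargs):
    return make_dict(fields, kwargs)

print(set_fields({"a": 1, "b": 2}, b=3))  # {'a': 1, 'b': 3} -- kwargs win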
Jaymon/prom
|
prom/utils.py
|
Stream.write_line
|
def write_line(self, line, count=1):
"""writes the line and count newlines after the line"""
self.write(line)
self.write_newlines(count)
|
python
|
def write_line(self, line, count=1):
"""writes the line and count newlines after the line"""
self.write(line)
self.write_newlines(count)
|
[
"def",
"write_line",
"(",
"self",
",",
"line",
",",
"count",
"=",
"1",
")",
":",
"self",
".",
"write",
"(",
"line",
")",
"self",
".",
"write_newlines",
"(",
"count",
")"
] |
writes the line and count newlines after the line
|
[
"writes",
"the",
"line",
"and",
"count",
"newlines",
"after",
"the",
"line"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L35-L38
|
Jaymon/prom
|
prom/utils.py
|
PriorityQueue.add
|
def add(self, key, val, priority=None):
"""add a value to the queue with priority, using the key to know uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val
"""
if key in self.item_finder:
self.remove(key)
else:
# keep the queue contained
if self.full():
raise OverflowError("Queue is full")
if priority is None:
priority = next(self.counter)
item = [priority, key, val]
self.item_finder[key] = item
heapq.heappush(self.pq, item)
|
python
|
def add(self, key, val, priority=None):
"""add a value to the queue with priority, using the key to know uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val
"""
if key in self.item_finder:
self.remove(key)
else:
# keep the queue contained
if self.full():
raise OverflowError("Queue is full")
if priority is None:
priority = next(self.counter)
item = [priority, key, val]
self.item_finder[key] = item
heapq.heappush(self.pq, item)
|
[
"def",
"add",
"(",
"self",
",",
"key",
",",
"val",
",",
"priority",
"=",
"None",
")",
":",
"if",
"key",
"in",
"self",
".",
"item_finder",
":",
"self",
".",
"remove",
"(",
"key",
")",
"else",
":",
"# keep the queue contained",
"if",
"self",
".",
"full",
"(",
")",
":",
"raise",
"OverflowError",
"(",
"\"Queue is full\"",
")",
"if",
"priority",
"is",
"None",
":",
"priority",
"=",
"next",
"(",
"self",
".",
"counter",
")",
"item",
"=",
"[",
"priority",
",",
"key",
",",
"val",
"]",
"self",
".",
"item_finder",
"[",
"key",
"]",
"=",
"item",
"heapq",
".",
"heappush",
"(",
"self",
".",
"pq",
",",
"item",
")"
] |
add a value to the queue with priority, using the key to enforce uniqueness
key -- str -- this is used to determine if val already exists in the queue,
if key is already in the queue, then the val will be replaced in the
queue with the new priority
val -- mixed -- the value to add to the queue
priority -- int -- the priority of val
|
[
"add",
"a",
"value",
"to",
"the",
"queue",
"with",
"priority",
"using",
"the",
"key",
"to",
"know",
"uniqueness"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L111-L134
|
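add() above follows the standard heapq lazy-deletion recipe: a key that is re-added gets its old heap entry tombstoned (its value set to None) rather than removed, since binary heaps have no cheap arbitrary delete. A self-contained sketch, with itertools.count() standing in for self.counter and size=0 meaning unbounded, as full() suggests:

import heapq
import itertools

class LazyPQ:
    def __init__(self, size=0):
        self.pq = []               # the heap of [priority, key, val] entries
        self.item_finder = {}      # key -> live heap entry
        self.counter = itertools.count()
        self.size = size
        self.removed_count = 0     # tombstones still sitting in the heap

    def full(self):
        if not self.size:
            return False
        return len(self.pq) == (self.size + self.removed_count)

    def remove(self, key):
        # Tombstone the entry in place; heapq has no O(log n) delete.
        item = self.item_finder.pop(key)
        item[-1] = None
        self.removed_count += 1

    def add(self, key, val, priority=None):
        if key in self.item_finder:
            self.remove(key)       # re-adding a key replaces its priority
        elif self.full():
            raise OverflowError("Queue is full")
        if priority is None:
            priority = next(self.counter)
        item = [priority, key, val]
        self.item_finder[key] = item
        heapq.heappush(self.pq, item)

q = LazyPQ()
q.add("job", "payload", priority=5)
q.add("job", "payload", priority=1)   # same key: old entry tombstoned
print(len(q.pq), q.removed_count)     # 2 entries in heap, 1 is a tombstone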
Jaymon/prom
|
prom/utils.py
|
PriorityQueue.remove
|
def remove(self, key):
"""remove the value found at key from the queue"""
item = self.item_finder.pop(key)
item[-1] = None
self.removed_count += 1
|
python
|
def remove(self, key):
"""remove the value found at key from the queue"""
item = self.item_finder.pop(key)
item[-1] = None
self.removed_count += 1
|
[
"def",
"remove",
"(",
"self",
",",
"key",
")",
":",
"item",
"=",
"self",
".",
"item_finder",
".",
"pop",
"(",
"key",
")",
"item",
"[",
"-",
"1",
"]",
"=",
"None",
"self",
".",
"removed_count",
"+=",
"1"
] |
remove the value found at key from the queue
|
[
"remove",
"the",
"value",
"found",
"at",
"key",
"from",
"the",
"queue"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L136-L140
|
Jaymon/prom
|
prom/utils.py
|
PriorityQueue.popitem
|
def popitem(self):
"""remove the next prioritized [key, val, priority] and return it"""
pq = self.pq
while pq:
priority, key, val = heapq.heappop(pq)
if val is None:
self.removed_count -= 1
else:
del self.item_finder[key]
return key, val, priority
raise KeyError("pop from an empty priority queue")
|
python
|
def popitem(self):
"""remove the next prioritized [key, val, priority] and return it"""
pq = self.pq
while pq:
priority, key, val = heapq.heappop(pq)
if val is None:
self.removed_count -= 1
else:
del self.item_finder[key]
return key, val, priority
raise KeyError("pop from an empty priority queue")
|
[
"def",
"popitem",
"(",
"self",
")",
":",
"pq",
"=",
"self",
".",
"pq",
"while",
"pq",
":",
"priority",
",",
"key",
",",
"val",
"=",
"heapq",
".",
"heappop",
"(",
"pq",
")",
"if",
"val",
"is",
"None",
":",
"self",
".",
"removed_count",
"-=",
"1",
"else",
":",
"del",
"self",
".",
"item_finder",
"[",
"key",
"]",
"return",
"key",
",",
"val",
",",
"priority",
"raise",
"KeyError",
"(",
"\"pop from an empty priority queue\"",
")"
] |
remove the next prioritized [key, val, priority] and return it
|
[
"remove",
"the",
"next",
"prioritized",
"[",
"key",
"val",
"priority",
"]",
"and",
"return",
"it"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L142-L154
|
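popitem() is the other half of the lazy-deletion recipe: tombstones are drained as they surface at the top of the heap, decrementing the removed counter so full() stays accurate. A compact standalone sketch (plain lists and dicts here are illustrative, not the class's actual state):

import heapq

def popitem(pq, item_finder, counts):
    # Entries are [priority, key, val]; val is None marks a tombstone.
    while pq:
        priority, key, val = heapq.heappop(pq)
        if val is None:
            counts["removed"] -= 1  # a drained tombstone leaves the books
        else:
            del item_finder[key]
            return key, val, priority
    raise KeyError("pop from an empty priority queue")

pq, finder, counts = [], {}, {"removed": 0}
for prio, key in [(2, "b"), (1, "a")]:
    item = [prio, key, key.upper()]
    finder[key] = item
    heapq.heappush(pq, item)
item_a = finder.pop("a")
item_a[-1] = None          # tombstone "a" in place, as remove() does
counts["removed"] += 1
print(popitem(pq, finder, counts))  # ('b', 'B', 2): the tombstone is skipped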
Jaymon/prom
|
prom/utils.py
|
PriorityQueue.full
|
def full(self):
"""Return True if the queue is full"""
if not self.size: return False
return len(self.pq) == (self.size + self.removed_count)
|
python
|
def full(self):
"""Return True if the queue is full"""
if not self.size: return False
return len(self.pq) == (self.size + self.removed_count)
|
[
"def",
"full",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"size",
":",
"return",
"False",
"return",
"len",
"(",
"self",
".",
"pq",
")",
"==",
"(",
"self",
".",
"size",
"+",
"self",
".",
"removed_count",
")"
] |
Return True if the queue is full
|
[
"Return",
"True",
"if",
"the",
"queue",
"is",
"full"
] |
train
|
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/utils.py#L161-L164
|
guaix-ucm/pyemir
|
emirdrp/tools/slitlet_boundaries_from_continuum.py
|
extract_slitlet2d
|
def extract_slitlet2d(image_2k2k, sltlim):
"""Extract slitlet 2d image from image with original EMIR dimensions.
Parameters
----------
image_2k2k : 2d numpy array, float
Original image (dimensions NAXIS1 * NAXIS2)
sltlim : instance of SlitLimits class
Object containing relevant information concerning the slitlet
region to be extracted.
Returns
-------
slitlet2d : 2d numpy array, float
Image corresponding to the slitlet region defined by its
bounding box.
"""
# extract slitlet region
slitlet2d = image_2k2k[(sltlim.bb_ns1_orig - 1):sltlim.bb_ns2_orig,
(sltlim.bb_nc1_orig - 1):sltlim.bb_nc2_orig]
# transform to float
slitlet2d = slitlet2d.astype(np.float)
# return slitlet image
return slitlet2d
|
python
|
def extract_slitlet2d(image_2k2k, sltlim):
"""Extract slitlet 2d image from image with original EMIR dimensions.
Parameters
----------
image_2k2k : 2d numpy array, float
Original image (dimensions NAXIS1 * NAXIS2)
sltlim : instance of SlitLimits class
Object containing relevant information concerning the slitlet
region to be extracted.
Returns
-------
slitlet2d : 2d numpy array, float
Image corresponding to the slitlet region defined by its
bounding box.
"""
# extract slitlet region
slitlet2d = image_2k2k[(sltlim.bb_ns1_orig - 1):sltlim.bb_ns2_orig,
(sltlim.bb_nc1_orig - 1):sltlim.bb_nc2_orig]
# transform to float
slitlet2d = slitlet2d.astype(np.float)
# return slitlet image
return slitlet2d
|
[
"def",
"extract_slitlet2d",
"(",
"image_2k2k",
",",
"sltlim",
")",
":",
"# extract slitlet region",
"slitlet2d",
"=",
"image_2k2k",
"[",
"(",
"sltlim",
".",
"bb_ns1_orig",
"-",
"1",
")",
":",
"sltlim",
".",
"bb_ns2_orig",
",",
"(",
"sltlim",
".",
"bb_nc1_orig",
"-",
"1",
")",
":",
"sltlim",
".",
"bb_nc2_orig",
"]",
"# transform to float",
"slitlet2d",
"=",
"slitlet2d",
".",
"astype",
"(",
"np",
".",
"float",
")",
"# return slitlet image",
"return",
"slitlet2d"
] |
Extract slitlet 2d image from image with original EMIR dimensions.
Parameters
----------
image_2k2k : 2d numpy array, float
Original image (dimensions NAXIS1 * NAXIS2)
sltlim : instance of SlitLimits class
Object containing relevant information concerning the slitlet
region to be extracted.
Returns
-------
slitlet2d : 2d numpy array, float
Image corresponding to the slitlet region defined by its
bounding box.
|
[
"Extract",
"slitlet",
"2d",
"image",
"from",
"image",
"with",
"original",
"EMIR",
"dimensions",
"."
] |
train
|
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/slitlet_boundaries_from_continuum.py#L217-L244
|
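The slicing in extract_slitlet2d() converts the 1-based, inclusive FITS bounding box into 0-based numpy slices: pixels n1..n2 become array[n1-1:n2]. A tiny check with made-up limits (plain float is used in place of the np.float alias, which newer NumPy releases have removed):

import numpy as np

image = np.arange(100, dtype=float).reshape(10, 10)

# Hypothetical 1-based, inclusive bounding box (the FITS convention).
bb_ns1, bb_ns2 = 3, 5   # rows (spatial, NAXIS2)
bb_nc1, bb_nc2 = 2, 4   # columns (spectral, NAXIS1)

# Same slicing as extract_slitlet2d(): subtract 1 from the lower limits
# only, since Python's upper slice bound is already exclusive.
slitlet2d = image[(bb_ns1 - 1):bb_ns2, (bb_nc1 - 1):bb_nc2].astype(float)
print(slitlet2d.shape)  # (3, 3): rows 3..5 and columns 2..4, inclusive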
guaix-ucm/pyemir
|
emirdrp/tools/slitlet_boundaries_from_continuum.py
|
compute_slitlet_boundaries
|
def compute_slitlet_boundaries(
filename, grism, spfilter, list_slitlets,
size_x_medfilt, size_y_savgol,
times_sigma_threshold,
bounddict, debugplot=0):
"""Compute slitlet boundaries using continuum lamp images.
Parameters
----------
filename : string
        Input continuum lamp image.
grism : string
Grism name. It must be one in EMIR_VALID_GRISMS.
spfilter : string
Filter name. It must be one in EMIR_VALID_FILTERS.
list_slitlets : list of integers
        Slitlet numbers to be updated.
size_x_medfilt : int
Window in the X (spectral) direction, in pixels, to apply
the 1d median filter in order to remove bad pixels.
size_y_savgol : int
Window in the Y (spatial) direction to be used when using the
1d Savitzky-Golay filter.
times_sigma_threshold : float
Times sigma to detect peaks in derivatives.
bounddict : dictionary of dictionaries
Structure to store the boundaries.
debugplot : int
Determines whether intermediate computations and/or plots are
displayed.
"""
# read 2D image
hdulist = fits.open(filename)
image_header = hdulist[0].header
image2d = hdulist[0].data
naxis2, naxis1 = image2d.shape
hdulist.close()
if debugplot >= 10:
print('>>> NAXIS1:', naxis1)
print('>>> NAXIS2:', naxis2)
# ToDo: replace this by application of cosmetic defect mask!
for j in range(1024):
image2d[1024, j] = (image2d[1023, j] + image2d[1025, j]) / 2
image2d[1023, j + 1024] = (image2d[1022, j + 1024] +
image2d[1024, j + 1024]) / 2
# remove path from filename
sfilename = os.path.basename(filename)
# check that the FITS file has been obtained with EMIR
instrument = image_header['instrume']
if instrument != 'EMIR':
raise ValueError("INSTRUME keyword is not 'EMIR'!")
# read CSU configuration from FITS header
csu_config = CsuConfiguration.define_from_fits(filename)
# read DTU configuration from FITS header
dtu_config = DtuConfiguration.define_from_fits(filename)
# read grism
grism_in_header = image_header['grism']
if grism != grism_in_header:
raise ValueError("GRISM keyword=" + grism_in_header +
" is not the expected value=" + grism)
# read filter
spfilter_in_header = image_header['filter']
if spfilter != spfilter_in_header:
raise ValueError("FILTER keyword=" + spfilter_in_header +
" is not the expected value=" + spfilter)
# read rotator position angle
rotang = image_header['rotang']
# read date-obs
date_obs = image_header['date-obs']
for islitlet in list_slitlets:
if debugplot < 10:
sys.stdout.write('.')
sys.stdout.flush()
sltlim = SlitletLimits(grism, spfilter, islitlet)
# extract slitlet2d
slitlet2d = extract_slitlet2d(image2d, sltlim)
if debugplot % 10 != 0:
ximshow(slitlet2d,
title=sfilename + " [original]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
debugplot=debugplot)
# apply 1d median filtering (along the spectral direction)
# to remove bad pixels
size_x = size_x_medfilt
size_y = 1
slitlet2d_smooth = ndimage.filters.median_filter(
slitlet2d, size=(size_y, size_x))
if debugplot % 10 != 0:
ximshow(slitlet2d_smooth,
title=sfilename + " [smoothed]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
debugplot=debugplot)
# apply 1d Savitzky-Golay filter (along the spatial direction)
# to compute first derivative
slitlet2d_savgol = savgol_filter(
slitlet2d_smooth, window_length=size_y_savgol, polyorder=2,
deriv=1, axis=0)
# compute basic statistics
q25, q50, q75 = np.percentile(slitlet2d_savgol, q=[25.0, 50.0, 75.0])
sigmag = 0.7413 * (q75 - q25) # robust standard deviation
if debugplot >= 10:
print("q50, sigmag:", q50, sigmag)
if debugplot % 10 != 0:
ximshow(slitlet2d_savgol,
title=sfilename + " [S.-G.filt.]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
z1z2=(q50-times_sigma_threshold*sigmag,
q50+times_sigma_threshold*sigmag),
debugplot=debugplot)
# identify objects in slitlet2d_savgol: pixels with positive
        # derivatives are identified independently from pixels with
        # negative derivatives; then the two sets of potential features
# are merged; this approach avoids some problems when, in
# nearby regions, there are pixels with positive and negative
# derivatives (in those circumstances a single search as
# np.logical_or(
# slitlet2d_savgol < q50 - times_sigma_threshold * sigmag,
# slitlet2d_savgol > q50 + times_sigma_threshold * sigmag)
# led to erroneous detections!)
#
# search for positive derivatives
labels2d_objects_pos, no_objects_pos = ndimage.label(
slitlet2d_savgol > q50 + times_sigma_threshold * sigmag)
# search for negative derivatives
labels2d_objects_neg, no_objects_neg = ndimage.label(
slitlet2d_savgol < q50 - times_sigma_threshold * sigmag)
# merge both sets
non_zero_neg = np.where(labels2d_objects_neg > 0)
labels2d_objects = np.copy(labels2d_objects_pos)
labels2d_objects[non_zero_neg] += \
labels2d_objects_neg[non_zero_neg] + no_objects_pos
no_objects = no_objects_pos + no_objects_neg
if debugplot >= 10:
print("Number of objects with positive derivative:",
no_objects_pos)
print("Number of objects with negative derivative:",
no_objects_neg)
print("Total number of objects initially found...:", no_objects)
if debugplot % 10 != 0:
ximshow(labels2d_objects,
z1z2=(0, no_objects),
title=sfilename + " [objects]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
cbar_label="Object number",
debugplot=debugplot)
# select boundaries as the largest objects found with
# positive and negative derivatives
n_der_pos = 0 # number of pixels covered by the object with deriv > 0
i_der_pos = 0 # id of the object with deriv > 0
n_der_neg = 0 # number of pixels covered by the object with deriv < 0
i_der_neg = 0 # id of the object with deriv < 0
for i in range(1, no_objects+1):
xy_tmp = np.where(labels2d_objects == i)
n_pix = len(xy_tmp[0])
if i <= no_objects_pos:
if n_pix > n_der_pos:
i_der_pos = i
n_der_pos = n_pix
else:
if n_pix > n_der_neg:
i_der_neg = i
n_der_neg = n_pix
# determine which boundary is lower and which is upper
y_center_mass_der_pos = ndimage.center_of_mass(
slitlet2d_savgol, labels2d_objects, [i_der_pos])[0][0]
y_center_mass_der_neg = ndimage.center_of_mass(
slitlet2d_savgol, labels2d_objects, [i_der_neg])[0][0]
if y_center_mass_der_pos < y_center_mass_der_neg:
i_lower = i_der_pos
i_upper = i_der_neg
if debugplot >= 10:
print("-> lower boundary has positive derivatives")
else:
i_lower = i_der_neg
i_upper = i_der_pos
if debugplot >= 10:
print("-> lower boundary has negative derivatives")
list_slices_ok = [i_lower, i_upper]
# adjust individual boundaries passing the selection:
# - select points in the image belonging to a given boundary
# - compute weighted mean of the pixels of the boundary, column
# by column (this reduces dramatically the number of points
# to be fitted to determine the boundary)
list_boundaries = []
for k in range(2): # k=0 lower boundary, k=1 upper boundary
# select points to be fitted for a particular boundary
# (note: be careful with array indices and pixel
# coordinates)
xy_tmp = np.where(labels2d_objects == list_slices_ok[k])
xmin = xy_tmp[1].min() # array indices (integers)
xmax = xy_tmp[1].max() # array indices (integers)
xfit = []
yfit = []
# fix range for fit
if k == 0:
xmineff = max(sltlim.xmin_lower_boundary_fit, xmin)
xmaxeff = min(sltlim.xmax_lower_boundary_fit, xmax)
else:
xmineff = max(sltlim.xmin_upper_boundary_fit, xmin)
xmaxeff = min(sltlim.xmax_upper_boundary_fit, xmax)
# loop in columns of the image belonging to the boundary
for xdum in range(xmineff, xmaxeff + 1): # array indices (integer)
iok = np.where(xy_tmp[1] == xdum)
y_tmp = xy_tmp[0][iok] + sltlim.bb_ns1_orig # image pixel
weight = slitlet2d_savgol[xy_tmp[0][iok], xy_tmp[1][iok]]
y_wmean = sum(y_tmp * weight) / sum(weight)
xfit.append(xdum + sltlim.bb_nc1_orig)
yfit.append(y_wmean)
xfit = np.array(xfit)
yfit = np.array(yfit)
# declare new SpectrumTrail instance
boundary = SpectrumTrail()
# define new boundary
boundary.fit(x=xfit, y=yfit, deg=sltlim.deg_boundary,
times_sigma_reject=10,
title="slit:" + str(sltlim.islitlet) +
", deg=" + str(sltlim.deg_boundary),
debugplot=0)
list_boundaries.append(boundary)
if debugplot % 10 != 0:
for tmp_img, tmp_label in zip(
[slitlet2d_savgol, slitlet2d],
[' [S.-G.filt.]', ' [original]']
):
ax = ximshow(tmp_img,
title=sfilename + tmp_label +
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig,
sltlim.bb_ns1_orig),
show=False,
debugplot=debugplot)
for k in range(2):
xpol, ypol = list_boundaries[k].linspace_pix(
start=1, stop=EMIR_NAXIS1)
ax.plot(xpol, ypol, 'b--', linewidth=1)
for k in range(2):
xpol, ypol = list_boundaries[k].linspace_pix()
ax.plot(xpol, ypol, 'g--', linewidth=4)
# show plot
pause_debugplot(debugplot, pltshow=True)
# update bounddict
tmp_dict = {
'boundary_coef_lower':
list_boundaries[0].poly_funct.coef.tolist(),
'boundary_xmin_lower': list_boundaries[0].xlower_line,
'boundary_xmax_lower': list_boundaries[0].xupper_line,
'boundary_coef_upper':
list_boundaries[1].poly_funct.coef.tolist(),
'boundary_xmin_upper': list_boundaries[1].xlower_line,
'boundary_xmax_upper': list_boundaries[1].xupper_line,
'csu_bar_left': csu_config.csu_bar_left(islitlet),
'csu_bar_right': csu_config.csu_bar_right(islitlet),
'csu_bar_slit_center': csu_config.csu_bar_slit_center(islitlet),
'csu_bar_slit_width': csu_config.csu_bar_slit_width(islitlet),
'rotang': rotang,
'xdtu': dtu_config.xdtu,
'ydtu': dtu_config.ydtu,
'zdtu': dtu_config.zdtu,
'xdtu_0': dtu_config.xdtu_0,
'ydtu_0': dtu_config.ydtu_0,
'zdtu_0': dtu_config.zdtu_0,
'zzz_info1': os.getlogin() + '@' + socket.gethostname(),
'zzz_info2': datetime.now().isoformat()
}
slitlet_label = "slitlet" + str(islitlet).zfill(2)
if slitlet_label not in bounddict['contents']:
bounddict['contents'][slitlet_label] = {}
bounddict['contents'][slitlet_label][date_obs] = tmp_dict
if debugplot < 10:
print("")
|
python
|
def compute_slitlet_boundaries(
filename, grism, spfilter, list_slitlets,
size_x_medfilt, size_y_savgol,
times_sigma_threshold,
bounddict, debugplot=0):
"""Compute slitlet boundaries using continuum lamp images.
Parameters
----------
filename : string
        Input continuum lamp image.
grism : string
Grism name. It must be one in EMIR_VALID_GRISMS.
spfilter : string
Filter name. It must be one in EMIR_VALID_FILTERS.
list_slitlets : list of integers
        Slitlet numbers to be updated.
size_x_medfilt : int
Window in the X (spectral) direction, in pixels, to apply
the 1d median filter in order to remove bad pixels.
size_y_savgol : int
Window in the Y (spatial) direction to be used when using the
1d Savitzky-Golay filter.
times_sigma_threshold : float
Times sigma to detect peaks in derivatives.
bounddict : dictionary of dictionaries
Structure to store the boundaries.
debugplot : int
Determines whether intermediate computations and/or plots are
displayed.
"""
# read 2D image
hdulist = fits.open(filename)
image_header = hdulist[0].header
image2d = hdulist[0].data
naxis2, naxis1 = image2d.shape
hdulist.close()
if debugplot >= 10:
print('>>> NAXIS1:', naxis1)
print('>>> NAXIS2:', naxis2)
# ToDo: replace this by application of cosmetic defect mask!
for j in range(1024):
image2d[1024, j] = (image2d[1023, j] + image2d[1025, j]) / 2
image2d[1023, j + 1024] = (image2d[1022, j + 1024] +
image2d[1024, j + 1024]) / 2
# remove path from filename
sfilename = os.path.basename(filename)
# check that the FITS file has been obtained with EMIR
instrument = image_header['instrume']
if instrument != 'EMIR':
raise ValueError("INSTRUME keyword is not 'EMIR'!")
# read CSU configuration from FITS header
csu_config = CsuConfiguration.define_from_fits(filename)
# read DTU configuration from FITS header
dtu_config = DtuConfiguration.define_from_fits(filename)
# read grism
grism_in_header = image_header['grism']
if grism != grism_in_header:
raise ValueError("GRISM keyword=" + grism_in_header +
" is not the expected value=" + grism)
# read filter
spfilter_in_header = image_header['filter']
if spfilter != spfilter_in_header:
raise ValueError("FILTER keyword=" + spfilter_in_header +
" is not the expected value=" + spfilter)
# read rotator position angle
rotang = image_header['rotang']
# read date-obs
date_obs = image_header['date-obs']
for islitlet in list_slitlets:
if debugplot < 10:
sys.stdout.write('.')
sys.stdout.flush()
sltlim = SlitletLimits(grism, spfilter, islitlet)
# extract slitlet2d
slitlet2d = extract_slitlet2d(image2d, sltlim)
if debugplot % 10 != 0:
ximshow(slitlet2d,
title=sfilename + " [original]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
debugplot=debugplot)
# apply 1d median filtering (along the spectral direction)
# to remove bad pixels
size_x = size_x_medfilt
size_y = 1
slitlet2d_smooth = ndimage.filters.median_filter(
slitlet2d, size=(size_y, size_x))
if debugplot % 10 != 0:
ximshow(slitlet2d_smooth,
title=sfilename + " [smoothed]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
debugplot=debugplot)
# apply 1d Savitzky-Golay filter (along the spatial direction)
# to compute first derivative
slitlet2d_savgol = savgol_filter(
slitlet2d_smooth, window_length=size_y_savgol, polyorder=2,
deriv=1, axis=0)
# compute basic statistics
q25, q50, q75 = np.percentile(slitlet2d_savgol, q=[25.0, 50.0, 75.0])
sigmag = 0.7413 * (q75 - q25) # robust standard deviation
if debugplot >= 10:
print("q50, sigmag:", q50, sigmag)
if debugplot % 10 != 0:
ximshow(slitlet2d_savgol,
title=sfilename + " [S.-G.filt.]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
z1z2=(q50-times_sigma_threshold*sigmag,
q50+times_sigma_threshold*sigmag),
debugplot=debugplot)
# identify objects in slitlet2d_savgol: pixels with positive
        # derivatives are identified independently from pixels with
        # negative derivatives; then the two sets of potential features
# are merged; this approach avoids some problems when, in
# nearby regions, there are pixels with positive and negative
# derivatives (in those circumstances a single search as
# np.logical_or(
# slitlet2d_savgol < q50 - times_sigma_threshold * sigmag,
# slitlet2d_savgol > q50 + times_sigma_threshold * sigmag)
# led to erroneous detections!)
#
# search for positive derivatives
labels2d_objects_pos, no_objects_pos = ndimage.label(
slitlet2d_savgol > q50 + times_sigma_threshold * sigmag)
# search for negative derivatives
labels2d_objects_neg, no_objects_neg = ndimage.label(
slitlet2d_savgol < q50 - times_sigma_threshold * sigmag)
# merge both sets
non_zero_neg = np.where(labels2d_objects_neg > 0)
labels2d_objects = np.copy(labels2d_objects_pos)
labels2d_objects[non_zero_neg] += \
labels2d_objects_neg[non_zero_neg] + no_objects_pos
no_objects = no_objects_pos + no_objects_neg
if debugplot >= 10:
print("Number of objects with positive derivative:",
no_objects_pos)
print("Number of objects with negative derivative:",
no_objects_neg)
print("Total number of objects initially found...:", no_objects)
if debugplot % 10 != 0:
ximshow(labels2d_objects,
z1z2=(0, no_objects),
title=sfilename + " [objects]"
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig, sltlim.bb_ns1_orig),
cbar_label="Object number",
debugplot=debugplot)
# select boundaries as the largest objects found with
# positive and negative derivatives
n_der_pos = 0 # number of pixels covered by the object with deriv > 0
i_der_pos = 0 # id of the object with deriv > 0
n_der_neg = 0 # number of pixels covered by the object with deriv < 0
i_der_neg = 0 # id of the object with deriv < 0
for i in range(1, no_objects+1):
xy_tmp = np.where(labels2d_objects == i)
n_pix = len(xy_tmp[0])
if i <= no_objects_pos:
if n_pix > n_der_pos:
i_der_pos = i
n_der_pos = n_pix
else:
if n_pix > n_der_neg:
i_der_neg = i
n_der_neg = n_pix
# determine which boundary is lower and which is upper
y_center_mass_der_pos = ndimage.center_of_mass(
slitlet2d_savgol, labels2d_objects, [i_der_pos])[0][0]
y_center_mass_der_neg = ndimage.center_of_mass(
slitlet2d_savgol, labels2d_objects, [i_der_neg])[0][0]
if y_center_mass_der_pos < y_center_mass_der_neg:
i_lower = i_der_pos
i_upper = i_der_neg
if debugplot >= 10:
print("-> lower boundary has positive derivatives")
else:
i_lower = i_der_neg
i_upper = i_der_pos
if debugplot >= 10:
print("-> lower boundary has negative derivatives")
list_slices_ok = [i_lower, i_upper]
# adjust individual boundaries passing the selection:
# - select points in the image belonging to a given boundary
# - compute weighted mean of the pixels of the boundary, column
# by column (this reduces dramatically the number of points
# to be fitted to determine the boundary)
list_boundaries = []
for k in range(2): # k=0 lower boundary, k=1 upper boundary
# select points to be fitted for a particular boundary
# (note: be careful with array indices and pixel
# coordinates)
xy_tmp = np.where(labels2d_objects == list_slices_ok[k])
xmin = xy_tmp[1].min() # array indices (integers)
xmax = xy_tmp[1].max() # array indices (integers)
xfit = []
yfit = []
# fix range for fit
if k == 0:
xmineff = max(sltlim.xmin_lower_boundary_fit, xmin)
xmaxeff = min(sltlim.xmax_lower_boundary_fit, xmax)
else:
xmineff = max(sltlim.xmin_upper_boundary_fit, xmin)
xmaxeff = min(sltlim.xmax_upper_boundary_fit, xmax)
# loop in columns of the image belonging to the boundary
for xdum in range(xmineff, xmaxeff + 1): # array indices (integer)
iok = np.where(xy_tmp[1] == xdum)
y_tmp = xy_tmp[0][iok] + sltlim.bb_ns1_orig # image pixel
weight = slitlet2d_savgol[xy_tmp[0][iok], xy_tmp[1][iok]]
y_wmean = sum(y_tmp * weight) / sum(weight)
xfit.append(xdum + sltlim.bb_nc1_orig)
yfit.append(y_wmean)
xfit = np.array(xfit)
yfit = np.array(yfit)
# declare new SpectrumTrail instance
boundary = SpectrumTrail()
# define new boundary
boundary.fit(x=xfit, y=yfit, deg=sltlim.deg_boundary,
times_sigma_reject=10,
title="slit:" + str(sltlim.islitlet) +
", deg=" + str(sltlim.deg_boundary),
debugplot=0)
list_boundaries.append(boundary)
if debugplot % 10 != 0:
for tmp_img, tmp_label in zip(
[slitlet2d_savgol, slitlet2d],
[' [S.-G.filt.]', ' [original]']
):
ax = ximshow(tmp_img,
title=sfilename + tmp_label +
"\nslitlet=" + str(islitlet) +
", grism=" + grism +
", filter=" + spfilter +
", rotang=" + str(round(rotang, 2)),
first_pixel=(sltlim.bb_nc1_orig,
sltlim.bb_ns1_orig),
show=False,
debugplot=debugplot)
for k in range(2):
xpol, ypol = list_boundaries[k].linspace_pix(
start=1, stop=EMIR_NAXIS1)
ax.plot(xpol, ypol, 'b--', linewidth=1)
for k in range(2):
xpol, ypol = list_boundaries[k].linspace_pix()
ax.plot(xpol, ypol, 'g--', linewidth=4)
# show plot
pause_debugplot(debugplot, pltshow=True)
# update bounddict
tmp_dict = {
'boundary_coef_lower':
list_boundaries[0].poly_funct.coef.tolist(),
'boundary_xmin_lower': list_boundaries[0].xlower_line,
'boundary_xmax_lower': list_boundaries[0].xupper_line,
'boundary_coef_upper':
list_boundaries[1].poly_funct.coef.tolist(),
'boundary_xmin_upper': list_boundaries[1].xlower_line,
'boundary_xmax_upper': list_boundaries[1].xupper_line,
'csu_bar_left': csu_config.csu_bar_left(islitlet),
'csu_bar_right': csu_config.csu_bar_right(islitlet),
'csu_bar_slit_center': csu_config.csu_bar_slit_center(islitlet),
'csu_bar_slit_width': csu_config.csu_bar_slit_width(islitlet),
'rotang': rotang,
'xdtu': dtu_config.xdtu,
'ydtu': dtu_config.ydtu,
'zdtu': dtu_config.zdtu,
'xdtu_0': dtu_config.xdtu_0,
'ydtu_0': dtu_config.ydtu_0,
'zdtu_0': dtu_config.zdtu_0,
'zzz_info1': os.getlogin() + '@' + socket.gethostname(),
'zzz_info2': datetime.now().isoformat()
}
slitlet_label = "slitlet" + str(islitlet).zfill(2)
if slitlet_label not in bounddict['contents']:
bounddict['contents'][slitlet_label] = {}
bounddict['contents'][slitlet_label][date_obs] = tmp_dict
if debugplot < 10:
print("")
|
[
"def",
"compute_slitlet_boundaries",
"(",
"filename",
",",
"grism",
",",
"spfilter",
",",
"list_slitlets",
",",
"size_x_medfilt",
",",
"size_y_savgol",
",",
"times_sigma_threshold",
",",
"bounddict",
",",
"debugplot",
"=",
"0",
")",
":",
"# read 2D image",
"hdulist",
"=",
"fits",
".",
"open",
"(",
"filename",
")",
"image_header",
"=",
"hdulist",
"[",
"0",
"]",
".",
"header",
"image2d",
"=",
"hdulist",
"[",
"0",
"]",
".",
"data",
"naxis2",
",",
"naxis1",
"=",
"image2d",
".",
"shape",
"hdulist",
".",
"close",
"(",
")",
"if",
"debugplot",
">=",
"10",
":",
"print",
"(",
"'>>> NAXIS1:'",
",",
"naxis1",
")",
"print",
"(",
"'>>> NAXIS2:'",
",",
"naxis2",
")",
"# ToDo: replace this by application of cosmetic defect mask!",
"for",
"j",
"in",
"range",
"(",
"1024",
")",
":",
"image2d",
"[",
"1024",
",",
"j",
"]",
"=",
"(",
"image2d",
"[",
"1023",
",",
"j",
"]",
"+",
"image2d",
"[",
"1025",
",",
"j",
"]",
")",
"/",
"2",
"image2d",
"[",
"1023",
",",
"j",
"+",
"1024",
"]",
"=",
"(",
"image2d",
"[",
"1022",
",",
"j",
"+",
"1024",
"]",
"+",
"image2d",
"[",
"1024",
",",
"j",
"+",
"1024",
"]",
")",
"/",
"2",
"# remove path from filename",
"sfilename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"# check that the FITS file has been obtained with EMIR",
"instrument",
"=",
"image_header",
"[",
"'instrume'",
"]",
"if",
"instrument",
"!=",
"'EMIR'",
":",
"raise",
"ValueError",
"(",
"\"INSTRUME keyword is not 'EMIR'!\"",
")",
"# read CSU configuration from FITS header",
"csu_config",
"=",
"CsuConfiguration",
".",
"define_from_fits",
"(",
"filename",
")",
"# read DTU configuration from FITS header",
"dtu_config",
"=",
"DtuConfiguration",
".",
"define_from_fits",
"(",
"filename",
")",
"# read grism",
"grism_in_header",
"=",
"image_header",
"[",
"'grism'",
"]",
"if",
"grism",
"!=",
"grism_in_header",
":",
"raise",
"ValueError",
"(",
"\"GRISM keyword=\"",
"+",
"grism_in_header",
"+",
"\" is not the expected value=\"",
"+",
"grism",
")",
"# read filter",
"spfilter_in_header",
"=",
"image_header",
"[",
"'filter'",
"]",
"if",
"spfilter",
"!=",
"spfilter_in_header",
":",
"raise",
"ValueError",
"(",
"\"FILTER keyword=\"",
"+",
"spfilter_in_header",
"+",
"\" is not the expected value=\"",
"+",
"spfilter",
")",
"# read rotator position angle",
"rotang",
"=",
"image_header",
"[",
"'rotang'",
"]",
"# read date-obs",
"date_obs",
"=",
"image_header",
"[",
"'date-obs'",
"]",
"for",
"islitlet",
"in",
"list_slitlets",
":",
"if",
"debugplot",
"<",
"10",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'.'",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sltlim",
"=",
"SlitletLimits",
"(",
"grism",
",",
"spfilter",
",",
"islitlet",
")",
"# extract slitlet2d",
"slitlet2d",
"=",
"extract_slitlet2d",
"(",
"image2d",
",",
"sltlim",
")",
"if",
"debugplot",
"%",
"10",
"!=",
"0",
":",
"ximshow",
"(",
"slitlet2d",
",",
"title",
"=",
"sfilename",
"+",
"\" [original]\"",
"\"\\nslitlet=\"",
"+",
"str",
"(",
"islitlet",
")",
"+",
"\", grism=\"",
"+",
"grism",
"+",
"\", filter=\"",
"+",
"spfilter",
"+",
"\", rotang=\"",
"+",
"str",
"(",
"round",
"(",
"rotang",
",",
"2",
")",
")",
",",
"first_pixel",
"=",
"(",
"sltlim",
".",
"bb_nc1_orig",
",",
"sltlim",
".",
"bb_ns1_orig",
")",
",",
"debugplot",
"=",
"debugplot",
")",
"# apply 1d median filtering (along the spectral direction)",
"# to remove bad pixels",
"size_x",
"=",
"size_x_medfilt",
"size_y",
"=",
"1",
"slitlet2d_smooth",
"=",
"ndimage",
".",
"filters",
".",
"median_filter",
"(",
"slitlet2d",
",",
"size",
"=",
"(",
"size_y",
",",
"size_x",
")",
")",
"if",
"debugplot",
"%",
"10",
"!=",
"0",
":",
"ximshow",
"(",
"slitlet2d_smooth",
",",
"title",
"=",
"sfilename",
"+",
"\" [smoothed]\"",
"\"\\nslitlet=\"",
"+",
"str",
"(",
"islitlet",
")",
"+",
"\", grism=\"",
"+",
"grism",
"+",
"\", filter=\"",
"+",
"spfilter",
"+",
"\", rotang=\"",
"+",
"str",
"(",
"round",
"(",
"rotang",
",",
"2",
")",
")",
",",
"first_pixel",
"=",
"(",
"sltlim",
".",
"bb_nc1_orig",
",",
"sltlim",
".",
"bb_ns1_orig",
")",
",",
"debugplot",
"=",
"debugplot",
")",
"# apply 1d Savitzky-Golay filter (along the spatial direction)",
"# to compute first derivative",
"slitlet2d_savgol",
"=",
"savgol_filter",
"(",
"slitlet2d_smooth",
",",
"window_length",
"=",
"size_y_savgol",
",",
"polyorder",
"=",
"2",
",",
"deriv",
"=",
"1",
",",
"axis",
"=",
"0",
")",
"# compute basic statistics",
"q25",
",",
"q50",
",",
"q75",
"=",
"np",
".",
"percentile",
"(",
"slitlet2d_savgol",
",",
"q",
"=",
"[",
"25.0",
",",
"50.0",
",",
"75.0",
"]",
")",
"sigmag",
"=",
"0.7413",
"*",
"(",
"q75",
"-",
"q25",
")",
"# robust standard deviation",
"if",
"debugplot",
">=",
"10",
":",
"print",
"(",
"\"q50, sigmag:\"",
",",
"q50",
",",
"sigmag",
")",
"if",
"debugplot",
"%",
"10",
"!=",
"0",
":",
"ximshow",
"(",
"slitlet2d_savgol",
",",
"title",
"=",
"sfilename",
"+",
"\" [S.-G.filt.]\"",
"\"\\nslitlet=\"",
"+",
"str",
"(",
"islitlet",
")",
"+",
"\", grism=\"",
"+",
"grism",
"+",
"\", filter=\"",
"+",
"spfilter",
"+",
"\", rotang=\"",
"+",
"str",
"(",
"round",
"(",
"rotang",
",",
"2",
")",
")",
",",
"first_pixel",
"=",
"(",
"sltlim",
".",
"bb_nc1_orig",
",",
"sltlim",
".",
"bb_ns1_orig",
")",
",",
"z1z2",
"=",
"(",
"q50",
"-",
"times_sigma_threshold",
"*",
"sigmag",
",",
"q50",
"+",
"times_sigma_threshold",
"*",
"sigmag",
")",
",",
"debugplot",
"=",
"debugplot",
")",
"# identify objects in slitlet2d_savgol: pixels with positive",
"# derivatives are identify independently from pixels with",
"# negative derivaties; then the two set of potential features",
"# are merged; this approach avoids some problems when, in",
"# nearby regions, there are pixels with positive and negative",
"# derivatives (in those circumstances a single search as",
"# np.logical_or(",
"# slitlet2d_savgol < q50 - times_sigma_threshold * sigmag,",
"# slitlet2d_savgol > q50 + times_sigma_threshold * sigmag)",
"# led to erroneous detections!)",
"#",
"# search for positive derivatives",
"labels2d_objects_pos",
",",
"no_objects_pos",
"=",
"ndimage",
".",
"label",
"(",
"slitlet2d_savgol",
">",
"q50",
"+",
"times_sigma_threshold",
"*",
"sigmag",
")",
"# search for negative derivatives",
"labels2d_objects_neg",
",",
"no_objects_neg",
"=",
"ndimage",
".",
"label",
"(",
"slitlet2d_savgol",
"<",
"q50",
"-",
"times_sigma_threshold",
"*",
"sigmag",
")",
"# merge both sets",
"non_zero_neg",
"=",
"np",
".",
"where",
"(",
"labels2d_objects_neg",
">",
"0",
")",
"labels2d_objects",
"=",
"np",
".",
"copy",
"(",
"labels2d_objects_pos",
")",
"labels2d_objects",
"[",
"non_zero_neg",
"]",
"+=",
"labels2d_objects_neg",
"[",
"non_zero_neg",
"]",
"+",
"no_objects_pos",
"no_objects",
"=",
"no_objects_pos",
"+",
"no_objects_neg",
"if",
"debugplot",
">=",
"10",
":",
"print",
"(",
"\"Number of objects with positive derivative:\"",
",",
"no_objects_pos",
")",
"print",
"(",
"\"Number of objects with negative derivative:\"",
",",
"no_objects_neg",
")",
"print",
"(",
"\"Total number of objects initially found...:\"",
",",
"no_objects",
")",
"if",
"debugplot",
"%",
"10",
"!=",
"0",
":",
"ximshow",
"(",
"labels2d_objects",
",",
"z1z2",
"=",
"(",
"0",
",",
"no_objects",
")",
",",
"title",
"=",
"sfilename",
"+",
"\" [objects]\"",
"\"\\nslitlet=\"",
"+",
"str",
"(",
"islitlet",
")",
"+",
"\", grism=\"",
"+",
"grism",
"+",
"\", filter=\"",
"+",
"spfilter",
"+",
"\", rotang=\"",
"+",
"str",
"(",
"round",
"(",
"rotang",
",",
"2",
")",
")",
",",
"first_pixel",
"=",
"(",
"sltlim",
".",
"bb_nc1_orig",
",",
"sltlim",
".",
"bb_ns1_orig",
")",
",",
"cbar_label",
"=",
"\"Object number\"",
",",
"debugplot",
"=",
"debugplot",
")",
"# select boundaries as the largest objects found with",
"# positive and negative derivatives",
"n_der_pos",
"=",
"0",
"# number of pixels covered by the object with deriv > 0",
"i_der_pos",
"=",
"0",
"# id of the object with deriv > 0",
"n_der_neg",
"=",
"0",
"# number of pixels covered by the object with deriv < 0",
"i_der_neg",
"=",
"0",
"# id of the object with deriv < 0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"no_objects",
"+",
"1",
")",
":",
"xy_tmp",
"=",
"np",
".",
"where",
"(",
"labels2d_objects",
"==",
"i",
")",
"n_pix",
"=",
"len",
"(",
"xy_tmp",
"[",
"0",
"]",
")",
"if",
"i",
"<=",
"no_objects_pos",
":",
"if",
"n_pix",
">",
"n_der_pos",
":",
"i_der_pos",
"=",
"i",
"n_der_pos",
"=",
"n_pix",
"else",
":",
"if",
"n_pix",
">",
"n_der_neg",
":",
"i_der_neg",
"=",
"i",
"n_der_neg",
"=",
"n_pix",
"# determine which boundary is lower and which is upper",
"y_center_mass_der_pos",
"=",
"ndimage",
".",
"center_of_mass",
"(",
"slitlet2d_savgol",
",",
"labels2d_objects",
",",
"[",
"i_der_pos",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"y_center_mass_der_neg",
"=",
"ndimage",
".",
"center_of_mass",
"(",
"slitlet2d_savgol",
",",
"labels2d_objects",
",",
"[",
"i_der_neg",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"y_center_mass_der_pos",
"<",
"y_center_mass_der_neg",
":",
"i_lower",
"=",
"i_der_pos",
"i_upper",
"=",
"i_der_neg",
"if",
"debugplot",
">=",
"10",
":",
"print",
"(",
"\"-> lower boundary has positive derivatives\"",
")",
"else",
":",
"i_lower",
"=",
"i_der_neg",
"i_upper",
"=",
"i_der_pos",
"if",
"debugplot",
">=",
"10",
":",
"print",
"(",
"\"-> lower boundary has negative derivatives\"",
")",
"list_slices_ok",
"=",
"[",
"i_lower",
",",
"i_upper",
"]",
"# adjust individual boundaries passing the selection:",
"# - select points in the image belonging to a given boundary",
"# - compute weighted mean of the pixels of the boundary, column",
"# by column (this reduces dramatically the number of points",
"# to be fitted to determine the boundary)",
"list_boundaries",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"2",
")",
":",
"# k=0 lower boundary, k=1 upper boundary",
"# select points to be fitted for a particular boundary",
"# (note: be careful with array indices and pixel",
"# coordinates)",
"xy_tmp",
"=",
"np",
".",
"where",
"(",
"labels2d_objects",
"==",
"list_slices_ok",
"[",
"k",
"]",
")",
"xmin",
"=",
"xy_tmp",
"[",
"1",
"]",
".",
"min",
"(",
")",
"# array indices (integers)",
"xmax",
"=",
"xy_tmp",
"[",
"1",
"]",
".",
"max",
"(",
")",
"# array indices (integers)",
"xfit",
"=",
"[",
"]",
"yfit",
"=",
"[",
"]",
"# fix range for fit",
"if",
"k",
"==",
"0",
":",
"xmineff",
"=",
"max",
"(",
"sltlim",
".",
"xmin_lower_boundary_fit",
",",
"xmin",
")",
"xmaxeff",
"=",
"min",
"(",
"sltlim",
".",
"xmax_lower_boundary_fit",
",",
"xmax",
")",
"else",
":",
"xmineff",
"=",
"max",
"(",
"sltlim",
".",
"xmin_upper_boundary_fit",
",",
"xmin",
")",
"xmaxeff",
"=",
"min",
"(",
"sltlim",
".",
"xmax_upper_boundary_fit",
",",
"xmax",
")",
"# loop in columns of the image belonging to the boundary",
"for",
"xdum",
"in",
"range",
"(",
"xmineff",
",",
"xmaxeff",
"+",
"1",
")",
":",
"# array indices (integer)",
"iok",
"=",
"np",
".",
"where",
"(",
"xy_tmp",
"[",
"1",
"]",
"==",
"xdum",
")",
"y_tmp",
"=",
"xy_tmp",
"[",
"0",
"]",
"[",
"iok",
"]",
"+",
"sltlim",
".",
"bb_ns1_orig",
"# image pixel",
"weight",
"=",
"slitlet2d_savgol",
"[",
"xy_tmp",
"[",
"0",
"]",
"[",
"iok",
"]",
",",
"xy_tmp",
"[",
"1",
"]",
"[",
"iok",
"]",
"]",
"y_wmean",
"=",
"sum",
"(",
"y_tmp",
"*",
"weight",
")",
"/",
"sum",
"(",
"weight",
")",
"xfit",
".",
"append",
"(",
"xdum",
"+",
"sltlim",
".",
"bb_nc1_orig",
")",
"yfit",
".",
"append",
"(",
"y_wmean",
")",
"xfit",
"=",
"np",
".",
"array",
"(",
"xfit",
")",
"yfit",
"=",
"np",
".",
"array",
"(",
"yfit",
")",
"# declare new SpectrumTrail instance",
"boundary",
"=",
"SpectrumTrail",
"(",
")",
"# define new boundary",
"boundary",
".",
"fit",
"(",
"x",
"=",
"xfit",
",",
"y",
"=",
"yfit",
",",
"deg",
"=",
"sltlim",
".",
"deg_boundary",
",",
"times_sigma_reject",
"=",
"10",
",",
"title",
"=",
"\"slit:\"",
"+",
"str",
"(",
"sltlim",
".",
"islitlet",
")",
"+",
"\", deg=\"",
"+",
"str",
"(",
"sltlim",
".",
"deg_boundary",
")",
",",
"debugplot",
"=",
"0",
")",
"list_boundaries",
".",
"append",
"(",
"boundary",
")",
"if",
"debugplot",
"%",
"10",
"!=",
"0",
":",
"for",
"tmp_img",
",",
"tmp_label",
"in",
"zip",
"(",
"[",
"slitlet2d_savgol",
",",
"slitlet2d",
"]",
",",
"[",
"' [S.-G.filt.]'",
",",
"' [original]'",
"]",
")",
":",
"ax",
"=",
"ximshow",
"(",
"tmp_img",
",",
"title",
"=",
"sfilename",
"+",
"tmp_label",
"+",
"\"\\nslitlet=\"",
"+",
"str",
"(",
"islitlet",
")",
"+",
"\", grism=\"",
"+",
"grism",
"+",
"\", filter=\"",
"+",
"spfilter",
"+",
"\", rotang=\"",
"+",
"str",
"(",
"round",
"(",
"rotang",
",",
"2",
")",
")",
",",
"first_pixel",
"=",
"(",
"sltlim",
".",
"bb_nc1_orig",
",",
"sltlim",
".",
"bb_ns1_orig",
")",
",",
"show",
"=",
"False",
",",
"debugplot",
"=",
"debugplot",
")",
"for",
"k",
"in",
"range",
"(",
"2",
")",
":",
"xpol",
",",
"ypol",
"=",
"list_boundaries",
"[",
"k",
"]",
".",
"linspace_pix",
"(",
"start",
"=",
"1",
",",
"stop",
"=",
"EMIR_NAXIS1",
")",
"ax",
".",
"plot",
"(",
"xpol",
",",
"ypol",
",",
"'b--'",
",",
"linewidth",
"=",
"1",
")",
"for",
"k",
"in",
"range",
"(",
"2",
")",
":",
"xpol",
",",
"ypol",
"=",
"list_boundaries",
"[",
"k",
"]",
".",
"linspace_pix",
"(",
")",
"ax",
".",
"plot",
"(",
"xpol",
",",
"ypol",
",",
"'g--'",
",",
"linewidth",
"=",
"4",
")",
"# show plot",
"pause_debugplot",
"(",
"debugplot",
",",
"pltshow",
"=",
"True",
")",
"# update bounddict",
"tmp_dict",
"=",
"{",
"'boundary_coef_lower'",
":",
"list_boundaries",
"[",
"0",
"]",
".",
"poly_funct",
".",
"coef",
".",
"tolist",
"(",
")",
",",
"'boundary_xmin_lower'",
":",
"list_boundaries",
"[",
"0",
"]",
".",
"xlower_line",
",",
"'boundary_xmax_lower'",
":",
"list_boundaries",
"[",
"0",
"]",
".",
"xupper_line",
",",
"'boundary_coef_upper'",
":",
"list_boundaries",
"[",
"1",
"]",
".",
"poly_funct",
".",
"coef",
".",
"tolist",
"(",
")",
",",
"'boundary_xmin_upper'",
":",
"list_boundaries",
"[",
"1",
"]",
".",
"xlower_line",
",",
"'boundary_xmax_upper'",
":",
"list_boundaries",
"[",
"1",
"]",
".",
"xupper_line",
",",
"'csu_bar_left'",
":",
"csu_config",
".",
"csu_bar_left",
"(",
"islitlet",
")",
",",
"'csu_bar_right'",
":",
"csu_config",
".",
"csu_bar_right",
"(",
"islitlet",
")",
",",
"'csu_bar_slit_center'",
":",
"csu_config",
".",
"csu_bar_slit_center",
"(",
"islitlet",
")",
",",
"'csu_bar_slit_width'",
":",
"csu_config",
".",
"csu_bar_slit_width",
"(",
"islitlet",
")",
",",
"'rotang'",
":",
"rotang",
",",
"'xdtu'",
":",
"dtu_config",
".",
"xdtu",
",",
"'ydtu'",
":",
"dtu_config",
".",
"ydtu",
",",
"'zdtu'",
":",
"dtu_config",
".",
"zdtu",
",",
"'xdtu_0'",
":",
"dtu_config",
".",
"xdtu_0",
",",
"'ydtu_0'",
":",
"dtu_config",
".",
"ydtu_0",
",",
"'zdtu_0'",
":",
"dtu_config",
".",
"zdtu_0",
",",
"'zzz_info1'",
":",
"os",
".",
"getlogin",
"(",
")",
"+",
"'@'",
"+",
"socket",
".",
"gethostname",
"(",
")",
",",
"'zzz_info2'",
":",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
"}",
"slitlet_label",
"=",
"\"slitlet\"",
"+",
"str",
"(",
"islitlet",
")",
".",
"zfill",
"(",
"2",
")",
"if",
"slitlet_label",
"not",
"in",
"bounddict",
"[",
"'contents'",
"]",
":",
"bounddict",
"[",
"'contents'",
"]",
"[",
"slitlet_label",
"]",
"=",
"{",
"}",
"bounddict",
"[",
"'contents'",
"]",
"[",
"slitlet_label",
"]",
"[",
"date_obs",
"]",
"=",
"tmp_dict",
"if",
"debugplot",
"<",
"10",
":",
"print",
"(",
"\"\"",
")"
] |
Compute slitlet boundaries using continuum lamp images.
Parameters
----------
filename : string
    Input continuum lamp image.
grism : string
Grism name. It must be one in EMIR_VALID_GRISMS.
spfilter : string
Filter name. It must be one in EMIR_VALID_FILTERS.
list_slitlets : list of integers
Slitlet numbers to be updated.
size_x_medfilt : int
Window in the X (spectral) direction, in pixels, to apply
the 1d median filter in order to remove bad pixels.
size_y_savgol : int
Window in the Y (spatial) direction to be used when using the
1d Savitzky-Golay filter.
times_sigma_threshold : float
Times sigma to detect peaks in derivatives.
bounddict : dictionary of dictionaries
Structure to store the boundaries.
debugplot : int
Determines whether intermediate computations and/or plots are
displayed.
|
[
"Compute",
"slitlet",
"boundaries",
"using",
"continuum",
"lamp",
"images",
"."
] |
train
|
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/slitlet_boundaries_from_continuum.py#L247-L563
|
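The token stream above ends with the bounddict bookkeeping: each slitlet gets a zero-padded label and the per-observation record is filed under it by observation date. A minimal, self-contained sketch of that update pattern (the slitlet number, timestamp, and record contents below are made-up placeholders):

bounddict = {'contents': {}}
islitlet = 7                                 # hypothetical slitlet number
date_obs = '2016-01-01T00:00:00'             # assumed DATE-OBS style key
tmp_dict = {'rotang': 0.05}                  # stand-in for the full record
slitlet_label = "slitlet" + str(islitlet).zfill(2)
if slitlet_label not in bounddict['contents']:
    bounddict['contents'][slitlet_label] = {}
bounddict['contents'][slitlet_label][date_obs] = tmp_dict
print(bounddict['contents']['slitlet07'][date_obs])  # {'rotang': 0.05}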
BreakingBytes/simkit
|
simkit/core/calculators.py
|
index_registry
|
def index_registry(args, reg, ts=None, idx=None):
"""
Index into a :class:`~simkit.core.Registry` to return arguments
from :class:`~simkit.core.data_sources.DataRegistry` and
:class:`~simkit.core.outputs.OutputRegistry` based on the
calculation parameter file.
:param args: Arguments field from the calculation parameter file.
:param reg: Registry in which to index to get the arguments.
:type reg: :class:`~simkit.core.data_sources.DataRegistry`,
:class:`~simkit.core.outputs.OutputRegistry`
:param ts: Time step [units of time].
:param idx: [None] Index of current time step for dynamic calculations.
Required arguments for static and dynamic calculations are specified in the
calculation parameter file by the "args" key. Arguments can be from
either the data registry or the outputs registry, which is denoted by the
"data" and "outputs" keys. Each argument is a dictionary whose key is the
name of the argument in the formula specified and whose value can be one of
the following:
* The name of the argument in the registry ::
{"args": {"outputs": {"T_bypass": "T_bypass_diode"}}}
maps the formula argument "T_bypass" to the outputs registry item
"T_bypass_diode".
* A list with the name of the argument in the registry as the first element
and a negative integer denoting the index relative to the current
timestep as the second element ::
{"args": {"data": {"T_cell": ["Tcell", -1]}}}
indexes the previous timestep of "Tcell" from the data registry.
* A list with the name of the argument in the registry as the first element
and a list of positive integers denoting the index into the item from the
registry as the second element ::
{"args": {"data": {"cov": ["bypass_diode_covariance", [2]]}}}
indexes the third element of "bypass_diode_covariance".
* A list with the name of the argument in the registry as the first
element, a negative real number denoting the time relative to the current
timestep as the second element, and the units of the time as the third ::
{"args": {"data": {"T_cell": ["Tcell", -1, 'day']}}}
indexes the entire previous day of "Tcell".
"""
# TODO: move this to new Registry method or __getitem__
# TODO: replace idx with datetime object and use timeseries to interpolate
# into data, not necessary for outputs since that will conform to idx
rargs = dict.fromkeys(args) # make dictionary from arguments
# iterate over arguments
for k, v in args.iteritems():
# var ------------------ states ------------------
# idx ===== not None ===== ======= None =======
# isconstant True False None True False None
# is_dynamic no yes yes no no no
is_dynamic = idx and not reg.isconstant.get(v)
# switch based on string type instead of sequence
if isinstance(v, basestring):
# the default assumes the current index
rargs[k] = reg[v][idx] if is_dynamic else reg[v]
elif len(v) < 3:
if reg.isconstant[v[0]]:
# only get indices specified by v[1]
# tuples interpreted as a list of indices, see
# NumPy basic indexing: Dealing with variable
# numbers of indices within programs
rargs[k] = reg[v[0]][tuple(v[1])]
elif v[1] < 0:
# specified offset from current index
rargs[k] = reg[v[0]][idx + v[1]]
else:
# get indices specified by v[1] at current index
rargs[k] = reg[v[0]][idx][tuple(v[1])]
else:
# specified timedelta from current index
dt = 1 + (v[1] * UREG(str(v[2])) / ts).item()
# TODO: deal with fractions of timestep
rargs[k] = reg[v[0]][(idx + dt):(idx + 1)]
return rargs
|
python
|
def index_registry(args, reg, ts=None, idx=None):
"""
Index into a :class:`~simkit.core.Registry` to return arguments
from :class:`~simkit.core.data_sources.DataRegistry` and
:class:`~simkit.core.outputs.OutputRegistry` based on the
calculation parameter file.
:param args: Arguments field from the calculation parameter file.
:param reg: Registry in which to index to get the arguments.
:type reg: :class:`~simkit.core.data_sources.DataRegistry`,
:class:`~simkit.core.outputs.OutputRegistry`
:param ts: Time step [units of time].
:param idx: [None] Index of current time step for dynamic calculations.
Required arguments for static and dynamic calculations are specified in the
calculation parameter file by the "args" key. Arguments can be from
either the data registry or the outputs registry, which is denoted by the
"data" and "outputs" keys. Each argument is a dictionary whose key is the
name of the argument in the formula specified and whose value can be one of
the following:
* The name of the argument in the registry ::
{"args": {"outputs": {"T_bypass": "T_bypass_diode"}}}
maps the formula argument "T_bypass" to the outputs registry item
"T_bypass_diode".
* A list with the name of the argument in the registry as the first element
and a negative integer denoting the index relative to the current
timestep as the second element ::
{"args": {"data": {"T_cell": ["Tcell", -1]}}}
indexes the previous timestep of "Tcell" from the data registry.
* A list with the name of the argument in the registry as the first element
and a list of positive integers denoting the index into the item from the
registry as the second element ::
{"args": {"data": {"cov": ["bypass_diode_covariance", [2]]}}}
indexes the third element of "bypass_diode_covariance".
* A list with the name of the argument in the registry as the first
element, a negative real number denoting the time relative to the current
timestep as the second element, and the units of the time as the third ::
{"args": {"data": {"T_cell": ["Tcell", -1, 'day']}}}
indexes the entire previous day of "Tcell".
"""
# TODO: move this to new Registry method or __getitem__
# TODO: replace idx with datetime object and use timeseries to interpolate
# into data, not necessary for outputs since that will conform to idx
rargs = dict.fromkeys(args) # make dictionary from arguments
# iterate over arguments
for k, v in args.iteritems():
# var ------------------ states ------------------
# idx ===== not None ===== ======= None =======
# isconstant True False None True False None
# is_dynamic no yes yes no no no
is_dynamic = idx and not reg.isconstant.get(v)
# switch based on string type instead of sequence
if isinstance(v, basestring):
# the default assumes the current index
rargs[k] = reg[v][idx] if is_dynamic else reg[v]
elif len(v) < 3:
if reg.isconstant[v[0]]:
# only get indices specified by v[1]
# tuples interpreted as a list of indices, see
# NumPy basic indexing: Dealing with variable
# numbers of indices within programs
rargs[k] = reg[v[0]][tuple(v[1])]
elif v[1] < 0:
# specified offset from current index
rargs[k] = reg[v[0]][idx + v[1]]
else:
# get indices specified by v[1] at current index
rargs[k] = reg[v[0]][idx][tuple(v[1])]
else:
# specified timedelta from current index
dt = 1 + (v[1] * UREG(str(v[2])) / ts).item()
# TODO: deal with fractions of timestep
rargs[k] = reg[v[0]][(idx + dt):(idx + 1)]
return rargs
|
[
"def",
"index_registry",
"(",
"args",
",",
"reg",
",",
"ts",
"=",
"None",
",",
"idx",
"=",
"None",
")",
":",
"# TODO: move this to new Registry method or __getitem__",
"# TODO: replace idx with datetime object and use timeseries to interpolate",
"# into data, not necessary for outputs since that will conform to idx",
"rargs",
"=",
"dict",
".",
"fromkeys",
"(",
"args",
")",
"# make dictionary from arguments",
"# iterate over arguments",
"for",
"k",
",",
"v",
"in",
"args",
".",
"iteritems",
"(",
")",
":",
"# var ------------------ states ------------------",
"# idx ===== not None ===== ======= None =======",
"# isconstant True False None True False None",
"# is_dynamic no yes yes no no no",
"is_dynamic",
"=",
"idx",
"and",
"not",
"reg",
".",
"isconstant",
".",
"get",
"(",
"v",
")",
"# switch based on string type instead of sequence",
"if",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"# the default assumes the current index",
"rargs",
"[",
"k",
"]",
"=",
"reg",
"[",
"v",
"]",
"[",
"idx",
"]",
"if",
"is_dynamic",
"else",
"reg",
"[",
"v",
"]",
"elif",
"len",
"(",
"v",
")",
"<",
"3",
":",
"if",
"reg",
".",
"isconstant",
"[",
"v",
"[",
"0",
"]",
"]",
":",
"# only get indices specified by v[1]",
"# tuples interpreted as a list of indices, see",
"# NumPy basic indexing: Dealing with variable",
"# numbers of indices within programs",
"rargs",
"[",
"k",
"]",
"=",
"reg",
"[",
"v",
"[",
"0",
"]",
"]",
"[",
"tuple",
"(",
"v",
"[",
"1",
"]",
")",
"]",
"elif",
"v",
"[",
"1",
"]",
"<",
"0",
":",
"# specified offset from current index",
"rargs",
"[",
"k",
"]",
"=",
"reg",
"[",
"v",
"[",
"0",
"]",
"]",
"[",
"idx",
"+",
"v",
"[",
"1",
"]",
"]",
"else",
":",
"# get indices specified by v[1] at current index",
"rargs",
"[",
"k",
"]",
"=",
"reg",
"[",
"v",
"[",
"0",
"]",
"]",
"[",
"idx",
"]",
"[",
"tuple",
"(",
"v",
"[",
"1",
"]",
")",
"]",
"else",
":",
"# specified timedelta from current index",
"dt",
"=",
"1",
"+",
"(",
"v",
"[",
"1",
"]",
"*",
"UREG",
"(",
"str",
"(",
"v",
"[",
"2",
"]",
")",
")",
"/",
"ts",
")",
".",
"item",
"(",
")",
"# TODO: deal with fractions of timestep",
"rargs",
"[",
"k",
"]",
"=",
"reg",
"[",
"v",
"[",
"0",
"]",
"]",
"[",
"(",
"idx",
"+",
"dt",
")",
":",
"(",
"idx",
"+",
"1",
")",
"]",
"return",
"rargs"
] |
Index into a :class:`~simkit.core.Registry` to return arguments
from :class:`~simkit.core.data_sources.DataRegistry` and
:class:`~simkit.core.outputs.OutputRegistry` based on the
calculation parameter file.
:param args: Arguments field from the calculation parameter file.
:param reg: Registry in which to index to get the arguments.
:type reg: :class:`~simkit.core.data_sources.DataRegistry`,
:class:`~simkit.core.outputs.OutputRegistry`
:param ts: Time step [units of time].
:param idx: [None] Index of current time step for dynamic calculations.
Required arguments for static and dynamic calculations are specified in the
calculation parameter file by the "args" key. Arguments can be from
either the data registry or the outputs registry, which is denoted by the
"data" and "outputs" keys. Each argument is a dictionary whose key is the
name of the argument in the formula specified and whose value can be one of
the following:
* The name of the argument in the registry ::
{"args": {"outputs": {"T_bypass": "T_bypass_diode"}}}
maps the formula argument "T_bypass" to the outputs registry item
"T_bypass_diode".
* A list with the name of the argument in the registry as the first element
and a negative integer denoting the index relative to the current
timestep as the second element ::
{"args": {"data": {"T_cell": ["Tcell", -1]}}}
indexes the previous timestep of "Tcell" from the data registry.
* A list with the name of the argument in the registry as the first element
and a list of positive integers denoting the index into the item from the
registry as the second element ::
{"args": {"data": {"cov": ["bypass_diode_covariance", [2]]}}}
indexes the third element of "bypass_diode_covariance".
* A list with the name of the argument in the registry as the first
element, a negative real number denoting the time relative to the current
timestep as the second element, and the units of the time as the third ::
{"args": {"data": {"T_cell": ["Tcell", -1, 'day']}}}
indexes the entire previous day of "Tcell".
|
[
"Index",
"into",
"a",
":",
"class",
":",
"~simkit",
".",
"core",
".",
"Registry",
"to",
"return",
"arguments",
"from",
":",
"class",
":",
"~simkit",
".",
"core",
".",
"data_sources",
".",
"DataRegistry",
"and",
":",
"class",
":",
"~simkit",
".",
"core",
".",
"outputs",
".",
"OutputRegistry",
"based",
"on",
"the",
"calculation",
"parameter",
"file",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculators.py#L11-L96
|
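The index_registry docstring enumerates four shapes an argument specification can take. For quick reference, here are the same four forms as plain dictionaries, using the docstring's own example names ("Tcell", "T_bypass_diode", ...) rather than a real configuration; the commented call shows how one of them would be passed in:

args_current = {"outputs": {"T_bypass": "T_bypass_diode"}}             # by name
args_prev_step = {"data": {"T_cell": ["Tcell", -1]}}                   # index offset
args_item_index = {"data": {"cov": ["bypass_diode_covariance", [2]]}}  # sub-indexing
args_prev_day = {"data": {"T_cell": ["Tcell", -1, 'day']}}             # timedelta
# rargs = index_registry(args_prev_step["data"], data_reg, ts, idx)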
BreakingBytes/simkit
|
simkit/core/calculators.py
|
Calculator.get_covariance
|
def get_covariance(datargs, outargs, vargs, datvar, outvar):
"""
Get covariance matrix.
:param datargs: data arguments
:param outargs: output arguments
:param vargs: variable arguments
:param datvar: variance of data arguments
:param outvar: variance of output arguments
:return: covariance
"""
# number of formula arguments that are not constant
argn = len(vargs)
# number of observations must be the same for all vargs
nobs = 1
for m in xrange(argn):
a = vargs[m]
try:
a = datargs[a]
except (KeyError, TypeError):
a = outargs[a]
avar = outvar[a]
else:
avar = datvar[a]
for n in xrange(argn):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
c = avar.get(b, 0.0)
try:
nobs = max(nobs, len(c))
except (TypeError, ValueError):
LOGGER.debug('c of %s vs %s = %g', a, b, c)
# covariance matrix is initially zeros
cov = np.zeros((nobs, argn, argn))
# loop over arguments in both directions, fill in covariance
for m in xrange(argn):
a = vargs[m]
try:
a = datargs[a]
except (KeyError, TypeError):
a = outargs[a]
avar = outvar[a]
else:
avar = datvar[a]
for n in xrange(argn):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
cov[:, m, n] = avar.get(b, 0.0)
if nobs == 1:
cov = cov.squeeze() # squeeze out any extra dimensions
LOGGER.debug('covariance:\n%r', cov)
return cov
|
python
|
def get_covariance(datargs, outargs, vargs, datvar, outvar):
"""
Get covariance matrix.
:param datargs: data arguments
:param outargs: output arguments
:param vargs: variable arguments
:param datvar: variance of data arguments
:param outvar: variance of output arguments
:return: covariance
"""
# number of formula arguments that are not constant
argn = len(vargs)
# number of observations must be the same for all vargs
nobs = 1
for m in xrange(argn):
a = vargs[m]
try:
a = datargs[a]
except (KeyError, TypeError):
a = outargs[a]
avar = outvar[a]
else:
avar = datvar[a]
for n in xrange(argn):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
c = avar.get(b, 0.0)
try:
nobs = max(nobs, len(c))
except (TypeError, ValueError):
LOGGER.debug('c of %s vs %s = %g', a, b, c)
# covariance matrix is initially zeros
cov = np.zeros((nobs, argn, argn))
# loop over arguments in both directions, fill in covariance
for m in xrange(argn):
a = vargs[m]
try:
a = datargs[a]
except (KeyError, TypeError):
a = outargs[a]
avar = outvar[a]
else:
avar = datvar[a]
for n in xrange(argn):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
cov[:, m, n] = avar.get(b, 0.0)
if nobs == 1:
cov = cov.squeeze() # squeeze out any extra dimensions
LOGGER.debug('covariance:\n%r', cov)
return cov
|
[
"def",
"get_covariance",
"(",
"datargs",
",",
"outargs",
",",
"vargs",
",",
"datvar",
",",
"outvar",
")",
":",
"# number of formula arguments that are not constant",
"argn",
"=",
"len",
"(",
"vargs",
")",
"# number of observations must be the same for all vargs",
"nobs",
"=",
"1",
"for",
"m",
"in",
"xrange",
"(",
"argn",
")",
":",
"a",
"=",
"vargs",
"[",
"m",
"]",
"try",
":",
"a",
"=",
"datargs",
"[",
"a",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"a",
"=",
"outargs",
"[",
"a",
"]",
"avar",
"=",
"outvar",
"[",
"a",
"]",
"else",
":",
"avar",
"=",
"datvar",
"[",
"a",
"]",
"for",
"n",
"in",
"xrange",
"(",
"argn",
")",
":",
"b",
"=",
"vargs",
"[",
"n",
"]",
"try",
":",
"b",
"=",
"datargs",
"[",
"b",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"b",
"=",
"outargs",
"[",
"b",
"]",
"c",
"=",
"avar",
".",
"get",
"(",
"b",
",",
"0.0",
")",
"try",
":",
"nobs",
"=",
"max",
"(",
"nobs",
",",
"len",
"(",
"c",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'c of %s vs %s = %g'",
",",
"a",
",",
"b",
",",
"c",
")",
"# covariance matrix is initially zeros",
"cov",
"=",
"np",
".",
"zeros",
"(",
"(",
"nobs",
",",
"argn",
",",
"argn",
")",
")",
"# loop over arguments in both directions, fill in covariance",
"for",
"m",
"in",
"xrange",
"(",
"argn",
")",
":",
"a",
"=",
"vargs",
"[",
"m",
"]",
"try",
":",
"a",
"=",
"datargs",
"[",
"a",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"a",
"=",
"outargs",
"[",
"a",
"]",
"avar",
"=",
"outvar",
"[",
"a",
"]",
"else",
":",
"avar",
"=",
"datvar",
"[",
"a",
"]",
"for",
"n",
"in",
"xrange",
"(",
"argn",
")",
":",
"b",
"=",
"vargs",
"[",
"n",
"]",
"try",
":",
"b",
"=",
"datargs",
"[",
"b",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"b",
"=",
"outargs",
"[",
"b",
"]",
"cov",
"[",
":",
",",
"m",
",",
"n",
"]",
"=",
"avar",
".",
"get",
"(",
"b",
",",
"0.0",
")",
"if",
"nobs",
"==",
"1",
":",
"cov",
"=",
"cov",
".",
"squeeze",
"(",
")",
"# squeeze out any extra dimensions",
"LOGGER",
".",
"debug",
"(",
"'covariance:\\n%r'",
",",
"cov",
")",
"return",
"cov"
] |
Get covariance matrix.
:param datargs: data arguments
:param outargs: output arguments
:param vargs: variable arguments
:param datvar: variance of data arguments
:param outvar: variance of output arguments
:return: covariance
|
[
"Get",
"covariance",
"matrix",
"."
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculators.py#L106-L163
|
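get_covariance reads variances out of nested dictionaries in which variance[a][b] holds cov(a, b), with missing pairs defaulting to zero via .get(b, 0.0). A short sketch of that layout and of the (nobs, argn, argn) matrix it fills (names and numbers are invented):

import numpy as np

datvar = {'Tcell': {'Tcell': np.array([0.04, 0.05])},
          'irradiance': {'irradiance': np.array([0.01, 0.02])}}
c = datvar['Tcell'].get('irradiance', 0.0)    # -> 0.0, i.e. uncorrelated
nobs = max(1, len(datvar['Tcell']['Tcell']))  # observations per pair -> 2
argn = 2                                      # two non-constant arguments
cov = np.zeros((nobs, argn, argn))            # same shape as in the function
cov[:, 0, 0] = datvar['Tcell']['Tcell']
cov[:, 1, 1] = datvar['irradiance']['irradiance']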
BreakingBytes/simkit
|
simkit/core/calculators.py
|
Calculator.calculate
|
def calculate(cls, calc, formula_reg, data_reg, out_reg,
timestep=None, idx=None):
"""
Execute calculation
:param calc: calculation, with formula, args and return keys
:type calc: dict
:param formula_reg: Registry of formulas.
:type formula_reg: :class:`~simkit.core.FormulaRegistry`
:param data_reg: Data registry.
:type data_reg: :class:`~simkit.core.data_sources.DataRegistry`
:param out_reg: Outputs registry.
:type out_reg: :class:`~simkit.core.outputs.OutputRegistry`
:param timestep: simulation interval length [time], default is ``None``
:param idx: interval index, default is ``None``
:type idx: int
"""
# get the formula-key from each static calc
formula = calc['formula'] # name of formula in calculation
func = formula_reg[formula] # formula function object
fargs = formula_reg.args.get(formula, []) # formula arguments
constants = formula_reg.isconstant.get(formula) # constant args
# formula arguments that are not constant
vargs = [] if constants is None else [a for a in fargs if a not in constants]
args = calc['args'] # calculation arguments
# separate data and output arguments
datargs, outargs = args.get('data', {}), args.get('outputs', {})
data = index_registry(datargs, data_reg, timestep, idx)
outputs = index_registry(outargs, out_reg, timestep, idx)
kwargs = dict(data, **outputs) # combined data and output args
args = [kwargs.pop(a) for a in fargs if a in kwargs]
returns = calc['returns'] # return arguments
# if constants is None then the covariance should also be None
# TODO: except other values, eg: "all" to indicate no covariance
if constants is None:
cov = None # do not propagate uncertainty
else:
# get covariance matrix
cov = cls.get_covariance(datargs, outargs, vargs,
data_reg.variance, out_reg.variance)
# update kwargs with covariance if it exists
kwargs['__covariance__'] = cov
retval = func(*args, **kwargs) # calculate function
# update output registry with covariance and jacobian
if cov is not None:
# split uncertainty and jacobian from return values
cov, jac = retval[-2:]
retval = retval[:-2]
# scale covariance
scale = np.asarray(
[1 / r.m if isinstance(r, UREG.Quantity) else 1 / r
for r in retval]
) # use magnitudes if quantities
cov = (np.swapaxes((cov.T * scale), 0, 1) * scale).T
nret = len(retval) # number of return outputs
for m in xrange(nret):
a = returns[m] # name in output registry
out_reg.variance[a] = {}
out_reg.uncertainty[a] = {}
out_reg.jacobian[a] = {}
for n in xrange(nret):
b = returns[n]
out_reg.variance[a][b] = cov[:, m, n]
if a == b:
unc = np.sqrt(cov[:, m, n]) * 100 * UREG.percent
out_reg.uncertainty[a][b] = unc
for n in xrange(len(vargs)):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
out_reg.jacobian[a][b] = jac[:, m, n]
LOGGER.debug('%s cov:\n%r', a, out_reg.variance[a])
LOGGER.debug('%s jac:\n%r', a, out_reg.jacobian[a])
LOGGER.debug('%s unc:\n%r', a, out_reg.uncertainty[a])
# if there's only one return value, squeeze out extra dimensions
if len(retval) == 1:
retval = retval[0]
# put return values into output registry
if len(returns) > 1:
# more than one return, zip them up
if idx is None:
out_reg.update(zip(returns, retval))
else:
for k, v in zip(returns, retval):
out_reg[k][idx] = v
else:
# only one return, get it by index at 0
if idx is None:
out_reg[returns[0]] = retval
else:
out_reg[returns[0]][idx] = retval
|
python
|
def calculate(cls, calc, formula_reg, data_reg, out_reg,
timestep=None, idx=None):
"""
Execute calculation
:param calc: calculation, with formula, args and return keys
:type calc: dict
:param formula_reg: Registry of formulas.
:type formula_reg: :class:`~simkit.core.FormulaRegistry`
:param data_reg: Data registry.
:type data_reg: :class:`~simkit.core.data_sources.DataRegistry`
:param out_reg: Outputs registry.
:type out_reg: :class:`~simkit.core.outputs.OutputRegistry`
:param timestep: simulation interval length [time], default is ``None``
:param idx: interval index, default is ``None``
:type idx: int
"""
# get the formula-key from each static calc
formula = calc['formula'] # name of formula in calculation
func = formula_reg[formula] # formula function object
fargs = formula_reg.args.get(formula, []) # formula arguments
constants = formula_reg.isconstant.get(formula) # constant args
# formula arguments that are not constant
vargs = [] if constants is None else [a for a in fargs if a not in constants]
args = calc['args'] # calculation arguments
# separate data and output arguments
datargs, outargs = args.get('data', {}), args.get('outputs', {})
data = index_registry(datargs, data_reg, timestep, idx)
outputs = index_registry(outargs, out_reg, timestep, idx)
kwargs = dict(data, **outputs) # combined data and output args
args = [kwargs.pop(a) for a in fargs if a in kwargs]
returns = calc['returns'] # return arguments
# if constants is None then the covariance should also be None
# TODO: except other values, eg: "all" to indicate no covariance
if constants is None:
cov = None # do not propagate uncertainty
else:
# get covariance matrix
cov = cls.get_covariance(datargs, outargs, vargs,
data_reg.variance, out_reg.variance)
# update kwargs with covariance if it exists
kwargs['__covariance__'] = cov
retval = func(*args, **kwargs) # calculate function
# update output registry with covariance and jacobian
if cov is not None:
# split uncertainty and jacobian from return values
cov, jac = retval[-2:]
retval = retval[:-2]
# scale covariance
scale = np.asarray(
[1 / r.m if isinstance(r, UREG.Quantity) else 1 / r
for r in retval]
) # use magnitudes if quantities
cov = (np.swapaxes((cov.T * scale), 0, 1) * scale).T
nret = len(retval) # number of return outputs
for m in xrange(nret):
a = returns[m] # name in output registry
out_reg.variance[a] = {}
out_reg.uncertainty[a] = {}
out_reg.jacobian[a] = {}
for n in xrange(nret):
b = returns[n]
out_reg.variance[a][b] = cov[:, m, n]
if a == b:
unc = np.sqrt(cov[:, m, n]) * 100 * UREG.percent
out_reg.uncertainty[a][b] = unc
for n in xrange(len(vargs)):
b = vargs[n]
try:
b = datargs[b]
except (KeyError, TypeError):
b = outargs[b]
out_reg.jacobian[a][b] = jac[:, m, n]
LOGGER.debug('%s cov:\n%r', a, out_reg.variance[a])
LOGGER.debug('%s jac:\n%r', a, out_reg.jacobian[a])
LOGGER.debug('%s unc:\n%r', a, out_reg.uncertainty[a])
# if there's only one return value, squeeze out extra dimensions
if len(retval) == 1:
retval = retval[0]
# put return values into output registry
if len(returns) > 1:
# more than one return, zip them up
if idx is None:
out_reg.update(zip(returns, retval))
else:
for k, v in zip(returns, retval):
out_reg[k][idx] = v
else:
# only one return, get it by index at 0
if idx is None:
out_reg[returns[0]] = retval
else:
out_reg[returns[0]][idx] = retval
|
[
"def",
"calculate",
"(",
"cls",
",",
"calc",
",",
"formula_reg",
",",
"data_reg",
",",
"out_reg",
",",
"timestep",
"=",
"None",
",",
"idx",
"=",
"None",
")",
":",
"# get the formula-key from each static calc",
"formula",
"=",
"calc",
"[",
"'formula'",
"]",
"# name of formula in calculation",
"func",
"=",
"formula_reg",
"[",
"formula",
"]",
"# formula function object",
"fargs",
"=",
"formula_reg",
".",
"args",
".",
"get",
"(",
"formula",
",",
"[",
"]",
")",
"# formula arguments",
"constants",
"=",
"formula_reg",
".",
"isconstant",
".",
"get",
"(",
"formula",
")",
"# constant args",
"# formula arguments that are not constant",
"vargs",
"=",
"[",
"]",
"if",
"constants",
"is",
"None",
"else",
"[",
"a",
"for",
"a",
"in",
"fargs",
"if",
"a",
"not",
"in",
"constants",
"]",
"args",
"=",
"calc",
"[",
"'args'",
"]",
"# calculation arguments",
"# separate data and output arguments",
"datargs",
",",
"outargs",
"=",
"args",
".",
"get",
"(",
"'data'",
",",
"{",
"}",
")",
",",
"args",
".",
"get",
"(",
"'outputs'",
",",
"{",
"}",
")",
"data",
"=",
"index_registry",
"(",
"datargs",
",",
"data_reg",
",",
"timestep",
",",
"idx",
")",
"outputs",
"=",
"index_registry",
"(",
"outargs",
",",
"out_reg",
",",
"timestep",
",",
"idx",
")",
"kwargs",
"=",
"dict",
"(",
"data",
",",
"*",
"*",
"outputs",
")",
"# combined data and output args",
"args",
"=",
"[",
"kwargs",
".",
"pop",
"(",
"a",
")",
"for",
"a",
"in",
"fargs",
"if",
"a",
"in",
"kwargs",
"]",
"returns",
"=",
"calc",
"[",
"'returns'",
"]",
"# return arguments",
"# if constants is None then the covariance should also be None",
"# TODO: except other values, eg: \"all\" to indicate no covariance",
"if",
"constants",
"is",
"None",
":",
"cov",
"=",
"None",
"# do not propagate uncertainty",
"else",
":",
"# get covariance matrix",
"cov",
"=",
"cls",
".",
"get_covariance",
"(",
"datargs",
",",
"outargs",
",",
"vargs",
",",
"data_reg",
".",
"variance",
",",
"out_reg",
".",
"variance",
")",
"# update kwargs with covariance if it exists",
"kwargs",
"[",
"'__covariance__'",
"]",
"=",
"cov",
"retval",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# calculate function",
"# update output registry with covariance and jacobian",
"if",
"cov",
"is",
"not",
"None",
":",
"# split uncertainty and jacobian from return values",
"cov",
",",
"jac",
"=",
"retval",
"[",
"-",
"2",
":",
"]",
"retval",
"=",
"retval",
"[",
":",
"-",
"2",
"]",
"# scale covariance",
"scale",
"=",
"np",
".",
"asarray",
"(",
"[",
"1",
"/",
"r",
".",
"m",
"if",
"isinstance",
"(",
"r",
",",
"UREG",
".",
"Quantity",
")",
"else",
"1",
"/",
"r",
"for",
"r",
"in",
"retval",
"]",
")",
"# use magnitudes if quantities",
"cov",
"=",
"(",
"np",
".",
"swapaxes",
"(",
"(",
"cov",
".",
"T",
"*",
"scale",
")",
",",
"0",
",",
"1",
")",
"*",
"scale",
")",
".",
"T",
"nret",
"=",
"len",
"(",
"retval",
")",
"# number of return output",
"for",
"m",
"in",
"xrange",
"(",
"nret",
")",
":",
"a",
"=",
"returns",
"[",
"m",
"]",
"# name in output registry",
"out_reg",
".",
"variance",
"[",
"a",
"]",
"=",
"{",
"}",
"out_reg",
".",
"uncertainty",
"[",
"a",
"]",
"=",
"{",
"}",
"out_reg",
".",
"jacobian",
"[",
"a",
"]",
"=",
"{",
"}",
"for",
"n",
"in",
"xrange",
"(",
"nret",
")",
":",
"b",
"=",
"returns",
"[",
"n",
"]",
"out_reg",
".",
"variance",
"[",
"a",
"]",
"[",
"b",
"]",
"=",
"cov",
"[",
":",
",",
"m",
",",
"n",
"]",
"if",
"a",
"==",
"b",
":",
"unc",
"=",
"np",
".",
"sqrt",
"(",
"cov",
"[",
":",
",",
"m",
",",
"n",
"]",
")",
"*",
"100",
"*",
"UREG",
".",
"percent",
"out_reg",
".",
"uncertainty",
"[",
"a",
"]",
"[",
"b",
"]",
"=",
"unc",
"for",
"n",
"in",
"xrange",
"(",
"len",
"(",
"vargs",
")",
")",
":",
"b",
"=",
"vargs",
"[",
"n",
"]",
"try",
":",
"b",
"=",
"datargs",
"[",
"b",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"b",
"=",
"outargs",
"[",
"b",
"]",
"out_reg",
".",
"jacobian",
"[",
"a",
"]",
"[",
"b",
"]",
"=",
"jac",
"[",
":",
",",
"m",
",",
"n",
"]",
"LOGGER",
".",
"debug",
"(",
"'%s cov:\\n%r'",
",",
"a",
",",
"out_reg",
".",
"variance",
"[",
"a",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'%s jac:\\n%r'",
",",
"a",
",",
"out_reg",
".",
"jacobian",
"[",
"a",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'%s unc:\\n%r'",
",",
"a",
",",
"out_reg",
".",
"uncertainty",
"[",
"a",
"]",
")",
"# if there's only one return value, squeeze out extra dimensions",
"if",
"len",
"(",
"retval",
")",
"==",
"1",
":",
"retval",
"=",
"retval",
"[",
"0",
"]",
"# put return values into output registry",
"if",
"len",
"(",
"returns",
")",
">",
"1",
":",
"# more than one return, zip them up",
"if",
"idx",
"is",
"None",
":",
"out_reg",
".",
"update",
"(",
"zip",
"(",
"returns",
",",
"retval",
")",
")",
"else",
":",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"returns",
",",
"retval",
")",
":",
"out_reg",
"[",
"k",
"]",
"[",
"idx",
"]",
"=",
"v",
"else",
":",
"# only one return, get it by index at 0",
"if",
"idx",
"is",
"None",
":",
"out_reg",
"[",
"returns",
"[",
"0",
"]",
"]",
"=",
"retval",
"else",
":",
"out_reg",
"[",
"returns",
"[",
"0",
"]",
"]",
"[",
"idx",
"]",
"=",
"retval"
] |
Execute calculation
:param calc: calculation, with formula, args and return keys
:type calc: dict
:param formula_reg: Registry of formulas.
:type formula_reg: :class:`~simkit.core.FormulaRegistry`
:param data_reg: Data registry.
:type data_reg: :class:`~simkit.core.data_sources.DataRegistry`
:param out_reg: Outputs registry.
:type out_reg: :class:`~simkit.core.outputs.OutputRegistry`
:param timestep: simulation interval length [time], default is ``None``
:param idx: interval index, default is ``None``
:type idx: int
|
[
"Execute",
"calculation"
] |
train
|
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/calculators.py#L166-L258
|
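An easy-to-miss step in calculate() is the rescaling of the returned covariance from absolute to relative units by dividing by the return magnitudes. A standalone numerical check of that double-scaling expression (all numbers invented; plain floats stand in for pint quantities):

import numpy as np

retval = (np.array([2.0]), np.array([4.0]))     # two returns, one observation
cov = np.array([[[0.04, 0.02], [0.02, 0.16]]])  # absolute cov, shape (1, 2, 2)
scale = np.asarray([1 / r for r in retval])     # 1/|return| per output
cov_rel = (np.swapaxes(cov.T * scale, 0, 1) * scale).T
# cov_rel[0, m, n] == cov[0, m, n] / (retval[m] * retval[n])
print(np.sqrt(cov_rel[0].diagonal()) * 100)     # -> [10. 10.] percent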
guaix-ucm/pyemir
|
emirdrp/recipes/image/checks.py
|
check_photometry_categorize
|
def check_photometry_categorize(x, y, levels, tags=None):
'''Put every point in its category.
levels must be sorted.'''
x = numpy.asarray(x)
y = numpy.asarray(y)
ys = y.copy()
ys.sort()
# Mean of the upper half
m = ys[len(ys) // 2:].mean()
y /= m
m = 1.0
s = ys[len(ys) // 2:].std()
result = []
if tags is None:
tags = list(six.moves.range(len(levels) + 1))
for l, t in zip(levels, tags):
indc = y < l
if indc.any():
x1 = x[indc]
y1 = y[indc]
result.append((x1, y1, t))
x = x[~indc]
y = y[~indc]
else:
result.append((x, y, tags[-1]))
return result, (m, s)
|
python
|
def check_photometry_categorize(x, y, levels, tags=None):
'''Put every point in its category.
levels must be sorted.'''
x = numpy.asarray(x)
y = numpy.asarray(y)
ys = y.copy()
ys.sort()
# Mean of the upper half
m = ys[len(ys) // 2:].mean()
y /= m
m = 1.0
s = ys[len(ys) // 2:].std()
result = []
if tags is None:
tags = list(six.moves.range(len(levels) + 1))
for l, t in zip(levels, tags):
indc = y < l
if indc.any():
x1 = x[indc]
y1 = y[indc]
result.append((x1, y1, t))
x = x[~indc]
y = y[~indc]
else:
result.append((x, y, tags[-1]))
return result, (m, s)
|
[
"def",
"check_photometry_categorize",
"(",
"x",
",",
"y",
",",
"levels",
",",
"tags",
"=",
"None",
")",
":",
"x",
"=",
"numpy",
".",
"asarray",
"(",
"x",
")",
"y",
"=",
"numpy",
".",
"asarray",
"(",
"y",
")",
"ys",
"=",
"y",
".",
"copy",
"(",
")",
"ys",
".",
"sort",
"(",
")",
"# Mean of the upper half",
"m",
"=",
"ys",
"[",
"len",
"(",
"ys",
")",
"//",
"2",
":",
"]",
".",
"mean",
"(",
")",
"y",
"/=",
"m",
"m",
"=",
"1.0",
"s",
"=",
"ys",
"[",
"len",
"(",
"ys",
")",
"//",
"2",
":",
"]",
".",
"std",
"(",
")",
"result",
"=",
"[",
"]",
"if",
"tags",
"is",
"None",
":",
"tags",
"=",
"list",
"(",
"six",
".",
"moves",
".",
"range",
"(",
"len",
"(",
"levels",
")",
"+",
"1",
")",
")",
"for",
"l",
",",
"t",
"in",
"zip",
"(",
"levels",
",",
"tags",
")",
":",
"indc",
"=",
"y",
"<",
"l",
"if",
"indc",
".",
"any",
"(",
")",
":",
"x1",
"=",
"x",
"[",
"indc",
"]",
"y1",
"=",
"y",
"[",
"indc",
"]",
"result",
".",
"append",
"(",
"(",
"x1",
",",
"y1",
",",
"t",
")",
")",
"x",
"=",
"x",
"[",
"~",
"indc",
"]",
"y",
"=",
"y",
"[",
"~",
"indc",
"]",
"else",
":",
"result",
".",
"append",
"(",
"(",
"x",
",",
"y",
",",
"tags",
"[",
"-",
"1",
"]",
")",
")",
"return",
"result",
",",
"(",
"m",
",",
"s",
")"
] |
Put every point in its category.
levels must be sorted.
|
[
"Put",
"every",
"point",
"in",
"its",
"category",
"."
] |
train
|
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/recipes/image/checks.py#L173-L203
|
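A usage sketch for check_photometry_categorize on synthetic data, assuming the function can be imported from the module above; two sorted levels split the normalized fluxes into three tagged groups:

import numpy as np
from emirdrp.recipes.image.checks import check_photometry_categorize

x = np.arange(6, dtype=float)
y = np.array([0.2, 0.9, 1.0, 1.1, 0.5, 1.05])
result, (m, s) = check_photometry_categorize(x, y, levels=[0.7, 0.95])
for xs, ys, tag in result:
    print(tag, xs, np.round(ys, 2))  # tags 0 and 1 below each level, 2 = rest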
IdentityPython/oidcendpoint
|
src/oidcendpoint/endpoint.py
|
Endpoint.client_authentication
|
def client_authentication(self, request, auth=None, **kwargs):
"""
Do client authentication
:param endpoint_context: A
:py:class:`oidcendpoint.endpoint_context.SrvInfo` instance
:param request: Parsed request, a self.request_cls class instance
:param auth: Authorization info
:return: client_id or raise an exception
"""
return verify_client(self.endpoint_context, request, auth)
|
python
|
def client_authentication(self, request, auth=None, **kwargs):
"""
Do client authentication
:param endpoint_context: A
:py:class:`oidcendpoint.endpoint_context.SrvInfo` instance
:param request: Parsed request, a self.request_cls class instance
:param auth: Authorization info
:return: client_id or raise an exception
"""
return verify_client(self.endpoint_context, request, auth)
|
[
"def",
"client_authentication",
"(",
"self",
",",
"request",
",",
"auth",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"verify_client",
"(",
"self",
".",
"endpoint_context",
",",
"request",
",",
"auth",
")"
] |
Do client authentication
:param endpoint_context: A
:py:class:`oidcendpoint.endpoint_context.SrvInfo` instance
:param request: Parsed request, a self.request_cls class instance
:param auth: Authorization info
:return: client_id or raise an exception
|
[
"Do",
"client",
"authentication"
] |
train
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/endpoint.py#L146-L157
|
IdentityPython/oidcendpoint
|
src/oidcendpoint/endpoint.py
|
Endpoint.construct
|
def construct(self, response_args, request, **kwargs):
"""
Construct the response
:param response_args: response arguments
:param request: The parsed request, a self.request_cls class instance
:param kwargs: Extra keyword arguments
:return: An instance of the self.response_cls class
"""
response_args = self.do_pre_construct(response_args, request, **kwargs)
# logger.debug("kwargs: %s" % sanitize(kwargs))
response = self.response_cls(**response_args)
return self.do_post_construct(response, request, **kwargs)
|
python
|
def construct(self, response_args, request, **kwargs):
"""
Construct the response
:param response_args: response arguments
:param request: The parsed request, a self.request_cls class instance
:param kwargs: Extra keyword arguments
:return: An instance of the self.response_cls class
"""
response_args = self.do_pre_construct(response_args, request, **kwargs)
# logger.debug("kwargs: %s" % sanitize(kwargs))
response = self.response_cls(**response_args)
return self.do_post_construct(response, request, **kwargs)
|
[
"def",
"construct",
"(",
"self",
",",
"response_args",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"response_args",
"=",
"self",
".",
"do_pre_construct",
"(",
"response_args",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
"# logger.debug(\"kwargs: %s\" % sanitize(kwargs))",
"response",
"=",
"self",
".",
"response_cls",
"(",
"*",
"*",
"response_args",
")",
"return",
"self",
".",
"do_post_construct",
"(",
"response",
",",
"request",
",",
"*",
"*",
"kwargs",
")"
] |
Construct the response
:param response_args: response arguments
:param request: The parsed request, a self.request_cls class instance
:param kwargs: Extra keyword arguments
:return: An instance of the self.response_cls class
|
[
"Construct",
"the",
"response"
] |
train
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/endpoint.py#L189-L203
|
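construct() is a small template-method pipeline: pre-construct hook, response instantiation, post-construct hook. The toy class below mimics that flow end to end; it is purely illustrative and not part of oidcendpoint (real endpoints set response_cls to a proper response message class):

class ToyEndpoint:
    response_cls = dict  # stand-in for a real response message class

    def do_pre_construct(self, response_args, request, **kwargs):
        response_args.setdefault('iss', 'https://op.example.com')  # made-up issuer
        return response_args

    def do_post_construct(self, response, request, **kwargs):
        response['post_processed'] = True
        return response

    def construct(self, response_args, request, **kwargs):
        response_args = self.do_pre_construct(response_args, request, **kwargs)
        response = self.response_cls(**response_args)
        return self.do_post_construct(response, request, **kwargs)

print(ToyEndpoint().construct({'sub': 'alice'}, request=None))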
guaix-ucm/pyemir
|
emirdrp/processing/wavecal/set_wv_parameters.py
|
set_wv_parameters
|
def set_wv_parameters(filter_name, grism_name):
"""Set wavelength calibration parameters for rectified images.
Parameters
----------
filter_name : str
Filter name.
grism_name : str
Grism name.
Returns
-------
wv_parameters : dictionary
Python dictionary containing relevant wavelength calibration
parameters:
- crpix1_enlarged : float
- crval1_enlarged : float
- cdelt1_enlarged : float
- naxis1_enlarged: int
- islitlet_min : int
Minimum slitlet number.
- islitlet_max : int
Maximum slitlet number.
- nbrightlines : python list
List of integers containing the number of brightlines to be
used in the initial wavelength calibration.
- poly_crval1_linear : numpy.polynomial.Polynomial instance
Polynomial providing the value of CRVAL1_linear as a function
of csu_bar_slit_center.
- poly_cdelt1_linear : numpy.polynomial.Polynomial instance
Polynomial providing the value of CDELT1_linear as a function
of csu_bar_slit_center.
"""
# protections
if filter_name not in EMIR_VALID_FILTERS:
raise ValueError('Unexpected filter_name:', filter_name)
if grism_name not in EMIR_VALID_GRISMS:
raise ValueError('Unexpected grism_name:', grism_name)
# initialize output
wv_parameters = {}
# set parameters
wv_parameters['crpix1_enlarged'] = 1.0
if grism_name == "J" and filter_name == "J":
wv_parameters['islitlet_min'] = 2
wv_parameters['islitlet_max'] = 54
wv_parameters['nbrightlines'] = [18]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
1.25122231e+04,
-4.83412417e+00,
5.31657015e-04
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
7.73411692e-01,
-3.28055653e-05,
-1.20813896e-08
])
wv_parameters['crval1_enlarged'] = 11200.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 0.77 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = EMIR_NAXIS1_ENLARGED # pixels
wv_parameters['wvmin_expected'] = None
wv_parameters['wvmax_expected'] = None
wv_parameters['wvmin_useful'] = None
wv_parameters['wvmax_useful'] = None
elif grism_name == "H" and filter_name == "H":
wv_parameters['islitlet_min'] = 2
wv_parameters['islitlet_max'] = 54
wv_parameters['nbrightlines'] = [12]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
1.65536274e+04,
-7.63517173e+00,
7.74790265e-04
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
1.21327515e+00,
1.42140078e-05,
-1.27489119e-07
])
wv_parameters['crval1_enlarged'] = 14500.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 1.2200 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = EMIR_NAXIS1_ENLARGED # pixels
wv_parameters['wvmin_expected'] = None
wv_parameters['wvmax_expected'] = None
wv_parameters['wvmin_useful'] = None
wv_parameters['wvmax_useful'] = None
elif grism_name == "K" and filter_name == "Ksp":
wv_parameters['islitlet_min'] = 2
wv_parameters['islitlet_max'] = 54
wv_parameters['nbrightlines'] = [12]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
2.21044741e+04,
-1.08737529e+01,
9.05081653e-04
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
1.72696857e+00,
2.35009351e-05,
-1.02164228e-07
])
wv_parameters['crval1_enlarged'] = 19100.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 1.7300 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = EMIR_NAXIS1_ENLARGED # pixels
wv_parameters['wvmin_expected'] = None
wv_parameters['wvmax_expected'] = None
wv_parameters['wvmin_useful'] = None
wv_parameters['wvmax_useful'] = None
elif grism_name == "LR" and filter_name == "YJ":
wv_parameters['islitlet_min'] = 4
wv_parameters['islitlet_max'] = 55
wv_parameters['nbrightlines'] = [20]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
1.04272465e+04,
-2.33176855e+01,
6.55101267e-03
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
3.49037727e+00,
1.26008332e-03,
-4.66149678e-06
])
wv_parameters['crval1_enlarged'] = 8900.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 3.5600 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = 1270 # pixels
wv_parameters['wvmin_expected'] = 4000
wv_parameters['wvmax_expected'] = 15000
wv_parameters['wvmin_useful'] = 8900
wv_parameters['wvmax_useful'] = 13400
elif grism_name == "LR" and filter_name == "HK":
wv_parameters['islitlet_min'] = 4
wv_parameters['islitlet_max'] = 55
wv_parameters['nbrightlines'] = [25]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
2.00704978e+04,
-4.07702886e+01,
-5.95247468e-03
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
6.54247758e+00,
2.09061196e-03,
-2.48206609e-06
])
wv_parameters['crval1_enlarged'] = 14500.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 6.83 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = 1435 # pixels
wv_parameters['wvmin_expected'] = 8000
wv_parameters['wvmax_expected'] = 30000
wv_parameters['wvmin_useful'] = 14500
wv_parameters['wvmax_useful'] = 24300
else:
print("filter_name..:", filter_name)
print("grism_name...:", grism_name)
raise ValueError("invalid filter_name and grism_name combination")
return wv_parameters
|
python
|
def set_wv_parameters(filter_name, grism_name):
"""Set wavelength calibration parameters for rectified images.
Parameters
----------
filter_name : str
Filter name.
grism_name : str
Grism name.
Returns
-------
wv_parameters : dictionary
Python dictionary containing relevant wavelength calibration
parameters:
- crpix1_enlarged : float
- crval1_enlarged : float
- cdelt1_enlarged : float
- naxis1_enlarged: int
- islitlet_min : int
Minimum slitlet number.
- islitlet_max : int
Maximum slitlet number.
- nbrightlines : python list
List of integers containing the number of brightlines to be
used in the initial wavelength calibration.
- poly_crval1_linear : numpy.polynomial.Polynomial instance
Polynomial providing the value of CRVAL1_linear as a function
of csu_bar_slit_center.
- poly_cdelt1_linear : numpy.polynomial.Polynomial instance
Polynomial providing the value of CDELT1_linear as a function
of csu_bar_slit_center.
"""
# protections
if filter_name not in EMIR_VALID_FILTERS:
raise ValueError('Unexpected filter_name:', filter_name)
if grism_name not in EMIR_VALID_GRISMS:
raise ValueError('Unexpected grism_name:', grism_name)
# initialize output
wv_parameters = {}
# set parameters
wv_parameters['crpix1_enlarged'] = 1.0
if grism_name == "J" and filter_name == "J":
wv_parameters['islitlet_min'] = 2
wv_parameters['islitlet_max'] = 54
wv_parameters['nbrightlines'] = [18]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
1.25122231e+04,
-4.83412417e+00,
5.31657015e-04
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
7.73411692e-01,
-3.28055653e-05,
-1.20813896e-08
])
wv_parameters['crval1_enlarged'] = 11200.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 0.77 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = EMIR_NAXIS1_ENLARGED # pixels
wv_parameters['wvmin_expected'] = None
wv_parameters['wvmax_expected'] = None
wv_parameters['wvmin_useful'] = None
wv_parameters['wvmax_useful'] = None
elif grism_name == "H" and filter_name == "H":
wv_parameters['islitlet_min'] = 2
wv_parameters['islitlet_max'] = 54
wv_parameters['nbrightlines'] = [12]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
1.65536274e+04,
-7.63517173e+00,
7.74790265e-04
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
1.21327515e+00,
1.42140078e-05,
-1.27489119e-07
])
wv_parameters['crval1_enlarged'] = 14500.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 1.2200 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = EMIR_NAXIS1_ENLARGED # pixels
wv_parameters['wvmin_expected'] = None
wv_parameters['wvmax_expected'] = None
wv_parameters['wvmin_useful'] = None
wv_parameters['wvmax_useful'] = None
elif grism_name == "K" and filter_name == "Ksp":
wv_parameters['islitlet_min'] = 2
wv_parameters['islitlet_max'] = 54
wv_parameters['nbrightlines'] = [12]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
2.21044741e+04,
-1.08737529e+01,
9.05081653e-04
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
1.72696857e+00,
2.35009351e-05,
-1.02164228e-07
])
wv_parameters['crval1_enlarged'] = 19100.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 1.7300 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = EMIR_NAXIS1_ENLARGED # pixels
wv_parameters['wvmin_expected'] = None
wv_parameters['wvmax_expected'] = None
wv_parameters['wvmin_useful'] = None
wv_parameters['wvmax_useful'] = None
elif grism_name == "LR" and filter_name == "YJ":
wv_parameters['islitlet_min'] = 4
wv_parameters['islitlet_max'] = 55
wv_parameters['nbrightlines'] = [20]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
1.04272465e+04,
-2.33176855e+01,
6.55101267e-03
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
3.49037727e+00,
1.26008332e-03,
-4.66149678e-06
])
wv_parameters['crval1_enlarged'] = 8900.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 3.5600 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = 1270 # pixels
wv_parameters['wvmin_expected'] = 4000
wv_parameters['wvmax_expected'] = 15000
wv_parameters['wvmin_useful'] = 8900
wv_parameters['wvmax_useful'] = 13400
elif grism_name == "LR" and filter_name == "HK":
wv_parameters['islitlet_min'] = 4
wv_parameters['islitlet_max'] = 55
wv_parameters['nbrightlines'] = [25]
wv_parameters['poly_crval1_linear'] = np.polynomial.Polynomial([
2.00704978e+04,
-4.07702886e+01,
-5.95247468e-03
])
wv_parameters['poly_cdelt1_linear'] = np.polynomial.Polynomial([
6.54247758e+00,
2.09061196e-03,
-2.48206609e-06
])
wv_parameters['crval1_enlarged'] = 14500.0000 # Angstroms
wv_parameters['cdelt1_enlarged'] = 6.83 # Angstroms/pixel
wv_parameters['naxis1_enlarged'] = 1435 # pixels
wv_parameters['wvmin_expected'] = 8000
wv_parameters['wvmax_expected'] = 30000
wv_parameters['wvmin_useful'] = 14500
wv_parameters['wvmax_useful'] = 24300
else:
print("filter_name..:", filter_name)
print("grism_name...:", grism_name)
raise ValueError("invalid filter_name and grism_name combination")
return wv_parameters
|
[
"def",
"set_wv_parameters",
"(",
"filter_name",
",",
"grism_name",
")",
":",
"# protections",
"if",
"filter_name",
"not",
"in",
"EMIR_VALID_FILTERS",
":",
"raise",
"ValueError",
"(",
"'Unexpected filter_name:'",
",",
"filter_name",
")",
"if",
"grism_name",
"not",
"in",
"EMIR_VALID_GRISMS",
":",
"raise",
"ValueError",
"(",
"'Unexpected grism_name:'",
",",
"grism_name",
")",
"# intialize output",
"wv_parameters",
"=",
"{",
"}",
"# set parameters",
"wv_parameters",
"[",
"'crpix1_enlarged'",
"]",
"=",
"1.0",
"if",
"grism_name",
"==",
"\"J\"",
"and",
"filter_name",
"==",
"\"J\"",
":",
"wv_parameters",
"[",
"'islitlet_min'",
"]",
"=",
"2",
"wv_parameters",
"[",
"'islitlet_max'",
"]",
"=",
"54",
"wv_parameters",
"[",
"'nbrightlines'",
"]",
"=",
"[",
"18",
"]",
"wv_parameters",
"[",
"'poly_crval1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"1.25122231e+04",
",",
"-",
"4.83412417e+00",
",",
"5.31657015e-04",
"]",
")",
"wv_parameters",
"[",
"'poly_cdelt1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"7.73411692e-01",
",",
"-",
"3.28055653e-05",
",",
"-",
"1.20813896e-08",
"]",
")",
"wv_parameters",
"[",
"'crval1_enlarged'",
"]",
"=",
"11200.0000",
"# Angstroms",
"wv_parameters",
"[",
"'cdelt1_enlarged'",
"]",
"=",
"0.77",
"# Angstroms/pixel",
"wv_parameters",
"[",
"'naxis1_enlarged'",
"]",
"=",
"EMIR_NAXIS1_ENLARGED",
"# pixels",
"wv_parameters",
"[",
"'wvmin_expected'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmax_expected'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmin_useful'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmax_useful'",
"]",
"=",
"None",
"elif",
"grism_name",
"==",
"\"H\"",
"and",
"filter_name",
"==",
"\"H\"",
":",
"wv_parameters",
"[",
"'islitlet_min'",
"]",
"=",
"2",
"wv_parameters",
"[",
"'islitlet_max'",
"]",
"=",
"54",
"wv_parameters",
"[",
"'nbrightlines'",
"]",
"=",
"[",
"12",
"]",
"wv_parameters",
"[",
"'poly_crval1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"1.65536274e+04",
",",
"-",
"7.63517173e+00",
",",
"7.74790265e-04",
"]",
")",
"wv_parameters",
"[",
"'poly_cdelt1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"1.21327515e+00",
",",
"1.42140078e-05",
",",
"-",
"1.27489119e-07",
"]",
")",
"wv_parameters",
"[",
"'crval1_enlarged'",
"]",
"=",
"14500.0000",
"# Angstroms",
"wv_parameters",
"[",
"'cdelt1_enlarged'",
"]",
"=",
"1.2200",
"# Angstroms/pixel",
"wv_parameters",
"[",
"'naxis1_enlarged'",
"]",
"=",
"EMIR_NAXIS1_ENLARGED",
"# pixels",
"wv_parameters",
"[",
"'wvmin_expected'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmax_expected'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmin_useful'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmax_useful'",
"]",
"=",
"None",
"elif",
"grism_name",
"==",
"\"K\"",
"and",
"filter_name",
"==",
"\"Ksp\"",
":",
"wv_parameters",
"[",
"'islitlet_min'",
"]",
"=",
"2",
"wv_parameters",
"[",
"'islitlet_max'",
"]",
"=",
"54",
"wv_parameters",
"[",
"'nbrightlines'",
"]",
"=",
"[",
"12",
"]",
"wv_parameters",
"[",
"'poly_crval1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"2.21044741e+04",
",",
"-",
"1.08737529e+01",
",",
"9.05081653e-04",
"]",
")",
"wv_parameters",
"[",
"'poly_cdelt1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"1.72696857e+00",
",",
"2.35009351e-05",
",",
"-",
"1.02164228e-07",
"]",
")",
"wv_parameters",
"[",
"'crval1_enlarged'",
"]",
"=",
"19100.0000",
"# Angstroms",
"wv_parameters",
"[",
"'cdelt1_enlarged'",
"]",
"=",
"1.7300",
"# Angstroms/pixel",
"wv_parameters",
"[",
"'naxis1_enlarged'",
"]",
"=",
"EMIR_NAXIS1_ENLARGED",
"# pixels",
"wv_parameters",
"[",
"'wvmin_expected'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmax_expected'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmin_useful'",
"]",
"=",
"None",
"wv_parameters",
"[",
"'wvmax_useful'",
"]",
"=",
"None",
"elif",
"grism_name",
"==",
"\"LR\"",
"and",
"filter_name",
"==",
"\"YJ\"",
":",
"wv_parameters",
"[",
"'islitlet_min'",
"]",
"=",
"4",
"wv_parameters",
"[",
"'islitlet_max'",
"]",
"=",
"55",
"wv_parameters",
"[",
"'nbrightlines'",
"]",
"=",
"[",
"20",
"]",
"wv_parameters",
"[",
"'poly_crval1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"1.04272465e+04",
",",
"-",
"2.33176855e+01",
",",
"6.55101267e-03",
"]",
")",
"wv_parameters",
"[",
"'poly_cdelt1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"3.49037727e+00",
",",
"1.26008332e-03",
",",
"-",
"4.66149678e-06",
"]",
")",
"wv_parameters",
"[",
"'crval1_enlarged'",
"]",
"=",
"8900.0000",
"# Angstroms",
"wv_parameters",
"[",
"'cdelt1_enlarged'",
"]",
"=",
"3.5600",
"# Angstroms/pixel",
"wv_parameters",
"[",
"'naxis1_enlarged'",
"]",
"=",
"1270",
"# pixels",
"wv_parameters",
"[",
"'wvmin_expected'",
"]",
"=",
"4000",
"wv_parameters",
"[",
"'wvmax_expected'",
"]",
"=",
"15000",
"wv_parameters",
"[",
"'wvmin_useful'",
"]",
"=",
"8900",
"wv_parameters",
"[",
"'wvmax_useful'",
"]",
"=",
"13400",
"elif",
"grism_name",
"==",
"\"LR\"",
"and",
"filter_name",
"==",
"\"HK\"",
":",
"wv_parameters",
"[",
"'islitlet_min'",
"]",
"=",
"4",
"wv_parameters",
"[",
"'islitlet_max'",
"]",
"=",
"55",
"wv_parameters",
"[",
"'nbrightlines'",
"]",
"=",
"[",
"25",
"]",
"wv_parameters",
"[",
"'poly_crval1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"2.00704978e+04",
",",
"-",
"4.07702886e+01",
",",
"-",
"5.95247468e-03",
"]",
")",
"wv_parameters",
"[",
"'poly_cdelt1_linear'",
"]",
"=",
"np",
".",
"polynomial",
".",
"Polynomial",
"(",
"[",
"6.54247758e+00",
",",
"2.09061196e-03",
",",
"-",
"2.48206609e-06",
"]",
")",
"wv_parameters",
"[",
"'crval1_enlarged'",
"]",
"=",
"14500.0000",
"# Angstroms",
"wv_parameters",
"[",
"'cdelt1_enlarged'",
"]",
"=",
"6.83",
"# Angstroms/pixel",
"wv_parameters",
"[",
"'naxis1_enlarged'",
"]",
"=",
"1435",
"# pixels",
"wv_parameters",
"[",
"'wvmin_expected'",
"]",
"=",
"8000",
"wv_parameters",
"[",
"'wvmax_expected'",
"]",
"=",
"30000",
"wv_parameters",
"[",
"'wvmin_useful'",
"]",
"=",
"14500",
"wv_parameters",
"[",
"'wvmax_useful'",
"]",
"=",
"24300",
"else",
":",
"print",
"(",
"\"filter_name..:\"",
",",
"filter_name",
")",
"print",
"(",
"\"grism_name...:\"",
",",
"grism_name",
")",
"raise",
"ValueError",
"(",
"\"invalid filter_name and grism_name combination\"",
")",
"return",
"wv_parameters"
] |
Set wavelength calibration parameters for rectified images.
Parameters
----------
filter_name : str
Filter name.
grism_name : str
Grism name.
Returns
-------
wv_parameters : dictionary
Python dictionary containing relevant wavelength calibration
parameters:
- crpix1_enlarged : float
- crval1_enlarged : float
- cdelt1_enlarged : float
- naxis1_enlarged: int
- islitlet_min : int
Minimum slitlet number.
- islitlet_max : int
Maximum slitlet number.
- nbrightlines : python list
List of integers containing the number of brightlines to be
used in the initial wavelength calibration.
- poly_crval1_linear : numpy.polynomial.Polynomial instance
Polynomial providing the value of CRVAL1_linear as a function
of csu_bar_slit_center.
- poly_cdelt1_linear : numpy.polynomial.Polynomial instance
Polynomial providing the value of CDELT1_linear as a function
of csu_bar_slit_center.
|
[
"Set",
"wavelength",
"calibration",
"parameters",
"for",
"rectified",
"images",
"."
] |
train
|
https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/set_wv_parameters.py#L31-L186
|
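The two polynomials returned by set_wv_parameters are meant to be evaluated at a slitlet's csu_bar_slit_center to get the linear wavelength solution. A hedged sketch using the J+J coefficients copied from above (the bar position 300.0 is an arbitrary example value):

import numpy as np

poly_crval1 = np.polynomial.Polynomial([1.25122231e+04,
                                        -4.83412417e+00,
                                        5.31657015e-04])
poly_cdelt1 = np.polynomial.Polynomial([7.73411692e-01,
                                        -3.28055653e-05,
                                        -1.20813896e-08])
csu_bar_slit_center = 300.0                                # arbitrary example
print('CRVAL1_linear:', poly_crval1(csu_bar_slit_center))  # Angstroms
print('CDELT1_linear:', poly_cdelt1(csu_bar_slit_center))  # Angstroms/pixel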
IdentityPython/oidcendpoint
|
src/oidcendpoint/oidc/authorization.py
|
inputs
|
def inputs(form_args):
"""
Creates list of input elements
"""
element = []
for name, value in form_args.items():
element.append(
'<input type="hidden" name="{}" value="{}"/>'.format(name, value))
return "\n".join(element)
|
python
|
def inputs(form_args):
"""
Creates list of input elements
"""
element = []
for name, value in form_args.items():
element.append(
'<input type="hidden" name="{}" value="{}"/>'.format(name, value))
return "\n".join(element)
|
[
"def",
"inputs",
"(",
"form_args",
")",
":",
"element",
"=",
"[",
"]",
"for",
"name",
",",
"value",
"in",
"form_args",
".",
"items",
"(",
")",
":",
"element",
".",
"append",
"(",
"'<input type=\"hidden\" name=\"{}\" value=\"{}\"/>'",
".",
"format",
"(",
"name",
",",
"value",
")",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"element",
")"
] |
Creates list of input elements
|
[
"Creates",
"list",
"of",
"input",
"elements"
] |
train
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L53-L61
|
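A quick usage sketch for inputs(), assuming it is importable from the module above; the field values are illustrative:

from oidcendpoint.oidc.authorization import inputs

print(inputs({'state': 'af0ifjsldkj', 'code': 'SplxlOBeZQQYbYS6WxSbIA'}))
# <input type="hidden" name="state" value="af0ifjsldkj"/>
# <input type="hidden" name="code" value="SplxlOBeZQQYbYS6WxSbIA"/>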
IdentityPython/oidcendpoint
|
src/oidcendpoint/oidc/authorization.py
|
verify_uri
|
def verify_uri(endpoint_context, request, uri_type, client_id=None):
"""
A redirect URI
MUST NOT contain a fragment
MAY contain a query component
:param endpoint_context:
:param request:
:param uri_type: redirect_uri/post_logout_redirect_uri
:return: An error response if the redirect URI is faulty otherwise
None
"""
try:
_cid = request["client_id"]
except KeyError:
_cid = client_id
if not _cid:
logger.error('No client id found')
raise UnknownClient('No client_id provided')
_redirect_uri = unquote(request[uri_type])
part = urlparse(_redirect_uri)
if part.fragment:
raise URIError("Contains fragment")
(_base, _query) = splitquery(_redirect_uri)
if _query:
_query = parse_qs(_query)
match = False
try:
values = endpoint_context.cdb[_cid]['{}s'.format(uri_type)]
except KeyError:
raise ValueError('No registered {}'.format(uri_type))
else:
for regbase, rquery in values:
# The URI MUST exactly match one of the Redirection URI
if _base == regbase:
# every registered query component must exist in the uri
if rquery:
if not _query:
raise ValueError('Missing query part')
for key, vals in rquery.items():
if key not in _query:
raise ValueError('"{}" not in query part'.format(key))
for val in vals:
if val not in _query[key]:
raise ValueError('{}={} value not in query part'.format(key, val))
# and vice versa, every query component in the uri
# must be registered
if _query:
if not rquery:
raise ValueError('No registered query part')
for key, vals in _query.items():
if key not in rquery:
raise ValueError('"{}" extra in query part'.format(key))
for val in vals:
if val not in rquery[key]:
raise ValueError('Extra {}={} value in query part'.format(key, val))
match = True
break
if not match:
raise RedirectURIError("Doesn't match any registered uris")
|
python
|
def verify_uri(endpoint_context, request, uri_type, client_id=None):
"""
A redirect URI
MUST NOT contain a fragment
MAY contain query component
:param endpoint_context:
:param request:
:param uri_type: redirect_uri/post_logout_redirect_uri
:return: An error response if the redirect URI is faulty otherwise
None
"""
try:
_cid = request["client_id"]
except KeyError:
_cid = client_id
if not _cid:
logger.error('No client id found')
raise UnknownClient('No client_id provided')
_redirect_uri = unquote(request[uri_type])
part = urlparse(_redirect_uri)
if part.fragment:
raise URIError("Contains fragment")
(_base, _query) = splitquery(_redirect_uri)
if _query:
_query = parse_qs(_query)
match = False
try:
values = endpoint_context.cdb[_cid]['{}s'.format(uri_type)]
except KeyError:
raise ValueError('No registered {}'.format(uri_type))
else:
for regbase, rquery in values:
# The URI MUST exactly match one of the Redirection URI
if _base == regbase:
# every registered query component must exist in the uri
if rquery:
if not _query:
raise ValueError('Missing query part')
for key, vals in rquery.items():
if key not in _query:
raise ValueError('"{}" not in query part'.format(key))
for val in vals:
if val not in _query[key]:
raise ValueError('{}={} value not in query part'.format(key, val))
# and vice versa, every query component in the uri
# must be registered
if _query:
if not rquery:
raise ValueError('No registered query part')
for key, vals in _query.items():
if key not in rquery:
raise ValueError('"{}" extra in query part'.format(key))
for val in vals:
if val not in rquery[key]:
raise ValueError('Extra {}={} value in query part'.format(key, val))
match = True
break
if not match:
raise RedirectURIError("Doesn't match any registered uris")
|
[
"def",
"verify_uri",
"(",
"endpoint_context",
",",
"request",
",",
"uri_type",
",",
"client_id",
"=",
"None",
")",
":",
"try",
":",
"_cid",
"=",
"request",
"[",
"\"client_id\"",
"]",
"except",
"KeyError",
":",
"_cid",
"=",
"client_id",
"if",
"not",
"_cid",
":",
"logger",
".",
"error",
"(",
"'No client id found'",
")",
"raise",
"UnknownClient",
"(",
"'No client_id provided'",
")",
"_redirect_uri",
"=",
"unquote",
"(",
"request",
"[",
"uri_type",
"]",
")",
"part",
"=",
"urlparse",
"(",
"_redirect_uri",
")",
"if",
"part",
".",
"fragment",
":",
"raise",
"URIError",
"(",
"\"Contains fragment\"",
")",
"(",
"_base",
",",
"_query",
")",
"=",
"splitquery",
"(",
"_redirect_uri",
")",
"if",
"_query",
":",
"_query",
"=",
"parse_qs",
"(",
"_query",
")",
"match",
"=",
"False",
"try",
":",
"values",
"=",
"endpoint_context",
".",
"cdb",
"[",
"_cid",
"]",
"[",
"'{}s'",
".",
"format",
"(",
"uri_type",
")",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'No registered {}'",
".",
"format",
"(",
"uri_type",
")",
")",
"else",
":",
"for",
"regbase",
",",
"rquery",
"in",
"values",
":",
"# The URI MUST exactly match one of the Redirection URI",
"if",
"_base",
"==",
"regbase",
":",
"# every registered query component must exist in the uri",
"if",
"rquery",
":",
"if",
"not",
"_query",
":",
"raise",
"ValueError",
"(",
"'Missing query part'",
")",
"for",
"key",
",",
"vals",
"in",
"rquery",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"_query",
":",
"raise",
"ValueError",
"(",
"'\"{}\" not in query part'",
".",
"format",
"(",
"key",
")",
")",
"for",
"val",
"in",
"vals",
":",
"if",
"val",
"not",
"in",
"_query",
"[",
"key",
"]",
":",
"raise",
"ValueError",
"(",
"'{}={} value not in query part'",
".",
"format",
"(",
"key",
",",
"val",
")",
")",
"# and vice versa, every query component in the uri",
"# must be registered",
"if",
"_query",
":",
"if",
"not",
"rquery",
":",
"raise",
"ValueError",
"(",
"'No registered query part'",
")",
"for",
"key",
",",
"vals",
"in",
"_query",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"rquery",
":",
"raise",
"ValueError",
"(",
"'\"{}\" extra in query part'",
".",
"format",
"(",
"key",
")",
")",
"for",
"val",
"in",
"vals",
":",
"if",
"val",
"not",
"in",
"rquery",
"[",
"key",
"]",
":",
"raise",
"ValueError",
"(",
"'Extra {}={} value in query part'",
".",
"format",
"(",
"key",
",",
"val",
")",
")",
"match",
"=",
"True",
"break",
"if",
"not",
"match",
":",
"raise",
"RedirectURIError",
"(",
"\"Doesn't match any registered uris\"",
")"
] |
A redirect URI
MUST NOT contain a fragment
MAY contain query component
:param endpoint_context:
:param request:
:param uri_type: redirect_uri/post_logout_redirect_uri
:return: An error response if the redirect URI is faulty otherwise
None
|
[
"A",
"redirect",
"URI",
"MUST",
"NOT",
"contain",
"a",
"fragment",
"MAY",
"contain",
"query",
"component"
] |
train
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L100-L168
|
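A usage sketch for the record above. Two observations follow from the code rather than the docstring: despite the ":return:" note, verify_uri raises (URIError, ValueError, UnknownClient or RedirectURIError) on failure and returns None on success; and each registered entry is expected to be a (base, query-dict) pair, matching the (regbase, rquery) unpacking. The client database stub below is invented for illustration:

# Minimal stand-in for the endpoint context; only .cdb is consulted here.
class FakeContext:
    cdb = {
        "client_1": {
            # (base, parsed-query-or-None) pairs, as unpacked into
            # (regbase, rquery) by verify_uri.
            "redirect_uris": [
                ("https://rp.example.org/cb", None),
                ("https://rp.example.org/cb2", {"foo": ["bar"]}),
            ],
        }
    }

request = {"client_id": "client_1",
           "redirect_uri": "https://rp.example.org/cb2?foo=bar"}

verify_uri(FakeContext(), request, "redirect_uri")  # matches, returns None

request["redirect_uri"] = "https://rp.example.org/cb#frag"
# verify_uri(FakeContext(), request, "redirect_uri")  # would raise URIError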
IdentityPython/oidcendpoint
|
src/oidcendpoint/oidc/authorization.py
|
get_uri
|
def get_uri(endpoint_context, request, uri_type):
""" verify that the redirect URI is reasonable
:param endpoint_context:
:param request: The Authorization request
:param uri_type: 'redirect_uri' or 'post_logout_redirect_uri'
:return: redirect_uri
"""
if uri_type in request:
verify_uri(endpoint_context, request, uri_type)
uri = request[uri_type]
else:
try:
_specs = endpoint_context.cdb[
str(request["client_id"])]["{}s".format(uri_type)]
except KeyError:
raise ParameterError(
"Missing {} and none registered".format(uri_type))
else:
if len(_specs) > 1:
raise ParameterError(
"Missing {} and more than one registered".format(uri_type))
else:
uri = join_query(*_specs[0])
return uri
|
python
|
def get_uri(endpoint_context, request, uri_type):
""" verify that the redirect URI is reasonable
:param endpoint_context:
:param request: The Authorization request
:param uri_type: 'redirect_uri' or 'post_logout_redirect_uri'
:return: redirect_uri
"""
if uri_type in request:
verify_uri(endpoint_context, request, uri_type)
uri = request[uri_type]
else:
try:
_specs = endpoint_context.cdb[
str(request["client_id"])]["{}s".format(uri_type)]
except KeyError:
raise ParameterError(
"Missing {} and none registered".format(uri_type))
else:
if len(_specs) > 1:
raise ParameterError(
"Missing {} and more than one registered".format(uri_type))
else:
uri = join_query(*_specs[0])
return uri
|
[
"def",
"get_uri",
"(",
"endpoint_context",
",",
"request",
",",
"uri_type",
")",
":",
"if",
"uri_type",
"in",
"request",
":",
"verify_uri",
"(",
"endpoint_context",
",",
"request",
",",
"uri_type",
")",
"uri",
"=",
"request",
"[",
"uri_type",
"]",
"else",
":",
"try",
":",
"_specs",
"=",
"endpoint_context",
".",
"cdb",
"[",
"str",
"(",
"request",
"[",
"\"client_id\"",
"]",
")",
"]",
"[",
"\"{}s\"",
".",
"format",
"(",
"uri_type",
")",
"]",
"except",
"KeyError",
":",
"raise",
"ParameterError",
"(",
"\"Missing {} and none registered\"",
".",
"format",
"(",
"uri_type",
")",
")",
"else",
":",
"if",
"len",
"(",
"_specs",
")",
">",
"1",
":",
"raise",
"ParameterError",
"(",
"\"Missing {} and more than one registered\"",
".",
"format",
"(",
"uri_type",
")",
")",
"else",
":",
"uri",
"=",
"join_query",
"(",
"*",
"_specs",
"[",
"0",
"]",
")",
"return",
"uri"
] |
verify that the redirect URI is reasonable
:param endpoint_context:
:param request: The Authorization request
:param uri_type: 'redirect_uri' or 'post_logout_redirect_uri'
:return: redirect_uri
|
[
"verify",
"that",
"the",
"redirect",
"URI",
"is",
"reasonable"
] |
train
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L184-L209
|
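A usage sketch for the record above, reusing the same kind of invented client database: when the request carries no redirect_uri, get_uri only succeeds if exactly one URI is registered, which it reassembles via join_query:

class FakeContext:
    cdb = {
        "client_1": {
            # Exactly one registered (base, query) pair, so the
            # fallback branch can reassemble it unambiguously.
            "redirect_uris": [("https://rp.example.org/cb", None)],
        }
    }

request = {"client_id": "client_1"}  # no redirect_uri supplied

uri = get_uri(FakeContext(), request, "redirect_uri")
# -> presumably "https://rp.example.org/cb", rebuilt by join_query(*_specs[0])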
IdentityPython/oidcendpoint
|
src/oidcendpoint/oidc/authorization.py
|
Authorization.post_authentication
|
def post_authentication(self, user, request, sid, **kwargs):
"""
Things that are done after a successful authentication.
:param user:
:param request:
:param sid:
:param kwargs:
:return: A dictionary with 'response_args'
"""
response_info = {}
# Do the authorization
try:
permission = self.endpoint_context.authz(
user, client_id=request['client_id'])
except ToOld as err:
return self.error_response(
response_info, 'access_denied',
'Authentication too old {}'.format(err.args))
except Exception as err:
return self.error_response(response_info, 'access_denied',
'{}'.format(err.args))
else:
try:
self.endpoint_context.sdb.update(sid, permission=permission)
except Exception as err:
return self.error_response(response_info, 'server_error',
'{}'.format(err.args))
logger.debug("response type: %s" % request["response_type"])
if self.endpoint_context.sdb.is_session_revoked(sid):
return self.error_response(response_info, "access_denied",
"Session is revoked")
response_info = create_authn_response(self, request, sid)
try:
redirect_uri = get_uri(self.endpoint_context, request, 'redirect_uri')
except (RedirectURIError, ParameterError) as err:
return self.error_response(response_info,
'invalid_request',
'{}'.format(err.args))
else:
response_info['return_uri'] = redirect_uri
# Must not use HTTP unless implicit grant type and native application
# info = self.aresp_check(response_info['response_args'], request)
# if isinstance(info, ResponseMessage):
# return info
_cookie = new_cookie(
self.endpoint_context, sub=user, sid=sid, state=request['state'],
client_id=request['client_id'],
cookie_name=self.endpoint_context.cookie_name['session'])
# Now about the response_mode. Should not be set if it's obvious
# from the response_type. Knows about 'query', 'fragment' and
# 'form_post'.
if "response_mode" in request:
try:
response_info = self.response_mode(request, **response_info)
except InvalidRequest as err:
return self.error_response(response_info,
'invalid_request',
'{}'.format(err.args))
response_info['cookie'] = [_cookie]
return response_info
|
python
|
def post_authentication(self, user, request, sid, **kwargs):
"""
Things that are done after a successful authentication.
:param user:
:param request:
:param sid:
:param kwargs:
:return: A dictionary with 'response_args'
"""
response_info = {}
# Do the authorization
try:
permission = self.endpoint_context.authz(
user, client_id=request['client_id'])
except ToOld as err:
return self.error_response(
response_info, 'access_denied',
'Authentication too old {}'.format(err.args))
except Exception as err:
return self.error_response(response_info, 'access_denied',
'{}'.format(err.args))
else:
try:
self.endpoint_context.sdb.update(sid, permission=permission)
except Exception as err:
return self.error_response(response_info, 'server_error',
'{}'.format(err.args))
logger.debug("response type: %s" % request["response_type"])
if self.endpoint_context.sdb.is_session_revoked(sid):
return self.error_response(response_info, "access_denied",
"Session is revoked")
response_info = create_authn_response(self, request, sid)
try:
redirect_uri = get_uri(self.endpoint_context, request, 'redirect_uri')
except (RedirectURIError, ParameterError) as err:
return self.error_response(response_info,
'invalid_request',
'{}'.format(err.args))
else:
response_info['return_uri'] = redirect_uri
# Must not use HTTP unless implicit grant type and native application
# info = self.aresp_check(response_info['response_args'], request)
# if isinstance(info, ResponseMessage):
# return info
_cookie = new_cookie(
self.endpoint_context, sub=user, sid=sid, state=request['state'],
client_id=request['client_id'],
cookie_name=self.endpoint_context.cookie_name['session'])
# Now about the response_mode. Should not be set if it's obvious
# from the response_type. Knows about 'query', 'fragment' and
# 'form_post'.
if "response_mode" in request:
try:
response_info = self.response_mode(request, **response_info)
except InvalidRequest as err:
return self.error_response(response_info,
'invalid_request',
'{}'.format(err.args))
response_info['cookie'] = [_cookie]
return response_info
|
[
"def",
"post_authentication",
"(",
"self",
",",
"user",
",",
"request",
",",
"sid",
",",
"*",
"*",
"kwargs",
")",
":",
"response_info",
"=",
"{",
"}",
"# Do the authorization",
"try",
":",
"permission",
"=",
"self",
".",
"endpoint_context",
".",
"authz",
"(",
"user",
",",
"client_id",
"=",
"request",
"[",
"'client_id'",
"]",
")",
"except",
"ToOld",
"as",
"err",
":",
"return",
"self",
".",
"error_response",
"(",
"response_info",
",",
"'access_denied'",
",",
"'Authentication to old {}'",
".",
"format",
"(",
"err",
".",
"args",
")",
")",
"except",
"Exception",
"as",
"err",
":",
"return",
"self",
".",
"error_response",
"(",
"response_info",
",",
"'access_denied'",
",",
"'{}'",
".",
"format",
"(",
"err",
".",
"args",
")",
")",
"else",
":",
"try",
":",
"self",
".",
"endpoint_context",
".",
"sdb",
".",
"update",
"(",
"sid",
",",
"permission",
"=",
"permission",
")",
"except",
"Exception",
"as",
"err",
":",
"return",
"self",
".",
"error_response",
"(",
"response_info",
",",
"'server_error'",
",",
"'{}'",
".",
"format",
"(",
"err",
".",
"args",
")",
")",
"logger",
".",
"debug",
"(",
"\"response type: %s\"",
"%",
"request",
"[",
"\"response_type\"",
"]",
")",
"if",
"self",
".",
"endpoint_context",
".",
"sdb",
".",
"is_session_revoked",
"(",
"sid",
")",
":",
"return",
"self",
".",
"error_response",
"(",
"response_info",
",",
"\"access_denied\"",
",",
"\"Session is revoked\"",
")",
"response_info",
"=",
"create_authn_response",
"(",
"self",
",",
"request",
",",
"sid",
")",
"try",
":",
"redirect_uri",
"=",
"get_uri",
"(",
"self",
".",
"endpoint_context",
",",
"request",
",",
"'redirect_uri'",
")",
"except",
"(",
"RedirectURIError",
",",
"ParameterError",
")",
"as",
"err",
":",
"return",
"self",
".",
"error_response",
"(",
"response_info",
",",
"'invalid_request'",
",",
"'{}'",
".",
"format",
"(",
"err",
".",
"args",
")",
")",
"else",
":",
"response_info",
"[",
"'return_uri'",
"]",
"=",
"redirect_uri",
"# Must not use HTTP unless implicit grant type and native application",
"# info = self.aresp_check(response_info['response_args'], request)",
"# if isinstance(info, ResponseMessage):",
"# return info",
"_cookie",
"=",
"new_cookie",
"(",
"self",
".",
"endpoint_context",
",",
"sub",
"=",
"user",
",",
"sid",
"=",
"sid",
",",
"state",
"=",
"request",
"[",
"'state'",
"]",
",",
"client_id",
"=",
"request",
"[",
"'client_id'",
"]",
",",
"cookie_name",
"=",
"self",
".",
"endpoint_context",
".",
"cookie_name",
"[",
"'session'",
"]",
")",
"# Now about the response_mode. Should not be set if it's obvious",
"# from the response_type. Knows about 'query', 'fragment' and",
"# 'form_post'.",
"if",
"\"response_mode\"",
"in",
"request",
":",
"try",
":",
"response_info",
"=",
"self",
".",
"response_mode",
"(",
"request",
",",
"*",
"*",
"response_info",
")",
"except",
"InvalidRequest",
"as",
"err",
":",
"return",
"self",
".",
"error_response",
"(",
"response_info",
",",
"'invalid_request'",
",",
"'{}'",
".",
"format",
"(",
"err",
".",
"args",
")",
")",
"response_info",
"[",
"'cookie'",
"]",
"=",
"[",
"_cookie",
"]",
"return",
"response_info"
] |
Things that are done after a successful authentication.
:param user:
:param request:
:param sid:
:param kwargs:
:return: A dictionary with 'response_args'
|
[
"Things",
"that",
"are",
"done",
"after",
"a",
"successful",
"authentication",
"."
] |
train
|
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/authorization.py#L568-L640
|
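A hedged sketch of the happy-path result shape for the record above; every value is invented, and the real contents come from create_authn_response, get_uri, new_cookie and, when requested, response_mode:

# Illustrative only: the dict shape post_authentication returns when
# nothing goes wrong and no response_mode post-processing applies.
response_info = {
    "response_args": {"code": "...", "state": "..."},    # create_authn_response
    "return_uri": "https://rp.example.org/cb",           # get_uri
    "cookie": ["<session cookie built by new_cookie>"],  # appended last
}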