Dataset columns:

    column             type        stats
    ------             ----        -----
    id                 int32       values 0 .. 252k
    repo               string      lengths 7 .. 55
    path               string      lengths 4 .. 127
    func_name          string      lengths 1 .. 88
    original_string    string      lengths 75 .. 19.8k
    language           string      1 distinct value
    code               string      lengths 75 .. 19.8k
    code_tokens        sequence    lengths 20 .. 707
    docstring          string      lengths 3 .. 17.3k
    docstring_tokens   sequence    lengths 3 .. 222
    sha                string      length 40
    url                string      lengths 87 .. 242
    docstring_summary  string      1 distinct value
    parameters         string      1 distinct value
    return_statement   string      1 distinct value
    argument_list      string      1 distinct value
    identifier         string      1 distinct value
    nwo                string      1 distinct value
    score              float32     constant -1
251,700
chovanecm/sacredboard
sacredboard/bootstrap.py
add_mongo_config_simple
python

def add_mongo_config_simple(app, connection_string, collection_name):
    """
    Configure the app to use MongoDB.

    :param app: Flask Application
    :type app: Flask
    :param connection_string: in format host:port:database or database
        (default: sacred)
    :type connection_string: str
    :param collection_name: Name of the collection
    :type collection_name: str
    """
    split_string = connection_string.split(":")
    config = {"host": "localhost", "port": 27017, "db": "sacred"}
    if len(split_string) > 0 and len(split_string[-1]) > 0:
        config["db"] = split_string[-1]
    if len(split_string) > 1:
        config["port"] = int(split_string[-2])
    if len(split_string) > 2:
        config["host"] = split_string[-3]
    app.config["data"] = PyMongoDataAccess.build_data_access(
        config["host"], config["port"], config["db"], collection_name)
[ "def", "add_mongo_config_simple", "(", "app", ",", "connection_string", ",", "collection_name", ")", ":", "split_string", "=", "connection_string", ".", "split", "(", "\":\"", ")", "config", "=", "{", "\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "27017", ",", "\"db\"", ":", "\"sacred\"", "}", "if", "len", "(", "split_string", ")", ">", "0", "and", "len", "(", "split_string", "[", "-", "1", "]", ")", ">", "0", ":", "config", "[", "\"db\"", "]", "=", "split_string", "[", "-", "1", "]", "if", "len", "(", "split_string", ")", ">", "1", ":", "config", "[", "\"port\"", "]", "=", "int", "(", "split_string", "[", "-", "2", "]", ")", "if", "len", "(", "split_string", ")", ">", "2", ":", "config", "[", "\"host\"", "]", "=", "split_string", "[", "-", "3", "]", "app", ".", "config", "[", "\"data\"", "]", "=", "PyMongoDataAccess", ".", "build_data_access", "(", "config", "[", "\"host\"", "]", ",", "config", "[", "\"port\"", "]", ",", "config", "[", "\"db\"", "]", ",", "collection_name", ")" ]
Configure the app to use MongoDB. :param app: Flask Application :type app: Flask :param connection_string: in format host:port:database or database (default: sacred) :type connection_string: str :param collection_name: Name of the collection :type collection_name: str
[ "Configure", "the", "app", "to", "use", "MongoDB", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/bootstrap.py#L161-L183
-1
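A standalone sketch of the connection-string convention used above, which reads fields right to left (database, then port, then host). The helper name is hypothetical, not part of sacredboard:

import pprint

def parse_mongo_connection_string(connection_string):
    # Hypothetical helper mirroring add_mongo_config_simple's parsing:
    # fields are read right to left -- db, then port, then host.
    parts = connection_string.split(":")
    config = {"host": "localhost", "port": 27017, "db": "sacred"}
    if len(parts) > 0 and len(parts[-1]) > 0:
        config["db"] = parts[-1]
    if len(parts) > 1:
        config["port"] = int(parts[-2])
    if len(parts) > 2:
        config["host"] = parts[-3]
    return config

pprint.pprint(parse_mongo_connection_string("sacred"))
# {'db': 'sacred', 'host': 'localhost', 'port': 27017}
pprint.pprint(parse_mongo_connection_string("db.example.com:27018:runs"))
# {'db': 'runs', 'host': 'db.example.com', 'port': 27018}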
251,701
chovanecm/sacredboard
sacredboard/bootstrap.py
add_mongo_config_with_uri
python

def add_mongo_config_with_uri(app, connection_string_uri, database_name,
                              collection_name):
    """
    Configure PyMongo with a MongoDB connection string.

    :param app: Flask application
    :param connection_string_uri: MongoDB connection string
    :param database_name: Sacred database name
    :param collection_name: Sacred's collection with runs
    :return:
    """
    app.config["data"] = PyMongoDataAccess.build_data_access_with_uri(
        connection_string_uri, database_name, collection_name
    )
[ "def", "add_mongo_config_with_uri", "(", "app", ",", "connection_string_uri", ",", "database_name", ",", "collection_name", ")", ":", "app", ".", "config", "[", "\"data\"", "]", "=", "PyMongoDataAccess", ".", "build_data_access_with_uri", "(", "connection_string_uri", ",", "database_name", ",", "collection_name", ")" ]
Configure PyMongo with a MongoDB connection string. :param app: Flask application :param connection_string_uri: MongoDB connection string :param database_name: Sacred database name :param collection_name: Sacred's collection with runs :return:
[ "Configure", "PyMongo", "with", "a", "MongoDB", "connection", "string", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/bootstrap.py#L186-L199
-1
251,702
chovanecm/sacredboard
sacredboard/app/process/tensorboard.py
stop_all_tensorboards
python

def stop_all_tensorboards():
    """Terminate all TensorBoard instances."""
    for process in Process.instances:
        print("Process '%s', running %d" % (process.command[0],
                                            process.is_running()))
        if process.is_running() and process.command[0] == "tensorboard":
            process.terminate()
[ "def", "stop_all_tensorboards", "(", ")", ":", "for", "process", "in", "Process", ".", "instances", ":", "print", "(", "\"Process '%s', running %d\"", "%", "(", "process", ".", "command", "[", "0", "]", ",", "process", ".", "is_running", "(", ")", ")", ")", "if", "process", ".", "is_running", "(", ")", "and", "process", ".", "command", "[", "0", "]", "==", "\"tensorboard\"", ":", "process", ".", "terminate", "(", ")" ]
Terminate all TensorBoard instances.
[ "Terminate", "all", "TensorBoard", "instances", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/process/tensorboard.py#L11-L17
-1
251,703
chovanecm/sacredboard
sacredboard/app/process/tensorboard.py
run_tensorboard
python

def run_tensorboard(logdir, listen_on="0.0.0.0", port=0,
                    tensorboard_args=None, timeout=10):
    """
    Launch a new TensorBoard instance.

    :param logdir: Path to a TensorFlow summary directory
    :param listen_on: The IP address TensorBoard should listen on.
    :param port: Port number to listen on. 0 for a random port.
    :param tensorboard_args: Additional TensorBoard arguments.
    :param timeout: Timeout in seconds after which a TimeoutError is raised.
    :type timeout: float
    :return: Returns the port TensorBoard is listening on.
    :raise UnexpectedOutputError
    :raise TensorboardNotFoundError
    :raise TimeoutError
    """
    if tensorboard_args is None:
        tensorboard_args = []
    tensorboard_instance = Process.create_process(
        TENSORBOARD_BINARY.split(" ")
        + ["--logdir", logdir, "--host", listen_on, "--port", str(port)]
        + tensorboard_args)
    try:
        tensorboard_instance.run()
    except FileNotFoundError as ex:
        raise TensorboardNotFoundError(ex)

    # Wait for a message that signals that TensorBoard has started.
    start = time.time()
    data = ""
    while time.time() - start < timeout:
        line = tensorboard_instance.read_line_stderr(time_limit=timeout)
        data += line
        if "at http://" in line:
            port = parse_port_from_tensorboard_output(line)
            # Good case
            return port
        elif "TensorBoard attempted to bind to port" in line:
            break
    tensorboard_instance.terminate()
    raise UnexpectedOutputError(
        data,
        expected="Confirmation that TensorBoard has started"
    )
[ "def", "run_tensorboard", "(", "logdir", ",", "listen_on", "=", "\"0.0.0.0\"", ",", "port", "=", "0", ",", "tensorboard_args", "=", "None", ",", "timeout", "=", "10", ")", ":", "if", "tensorboard_args", "is", "None", ":", "tensorboard_args", "=", "[", "]", "tensorboard_instance", "=", "Process", ".", "create_process", "(", "TENSORBOARD_BINARY", ".", "split", "(", "\" \"", ")", "+", "[", "\"--logdir\"", ",", "logdir", ",", "\"--host\"", ",", "listen_on", ",", "\"--port\"", ",", "str", "(", "port", ")", "]", "+", "tensorboard_args", ")", "try", ":", "tensorboard_instance", ".", "run", "(", ")", "except", "FileNotFoundError", "as", "ex", ":", "raise", "TensorboardNotFoundError", "(", "ex", ")", "# Wait for a message that signaliezes start of Tensorboard", "start", "=", "time", ".", "time", "(", ")", "data", "=", "\"\"", "while", "time", ".", "time", "(", ")", "-", "start", "<", "timeout", ":", "line", "=", "tensorboard_instance", ".", "read_line_stderr", "(", "time_limit", "=", "timeout", ")", "data", "+=", "line", "if", "\"at http://\"", "in", "line", ":", "port", "=", "parse_port_from_tensorboard_output", "(", "line", ")", "# Good case", "return", "port", "elif", "\"TensorBoard attempted to bind to port\"", "in", "line", ":", "break", "tensorboard_instance", ".", "terminate", "(", ")", "raise", "UnexpectedOutputError", "(", "data", ",", "expected", "=", "\"Confirmation that Tensorboard has started\"", ")" ]
Launch a new TensorBoard instance. :param logdir: Path to a TensorFlow summary directory :param listen_on: The IP address TensorBoard should listen on. :param port: Port number to listen on. 0 for a random port. :param tensorboard_args: Additional TensorBoard arguments. :param timeout: Timeout in seconds after which a TimeoutError is raised. :type timeout: float :return: Returns the port TensorBoard is listening on. :raise UnexpectedOutputError :raise TensorboardNotFoundError :raise TimeoutError
[ "Launch", "a", "new", "TensorBoard", "instance", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/process/tensorboard.py#L26-L68
-1
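A hedged usage sketch of the launcher above, assuming the tensorboard binary is installed, the sacredboard package is importable, and using an illustrative log directory:

from sacredboard.app.process.tensorboard import run_tensorboard

# Launch TensorBoard on a random free port (port=0) and wait up to
# 15 seconds for its startup message; returns the port it bound to.
port = run_tensorboard("/tmp/tf-logs", timeout=15)  # illustrative path
print("TensorBoard is listening on port %d" % port)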
251,704
chovanecm/sacredboard
sacredboard/app/process/tensorboard.py
parse_port_from_tensorboard_output
python

def parse_port_from_tensorboard_output(tensorboard_output: str) -> int:
    """
    Parse the TensorBoard port from its output message.

    :param tensorboard_output: Output message of TensorBoard in format
        TensorBoard 1.8.0 at http://martin-VirtualBox:36869
    :return: Returns the port TensorBoard is listening on.
    :raise UnexpectedOutputError
    """
    search = re.search("at http://[^:]+:([0-9]+)", tensorboard_output)
    if search is not None:
        port = search.group(1)
        return int(port)
    else:
        raise UnexpectedOutputError(
            tensorboard_output,
            "Address and port where TensorBoard has started,"
            " e.g. TensorBoard 1.8.0 at http://martin-VirtualBox:36869")
[ "def", "parse_port_from_tensorboard_output", "(", "tensorboard_output", ":", "str", ")", "->", "int", ":", "search", "=", "re", ".", "search", "(", "\"at http://[^:]+:([0-9]+)\"", ",", "tensorboard_output", ")", "if", "search", "is", "not", "None", ":", "port", "=", "search", ".", "group", "(", "1", ")", "return", "int", "(", "port", ")", "else", ":", "raise", "UnexpectedOutputError", "(", "tensorboard_output", ",", "\"Address and port where Tensorboard has started,\"", "\" e.g. TensorBoard 1.8.0 at http://martin-VirtualBox:36869\"", ")" ]
Parse the TensorBoard port from its output message. :param tensorboard_output: Output message of TensorBoard in format TensorBoard 1.8.0 at http://martin-VirtualBox:36869 :return: Returns the port TensorBoard is listening on. :raise UnexpectedOutputError
[ "Parse", "tensorboard", "port", "from", "its", "outputted", "message", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/process/tensorboard.py#L71-L86
-1
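A quick standalone check of the port-parsing regex, using the example message from the docstring:

import re

line = "TensorBoard 1.8.0 at http://martin-VirtualBox:36869"
match = re.search("at http://[^:]+:([0-9]+)", line)
print(int(match.group(1)))  # 36869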
251,705
chovanecm/sacredboard
sacredboard/app/data/pymongo/mongodb.py
PyMongoDataAccess.connect
python

def connect(self):
    """Initialize the database connection."""
    self._client = self._create_client()
    self._db = getattr(self._client, self._db_name)
    self._generic_dao = GenericDAO(self._client, self._db_name)
[ "def", "connect", "(", "self", ")", ":", "self", ".", "_client", "=", "self", ".", "_create_client", "(", ")", "self", ".", "_db", "=", "getattr", "(", "self", ".", "_client", ",", "self", ".", "_db_name", ")", "self", ".", "_generic_dao", "=", "GenericDAO", "(", "self", ".", "_client", ",", "self", ".", "_db_name", ")" ]
Initialize the database connection.
[ "Initialize", "the", "database", "connection", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/mongodb.py#L44-L48
-1
251,706
chovanecm/sacredboard
sacredboard/app/data/pymongo/mongodb.py
PyMongoDataAccess.build_data_access
python

def build_data_access(host, port, database_name, collection_name):
    """
    Create data access gateway.

    :param host: The database server to connect to.
    :type host: str
    :param port: Database port.
    :type port: int
    :param database_name: Database name.
    :type database_name: str
    :param collection_name: Name of the collection with Sacred runs.
    :type collection_name: str
    """
    return PyMongoDataAccess("mongodb://%s:%d" % (host, port),
                             database_name, collection_name)
[ "def", "build_data_access", "(", "host", ",", "port", ",", "database_name", ",", "collection_name", ")", ":", "return", "PyMongoDataAccess", "(", "\"mongodb://%s:%d\"", "%", "(", "host", ",", "port", ")", ",", "database_name", ",", "collection_name", ")" ]
Create data access gateway. :param host: The database server to connect to. :type host: str :param port: Database port. :type port: int :param database_name: Database name. :type database_name: str :param collection_name: Name of the collection with Sacred runs. :type collection_name: str
[ "Create", "data", "access", "gateway", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/mongodb.py#L55-L69
-1
251,707
chovanecm/sacredboard
sacredboard/app/webapi/routes.py
run_tensorboard
python

def run_tensorboard(run_id, tflog_id):
    """Launch TensorBoard for a given run ID and log ID of that run."""
    data = current_app.config["data"]
    # Optimistically assume the run exists...
    run = data.get_run_dao().get(run_id)

    base_dir = Path(run["experiment"]["base_dir"])
    log_dir = Path(run["info"]["tensorflow"]["logdirs"][tflog_id])

    # TODO ugly!!!
    if log_dir.is_absolute():
        path_to_log_dir = log_dir
    else:
        path_to_log_dir = base_dir.joinpath(log_dir)
    port = int(tensorboard.run_tensorboard(str(path_to_log_dir)))

    url_root = request.url_root
    url_parts = re.search("://([^:/]+)", url_root)
    redirect_to_address = url_parts.group(1)
    return redirect("http://%s:%d" % (redirect_to_address, port))
[ "def", "run_tensorboard", "(", "run_id", ",", "tflog_id", ")", ":", "data", "=", "current_app", ".", "config", "[", "\"data\"", "]", "# optimisticaly suppose the run exists...", "run", "=", "data", ".", "get_run_dao", "(", ")", ".", "get", "(", "run_id", ")", "base_dir", "=", "Path", "(", "run", "[", "\"experiment\"", "]", "[", "\"base_dir\"", "]", ")", "log_dir", "=", "Path", "(", "run", "[", "\"info\"", "]", "[", "\"tensorflow\"", "]", "[", "\"logdirs\"", "]", "[", "tflog_id", "]", ")", "# TODO ugly!!!", "if", "log_dir", ".", "is_absolute", "(", ")", ":", "path_to_log_dir", "=", "log_dir", "else", ":", "path_to_log_dir", "=", "base_dir", ".", "joinpath", "(", "log_dir", ")", "port", "=", "int", "(", "tensorboard", ".", "run_tensorboard", "(", "str", "(", "path_to_log_dir", ")", ")", ")", "url_root", "=", "request", ".", "url_root", "url_parts", "=", "re", ".", "search", "(", "\"://([^:/]+)\"", ",", "url_root", ")", "redirect_to_address", "=", "url_parts", ".", "group", "(", "1", ")", "return", "redirect", "(", "\"http://%s:%d\"", "%", "(", "redirect_to_address", ",", "port", ")", ")" ]
Launch TensorBoard for a given run ID and log ID of that run.
[ "Launch", "TensorBoard", "for", "a", "given", "run", "ID", "and", "log", "ID", "of", "that", "run", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/routes.py#L39-L56
-1
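The redirect above reuses whatever host name the browser used to reach Sacredboard; a standalone check of the host-extraction regex (the URL is illustrative):

import re

url_root = "http://localhost:5000/"  # what Flask's request.url_root might look like
host = re.search("://([^:/]+)", url_root).group(1)
print(host)  # localhost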
251,708
chovanecm/sacredboard
sacredboard/app/data/pymongo/metricsdao.py
MongoMetricsDAO.get
python

def get(self, run_id, metric_id):
    """
    Read a metric of the given id and run.

    The returned object has the following format (timestamps are datetime
    objects).

    .. code::

        {"steps": [0,1,20,40,...],
         "timestamps": [timestamp1,timestamp2,timestamp3,...],
         "values": [0,1,2,3,4,5,6,...],
         "name": "name of the metric",
         "metric_id": "metric_id",
         "run_id": "run_id"}

    :param run_id: ID of the Run that the metric belongs to.
    :param metric_id: The ID of the metric.
    :return: The whole metric as specified.
    :raise NotFoundError
    """
    run_id = self._parse_run_id(run_id)
    query = self._build_query(run_id, metric_id)
    row = self._read_metric_from_db(metric_id, run_id, query)
    metric = self._to_intermediary_object(row)
    return metric
[ "def", "get", "(", "self", ",", "run_id", ",", "metric_id", ")", ":", "run_id", "=", "self", ".", "_parse_run_id", "(", "run_id", ")", "query", "=", "self", ".", "_build_query", "(", "run_id", ",", "metric_id", ")", "row", "=", "self", ".", "_read_metric_from_db", "(", "metric_id", ",", "run_id", ",", "query", ")", "metric", "=", "self", ".", "_to_intermediary_object", "(", "row", ")", "return", "metric" ]
Read a metric of the given id and run. The returned object has the following format (timestamps are datetime objects). .. code:: {"steps": [0,1,20,40,...], "timestamps": [timestamp1,timestamp2,timestamp3,...], "values": [0,1,2,3,4,5,6,...], "name": "name of the metric", "metric_id": "metric_id", "run_id": "run_id"} :param run_id: ID of the Run that the metric belongs to. :param metric_id: The ID of the metric. :return: The whole metric as specified. :raise NotFoundError
[ "Read", "a", "metric", "of", "the", "given", "id", "and", "run", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/metricsdao.py#L29-L55
-1
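A sketch of the intermediary metric format described in the docstring above, with illustrative values (the name and IDs are placeholders, not real data):

import datetime

metric = {
    "steps": [0, 1, 20, 40],
    "timestamps": [datetime.datetime(2018, 1, 1, 12, 0, 0),
                   datetime.datetime(2018, 1, 1, 12, 0, 5),
                   datetime.datetime(2018, 1, 1, 12, 1, 0),
                   datetime.datetime(2018, 1, 1, 12, 2, 0)],
    "values": [0.9, 0.7, 0.4, 0.3],
    "name": "training.loss",   # placeholder metric name
    "metric_id": "metric_id",  # placeholder, as in the docstring
    "run_id": "run_id",        # placeholder, as in the docstring
}
for step, value in zip(metric["steps"], metric["values"]):
    print(step, value)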
251,709
chovanecm/sacredboard
sacredboard/app/data/pymongo/metricsdao.py
MongoMetricsDAO.delete
python

def delete(self, run_id):
    """
    Delete all metrics belonging to the given run.

    :param run_id: ID of the Run that the metric belongs to.
    """
    self.generic_dao.delete_record(
        self.metrics_collection_name,
        {"run_id": self._parse_run_id(run_id)})
[ "def", "delete", "(", "self", ",", "run_id", ")", ":", "self", ".", "generic_dao", ".", "delete_record", "(", "self", ".", "metrics_collection_name", ",", "{", "\"run_id\"", ":", "self", ".", "_parse_run_id", "(", "run_id", ")", "}", ")" ]
Delete all metrics belonging to the given run. :param run_id: ID of the Run that the metric belongs to.
[ "Delete", "all", "metrics", "belonging", "to", "the", "given", "run", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/pymongo/metricsdao.py#L57-L65
-1
251,710
chovanecm/sacredboard
sacredboard/app/business/runfacade.py
RunFacade.delete_run
python

def delete_run(self, run_id):
    """
    Delete run of the given run_id.

    :raise NotImplementedError If not supported by the backend.
    :raise DataSourceError General data source error.
    :raise NotFoundError The run was not found. (Some backends may succeed
        even if the run does not exist.)
    """
    ds = self.datastorage
    ds.get_metrics_dao().delete(run_id)
    # TODO: implement
    # ds.get_artifact_dao().delete(run_id)
    # ds.get_resource_dao().delete(run_id)
    ds.get_run_dao().delete(run_id)
[ "def", "delete_run", "(", "self", ",", "run_id", ")", ":", "ds", "=", "self", ".", "datastorage", "ds", ".", "get_metrics_dao", "(", ")", ".", "delete", "(", "run_id", ")", "# TODO: implement", "# ds.get_artifact_dao().delete(run_id)", "# ds.get_resource_dao().delete(run_id)", "ds", ".", "get_run_dao", "(", ")", ".", "delete", "(", "run_id", ")" ]
Delete run of the given run_id. :raise NotImplementedError If not supported by the backend. :raise DataSourceError General data source error. :raise NotFoundError The run was not found. (Some backends may succeed even if the run does not exist.)
[ "Delete", "run", "of", "the", "given", "run_id", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/business/runfacade.py#L11-L24
-1
251,711
chovanecm/sacredboard
sacredboard/app/data/filestorage/rundao.py
FileStoreRunDAO.get_runs
python

def get_runs(self, sort_by=None, sort_direction=None, start=0,
             limit=None, query={"type": "and", "filters": []}):
    """
    Return all runs in the file store.

    If a run is corrupt, e.g. missing files, it is skipped.

    :param sort_by: NotImplemented
    :param sort_direction: NotImplemented
    :param start: NotImplemented
    :param limit: NotImplemented
    :param query: NotImplemented
    :return: FileStoreCursor
    """
    all_run_ids = os.listdir(self.directory)

    def run_iterator():
        blacklist = set(["_sources"])
        for id in all_run_ids:
            if id in blacklist:
                continue
            try:
                yield self.get(id)
            except FileNotFoundError:
                # An incomplete experiment is a corrupt experiment.
                # Skip it for now.
                # TODO
                pass

    count = len(all_run_ids)
    return FileStoreCursor(count, run_iterator())
[ "def", "get_runs", "(", "self", ",", "sort_by", "=", "None", ",", "sort_direction", "=", "None", ",", "start", "=", "0", ",", "limit", "=", "None", ",", "query", "=", "{", "\"type\"", ":", "\"and\"", ",", "\"filters\"", ":", "[", "]", "}", ")", ":", "all_run_ids", "=", "os", ".", "listdir", "(", "self", ".", "directory", ")", "def", "run_iterator", "(", ")", ":", "blacklist", "=", "set", "(", "[", "\"_sources\"", "]", ")", "for", "id", "in", "all_run_ids", ":", "if", "id", "in", "blacklist", ":", "continue", "try", ":", "yield", "self", ".", "get", "(", "id", ")", "except", "FileNotFoundError", ":", "# An incomplete experiment is a corrupt experiment.", "# Skip it for now.", "# TODO", "pass", "count", "=", "len", "(", "all_run_ids", ")", "return", "FileStoreCursor", "(", "count", ",", "run_iterator", "(", ")", ")" ]
Return all runs in the file store. If a run is corrupt, e.g. missing files, it is skipped. :param sort_by: NotImplemented :param sort_direction: NotImplemented :param start: NotImplemented :param limit: NotImplemented :param query: NotImplemented :return: FileStoreCursor
[ "Return", "all", "runs", "in", "the", "file", "store", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/filestorage/rundao.py#L20-L49
-1
251,712
chovanecm/sacredboard
sacredboard/app/data/filestorage/rundao.py
FileStoreRunDAO.get
python

def get(self, run_id):
    """
    Return the run associated with a particular `run_id`.

    :param run_id:
    :return: dict
    :raises FileNotFoundError
    """
    config = _read_json(_path_to_config(self.directory, run_id))
    run = _read_json(_path_to_run(self.directory, run_id))
    try:
        info = _read_json(_path_to_info(self.directory, run_id))
    except IOError:
        info = {}
    return _create_run(run_id, run, config, info)
[ "def", "get", "(", "self", ",", "run_id", ")", ":", "config", "=", "_read_json", "(", "_path_to_config", "(", "self", ".", "directory", ",", "run_id", ")", ")", "run", "=", "_read_json", "(", "_path_to_run", "(", "self", ".", "directory", ",", "run_id", ")", ")", "try", ":", "info", "=", "_read_json", "(", "_path_to_info", "(", "self", ".", "directory", ",", "run_id", ")", ")", "except", "IOError", ":", "info", "=", "{", "}", "return", "_create_run", "(", "run_id", ",", "run", ",", "config", ",", "info", ")" ]
Return the run associated with a particular `run_id`. :param run_id: :return: dict :raises FileNotFoundError
[ "Return", "the", "run", "associated", "with", "a", "particular", "run_id", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/filestorage/rundao.py#L51-L66
-1
251,713
chovanecm/sacredboard
sacredboard/app/webapi/metrics.py
get_metric
python

def get_metric(run_id, metric_id):
    """
    Get a specific Sacred metric from the database.

    Returns a JSON response or HTTP 404 if not found.

    Issue: https://github.com/chovanecm/sacredboard/issues/58
    """
    data = current_app.config["data"]  # type: DataStorage
    dao = data.get_metrics_dao()
    metric = dao.get(run_id, metric_id)
    return Response(render_template(
        "api/metric.js",
        run_id=metric["run_id"], metric_id=metric["metric_id"],
        name=metric["name"],
        steps=metric["steps"],
        timestamps=metric["timestamps"],
        values=metric["values"]),
        mimetype="application/json")
[ "def", "get_metric", "(", "run_id", ",", "metric_id", ")", ":", "data", "=", "current_app", ".", "config", "[", "\"data\"", "]", "# type: DataStorage", "dao", "=", "data", ".", "get_metrics_dao", "(", ")", "metric", "=", "dao", ".", "get", "(", "run_id", ",", "metric_id", ")", "return", "Response", "(", "render_template", "(", "\"api/metric.js\"", ",", "run_id", "=", "metric", "[", "\"run_id\"", "]", ",", "metric_id", "=", "metric", "[", "\"metric_id\"", "]", ",", "name", "=", "metric", "[", "\"name\"", "]", ",", "steps", "=", "metric", "[", "\"steps\"", "]", ",", "timestamps", "=", "metric", "[", "\"timestamps\"", "]", ",", "values", "=", "metric", "[", "\"values\"", "]", ")", ",", "mimetype", "=", "\"application/json\"", ")" ]
Get a specific Sacred metric from the database. Returns a JSON response or HTTP 404 if not found. Issue: https://github.com/chovanecm/sacredboard/issues/58
[ "Get", "a", "specific", "Sacred", "metric", "from", "the", "database", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/metrics.py#L15-L34
-1
251,714
chovanecm/sacredboard
sacredboard/app/webapi/wsgi_server.py
ServerRunner.initialize
python

def initialize(self, app: Flask, app_config):
    """
    Prepare the server to run and determine the port.

    :param app: The Flask Application.
    :param app_config: Configuration dictionary. This module uses
        the `debug` (`True`/`False`) and `http.port` attributes.
    """
    debug = app_config["debug"]
    port = app_config["http.port"]
    if debug:
        self.started_on_port = port
        app.run(host="0.0.0.0", debug=True, port=port)
    else:
        for port in range(port, port + 50):
            self.http_server = WSGIServer(('0.0.0.0', port), app)
            try:
                self.http_server.start()
            except OSError:
                # try next port
                continue
            self.started_on_port = port
            break
[ "def", "initialize", "(", "self", ",", "app", ":", "Flask", ",", "app_config", ")", ":", "debug", "=", "app_config", "[", "\"debug\"", "]", "port", "=", "app_config", "[", "\"http.port\"", "]", "if", "debug", ":", "self", ".", "started_on_port", "=", "port", "app", ".", "run", "(", "host", "=", "\"0.0.0.0\"", ",", "debug", "=", "True", ",", "port", "=", "port", ")", "else", ":", "for", "port", "in", "range", "(", "port", ",", "port", "+", "50", ")", ":", "self", ".", "http_server", "=", "WSGIServer", "(", "(", "'0.0.0.0'", ",", "port", ")", ",", "app", ")", "try", ":", "self", ".", "http_server", ".", "start", "(", ")", "except", "OSError", ":", "# try next port", "continue", "self", ".", "started_on_port", "=", "port", "break" ]
Prepare the server to run and determine the port. :param app: The Flask Application. :param app_config: Configuration dictionary. This module uses the `debug` (`True`/`False`) and `http.port` attributes.
[ "Prepare", "the", "server", "to", "run", "and", "determine", "the", "port", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/wsgi_server.py#L14-L36
-1
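The non-debug branch above scans 50 consecutive ports until one binds. The same pattern sketched standalone with plain sockets from the standard library, instead of the WSGIServer used above (note that a port found this way can in principle be taken again before it is used):

import socket

def find_free_port(start_port, attempts=50):
    """Return the first port in [start_port, start_port + attempts) that binds."""
    for port in range(start_port, start_port + attempts):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind(("0.0.0.0", port))
        except OSError:
            # Port is taken; try the next one.
            continue
        finally:
            sock.close()
        return port
    raise OSError("no free port in range %d-%d"
                  % (start_port, start_port + attempts - 1))

print(find_free_port(5000))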
251,715
chovanecm/sacredboard
sacredboard/app/webapi/runs.py
api_run_delete
python

def api_run_delete(run_id):
    """Delete the given run and corresponding entities."""
    data = current_app.config["data"]  # type: DataStorage
    RunFacade(data).delete_run(run_id)
    return "DELETED run %s" % run_id
[ "def", "api_run_delete", "(", "run_id", ")", ":", "data", "=", "current_app", ".", "config", "[", "\"data\"", "]", "# type: DataStorage", "RunFacade", "(", "data", ")", ".", "delete_run", "(", "run_id", ")", "return", "\"DELETED run %s\"", "%", "run_id" ]
Delete the given run and corresponding entities.
[ "Delete", "the", "given", "run", "and", "corresponding", "entities", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/runs.py#L20-L24
-1
251,716
chovanecm/sacredboard
sacredboard/app/webapi/runs.py
api_run_get
python

def api_run_get(run_id):
    """Return a single run as a JSON object."""
    data = current_app.config["data"]
    run = data.get_run_dao().get(run_id)
    records_total = 1 if run is not None else 0
    if records_total == 0:
        return Response(
            render_template(
                "api/error.js",
                error_code=404,
                error_message="Run %s not found." % run_id),
            status=404,
            mimetype="application/json")
    records_filtered = records_total
    return Response(render_template("api/runs.js",
                                    runs=[run],
                                    draw=1,
                                    recordsTotal=records_total,
                                    recordsFiltered=records_filtered,
                                    full_object=True),
                    mimetype="application/json")
[ "def", "api_run_get", "(", "run_id", ")", ":", "data", "=", "current_app", ".", "config", "[", "\"data\"", "]", "run", "=", "data", ".", "get_run_dao", "(", ")", ".", "get", "(", "run_id", ")", "records_total", "=", "1", "if", "run", "is", "not", "None", "else", "0", "if", "records_total", "==", "0", ":", "return", "Response", "(", "render_template", "(", "\"api/error.js\"", ",", "error_code", "=", "404", ",", "error_message", "=", "\"Run %s not found.\"", "%", "run_id", ")", ",", "status", "=", "404", ",", "mimetype", "=", "\"application/json\"", ")", "records_filtered", "=", "records_total", "return", "Response", "(", "render_template", "(", "\"api/runs.js\"", ",", "runs", "=", "[", "run", "]", ",", "draw", "=", "1", ",", "recordsTotal", "=", "records_total", ",", "recordsFiltered", "=", "records_filtered", ",", "full_object", "=", "True", ")", ",", "mimetype", "=", "\"application/json\"", ")" ]
Return a single run as a JSON object.
[ "Return", "a", "single", "run", "as", "a", "JSON", "object", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/runs.py#L28-L46
-1
251,717
chovanecm/sacredboard
sacredboard/app/webapi/runs.py
parse_int_arg
python

def parse_int_arg(name, default):
    """Return a given URL parameter as int or return the default value."""
    return default if request.args.get(name) is None \
        else int(request.args.get(name))
[ "def", "parse_int_arg", "(", "name", ",", "default", ")", ":", "return", "default", "if", "request", ".", "args", ".", "get", "(", "name", ")", "is", "None", "else", "int", "(", "request", ".", "args", ".", "get", "(", "name", ")", ")" ]
Return a given URL parameter as int or return the default value.
[ "Return", "a", "given", "URL", "parameter", "as", "int", "or", "return", "the", "default", "value", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/runs.py#L49-L52
-1
251,718
chovanecm/sacredboard
sacredboard/app/webapi/runs.py
parse_query_filter
python

def parse_query_filter():
    """Parse the Run query filter from the URL as a dictionary."""
    query_string = request.args.get("queryFilter")
    if query_string is None:
        return {"type": "and", "filters": []}
    query = json.loads(query_string)
    assert type(query) == dict
    assert type(query.get("type")) == str
    return query
[ "def", "parse_query_filter", "(", ")", ":", "query_string", "=", "request", ".", "args", ".", "get", "(", "\"queryFilter\"", ")", "if", "query_string", "is", "None", ":", "return", "{", "\"type\"", ":", "\"and\"", ",", "\"filters\"", ":", "[", "]", "}", "query", "=", "json", ".", "loads", "(", "query_string", ")", "assert", "type", "(", "query", ")", "==", "dict", "assert", "type", "(", "query", ".", "get", "(", "\"type\"", ")", ")", "==", "str", "return", "query" ]
Parse the Run query filter from the URL as a dictionary.
[ "Parse", "the", "Run", "query", "filter", "from", "the", "URL", "as", "a", "dictionary", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/runs.py#L55-L63
-1
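What a client-supplied queryFilter might look like. The parser above only enforces that it is a JSON object with a string "type" key, so the filter entries below are illustrative, not a documented schema:

import json

query_filter = {
    "type": "and",
    "filters": [
        # illustrative filter clause; the real schema is backend-defined
        {"field": "status", "operator": "==", "value": "COMPLETED"},
    ],
}
# sent as e.g. GET /api/run?queryFilter=<url-encoded JSON>
parsed = json.loads(json.dumps(query_filter))
assert type(parsed) == dict
assert type(parsed.get("type")) == str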
251,719
chovanecm/sacredboard
sacredboard/app/webapi/runs.py
get_runs
python

def get_runs():
    """Get all runs, sort them and return a response."""
    data = current_app.config["data"]
    draw = parse_int_arg("draw", 1)
    start = parse_int_arg("start", 0)
    length = parse_int_arg("length", -1)
    length = length if length >= 0 else None
    order_column = request.args.get("order[0][column]")
    order_dir = request.args.get("order[0][dir]")
    query = parse_query_filter()
    if order_column is not None:
        order_column = \
            request.args.get("columns[%d][name]" % int(order_column))
        if order_column == "hostname":
            order_column = "host.hostname"

    runs = data.get_run_dao().get_runs(
        start=start, limit=length,
        sort_by=order_column, sort_direction=order_dir,
        query=query)
    # records_total should be the total size of the records in the database,
    # not what was returned
    records_total = runs.count()
    records_filtered = runs.count()
    return Response(render_template(
        "api/runs.js", runs=runs, draw=draw,
        recordsTotal=records_total,
        recordsFiltered=records_filtered),
        mimetype="application/json")
[ "def", "get_runs", "(", ")", ":", "data", "=", "current_app", ".", "config", "[", "\"data\"", "]", "draw", "=", "parse_int_arg", "(", "\"draw\"", ",", "1", ")", "start", "=", "parse_int_arg", "(", "\"start\"", ",", "0", ")", "length", "=", "parse_int_arg", "(", "\"length\"", ",", "-", "1", ")", "length", "=", "length", "if", "length", ">=", "0", "else", "None", "order_column", "=", "request", ".", "args", ".", "get", "(", "\"order[0][column]\"", ")", "order_dir", "=", "request", ".", "args", ".", "get", "(", "\"order[0][dir]\"", ")", "query", "=", "parse_query_filter", "(", ")", "if", "order_column", "is", "not", "None", ":", "order_column", "=", "request", ".", "args", ".", "get", "(", "\"columns[%d][name]\"", "%", "int", "(", "order_column", ")", ")", "if", "order_column", "==", "\"hostname\"", ":", "order_column", "=", "\"host.hostname\"", "runs", "=", "data", ".", "get_run_dao", "(", ")", ".", "get_runs", "(", "start", "=", "start", ",", "limit", "=", "length", ",", "sort_by", "=", "order_column", ",", "sort_direction", "=", "order_dir", ",", "query", "=", "query", ")", "# records_total should be the total size of the records in the database,", "# not what was returned", "records_total", "=", "runs", ".", "count", "(", ")", "records_filtered", "=", "runs", ".", "count", "(", ")", "return", "Response", "(", "render_template", "(", "\"api/runs.js\"", ",", "runs", "=", "runs", ",", "draw", "=", "draw", ",", "recordsTotal", "=", "records_total", ",", "recordsFiltered", "=", "records_filtered", ")", ",", "mimetype", "=", "\"application/json\"", ")" ]
Get all runs, sort them and return a response.
[ "Get", "all", "runs", "sort", "it", "and", "return", "a", "response", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/webapi/runs.py#L66-L95
-1
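The draw/order[0][column]/columns[i][name] parameters follow the jQuery DataTables server-side protocol: the client sends the index of the sort column and the column's name is looked up from the column list. A standalone sketch of the lookup, with a plain dict standing in for Flask's request.args:

args = {  # what DataTables might send (illustrative values)
    "order[0][column]": "2",
    "order[0][dir]": "desc",
    "columns[2][name]": "hostname",
}
order_column = args.get("order[0][column]")
if order_column is not None:
    order_column = args.get("columns[%d][name]" % int(order_column))
    if order_column == "hostname":
        # map the UI column onto the nested Mongo field
        order_column = "host.hostname"
print(order_column)  # host.hostname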
251,720
chovanecm/sacredboard
sacredboard/app/data/filestorage/filesdao.py
FileStoreFilesDAO.get
python

def get(self, file_id: str) -> [typing.BinaryIO, str, datetime.datetime]:
    """Return the file identified by a file_id string, its file name and upload date."""
    raise NotImplementedError(
        "Downloading files from the FileStore has not been implemented yet.")
[ "def", "get", "(", "self", ",", "file_id", ":", "str", ")", "->", "[", "typing", ".", "BinaryIO", ",", "str", ",", "datetime", ".", "datetime", "]", ":", "raise", "NotImplementedError", "(", "\"Downloading files for downloading files in FileStore has not been implemented yet.\"", ")" ]
Return the file identified by a file_id string, its file name and upload date.
[ "Return", "the", "file", "identified", "by", "a", "file_id", "string", "its", "file", "name", "and", "upload", "date", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/data/filestorage/filesdao.py#L20-L22
-1
251,721
chovanecm/sacredboard
sacredboard/app/config/jinja_filters.py
timediff
python

def timediff(time):
    """Return the difference in seconds between now and the given time."""
    now = datetime.datetime.utcnow()
    diff = now - time
    diff_sec = diff.total_seconds()
    return diff_sec
[ "def", "timediff", "(", "time", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "diff", "=", "now", "-", "time", "diff_sec", "=", "diff", ".", "total_seconds", "(", ")", "return", "diff_sec" ]
Return the difference in seconds between now and the given time.
[ "Return", "the", "difference", "in", "seconds", "between", "now", "and", "the", "given", "time", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/config/jinja_filters.py#L25-L30
-1
251,722
chovanecm/sacredboard
sacredboard/app/config/jinja_filters.py
last_line
python

def last_line(text):
    """
    Get the last meaningful line of the text, that is, the last
    non-empty line.

    :param text: Text to search the last line
    :type text: str
    :return:
    :rtype: str
    """
    last_line_of_text = ""
    while last_line_of_text == "" and len(text) > 0:
        last_line_start = text.rfind("\n")
        # Handle one-line strings (without \n)
        last_line_start = max(0, last_line_start)
        last_line_of_text = text[last_line_start:].strip("\r\n ")
        text = text[:last_line_start]
    return last_line_of_text
[ "def", "last_line", "(", "text", ")", ":", "last_line_of_text", "=", "\"\"", "while", "last_line_of_text", "==", "\"\"", "and", "len", "(", "text", ")", ">", "0", ":", "last_line_start", "=", "text", ".", "rfind", "(", "\"\\n\"", ")", "# Handle one-line strings (without \\n)", "last_line_start", "=", "max", "(", "0", ",", "last_line_start", ")", "last_line_of_text", "=", "text", "[", "last_line_start", ":", "]", ".", "strip", "(", "\"\\r\\n \"", ")", "text", "=", "text", "[", ":", "last_line_start", "]", "return", "last_line_of_text" ]
Get the last meaningful line of the text, that is the last non-empty line. :param text: Text to search the last line :type text: str :return: :rtype: str
[ "Get", "the", "last", "meaningful", "line", "of", "the", "text", "that", "is", "the", "last", "non", "-", "empty", "line", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/config/jinja_filters.py#L34-L50
-1
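A quick check of last_line on trailing blank lines, assuming the function above is importable from sacredboard.app.config.jinja_filters:

from sacredboard.app.config.jinja_filters import last_line

print(last_line("first\nsecond\n\n   \n"))  # second
print(last_line("single line"))             # single line
print(last_line("\n\n"))                    # (empty string)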
251,723
chovanecm/sacredboard
sacredboard/app/config/jinja_filters.py
dump_json
python

def dump_json(obj):
    """Dump Python object as JSON string."""
    return simplejson.dumps(obj, ignore_nan=True, default=json_util.default)
[ "def", "dump_json", "(", "obj", ")", ":", "return", "simplejson", ".", "dumps", "(", "obj", ",", "ignore_nan", "=", "True", ",", "default", "=", "json_util", ".", "default", ")" ]
Dump Python object as JSON string.
[ "Dump", "Python", "object", "as", "JSON", "string", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/config/jinja_filters.py#L60-L62
-1
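The ignore_nan flag is what makes this safe for metric series: simplejson serialises NaN as null instead of emitting invalid JSON, while bson's json_util.default handles Mongo-specific types such as ObjectId and datetime. A quick check of the NaN behaviour alone:

import simplejson

print(simplejson.dumps({"values": [1.0, float("nan"), 3.0]}, ignore_nan=True))
# {"values": [1.0, null, 3.0]}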
251,724
chovanecm/sacredboard
sacredboard/app/process/process.py
Process.terminate
python

def terminate(self, wait=False):
    """Terminate the process."""
    if self.proc is not None:
        self.proc.stdout.close()
        try:
            self.proc.terminate()
        except ProcessLookupError:
            pass
        if wait:
            self.proc.wait()
[ "def", "terminate", "(", "self", ",", "wait", "=", "False", ")", ":", "if", "self", ".", "proc", "is", "not", "None", ":", "self", ".", "proc", ".", "stdout", ".", "close", "(", ")", "try", ":", "self", ".", "proc", ".", "terminate", "(", ")", "except", "ProcessLookupError", ":", "pass", "if", "wait", ":", "self", ".", "proc", ".", "wait", "(", ")" ]
Terminate the process.
[ "Terminate", "the", "process", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/process/process.py#L88-L97
-1
251,725
chovanecm/sacredboard
sacredboard/app/process/process.py
Process.terminate_all
python

def terminate_all(wait=False):
    """
    Terminate all processes.

    :param wait: Wait for each to terminate
    :type wait: bool
    :return:
    :rtype:
    """
    for instance in Process.instances:
        if instance.is_running():
            instance.terminate(wait)
[ "def", "terminate_all", "(", "wait", "=", "False", ")", ":", "for", "instance", "in", "Process", ".", "instances", ":", "if", "instance", ".", "is_running", "(", ")", ":", "instance", ".", "terminate", "(", "wait", ")" ]
Terminate all processes. :param wait: Wait for each to terminate :type wait: bool :return: :rtype:
[ "Terminate", "all", "processes", "." ]
47e1c99e3be3c1b099d3772bc077f5666020eb0b
https://github.com/chovanecm/sacredboard/blob/47e1c99e3be3c1b099d3772bc077f5666020eb0b/sacredboard/app/process/process.py#L107-L118
-1
251,726
noahbenson/neuropythy
neuropythy/commands/atlas.py
calc_worklog
python

def calc_worklog(stdout=Ellipsis, stderr=Ellipsis, verbose=False):
    '''
    calc_worklog constructs the worklog from the stdout, stderr, and verbose
    arguments.
    '''
    try:
        cols = int(os.environ['COLUMNS'])
    except Exception:
        cols = 80
    return pimms.worklog(columns=cols, stdout=stdout, stderr=stderr,
                         verbose=verbose)
[ "def", "calc_worklog", "(", "stdout", "=", "Ellipsis", ",", "stderr", "=", "Ellipsis", ",", "verbose", "=", "False", ")", ":", "try", ":", "cols", "=", "int", "(", "os", ".", "environ", "[", "'COLUMNS'", "]", ")", "except", "Exception", ":", "cols", "=", "80", "return", "pimms", ".", "worklog", "(", "columns", "=", "cols", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "verbose", "=", "verbose", ")" ]
calc_worklog constructs the worklog from the stdout, stderr, and verbose arguments.
[ "calc_worklog", "constructs", "the", "worklog", "from", "the", "stdout", "stderr", "stdin", "and", "verbose", "arguments", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/atlas.py#L24-L30
-1
251,727
noahbenson/neuropythy
neuropythy/commands/atlas.py
calc_subject
python

def calc_subject(argv, worklog):
    '''
    calc_subject converts a subject_id into a subject object.

    Afferent parameters:
      @ argv
        The FreeSurfer subject name(s), HCP subject ID(s), or path(s) of the
        subject(s) to which the atlas should be applied.
    '''
    if len(argv) == 0:
        raise ValueError('No subject-id given')
    elif len(argv) > 1:
        worklog.warn('WARNING: Unused subject arguments: %s' % (argv[1:],))
    subject_id = argv[0]
    try:
        sub = freesurfer_subject(subject_id)
        if sub is not None:
            worklog('Using FreeSurfer subject: %s' % sub.path)
            return sub
    except Exception:
        pass
    try:
        sub = hcp_subject(subject_id)
        if sub is not None:
            worklog('Using HCP subject: %s' % sub.path)
            return sub
    except Exception:
        pass
    raise ValueError('Could not load subject %s' % subject_id)
[ "def", "calc_subject", "(", "argv", ",", "worklog", ")", ":", "if", "len", "(", "argv", ")", "==", "0", ":", "raise", "ValueError", "(", "'No subject-id given'", ")", "elif", "len", "(", "argv", ")", ">", "1", ":", "worklog", ".", "warn", "(", "'WARNING: Unused subject arguments: %s'", "%", "(", "argv", "[", "1", ":", "]", ",", ")", ")", "subject_id", "=", "argv", "[", "0", "]", "try", ":", "sub", "=", "freesurfer_subject", "(", "subject_id", ")", "if", "sub", "is", "not", "None", ":", "worklog", "(", "'Using FreeSurfer subject: %s'", "%", "sub", ".", "path", ")", "return", "sub", "except", "Exception", ":", "pass", "try", ":", "sub", "=", "hcp_subject", "(", "subject_id", ")", "if", "sub", "is", "not", "None", ":", "worklog", "(", "'Using HCP subject: %s'", "%", "sub", ".", "path", ")", "return", "sub", "except", "Exception", ":", "pass", "raise", "ValueError", "(", "'Could not load subject %s'", "%", "subject_id", ")" ]
calc_subject converts a subject_id into a subject object. Afferent parameters: @ argv The FreeSurfer subject name(s), HCP subject ID(s), or path(s) of the subject(s) to which the atlas should be applied.
[ "calc_subject", "converts", "a", "subject_id", "into", "a", "subject", "object", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/atlas.py#L32-L56
-1
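calc_subject's loader chain (try FreeSurfer, then HCP, then fail) generalizes to any ordered list of loaders. The sketch below is a hypothetical standalone version of that pattern, not neuropythy API:

def load_first(loaders, key):
    # Try each loader in order; an exception or a None result means
    # "not found here", mirroring calc_subject's two try-blocks.
    for load in loaders:
        try:
            result = load(key)
        except Exception:
            continue
        if result is not None:
            return result
    raise ValueError('Could not load subject %s' % key)

# usage with stand-in loaders:
print(load_first([lambda k: None, lambda k: 'subject:' + k], 'bert'))  # subject:bert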
251,728
noahbenson/neuropythy
neuropythy/commands/atlas.py
calc_atlases
def calc_atlases(worklog, atlas_subject_id='fsaverage'): ''' calc_atlases finds all available atlases in the possible subject directories of the given atlas subject. In order to be a template, it must be a collection of files (either mgh/mgz or FreeSurfer curv/morph-data files) named as '<hemi>.<template>_<quantity><ending>' such as the files 'lh.wang2015_mplbl.mgz' and 'rh.wang2015_mplbl.mgz'. They may additionally have a version prior to the ending, as in 'lh.benson14_angle.v2_5.mgz'. Files without versions are considered to be of a higher version than all versioned files. All files must be found in the atlas subject's surf/ directory; however, all subjects in all FreeSurfer subjects paths with the same subject id are searched if the atlas is not found in the atlas subject's directory. Afferent parameters: @ atlas_subject_id The FreeSurfer subject name or path of the subject that is to be used as the atlas subject from which the atlas is interpolated. HCP subjects are not currently supported. Efferent values: @ atlas_map A persistent map whose keys are atlas names, the values of which are themselves persistent maps whose keys are the versions of the given atlas (None potentially being included). The values of these maps are again maps of hemisphere names and finally of the quantity names (such as 'eccen' or 'maxprob') to the property vectors imported from the appropriate files. ''' try: sub = freesurfer_subject(atlas_subject_id) except Exception: sub = None if sub is None: try: sub = hcp_subject(atlas_subject_id) except Exception: sub = None if sub is None: raise ValueError('Could not load atlas subject %s' % atlas_subject_id) worklog('Using Atlas subject: %s' % sub.path) # Now find the requested atlases atlases = AutoDict() atlas_patt = r'^([lr]h)\.([^_]+)_([^.]+)(\.(v(\d+(_\d+)*)))?((\.mg[hz])|\.nii(\.gz)?)?$' atlas_hemi_ii = 1 atlas_atls_ii = 2 atlas_meas_ii = 3 atlas_vrsn_ii = 6 libdir = os.path.join(library_path(), 'data') for pth in [libdir] + config['freesurfer_subject_paths'] + [sub.path]: # see if appropriate files are in this directory pth = os.path.join(pth, sub.name, 'surf') if not os.path.isdir(pth): continue for fl in os.listdir(pth): m = re.match(atlas_patt, fl) if m is None: continue fl = os.path.join(pth, fl) (h, atls, meas, vrsn) = [ m.group(ii) for ii in (atlas_hemi_ii, atlas_atls_ii, atlas_meas_ii, atlas_vrsn_ii)] if vrsn is not None: vrsn = tuple([int(s) for s in vrsn.split('_')]) atlases[atls][vrsn][h][meas] = curry(nyio.load, fl) # convert the possible atlas maps into persistent/lazy maps atlas_map = pyr.pmap({a:pyr.pmap({v:pyr.pmap({h:pimms.lazy_map(hv) for (h,hv) in six.iteritems(vv)}) for (v,vv) in six.iteritems(av)}) for (a,av) in six.iteritems(atlases)}) return {'atlas_map':atlas_map, 'atlas_subject':sub}
python
def calc_atlases(worklog, atlas_subject_id='fsaverage'): ''' calc_atlases finds all available atlases in the possible subject directories of the given atlas subject. In order to be a template, it must be a collection of files (either mgh/mgz or FreeSurfer curv/morph-data files) named as '<hemi>.<template>_<quantity><ending>' such as the files 'lh.wang2015_mplbl.mgz' and 'rh.wang2015_mplbl.mgz'. They may additionally have a version prior to the ending, as in 'lh.benson14_angle.v2_5.mgz'. Files without versions are considered to be of a higher version than all versioned files. All files must be found in the atlas subject's surf/ directory; however, all subjects in all FreeSurfer subjects paths with the same subject id are searched if the atlas is not found in the atlas subject's directory. Afferent parameters: @ atlas_subject_id The FreeSurfer subject name or path of the subject that is to be used as the atlas subject from which the atlas is interpolated. HCP subjects are not currently supported. Efferent values: @ atlas_map A persistent map whose keys are atlas names, the values of which are themselves persistent maps whose keys are the versions of the given atlas (None potentially being included). The values of these maps are again maps of hemisphere names and finally of the quantity names (such as 'eccen' or 'maxprob') to the property vectors imported from the appropriate files. ''' try: sub = freesurfer_subject(atlas_subject_id) except Exception: sub = None if sub is None: try: sub = hcp_subject(atlas_subject_id) except Exception: sub = None if sub is None: raise ValueError('Could not load atlas subject %s' % atlas_subject_id) worklog('Using Atlas subject: %s' % sub.path) # Now find the requested atlases atlases = AutoDict() atlas_patt = r'^([lr]h)\.([^_]+)_([^.]+)(\.(v(\d+(_\d+)*)))?((\.mg[hz])|\.nii(\.gz)?)?$' atlas_hemi_ii = 1 atlas_atls_ii = 2 atlas_meas_ii = 3 atlas_vrsn_ii = 6 libdir = os.path.join(library_path(), 'data') for pth in [libdir] + config['freesurfer_subject_paths'] + [sub.path]: # see if appropriate files are in this directory pth = os.path.join(pth, sub.name, 'surf') if not os.path.isdir(pth): continue for fl in os.listdir(pth): m = re.match(atlas_patt, fl) if m is None: continue fl = os.path.join(pth, fl) (h, atls, meas, vrsn) = [ m.group(ii) for ii in (atlas_hemi_ii, atlas_atls_ii, atlas_meas_ii, atlas_vrsn_ii)] if vrsn is not None: vrsn = tuple([int(s) for s in vrsn.split('_')]) atlases[atls][vrsn][h][meas] = curry(nyio.load, fl) # convert the possible atlas maps into persistent/lazy maps atlas_map = pyr.pmap({a:pyr.pmap({v:pyr.pmap({h:pimms.lazy_map(hv) for (h,hv) in six.iteritems(vv)}) for (v,vv) in six.iteritems(av)}) for (a,av) in six.iteritems(atlases)}) return {'atlas_map':atlas_map, 'atlas_subject':sub}
[ "def", "calc_atlases", "(", "worklog", ",", "atlas_subject_id", "=", "'fsaverage'", ")", ":", "try", ":", "sub", "=", "freesurfer_subject", "(", "atlas_subject_id", ")", "except", "Exception", ":", "sub", "=", "None", "if", "sub", "is", "None", ":", "try", ":", "sub", "=", "hcp_subject", "(", "atlas_subject_id", ")", "except", "Exception", ":", "sub", "=", "None", "if", "sub", "is", "None", ":", "raise", "ValueError", "(", "'Could not load atlas subject %s'", "%", "atlas_subject_id", ")", "worklog", "(", "'Using Atlas subject: %s'", "%", "sub", ".", "path", ")", "# Now find the requested atlases", "atlases", "=", "AutoDict", "(", ")", "atlas_patt", "=", "r'^([lr]h)\\.([^_]+)_([^.]+)(\\.(v(\\d+(_\\d+)*)))?((\\.mg[hz])|\\.nii(\\.gz)?)?$'", "atlas_hemi_ii", "=", "1", "atlas_atls_ii", "=", "2", "atlas_meas_ii", "=", "3", "atlas_vrsn_ii", "=", "6", "libdir", "=", "os", ".", "path", ".", "join", "(", "library_path", "(", ")", ",", "'data'", ")", "for", "pth", "in", "[", "libdir", "]", "+", "config", "[", "'freesurfer_subject_paths'", "]", "+", "[", "sub", ".", "path", "]", ":", "# see if appropriate files are in this directory", "pth", "=", "os", ".", "path", ".", "join", "(", "pth", ",", "sub", ".", "name", ",", "'surf'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "pth", ")", ":", "continue", "for", "fl", "in", "os", ".", "listdir", "(", "pth", ")", ":", "m", "=", "re", ".", "match", "(", "atlas_patt", ",", "fl", ")", "if", "m", "is", "None", ":", "continue", "fl", "=", "os", ".", "path", ".", "join", "(", "pth", ",", "fl", ")", "(", "h", ",", "atls", ",", "meas", ",", "vrsn", ")", "=", "[", "m", ".", "group", "(", "ii", ")", "for", "ii", "in", "(", "atlas_hemi_ii", ",", "atlas_atls_ii", ",", "atlas_meas_ii", ",", "atlas_vrsn_ii", ")", "]", "if", "vrsn", "is", "not", "None", ":", "vrsn", "=", "tuple", "(", "[", "int", "(", "s", ")", "for", "s", "in", "vrsn", ".", "split", "(", "'_'", ")", "]", ")", "atlases", "[", "atls", "]", "[", "vrsn", "]", "[", "h", "]", "[", "meas", "]", "=", "curry", "(", "nyio", ".", "load", ",", "fl", ")", "# convert the possible atlas maps into persistent/lazy maps", "atlas_map", "=", "pyr", ".", "pmap", "(", "{", "a", ":", "pyr", ".", "pmap", "(", "{", "v", ":", "pyr", ".", "pmap", "(", "{", "h", ":", "pimms", ".", "lazy_map", "(", "hv", ")", "for", "(", "h", ",", "hv", ")", "in", "six", ".", "iteritems", "(", "vv", ")", "}", ")", "for", "(", "v", ",", "vv", ")", "in", "six", ".", "iteritems", "(", "av", ")", "}", ")", "for", "(", "a", ",", "av", ")", "in", "six", ".", "iteritems", "(", "atlases", ")", "}", ")", "return", "{", "'atlas_map'", ":", "atlas_map", ",", "'atlas_subject'", ":", "sub", "}" ]
calc_atlases finds all available atlases in the possible subject directories of the given atlas subject. In order to be a template, it must be a collection of files (either mgh/mgz or FreeSurfer curv/morph-data files) named as '<hemi>.<template>_<quantity><ending>' such as the files 'lh.wang2015_mplbl.mgz' and 'rh.wang2015_mplbl.mgz'. They may additionally have a version prior to the ending, as in 'lh.benson14_angle.v2_5.mgz'. Files without versions are considered to be of a higher version than all versioned files. All files must be found in the atlas subject's surf/ directory; however, all subjects in all FreeSurfer subjects paths with the same subject id are searched if the atlas is not found in the atlas subject's directory. Afferent parameters: @ atlas_subject_id The FreeSurfer subject name or path of the subject that is to be used as the atlas subject from which the atlas is interpolated. HCP subjects are not currently supported. Efferent values: @ atlas_map A persistent map whose keys are atlas names, the values of which are themselves persistent maps whose keys are the versions of the given atlas (None potentially being included). The values of these maps are again maps of hemisphere names and finally of the quantity names (such as 'eccen' or 'maxprob') to the property vectors imported from the appropriate files.
[ "cacl_atlases", "finds", "all", "available", "atlases", "in", "the", "possible", "subject", "directories", "of", "the", "given", "atlas", "subject", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/atlas.py#L58-L116
-1
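The filename grammar that calc_atlases accepts is entirely encoded in atlas_patt; the snippet below exercises that regex (copied verbatim from the function) on the two filename styles named in the docstring, including the version-tuple parse:

import re

atlas_patt = r'^([lr]h)\.([^_]+)_([^.]+)(\.(v(\d+(_\d+)*)))?((\.mg[hz])|\.nii(\.gz)?)?$'
for name in ['lh.wang2015_mplbl.mgz', 'lh.benson14_angle.v2_5.mgz']:
    m = re.match(atlas_patt, name)
    # groups 1/2/3/6 are the hemisphere, atlas, measurement, and version
    (h, atls, meas, vrsn) = [m.group(ii) for ii in (1, 2, 3, 6)]
    if vrsn is not None:
        vrsn = tuple(int(s) for s in vrsn.split('_'))
    print(name, '->', (h, atls, meas, vrsn))
# lh.wang2015_mplbl.mgz      -> ('lh', 'wang2015', 'mplbl', None)
# lh.benson14_angle.v2_5.mgz -> ('lh', 'benson14', 'angle', (2, 5))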
251,729
noahbenson/neuropythy
neuropythy/commands/atlas.py
calc_filemap
def calc_filemap(atlas_properties, subject, atlas_version_tags, worklog, output_path=None, overwrite=False, output_format='mgz', create_directory=False): ''' calc_filemap is a calculator that converts the atlas properties nested-map into a single-depth map whose keys are filenames and whose values are the interpolated property data. Afferent parameters: @ output_path The directory into which the atlas files should be written. If not provided or None then uses the subject's surf directory. If this directory doesn't exist, then it uses the subject's directory itself. @ overwrite Whether to overwrite existing atlas files. If True, then atlas files that already exist will be overwritten. If False, then no files are overwritten. @ create_directory Whether to create the output path if it doesn't exist. This is False by default. @ output_format The desired output format of the files to be written. May be one of the following: 'mgz', 'mgh', or either 'curv' or 'morph'. Efferent values: @ filemap A pimms lazy map whose keys are filenames and whose values are interpolated atlas properties. @ export_all_fn A function of no arguments that, when called, exports all of the files in the filemap to the output_path. ''' if output_path is None: output_path = os.path.join(subject.path, 'surf') if not os.path.isdir(output_path): output_path = subject.path output_format = 'mgz' if output_format is None else output_format.lower() if output_format.startswith('.'): output_format = output_format[1:] (fmt,ending) = (('mgh','.mgz') if output_format == 'mgz' else ('mgh','.mgh') if output_format == 'mgh' else ('freesurfer_morph','')) # make the filemap... worklog('Preparing Filemap...') fm = AutoDict() for (atl,atldat) in six.iteritems(atlas_properties): for (ver,verdat) in six.iteritems(atldat): vstr = atlas_version_tags[atl][ver] for (h,hdat) in six.iteritems(verdat): for m in six.iterkeys(hdat): flnm = '%s.%s_%s%s%s' % (h, atl, m, vstr, ending) flnm = os.path.join(output_path, flnm) fm[flnm] = curry(lambda hdat,m: hdat[m], hdat, m) # okay, make that a lazy map: filemap = pimms.lazy_map(fm) # the function for exporting all properties: def export_all(): ''' This function will export all files from its associated filemap and return a list of the filenames. ''' if not os.path.isdir(output_path): if not create_directory: raise ValueError('No such path and create_directory is False: %s' % output_path) os.makedirs(os.path.abspath(output_path), 0o755) filenames = [] worklog('Extracting Files...') wl = worklog.indent() for flnm in six.iterkeys(filemap): wl(flnm) filenames.append(nyio.save(flnm, filemap[flnm], fmt)) return filenames return {'filemap': filemap, 'export_all_fn': export_all}
python
def calc_filemap(atlas_properties, subject, atlas_version_tags, worklog, output_path=None, overwrite=False, output_format='mgz', create_directory=False): ''' calc_filemap is a calculator that converts the atlas properties nested-map into a single-depth map whose keys are filenames and whose values are the interpolated property data. Afferent parameters: @ output_path The directory into which the atlas files should be written. If not provided or None then uses the subject's surf directory. If this directory doesn't exist, then it uses the subject's directory itself. @ overwrite Whether to overwrite existing atlas files. If True, then atlas files that already exist will be overwritten. If False, then no files are overwritten. @ create_directory Whether to create the output path if it doesn't exist. This is False by default. @ output_format The desired output format of the files to be written. May be one of the following: 'mgz', 'mgh', or either 'curv' or 'morph'. Efferent values: @ filemap A pimms lazy map whose keys are filenames and whose values are interpolated atlas properties. @ export_all_fn A function of no arguments that, when called, exports all of the files in the filemap to the output_path. ''' if output_path is None: output_path = os.path.join(subject.path, 'surf') if not os.path.isdir(output_path): output_path = subject.path output_format = 'mgz' if output_format is None else output_format.lower() if output_format.startswith('.'): output_format = output_format[1:] (fmt,ending) = (('mgh','.mgz') if output_format == 'mgz' else ('mgh','.mgh') if output_format == 'mgh' else ('freesurfer_morph','')) # make the filemap... worklog('Preparing Filemap...') fm = AutoDict() for (atl,atldat) in six.iteritems(atlas_properties): for (ver,verdat) in six.iteritems(atldat): vstr = atlas_version_tags[atl][ver] for (h,hdat) in six.iteritems(verdat): for m in six.iterkeys(hdat): flnm = '%s.%s_%s%s%s' % (h, atl, m, vstr, ending) flnm = os.path.join(output_path, flnm) fm[flnm] = curry(lambda hdat,m: hdat[m], hdat, m) # okay, make that a lazy map: filemap = pimms.lazy_map(fm) # the function for exporting all properties: def export_all(): ''' This function will export all files from its associated filemap and return a list of the filenames. ''' if not os.path.isdir(output_path): if not create_directory: raise ValueError('No such path and create_directory is False: %s' % output_path) os.makedirs(os.path.abspath(output_path), 0o755) filenames = [] worklog('Extracting Files...') wl = worklog.indent() for flnm in six.iterkeys(filemap): wl(flnm) filenames.append(nyio.save(flnm, filemap[flnm], fmt)) return filenames return {'filemap': filemap, 'export_all_fn': export_all}
[ "def", "calc_filemap", "(", "atlas_properties", ",", "subject", ",", "atlas_version_tags", ",", "worklog", ",", "output_path", "=", "None", ",", "overwrite", "=", "False", ",", "output_format", "=", "'mgz'", ",", "create_directory", "=", "False", ")", ":", "if", "output_path", "is", "None", ":", "output_path", "=", "os", ".", "path", ".", "join", "(", "subject", ".", "path", ",", "'surf'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "output_path", ")", ":", "output_path", "=", "subject", ".", "path", "output_format", "=", "'mgz'", "if", "output_format", "is", "None", "else", "output_format", ".", "lower", "(", ")", "if", "output_format", ".", "startswith", "(", "'.'", ")", ":", "output_format", "=", "output_format", "[", "1", ":", "]", "(", "fmt", ",", "ending", ")", "=", "(", "(", "'mgh'", ",", "'.mgz'", ")", "if", "output_format", "==", "'mgz'", "else", "(", "'mgh'", ",", "'.mgh'", ")", "if", "output_format", "==", "'mgh'", "else", "(", "'freesurfer_morph'", ",", "''", ")", ")", "# make the filemap...", "worklog", "(", "'Preparing Filemap...'", ")", "fm", "=", "AutoDict", "(", ")", "for", "(", "atl", ",", "atldat", ")", "in", "six", ".", "iteritems", "(", "atlas_properties", ")", ":", "for", "(", "ver", ",", "verdat", ")", "in", "six", ".", "iteritems", "(", "atldat", ")", ":", "vstr", "=", "atlas_version_tags", "[", "atl", "]", "[", "ver", "]", "for", "(", "h", ",", "hdat", ")", "in", "six", ".", "iteritems", "(", "verdat", ")", ":", "for", "m", "in", "six", ".", "iterkeys", "(", "hdat", ")", ":", "flnm", "=", "'%s.%s_%s%s%s'", "%", "(", "h", ",", "atl", ",", "m", ",", "vstr", ",", "ending", ")", "flnm", "=", "os", ".", "path", ".", "join", "(", "output_path", ",", "flnm", ")", "fm", "[", "flnm", "]", "=", "curry", "(", "lambda", "hdat", ",", "m", ":", "hdat", "[", "m", "]", ",", "hdat", ",", "m", ")", "# okay, make that a lazy map:", "filemap", "=", "pimms", ".", "lazy_map", "(", "fm", ")", "# the function for exporting all properties:", "def", "export_all", "(", ")", ":", "'''\n This function will export all files from its associated filemap and return a list of the\n filenames.\n '''", "if", "not", "os", ".", "path", ".", "isdir", "(", "output_path", ")", ":", "if", "not", "create_directory", ":", "raise", "ValueError", "(", "'No such path and create_direcotry is False: %s'", "%", "output_path", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "abspath", "(", "output_path", ")", ",", "0o755", ")", "filenames", "=", "[", "]", "worklog", "(", "'Extracting Files...'", ")", "wl", "=", "worklog", ".", "indent", "(", ")", "for", "flnm", "in", "six", ".", "iterkeys", "(", "filemap", ")", ":", "wl", "(", "flnm", ")", "filenames", ".", "append", "(", "nyio", ".", "save", "(", "flnm", ",", "filemap", "[", "flnm", "]", ",", "fmt", ")", ")", "return", "filenames", "return", "{", "'filemap'", ":", "filemap", ",", "'export_all_fn'", ":", "export_all", "}" ]
calc_filemap is a calculator that converts the atlas properties nested-map into a single-depth map whose keys are filenames and whose values are the interpolated property data. Afferent parameters @ output_path The directory into which the atlas files should be written. If not provided or None then uses the subject's surf directory. If this directory doesn't exist, then it uses the subject's directory itself. @ overwrite Whether to overwrite existing atlas files. If True, then atlas files that already exist will be overwritten. If False, then no files are overwritten. @ create_directory Whether to create the output path if it doesn't exist. This is False by default. @ output_format The desired output format of the files to be written. May be one of the following: 'mgz', 'mgh', or either 'curv' or 'morph'. Efferent values: @ filemap A pimms lazy map whose keys are filenames and whose values are interpolated atlas properties. @ export_all_fn A function of no arguments that, when called, exports all of the files in the filemap to the output_path.
[ "calc_filemap", "is", "a", "calculator", "that", "converts", "the", "atlas", "properties", "nested", "-", "map", "into", "a", "single", "-", "depth", "map", "whose", "keys", "are", "filenames", "and", "whose", "values", "are", "the", "interpolated", "property", "data", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/atlas.py#L245-L311
-1
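calc_filemap wraps each entry in curry(lambda hdat,m: hdat[m], hdat, m) rather than a bare lambda; the point is early binding of the loop variables, since a lazy map evaluates its entries long after the loop has finished. Assuming curry behaves like functools.partial (an assumption, as curry's definition is not shown in this record), the difference looks like this:

from functools import partial

hdat = {'angle': [1, 2], 'eccen': [3, 4]}
late, early = {}, {}
for m in hdat:
    late[m] = lambda: hdat[m]                       # m is resolved at call time
    early[m] = partial(lambda d, k: d[k], hdat, m)  # m is bound now, like curry(...)

print([f() for f in late.values()])   # both closures see the final m: [[3, 4], [3, 4]]
print([f() for f in early.values()])  # each keeps its own key:        [[1, 2], [3, 4]]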
251,730
noahbenson/neuropythy
neuropythy/mri/images.py
ImageType.parse_type
def parse_type(self, hdat, dataobj=None): ''' Parses the dtype out of the header data or the array, depending on which is given; if both, then the header-data overrides the array; if neither, then np.float32. ''' try: dataobj = dataobj.dataobj except Exception: pass dtype = np.asarray(dataobj).dtype if dataobj is not None else self.default_type() if hdat and 'type' in hdat: dtype = np.dtype(hdat['type']) elif hdat and 'dtype' in hdat: dtype = np.dtype(hdat['dtype']) return dtype
python
def parse_type(self, hdat, dataobj=None): ''' Parses the dtype out of the header data or the array, depending on which is given; if both, then the header-data overrides the array; if neither, then np.float32. ''' try: dataobj = dataobj.dataobj except Exception: pass dtype = np.asarray(dataobj).dtype if dataobj is not None else self.default_type() if hdat and 'type' in hdat: dtype = np.dtype(hdat['type']) elif hdat and 'dtype' in hdat: dtype = np.dtype(hdat['dtype']) return dtype
[ "def", "parse_type", "(", "self", ",", "hdat", ",", "dataobj", "=", "None", ")", ":", "try", ":", "dataobj", "=", "dataobj", ".", "dataobj", "except", "Exception", ":", "pass", "dtype", "=", "np", ".", "asarray", "(", "dataobj", ")", ".", "dtype", "if", "dataobj", "else", "self", ".", "default_type", "(", ")", "if", "hdat", "and", "'type'", "in", "hdat", ":", "dtype", "=", "np", ".", "dtype", "(", "hdat", "[", "'type'", "]", ")", "elif", "hdat", "and", "'dtype'", "in", "hdat", ":", "dtype", "=", "np", ".", "dtype", "(", "hdat", "[", "'dtype'", "]", ")", "return", "dtype" ]
Parses the dtype out of the header data or the array, depending on which is given; if both, then the header-data overrides the array; if neither, then np.float32.
[ "Parses", "the", "dtype", "out", "of", "the", "header", "data", "or", "the", "array", "depending", "on", "which", "is", "given", ";", "if", "both", "then", "the", "header", "-", "data", "overrides", "the", "array", ";", "if", "neither", "then", "np", ".", "float32", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/mri/images.py#L54-L64
-1
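A standalone restatement of parse_type's precedence (header 'type' over header 'dtype' over the array's own dtype over the default); pick_dtype is a hypothetical name and np.float32 stands in for default_type():

import numpy as np

def pick_dtype(hdat, dataobj=None, default=np.float32):
    # header data wins over the array's dtype, which wins over the default
    dtype = np.asarray(dataobj).dtype if dataobj is not None else np.dtype(default)
    if hdat and 'type' in hdat:    dtype = np.dtype(hdat['type'])
    elif hdat and 'dtype' in hdat: dtype = np.dtype(hdat['dtype'])
    return dtype

print(pick_dtype({}, np.zeros(3, dtype='int16')))    # int16
print(pick_dtype({'type': 'float64'}, np.zeros(3)))  # float64
print(pick_dtype(None))                              # float32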
251,731
noahbenson/neuropythy
neuropythy/mri/images.py
ImageType.parse_affine
def parse_affine(self, hdat, dataobj=None): ''' Parses the affine out of the given header data and yields it. ''' if 'affine' in hdat: return to_affine(hdat['affine']) else: return to_affine(self.default_affine())
python
def parse_affine(self, hdat, dataobj=None): ''' Parses the affine out of the given header data and yields it. ''' if 'affine' in hdat: return to_affine(hdat['affine']) else: return to_affine(self.default_affine())
[ "def", "parse_affine", "(", "self", ",", "hdat", ",", "dataobj", "=", "None", ")", ":", "if", "'affine'", "in", "hdat", ":", "return", "to_affine", "(", "hdat", "[", "'affine'", "]", ")", "else", ":", "return", "to_affine", "(", "self", ".", "default_affine", "(", ")", ")" ]
Parses the affine out of the given header data and yields it.
[ "Parses", "the", "affine", "out", "of", "the", "given", "header", "data", "and", "yields", "it", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/mri/images.py#L66-L71
-1
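parse_affine reduces to a keyed lookup with a default. A self-contained analogue, using numpy's identity as a stand-in for default_affine() and omitting neuropythy's to_affine normalization:

import numpy as np

def parse_affine_like(hdat, default=None):
    # use the header's affine when present, otherwise fall back to the default
    aff = hdat['affine'] if 'affine' in hdat else default
    aff = np.asarray(np.eye(4) if aff is None else aff, dtype=float)
    assert aff.shape == (4, 4)
    return aff

print(parse_affine_like({'affine': np.diag([2.0, 2.0, 2.0, 1.0])})[0, 0])  # 2.0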
251,732
noahbenson/neuropythy
neuropythy/registration/core.py
_parse_field_arguments
def _parse_field_arguments(arg, faces, edges, coords): '''See mesh_register.''' if not hasattr(arg, '__iter__'): raise RuntimeError('field argument must be a list-like collection of instructions') pot = [_parse_field_argument(instruct, faces, edges, coords) for instruct in arg] # make a new Potential sum unless the length is 1 if len(pot) <= 1: return pot[0] else: sp = java_link().jvm.nben.mesh.registration.Fields.newSum() for field in pot: sp.addField(field) return sp
python
def _parse_field_arguments(arg, faces, edges, coords): '''See mesh_register.''' if not hasattr(arg, '__iter__'): raise RuntimeError('field argument must be a list-like collection of instructions') pot = [_parse_field_argument(instruct, faces, edges, coords) for instruct in arg] # make a new Potential sum unless the length is 1 if len(pot) <= 1: return pot[0] else: sp = java_link().jvm.nben.mesh.registration.Fields.newSum() for field in pot: sp.addField(field) return sp
[ "def", "_parse_field_arguments", "(", "arg", ",", "faces", ",", "edges", ",", "coords", ")", ":", "if", "not", "hasattr", "(", "arg", ",", "'__iter__'", ")", ":", "raise", "RuntimeError", "(", "'field argument must be a list-like collection of instructions'", ")", "pot", "=", "[", "_parse_field_argument", "(", "instruct", ",", "faces", ",", "edges", ",", "coords", ")", "for", "instruct", "in", "arg", "]", "# make a new Potential sum unless the length is 1", "if", "len", "(", "pot", ")", "<=", "1", ":", "return", "pot", "[", "0", "]", "else", ":", "sp", "=", "java_link", "(", ")", ".", "jvm", ".", "nben", ".", "mesh", ".", "registration", ".", "Fields", ".", "newSum", "(", ")", "for", "field", "in", "pot", ":", "sp", ".", "addField", "(", "field", ")", "return", "sp" ]
See mesh_register.
[ "See", "mesh_register", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/registration/core.py#L96-L107
-1
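The Java Fields.newSum object built by _parse_field_arguments is a running sum of potential fields. A pure-Python stand-in for that reduction (not the actual nben API), with potentials modeled as plain callables:

def new_sum(fields):
    # a summed potential: evaluate every component field at x and add the results
    return lambda x: sum(f(x) for f in fields)

def combine(fields):
    # mirror _parse_field_arguments: a single field is returned unwrapped
    return fields[0] if len(fields) == 1 else new_sum(fields)

edge  = lambda x: x * x    # stand-in potential terms
angle = lambda x: 2.0 * x
print(combine([edge])(3.0))         # 9.0
print(combine([edge, angle])(3.0))  # 15.0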
251,733
noahbenson/neuropythy
neuropythy/graphics/core.py
retino_colors
def retino_colors(vcolorfn, *args, **kwargs): 'See eccen_colors, angle_colors, sigma_colors, and varea_colors.' if len(args) == 0: def _retino_color_pass(*args, **new_kwargs): return retino_colors(vcolorfn, *args, **{k:(new_kwargs[k] if k in new_kwargs else kwargs[k]) for k in set(kwargs) | set(new_kwargs)}) return _retino_color_pass elif len(args) > 1: raise ValueError('retinotopy color functions accept at most one argument') m = args[0] # we need to handle the arguments if isinstance(m, (geo.VertexSet, pimms.ITable)): tbl = m.properties if isinstance(m, geo.VertexSet) else m n = tbl.row_count # if the weight or property arguments are lists, we need to thread these along if 'property' in kwargs: props = kwargs['property'] del kwargs['property'] if not (pimms.is_vector(props) or pimms.is_matrix(props)): props = [props for _ in range(n)] else: props = None if 'weight' in kwargs: ws = kwargs['weight'] del kwargs['weight'] if not pimms.is_vector(ws) and not pimms.is_matrix(ws): ws = [ws for _ in range(n)] else: ws = None vcolorfn0 = vcolorfn(Ellipsis, **kwargs) if len(kwargs) > 0 else vcolorfn if props is None and ws is None: vcfn = lambda m,k:vcolorfn0(m) elif props is None: vcfn = lambda m,k:vcolorfn0(m, weight=ws[k]) elif ws is None: vcfn = lambda m,k:vcolorfn0(m, property=props[k]) else: vcfn = lambda m,k:vcolorfn0(m, property=props[k], weight=ws[k]) return np.asarray([vcfn(r,kk) for (kk,r) in enumerate(tbl.rows)]) else: return vcolorfn(m, **kwargs)
python
def retino_colors(vcolorfn, *args, **kwargs): 'See eccen_colors, angle_colors, sigma_colors, and varea_colors.' if len(args) == 0: def _retino_color_pass(*args, **new_kwargs): return retino_colors(vcolorfn, *args, **{k:(new_kwargs[k] if k in new_kwargs else kwargs[k]) for k in set(kwargs) | set(new_kwargs)}) return _retino_color_pass elif len(args) > 1: raise ValueError('retinotopy color functions accept at most one argument') m = args[0] # we need to handle the arguments if isinstance(m, (geo.VertexSet, pimms.ITable)): tbl = m.properties if isinstance(m, geo.VertexSet) else m n = tbl.row_count # if the weight or property arguments are lists, we need to thread these along if 'property' in kwargs: props = kwargs['property'] del kwargs['property'] if not (pimms.is_vector(props) or pimms.is_matrix(props)): props = [props for _ in range(n)] else: props = None if 'weight' in kwargs: ws = kwargs['weight'] del kwargs['weight'] if not pimms.is_vector(ws) and not pimms.is_matrix(ws): ws = [ws for _ in range(n)] else: ws = None vcolorfn0 = vcolorfn(Ellipsis, **kwargs) if len(kwargs) > 0 else vcolorfn if props is None and ws is None: vcfn = lambda m,k:vcolorfn0(m) elif props is None: vcfn = lambda m,k:vcolorfn0(m, weight=ws[k]) elif ws is None: vcfn = lambda m,k:vcolorfn0(m, property=props[k]) else: vcfn = lambda m,k:vcolorfn0(m, property=props[k], weight=ws[k]) return np.asarray([vcfn(r,kk) for (kk,r) in enumerate(tbl.rows)]) else: return vcolorfn(m, **kwargs)
[ "def", "retino_colors", "(", "vcolorfn", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "0", ":", "def", "_retino_color_pass", "(", "*", "args", ",", "*", "*", "new_kwargs", ")", ":", "return", "retino_colors", "(", "vcolorfn", ",", "*", "args", ",", "*", "*", "{", "k", ":", "(", "new_kwargs", "[", "k", "]", "if", "k", "in", "new_kwargs", "else", "kwargs", "[", "k", "]", ")", "for", "k", "in", "set", "(", "kwargs", ".", "keys", "(", ")", "+", "new_kwargs", ".", "keys", "(", ")", ")", "}", ")", "return", "_retino_color_pass", "elif", "len", "(", "args", ")", ">", "1", ":", "raise", "ValueError", "(", "'retinotopy color functions accepts at most one argument'", ")", "m", "=", "args", "[", "0", "]", "# we need to handle the arguments", "if", "isinstance", "(", "m", ",", "(", "geo", ".", "VertexSet", ",", "pimms", ".", "ITable", ")", ")", ":", "tbl", "=", "m", ".", "properties", "if", "isinstance", "(", "m", ",", "geo", ".", "VertexSet", ")", "else", "m", "n", "=", "tbl", ".", "row_count", "# if the weight or property arguments are lists, we need to thread these along", "if", "'property'", "in", "kwargs", ":", "props", "=", "kwargs", "[", "'property'", "]", "del", "kwargs", "[", "'property'", "]", "if", "not", "(", "pimms", ".", "is_vector", "(", "props", ")", "or", "pimms", ".", "is_matrix", "(", "props", ")", ")", ":", "props", "=", "[", "props", "for", "_", "in", "range", "(", "n", ")", "]", "else", ":", "props", "=", "None", "if", "'weight'", "in", "kwargs", ":", "ws", "=", "kwargs", "[", "'weight'", "]", "del", "kwargs", "[", "'weight'", "]", "if", "not", "pimms", ".", "is_vector", "(", "ws", ")", "and", "not", "pimms", ".", "is_matrix", "(", "ws", ")", ":", "ws", "=", "[", "ws", "for", "_", "in", "range", "(", "n", ")", "]", "else", ":", "ws", "=", "None", "vcolorfn0", "=", "vcolorfn", "(", "Ellipsis", ",", "*", "*", "kwargs", ")", "if", "len", "(", "kwargs", ")", ">", "0", "else", "vcolorfn", "if", "props", "is", "None", "and", "ws", "is", "None", ":", "vcfn", "=", "lambda", "m", ",", "k", ":", "vcolorfn0", "(", "m", ")", "elif", "props", "is", "None", ":", "vcfn", "=", "lambda", "m", ",", "k", ":", "vcolorfn0", "(", "m", ",", "weight", "=", "ws", "[", "k", "]", ")", "elif", "ws", "is", "None", ":", "vcfn", "=", "lambda", "m", ",", "k", ":", "vcolorfn0", "(", "m", ",", "property", "=", "props", "[", "k", "]", ")", "else", ":", "vcfn", "=", "lambda", "m", ",", "k", ":", "vcolorfn0", "(", "m", ",", "property", "=", "props", "[", "k", "]", ",", "weight", "=", "ws", "[", "k", "]", ")", "return", "np", ".", "asarray", "(", "[", "vcfn", "(", "r", ",", "kk", ")", "for", "(", "kk", ",", "r", ")", "in", "enumerate", "(", "tbl", ".", "rows", ")", "]", ")", "else", ":", "return", "vcolorfn", "(", "m", ",", "*", "*", "kwargs", ")" ]
See eccen_colors, angle_colors, sigma_colors, and varea_colors.
[ "See", "eccen_colors", "angle_colors", "sigma_colors", "and", "varea_colors", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/graphics/core.py#L482-L516
-1
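The zero-positional-argument branch of retino_colors returns a specialized function with the given keywords baked in, merging later keywords over earlier ones. A hypothetical standalone sketch of that curry-like behavior (configurable and shade are illustration names only):

def configurable(fn):
    def wrapper(*args, **kwargs):
        if len(args) == 0:
            # no data argument: return a specialized function; keywords given
            # later override the ones baked in now
            return lambda *a, **kw: wrapper(*a, **dict(kwargs, **kw))
        return fn(args[0], **kwargs)
    return wrapper

@configurable
def shade(x, scale=1.0):
    return x * scale

double = shade(scale=2.0)   # no positional args: get a specialized function back
print(double(3))            # 6
print(double(3, scale=10))  # 30: the later keyword wins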
251,734
noahbenson/neuropythy
neuropythy/hcp/files.py
_load_fsLR_atlasroi
def _load_fsLR_atlasroi(filename, data): ''' Loads the appropriate atlas for the given data; data may point to a cifti file whose atlas is needed or to an atlas file. ''' (fdir, fnm) = os.path.split(filename) fparts = fnm.split('.') atl = fparts[-3] if atl in _load_fsLR_atlasroi.atlases: return _load_fsLR_atlasroi.atlases[atl] sid = data['id'] fnm = [os.path.join(fdir, '%d.%s.atlasroi.%s.shape.gii' % (sid, h, atl)) for h in ('L', 'R')] if data['cifti']: dat = [{'id':data['id'], 'type':'property', 'name':'atlas', 'hemi':h} for h in data['hemi']] else: dat = [{'id':data['id'], 'type':'property', 'name':'atlas', 'hemi':(h + data['hemi'][2:])} for h in ('lh','rh')] # loading an atlas file; this is easier rois = tuple([_load(f, d).astype('bool') for (f,d) in zip(fnm, dat)]) # add these to the cache if atl != 'native': _load_fsLR_atlasroi.atlases[atl] = rois return rois
python
def _load_fsLR_atlasroi(filename, data): ''' Loads the appropriate atlas for the given data; data may point to a cifti file whose atlas is needed or to an atlas file. ''' (fdir, fnm) = os.path.split(filename) fparts = fnm.split('.') atl = fparts[-3] if atl in _load_fsLR_atlasroi.atlases: return _load_fsLR_atlasroi.atlases[atl] sid = data['id'] fnm = [os.path.join(fdir, '%d.%s.atlasroi.%s.shape.gii' % (sid, h, atl)) for h in ('L', 'R')] if data['cifti']: dat = [{'id':data['id'], 'type':'property', 'name':'atlas', 'hemi':h} for h in data['hemi']] else: dat = [{'id':data['id'], 'type':'property', 'name':'atlas', 'hemi':(h + data['hemi'][2:])} for h in ('lh','rh')] # loading an atlas file; this is easier rois = tuple([_load(f, d).astype('bool') for (f,d) in zip(fnm, dat)]) # add these to the cache if atl != 'native': _load_fsLR_atlasroi.atlases[atl] = rois return rois
[ "def", "_load_fsLR_atlasroi", "(", "filename", ",", "data", ")", ":", "(", "fdir", ",", "fnm", ")", "=", "os", ".", "path", ".", "split", "(", "filename", ")", "fparts", "=", "fnm", ".", "split", "(", "'.'", ")", "atl", "=", "fparts", "[", "-", "3", "]", "if", "atl", "in", "_load_fsLR_atlasroi", ".", "atlases", ":", "return", "_load_fsLR_atlasroi", ".", "atlases", "[", "atl", "]", "sid", "=", "data", "[", "'id'", "]", "fnm", "=", "[", "os", ".", "path", ".", "join", "(", "fdir", ",", "'%d.%s.atlasroi.%s.shape.gii'", "%", "(", "sid", ",", "h", ",", "atl", ")", ")", "for", "h", "in", "(", "'L'", ",", "'R'", ")", "]", "if", "data", "[", "'cifti'", "]", ":", "dat", "=", "[", "{", "'id'", ":", "data", "[", "'id'", "]", ",", "'type'", ":", "'property'", ",", "'name'", ":", "'atlas'", ",", "'hemi'", ":", "h", "}", "for", "h", "in", "data", "[", "'hemi'", "]", "]", "else", ":", "dat", "=", "[", "{", "'id'", ":", "data", "[", "'id'", "]", ",", "'type'", ":", "'property'", ",", "'name'", ":", "'atlas'", ",", "'hemi'", ":", "(", "h", "+", "data", "[", "'hemi'", "]", "[", "2", ":", "]", ")", "}", "for", "h", "in", "(", "'lh'", ",", "'rh'", ")", "]", "# loading an atlas file; this is easier", "rois", "=", "tuple", "(", "[", "_load", "(", "f", ",", "d", ")", ".", "astype", "(", "'bool'", ")", "for", "(", "f", ",", "d", ")", "in", "zip", "(", "fnm", ",", "dat", ")", "]", ")", "# add these to the cache", "if", "atl", "!=", "'native'", ":", "_load_fsLR_atlasroi", ".", "atlases", "[", "atl", "]", "=", "rois", "return", "rois" ]
Loads the appropriate atlas for the given data; data may point to a cifti file whose atlas is needed or to an atlas file.
[ "Loads", "the", "appropriate", "atlas", "for", "the", "given", "data", ";", "data", "may", "point", "to", "a", "cifti", "file", "whose", "atlas", "is", "needed", "or", "to", "an", "atlas", "file", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/hcp/files.py#L248-L268
-1
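_load_fsLR_atlasroi memoizes on a function attribute (_load_fsLR_atlasroi.atlases) and deliberately skips caching the 'native' atlas, whose contents differ per subject. A standalone sketch of that caching shape, with a placeholder payload in place of real ROI data:

def load_atlas(name):
    # function-attribute cache: results for named atlases are stored on the
    # function itself; 'native' is never cached
    if name in load_atlas.cache:
        return load_atlas.cache[name]
    rois = ('lh-roi-%s' % name, 'rh-roi-%s' % name)  # placeholder payload
    if name != 'native':
        load_atlas.cache[name] = rois
    return rois
load_atlas.cache = {}

print(load_atlas('32k'))
print(load_atlas('32k') is load_atlas('32k'))  # True: served from the cache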
251,735
noahbenson/neuropythy
neuropythy/hcp/files.py
_load_fsLR_atlasroi_for_size
def _load_fsLR_atlasroi_for_size(size, sid=100610): ''' Loads the appropriate atlas for the given size of data; size should be the number of vertices and sub-cortical voxels stored in the cifti file. ''' from .core import subject # it doesn't matter what subject we request, so just use any one fls = _load_fsLR_atlasroi_for_size.sizes if size not in fls: raise ValueError('unknown fs_LR atlas size: %s' % size) (n,fls) = _load_fsLR_atlasroi_for_size.sizes[size] fl = os.path.join(subject(sid).path, 'MNINonLinear', *fls) dat = {'id':sid, 'cifti':True, 'hemi':('lh_LR%dk_MSMAll' % n ,'rh_LR%dk_MSMAll' % n)} return _load_fsLR_atlasroi(fl, dat)
python
def _load_fsLR_atlasroi_for_size(size, sid=100610): ''' Loads the appropriate atlas for the given size of data; size should be the number of vertices and sub-cortical voxels stored in the cifti file. ''' from .core import subject # it doesn't matter what subject we request, so just use any one fls = _load_fsLR_atlasroi_for_size.sizes if size not in fls: raise ValueError('unknown fs_LR atlas size: %s' % size) (n,fls) = _load_fsLR_atlasroi_for_size.sizes[size] fl = os.path.join(subject(sid).path, 'MNINonLinear', *fls) dat = {'id':sid, 'cifti':True, 'hemi':('lh_LR%dk_MSMAll' % n ,'rh_LR%dk_MSMAll' % n)} return _load_fsLR_atlasroi(fl, dat)
[ "def", "_load_fsLR_atlasroi_for_size", "(", "size", ",", "sid", "=", "100610", ")", ":", "from", ".", "core", "import", "subject", "# it doesn't matter what subject we request, so just use any one", "fls", "=", "_load_fsLR_atlasroi_for_size", ".", "sizes", "if", "size", "not", "in", "fls", ":", "raise", "ValueError", "(", "'unknown fs_LR atlas size: %s'", "%", "size", ")", "(", "n", ",", "fls", ")", "=", "_load_fsLR_atlasroi_for_size", ".", "sizes", "[", "size", "]", "fl", "=", "os", ".", "path", ".", "join", "(", "subject", "(", "sid", ")", ".", "path", ",", "'MNINonLinear'", ",", "*", "fls", ")", "dat", "=", "{", "'id'", ":", "sid", ",", "'cifti'", ":", "True", ",", "'hemi'", ":", "(", "'lh_LR%dk_MSMAll'", "%", "n", ",", "'rh_LR%dk_MSMAll'", "%", "n", ")", "}", "return", "_load_fsLR_atlasroi", "(", "fl", ",", "dat", ")" ]
Loads the appropriate atlas for the given size of data; size should be the number of vertices and sub-cortical voxels stored in the cifti file.
[ "Loads", "the", "appropriate", "atlas", "for", "the", "given", "size", "of", "data", ";", "size", "should", "be", "the", "number", "of", "stored", "vertices", "and", "sub", "-", "corticel", "voxels", "stored", "in", "the", "cifti", "file", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/hcp/files.py#L270-L282
-1
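The .sizes table the function consults maps a cifti row count to a mesh resolution and a relative file path; the keys and paths below are placeholders (the real counts are not shown in this record), the point is only the lookup-or-raise and unpacking shape:

sizes = {111: (32, ('fsaverage_LR32k',)),   # placeholder counts and paths
         222: (59, ('fsaverage_LR59k',))}

def resolution_for(size):
    if size not in sizes:
        raise ValueError('unknown fs_LR atlas size: %s' % size)
    (n, fls) = sizes[size]
    return (n, fls)

print(resolution_for(111))  # (32, ('fsaverage_LR32k',))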
251,736
noahbenson/neuropythy
neuropythy/commands/register_retinotopy.py
calc_arguments
def calc_arguments(args): ''' calc_arguments is a calculator that parses the command-line arguments for the registration command and produces the subject, the model, the log function, and the additional options. ''' (args, opts) = _retinotopy_parser(args) # We do some of the options right here... if opts['help']: print(info, file=sys.stdout) sys.exit(1) # and if we are verbose, lets setup a note function verbose = opts['verbose'] def note(s): if verbose: print(s, file=sys.stdout) sys.stdout.flush() return verbose def error(s): print(s, file=sys.stderr) sys.stderr.flush() sys.exit(1) if len(args) < 1: error('subject argument is required') # Add the subjects directory, if there is one if 'subjects_dir' in opts and opts['subjects_dir'] is not None: add_subject_path(opts['subjects_dir']) # Get the subject now try: sub = subject(args[0]) except Exception: error('Failed to load subject %s' % args[0]) # and the model if len(args) > 1: mdl_name = args[1] elif opts['model_sym']: mdl_name = 'schira' else: mdl_name = 'benson17' try: if opts['model_sym']: model = {h:retinotopy_model(mdl_name).persist() for h in ['lh', 'rh']} else: model = {h:retinotopy_model(mdl_name, hemi=h).persist() for h in ['lh', 'rh']} except Exception: error('Could not load retinotopy model %s' % mdl_name) # Now, we want to run a few filters on the options # Parse the simple numbers for o in ['weight_min', 'scale', 'max_step_size', 'max_out_eccen', 'max_in_eccen', 'min_in_eccen', 'field_sign_weight', 'radius_weight']: opts[o] = float(opts[o]) opts['max_steps'] = int(opts['max_steps']) # Make a note: note('Processing subject: %s' % sub.name) del opts['help'] del opts['verbose'] del opts['subjects_dir'] # That's all we need! return pimms.merge(opts, {'subject': sub.persist(), 'model': pyr.pmap(model), 'options': pyr.pmap(opts), 'note': note, 'error': error})
python
def calc_arguments(args): ''' calc_arguments is a calculator that parses the command-line arguments for the registration command and produces the subject, the model, the log function, and the additional options. ''' (args, opts) = _retinotopy_parser(args) # We do some of the options right here... if opts['help']: print(info, file=sys.stdout) sys.exit(1) # and if we are verbose, lets setup a note function verbose = opts['verbose'] def note(s): if verbose: print(s, file=sys.stdout) sys.stdout.flush() return verbose def error(s): print(s, file=sys.stderr) sys.stderr.flush() sys.exit(1) if len(args) < 1: error('subject argument is required') # Add the subjects directory, if there is one if 'subjects_dir' in opts and opts['subjects_dir'] is not None: add_subject_path(opts['subjects_dir']) # Get the subject now try: sub = subject(args[0]) except Exception: error('Failed to load subject %s' % args[0]) # and the model if len(args) > 1: mdl_name = args[1] elif opts['model_sym']: mdl_name = 'schira' else: mdl_name = 'benson17' try: if opts['model_sym']: model = {h:retinotopy_model(mdl_name).persist() for h in ['lh', 'rh']} else: model = {h:retinotopy_model(mdl_name, hemi=h).persist() for h in ['lh', 'rh']} except Exception: error('Could not load retinotopy model %s' % mdl_name) # Now, we want to run a few filters on the options # Parse the simple numbers for o in ['weight_min', 'scale', 'max_step_size', 'max_out_eccen', 'max_in_eccen', 'min_in_eccen', 'field_sign_weight', 'radius_weight']: opts[o] = float(opts[o]) opts['max_steps'] = int(opts['max_steps']) # Make a note: note('Processing subject: %s' % sub.name) del opts['help'] del opts['verbose'] del opts['subjects_dir'] # That's all we need! return pimms.merge(opts, {'subject': sub.persist(), 'model': pyr.pmap(model), 'options': pyr.pmap(opts), 'note': note, 'error': error})
[ "def", "calc_arguments", "(", "args", ")", ":", "(", "args", ",", "opts", ")", "=", "_retinotopy_parser", "(", "args", ")", "# We do some of the options right here...", "if", "opts", "[", "'help'", "]", ":", "print", "(", "info", ",", "file", "=", "sys", ".", "stdout", ")", "sys", ".", "exit", "(", "1", ")", "# and if we are verbose, lets setup a note function", "verbose", "=", "opts", "[", "'verbose'", "]", "def", "note", "(", "s", ")", ":", "if", "verbose", ":", "print", "(", "s", ",", "file", "=", "sys", ".", "stdout", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "return", "verbose", "def", "error", "(", "s", ")", ":", "print", "(", "s", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "sys", ".", "exit", "(", "1", ")", "if", "len", "(", "args", ")", "<", "1", ":", "error", "(", "'subject argument is required'", ")", "# Add the subjects directory, if there is one", "if", "'subjects_dir'", "in", "opts", "and", "opts", "[", "'subjects_dir'", "]", "is", "not", "None", ":", "add_subject_path", "(", "opts", "[", "'subjects_dir'", "]", ")", "# Get the subject now", "try", ":", "sub", "=", "subject", "(", "args", "[", "0", "]", ")", "except", "Exception", ":", "error", "(", "'Failed to load subject %s'", "%", "args", "[", "0", "]", ")", "# and the model", "if", "len", "(", "args", ")", ">", "1", ":", "mdl_name", "=", "args", "[", "1", "]", "elif", "opts", "[", "'model_sym'", "]", ":", "mdl_name", "=", "'schira'", "else", ":", "mdl_name", "=", "'benson17'", "try", ":", "if", "opts", "[", "'model_sym'", "]", ":", "model", "=", "{", "h", ":", "retinotopy_model", "(", "mdl_name", ")", ".", "persist", "(", ")", "for", "h", "in", "[", "'lh'", ",", "'rh'", "]", "}", "else", ":", "model", "=", "{", "h", ":", "retinotopy_model", "(", "mdl_name", ",", "hemi", "=", "h", ")", ".", "persist", "(", ")", "for", "h", "in", "[", "'lh'", ",", "'rh'", "]", "}", "except", "Exception", ":", "error", "(", "'Could not load retinotopy model %s'", "%", "mdl_name", ")", "# Now, we want to run a few filters on the options", "# Parse the simple numbers", "for", "o", "in", "[", "'weight_min'", ",", "'scale'", ",", "'max_step_size'", ",", "'max_out_eccen'", ",", "'max_in_eccen'", ",", "'min_in_eccen'", ",", "'field_sign_weight'", ",", "'radius_weight'", "]", ":", "opts", "[", "o", "]", "=", "float", "(", "opts", "[", "o", "]", ")", "opts", "[", "'max_steps'", "]", "=", "int", "(", "opts", "[", "'max_steps'", "]", ")", "# Make a note:", "note", "(", "'Processing subject: %s'", "%", "sub", ".", "name", ")", "del", "opts", "[", "'help'", "]", "del", "opts", "[", "'verbose'", "]", "del", "opts", "[", "'subjects_dir'", "]", "# That's all we need!", "return", "pimms", ".", "merge", "(", "opts", ",", "{", "'subject'", ":", "sub", ".", "persist", "(", ")", ",", "'model'", ":", "pyr", ".", "pmap", "(", "model", ")", ",", "'options'", ":", "pyr", ".", "pmap", "(", "opts", ")", ",", "'note'", ":", "note", ",", "'error'", ":", "error", "}", ")" ]
calc_arguments is a calculator that parses the command-line arguments for the registration command and produces the subject, the model, the log function, and the additional options.
[ "calc_arguments", "is", "a", "calculator", "that", "parses", "the", "command", "-", "line", "arguments", "for", "the", "registration", "command", "and", "produces", "the", "subject", "the", "model", "the", "log", "function", "and", "the", "additional", "options", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/register_retinotopy.py#L305-L361
-1
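calc_arguments defines note and error as closures over the parsed options rather than threading a verbose flag through every downstream call; the same pattern in isolation (make_loggers is a hypothetical name):

import sys

def make_loggers(verbose):
    # note prints only when verbose; error always prints to stderr and exits
    def note(s):
        if verbose:
            print(s, file=sys.stdout)
            sys.stdout.flush()
        return verbose
    def error(s):
        print(s, file=sys.stderr)
        sys.stderr.flush()
        sys.exit(1)
    return (note, error)

(note, error) = make_loggers(True)
note('Processing subject: bert')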
251,737
noahbenson/neuropythy
neuropythy/commands/register_retinotopy.py
calc_retinotopy
def calc_retinotopy(note, error, subject, clean, run_lh, run_rh, invert_rh_angle, max_in_eccen, min_in_eccen, angle_lh_file, theta_lh_file, eccen_lh_file, rho_lh_file, weight_lh_file, radius_lh_file, angle_rh_file, theta_rh_file, eccen_rh_file, rho_rh_file, weight_rh_file, radius_rh_file): ''' calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices. ''' ctcs = {} for (h,ang,tht,ecc,rho,wgt,rad,run) in [ ('lh', angle_lh_file,theta_lh_file, eccen_lh_file,rho_lh_file, weight_lh_file, radius_lh_file, run_lh), ('rh', angle_rh_file,theta_rh_file, eccen_rh_file,rho_rh_file, weight_rh_file, radius_rh_file, run_rh)]: if not run: continue hemi = getattr(subject, h) props = {} # load the properties or find them in the auto-properties if ang: try: props['polar_angle'] = _guess_surf_file(ang) except Exception: error('could not load surface file %s' % ang) elif tht: try: tmp = _guess_surf_file(tht) props['polar_angle'] = 90.0 - 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % tht) else: props['polar_angle'] = empirical_retinotopy_data(hemi, 'polar_angle') if ecc: try: props['eccentricity'] = _guess_surf_file(ecc) except Exception: error('could not load surface file %s' % ecc) elif rho: try: tmp = _guess_surf_file(rho) props['eccentricity'] = 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % rho) else: props['eccentricity'] = empirical_retinotopy_data(hemi, 'eccentricity') if wgt: try: props['weight'] = _guess_surf_file(wgt) except Exception: error('could not load surface file %s' % wgt) else: props['weight'] = empirical_retinotopy_data(hemi, 'weight') if rad: try: props['radius'] = _guess_surf_file(rad) except Exception: error('could not load surface file %s' % rad) else: props['radius'] = empirical_retinotopy_data(hemi, 'radius') # Check for inverted rh if h == 'rh' and invert_rh_angle: props['polar_angle'] = -props['polar_angle'] # and zero-out weights for high eccentricities props['weight'] = np.array(props['weight']) if max_in_eccen is not None: props['weight'][props['eccentricity'] > max_in_eccen] = 0 if min_in_eccen is not None: props['weight'][props['eccentricity'] < min_in_eccen] = 0 # Do smoothing, if requested if clean: note('Cleaning %s retinotopy...' % h.upper()) (ang,ecc) = clean_retinotopy(hemi, retinotopy=props, mask=None, weight='weight') props['polar_angle'] = ang props['eccentricity'] = ecc ctcs[h] = hemi.with_prop(props) return {'cortices': pyr.pmap(ctcs)}
python
def calc_retinotopy(note, error, subject, clean, run_lh, run_rh, invert_rh_angle, max_in_eccen, min_in_eccen, angle_lh_file, theta_lh_file, eccen_lh_file, rho_lh_file, weight_lh_file, radius_lh_file, angle_rh_file, theta_rh_file, eccen_rh_file, rho_rh_file, weight_rh_file, radius_rh_file): ''' calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices. ''' ctcs = {} for (h,ang,tht,ecc,rho,wgt,rad,run) in [ ('lh', angle_lh_file,theta_lh_file, eccen_lh_file,rho_lh_file, weight_lh_file, radius_lh_file, run_lh), ('rh', angle_rh_file,theta_rh_file, eccen_rh_file,rho_rh_file, weight_rh_file, radius_rh_file, run_rh)]: if not run: continue hemi = getattr(subject, h) props = {} # load the properties or find them in the auto-properties if ang: try: props['polar_angle'] = _guess_surf_file(ang) except Exception: error('could not load surface file %s' % ang) elif tht: try: tmp = _guess_surf_file(tht) props['polar_angle'] = 90.0 - 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % tht) else: props['polar_angle'] = empirical_retinotopy_data(hemi, 'polar_angle') if ecc: try: props['eccentricity'] = _guess_surf_file(ecc) except Exception: error('could not load surface file %s' % ecc) elif rho: try: tmp = _guess_surf_file(rho) props['eccentricity'] = 180.0 / np.pi * tmp except Exception: error('could not load surface file %s' % rho) else: props['eccentricity'] = empirical_retinotopy_data(hemi, 'eccentricity') if wgt: try: props['weight'] = _guess_surf_file(wgt) except Exception: error('could not load surface file %s' % wgt) else: props['weight'] = empirical_retinotopy_data(hemi, 'weight') if rad: try: props['radius'] = _guess_surf_file(rad) except Exception: error('could not load surface file %s' % rad) else: props['radius'] = empirical_retinotopy_data(hemi, 'radius') # Check for inverted rh if h == 'rh' and invert_rh_angle: props['polar_angle'] = -props['polar_angle'] # and zero-out weights for high eccentricities props['weight'] = np.array(props['weight']) if max_in_eccen is not None: props['weight'][props['eccentricity'] > max_in_eccen] = 0 if min_in_eccen is not None: props['weight'][props['eccentricity'] < min_in_eccen] = 0 # Do smoothing, if requested if clean: note('Cleaning %s retinotopy...' % h.upper()) (ang,ecc) = clean_retinotopy(hemi, retinotopy=props, mask=None, weight='weight') props['polar_angle'] = ang props['eccentricity'] = ecc ctcs[h] = hemi.with_prop(props) return {'cortices': pyr.pmap(ctcs)}
[ "def", "calc_retinotopy", "(", "note", ",", "error", ",", "subject", ",", "clean", ",", "run_lh", ",", "run_rh", ",", "invert_rh_angle", ",", "max_in_eccen", ",", "min_in_eccen", ",", "angle_lh_file", ",", "theta_lh_file", ",", "eccen_lh_file", ",", "rho_lh_file", ",", "weight_lh_file", ",", "radius_lh_file", ",", "angle_rh_file", ",", "theta_rh_file", ",", "eccen_rh_file", ",", "rho_rh_file", ",", "weight_rh_file", ",", "radius_rh_file", ")", ":", "ctcs", "=", "{", "}", "for", "(", "h", ",", "ang", ",", "tht", ",", "ecc", ",", "rho", ",", "wgt", ",", "rad", ",", "run", ")", "in", "[", "(", "'lh'", ",", "angle_lh_file", ",", "theta_lh_file", ",", "eccen_lh_file", ",", "rho_lh_file", ",", "weight_lh_file", ",", "radius_lh_file", ",", "run_lh", ")", ",", "(", "'rh'", ",", "angle_rh_file", ",", "theta_rh_file", ",", "eccen_rh_file", ",", "rho_rh_file", ",", "weight_rh_file", ",", "radius_rh_file", ",", "run_rh", ")", "]", ":", "if", "not", "run", ":", "continue", "hemi", "=", "getattr", "(", "subject", ",", "h", ")", "props", "=", "{", "}", "# load the properties or find them in the auto-properties", "if", "ang", ":", "try", ":", "props", "[", "'polar_angle'", "]", "=", "_guess_surf_file", "(", "ang", ")", "except", "Exception", ":", "error", "(", "'could not load surface file %s'", "%", "ang", ")", "elif", "tht", ":", "try", ":", "tmp", "=", "_guess_surf_file", "(", "tht", ")", "props", "[", "'polar_angle'", "]", "=", "90.0", "-", "180.0", "/", "np", ".", "pi", "*", "tmp", "except", "Exception", ":", "error", "(", "'could not load surface file %s'", "%", "tht", ")", "else", ":", "props", "[", "'polar_angle'", "]", "=", "empirical_retinotopy_data", "(", "hemi", ",", "'polar_angle'", ")", "if", "ecc", ":", "try", ":", "props", "[", "'eccentricity'", "]", "=", "_guess_surf_file", "(", "ecc", ")", "except", "Exception", ":", "error", "(", "'could not load surface file %s'", "%", "ecc", ")", "elif", "rho", ":", "try", ":", "tmp", "=", "_guess_surf_file", "(", "rhp", ")", "props", "[", "'eccentricity'", "]", "=", "180.0", "/", "np", ".", "pi", "*", "tmp", "except", "Exception", ":", "error", "(", "'could not load surface file %s'", "%", "rho", ")", "else", ":", "props", "[", "'eccentricity'", "]", "=", "empirical_retinotopy_data", "(", "hemi", ",", "'eccentricity'", ")", "if", "wgt", ":", "try", ":", "props", "[", "'weight'", "]", "=", "_guess_surf_file", "(", "wgt", ")", "except", "Exception", ":", "error", "(", "'could not load surface file %s'", "%", "wgt", ")", "else", ":", "props", "[", "'weight'", "]", "=", "empirical_retinotopy_data", "(", "hemi", ",", "'weight'", ")", "if", "rad", ":", "try", ":", "props", "[", "'radius'", "]", "=", "_guess_surf_file", "(", "rad", ")", "except", "Exception", ":", "error", "(", "'could not load surface file %s'", "%", "rad", ")", "else", ":", "props", "[", "'radius'", "]", "=", "empirical_retinotopy_data", "(", "hemi", ",", "'radius'", ")", "# Check for inverted rh", "if", "h", "==", "'rh'", "and", "invert_rh_angle", ":", "props", "[", "'polar_angle'", "]", "=", "-", "props", "[", "'polar_angle'", "]", "# and zero-out weights for high eccentricities", "props", "[", "'weight'", "]", "=", "np", ".", "array", "(", "props", "[", "'weight'", "]", ")", "if", "max_in_eccen", "is", "not", "None", ":", "props", "[", "'weight'", "]", "[", "props", "[", "'eccentricity'", "]", ">", "max_in_eccen", "]", "=", "0", "if", "min_in_eccen", "is", "not", "None", ":", "props", "[", "'weight'", "]", "[", "props", "[", "'eccentricity'", "]", "<", "min_in_eccen", "]", "=", "0", 
"# Do smoothing, if requested", "if", "clean", ":", "note", "(", "'Cleaning %s retinotopy...'", "%", "h", ".", "upper", "(", ")", ")", "(", "ang", ",", "ecc", ")", "=", "clean_retinotopy", "(", "hemi", ",", "retinotopy", "=", "props", ",", "mask", "=", "None", ",", "weight", "=", "'weight'", ")", "props", "[", "'polar_angle'", "]", "=", "ang", "props", "[", "'eccentricity'", "]", "=", "ecc", "ctcs", "[", "h", "]", "=", "hemi", ".", "with_prop", "(", "props", ")", "return", "{", "'cortices'", ":", "pyr", ".", "pmap", "(", "ctcs", ")", "}" ]
calc_retinotopy extracts the retinotopy options from the command line, loads the relevant files, and stores them as properties on the subject's lh and rh cortices.
[ "calc_retinotopy", "extracts", "the", "retinotopy", "options", "from", "the", "command", "line", "loads", "the", "relevant", "files", "and", "stores", "them", "as", "properties", "on", "the", "subject", "s", "lh", "and", "rh", "cortices", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/register_retinotopy.py#L363-L431
-1
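When given theta/rho in radians instead of polar_angle/eccentricity in degrees, calc_retinotopy converts them with 90 - 180/pi * theta and 180/pi * rho; a direct numeric check of those two lines (the input values here are arbitrary):

import numpy as np

theta = np.array([0.0, np.pi / 2])   # radians
polar_angle = 90.0 - 180.0 / np.pi * theta
rho = np.array([0.1, 0.2])           # radians
eccentricity = 180.0 / np.pi * rho
print(polar_angle)    # [90.  0.]
print(eccentricity)   # ~[ 5.73 11.46] degrees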
251,738
noahbenson/neuropythy
neuropythy/commands/register_retinotopy.py
calc_registrations
def calc_registrations(note, error, cortices, model, model_sym, weight_min, scale, prior, max_out_eccen, max_steps, max_step_size, radius_weight, field_sign_weight, resample, invert_rh_angle, part_vol_correct): ''' calc_registrations is the calculator that performs the registrations for the left and right hemisphere; these are returned as the immutable maps yielded from the register_retinotopy command. ''' rsamp = ('fsaverage_sym' if model_sym else 'fsaverage') if resample else False # Do the registration res = {} for (h,ctx) in six.iteritems(cortices): note('Preparing %s Registration...' % h.upper()) try: res[h] = register_retinotopy(ctx, model[h], model_hemi='sym' if model_sym else h, polar_angle='polar_angle', eccentricity='eccentricity', weight='weight', weight_min=weight_min, partial_voluming_correction=part_vol_correct, field_sign_weight=field_sign_weight, radius_weight=radius_weight, scale=scale, prior=prior, resample=rsamp, invert_rh_field_sign=invert_rh_angle, max_steps=max_steps, max_step_size=max_step_size, yield_imap=True) except Exception: #error('Exception caught while setting-up register_retinotopy (%s)' % h) raise return {'registrations': pyr.pmap(res)}
python
def calc_registrations(note, error, cortices, model, model_sym, weight_min, scale, prior, max_out_eccen, max_steps, max_step_size, radius_weight, field_sign_weight, resample, invert_rh_angle, part_vol_correct): ''' calc_registrations is the calculator that performs the registrations for the left and right hemisphere; these are returned as the immutable maps yielded from the register_retinotopy command. ''' rsamp = ('fsaverage_sym' if model_sym else 'fsaverage') if resample else False # Do the registration res = {} for (h,ctx) in six.iteritems(cortices): note('Preparing %s Registration...' % h.upper()) try: res[h] = register_retinotopy(ctx, model[h], model_hemi='sym' if model_sym else h, polar_angle='polar_angle', eccentricity='eccentricity', weight='weight', weight_min=weight_min, partial_voluming_correction=part_vol_correct, field_sign_weight=field_sign_weight, radius_weight=radius_weight, scale=scale, prior=prior, resample=rsamp, invert_rh_field_sign=invert_rh_angle, max_steps=max_steps, max_step_size=max_step_size, yield_imap=True) except Exception: #error('Exception caught while setting-up register_retinotopy (%s)' % h) raise return {'registrations': pyr.pmap(res)}
[ "def", "calc_registrations", "(", "note", ",", "error", ",", "cortices", ",", "model", ",", "model_sym", ",", "weight_min", ",", "scale", ",", "prior", ",", "max_out_eccen", ",", "max_steps", ",", "max_step_size", ",", "radius_weight", ",", "field_sign_weight", ",", "resample", ",", "invert_rh_angle", ",", "part_vol_correct", ")", ":", "rsamp", "=", "(", "'fsaverage_sym'", "if", "model_sym", "else", "'fsaverage'", ")", "if", "resample", "else", "False", "# Do the registration", "res", "=", "{", "}", "for", "(", "h", ",", "ctx", ")", "in", "six", ".", "iteritems", "(", "cortices", ")", ":", "note", "(", "'Preparing %s Registration...'", "%", "h", ".", "upper", "(", ")", ")", "try", ":", "res", "[", "h", "]", "=", "register_retinotopy", "(", "ctx", ",", "model", "[", "h", "]", ",", "model_hemi", "=", "'sym'", "if", "model_sym", "else", "h", ",", "polar_angle", "=", "'polar_angle'", ",", "eccentricity", "=", "'eccentricity'", ",", "weight", "=", "'weight'", ",", "weight_min", "=", "weight_min", ",", "partial_voluming_correction", "=", "part_vol_correct", ",", "field_sign_weight", "=", "field_sign_weight", ",", "radius_weight", "=", "radius_weight", ",", "scale", "=", "scale", ",", "prior", "=", "prior", ",", "resample", "=", "rsamp", ",", "invert_rh_field_sign", "=", "invert_rh_angle", ",", "max_steps", "=", "max_steps", ",", "max_step_size", "=", "max_step_size", ",", "yield_imap", "=", "True", ")", "except", "Exception", ":", "#error('Exception caught while setting-up register_retinotopy (%s)' % h)", "raise", "return", "{", "'registrations'", ":", "pyr", ".", "pmap", "(", "res", ")", "}" ]
calc_registrations is the calculator that performs the registrations for the left and right hemisphere; these are returned as the immutable maps yielded from the register_retinotopy command.
[ "calc_registrations", "is", "the", "calculator", "that", "performs", "the", "registrations", "for", "the", "left", "and", "right", "hemisphere", ";", "these", "are", "returned", "as", "the", "immutable", "maps", "yielded", "from", "the", "register_retinotopy", "command", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/register_retinotopy.py#L433-L466
-1
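For illustration, a hedged sketch of the per-hemisphere call that calc_registrations makes. The subject name, model name ('benson17'), and numeric values are assumptions; the keyword arguments mirror the record above. Note that yield_imap=True makes the call return a lazy map, so the registration itself only runs when a value is requested.

```python
import neuropythy as ny
from neuropythy.vision import register_retinotopy

# Hypothetical FreeSurfer subject whose cortex already carries the
# 'polar_angle', 'eccentricity', and 'weight' properties prepared earlier.
ctx = ny.freesurfer_subject('bert').lh

# Mirrors the per-hemisphere call in the record (lh, asymmetric model):
imap = register_retinotopy(ctx, 'benson17', model_hemi='lh',
                           polar_angle='polar_angle',
                           eccentricity='eccentricity',
                           weight='weight', weight_min=0.1,
                           partial_voluming_correction=False,
                           field_sign_weight=1.0, radius_weight=1.0,
                           scale=20.0, prior='retinotopy',
                           resample='fsaverage',
                           max_steps=2000, max_step_size=0.05,
                           yield_imap=True)

# Accessing a key of the lazy map triggers the actual registration:
pmesh = imap['predicted_mesh']
```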
251,739
noahbenson/neuropythy
neuropythy/commands/register_retinotopy.py
save_surface_files
def save_surface_files(note, error, registrations, subject, no_surf_export, no_reg_export, surface_format, surface_path, angle_tag, eccen_tag, label_tag, radius_tag, registration_name): ''' save_surface_files is the calculator that saves the registration data out as surface files, which are put back in the registration as the value 'surface_files'. ''' if no_surf_export: return {'surface_files': ()} surface_format = surface_format.lower() # make an exporter for properties: if surface_format in ['curv', 'morph', 'auto', 'automatic']: def export(flnm, p): fsio.write_morph_data(flnm, p) return flnm elif surface_format in ['mgh', 'mgz']: def export(flnm, p): flnm = flnm + '.' + surface_format dt = np.int32 if np.issubdtype(p.dtype, np.dtype(int).type) else np.float32 img = fsmgh.MGHImage(np.asarray([[p]], dtype=dt), np.eye(4)) img.to_filename(flnm) return flnm elif surface_format in ['nifti', 'nii', 'niigz', 'nii.gz']: surface_format = 'nii' if surface_format == 'nii' else 'nii.gz' def export(flnm, p): flnm = flnm + '.' + surface_format dt = np.int32 if np.issubdtype(p.dtype, np.dtype(int).type) else np.float32 img = nib.Nifti1Image(np.asarray([[p]], dtype=dt), np.eye(4)) img.to_filename(flnm) return flnm else: error('Could not understand surface file-format %s' % surface_format) path = surface_path if surface_path else os.path.join(subject.path, 'surf') files = [] note('Exporting files...') for h in six.iterkeys(registrations): hemi = subject.hemis[h] reg = registrations[h] note('Extracting %s predicted mesh...' % h.upper()) pmesh = reg['predicted_mesh'] for (pname,tag) in zip(['polar_angle', 'eccentricity', 'visual_area', 'radius'], [angle_tag, eccen_tag, label_tag, radius_tag]): flnm = export(os.path.join(path, h + '.' + tag), pmesh.prop(pname)) files.append(flnm) # last do the registration itself if registration_name and not no_reg_export: flnm = os.path.join(path, h + '.' + registration_name + '.sphere.reg') fsio.write_geometry(flnm, pmesh.coordinates.T, pmesh.tess.faces.T) return {'surface_files': tuple(files)}
python
def save_surface_files(note, error, registrations, subject, no_surf_export, no_reg_export, surface_format, surface_path, angle_tag, eccen_tag, label_tag, radius_tag, registration_name): ''' save_surface_files is the calculator that saves the registration data out as surface files, which are put back in the registration as the value 'surface_files'. ''' if no_surf_export: return {'surface_files': ()} surface_format = surface_format.lower() # make an exporter for properties: if surface_format in ['curv', 'morph', 'auto', 'automatic']: def export(flnm, p): fsio.write_morph_data(flnm, p) return flnm elif surface_format in ['mgh', 'mgz']: def export(flnm, p): flnm = flnm + '.' + surface_format dt = np.int32 if np.issubdtype(p.dtype, np.dtype(int).type) else np.float32 img = fsmgh.MGHImage(np.asarray([[p]], dtype=dt), np.eye(4)) img.to_filename(flnm) return flnm elif surface_format in ['nifti', 'nii', 'niigz', 'nii.gz']: surface_format = 'nii' if surface_format == 'nii' else 'nii.gz' def export(flnm, p): flnm = flnm + '.' + surface_format dt = np.int32 if np.issubdtype(p.dtype, np.dtype(int).type) else np.float32 img = nib.Nifti1Image(np.asarray([[p]], dtype=dt), np.eye(4)) img.to_filename(flnm) return flnm else: error('Could not understand surface file-format %s' % surface_format) path = surface_path if surface_path else os.path.join(subject.path, 'surf') files = [] note('Exporting files...') for h in six.iterkeys(registrations): hemi = subject.hemis[h] reg = registrations[h] note('Extracting %s predicted mesh...' % h.upper()) pmesh = reg['predicted_mesh'] for (pname,tag) in zip(['polar_angle', 'eccentricity', 'visual_area', 'radius'], [angle_tag, eccen_tag, label_tag, radius_tag]): flnm = export(os.path.join(path, h + '.' + tag), pmesh.prop(pname)) files.append(flnm) # last do the registration itself if registration_name and not no_reg_export: flnm = os.path.join(path, h + '.' + registration_name + '.sphere.reg') fsio.write_geometry(flnm, pmesh.coordinates.T, pmesh.tess.faces.T) return {'surface_files': tuple(files)}
[ "def", "save_surface_files", "(", "note", ",", "error", ",", "registrations", ",", "subject", ",", "no_surf_export", ",", "no_reg_export", ",", "surface_format", ",", "surface_path", ",", "angle_tag", ",", "eccen_tag", ",", "label_tag", ",", "radius_tag", ",", "registration_name", ")", ":", "if", "no_surf_export", ":", "return", "{", "'surface_files'", ":", "(", ")", "}", "surface_format", "=", "surface_format", ".", "lower", "(", ")", "# make an exporter for properties:", "if", "surface_format", "in", "[", "'curv'", ",", "'morph'", ",", "'auto'", ",", "'automatic'", "]", ":", "def", "export", "(", "flnm", ",", "p", ")", ":", "fsio", ".", "write_morph_data", "(", "flnm", ",", "p", ")", "return", "flnm", "elif", "surface_format", "in", "[", "'mgh'", ",", "'mgz'", "]", ":", "def", "export", "(", "flnm", ",", "p", ")", ":", "flnm", "=", "flnm", "+", "'.'", "+", "surface_format", "dt", "=", "np", ".", "int32", "if", "np", ".", "issubdtype", "(", "p", ".", "dtype", ",", "np", ".", "dtype", "(", "int", ")", ".", "type", ")", "else", "np", ".", "float32", "img", "=", "fsmgh", ".", "MGHImage", "(", "np", ".", "asarray", "(", "[", "[", "p", "]", "]", ",", "dtype", "=", "dt", ")", ",", "np", ".", "eye", "(", "4", ")", ")", "img", ".", "to_filename", "(", "flnm", ")", "return", "flnm", "elif", "surface_format", "in", "[", "'nifti'", ",", "'nii'", ",", "'niigz'", ",", "'nii.gz'", "]", ":", "surface_format", "=", "'nii'", "if", "surface_format", "==", "'nii'", "else", "'nii.gz'", "def", "export", "(", "flnm", ",", "p", ")", ":", "flnm", "=", "flnm", "+", "'.'", "+", "surface_format", "dt", "=", "np", ".", "int32", "if", "np", ".", "issubdtype", "(", "p", ".", "dtype", ",", "np", ".", "dtype", "(", "int", ")", ".", "type", ")", "else", "np", ".", "float32", "img", "=", "nib", ".", "Nifti1Image", "(", "np", ".", "asarray", "(", "[", "[", "p", "]", "]", ",", "dtype", "=", "dt", ")", ",", "np", ".", "eye", "(", "4", ")", ")", "img", ".", "to_filename", "(", "flnm", ")", "return", "flnm", "else", ":", "error", "(", "'Could not understand surface file-format %s'", "%", "surface_format", ")", "path", "=", "surface_path", "if", "surface_path", "else", "os", ".", "path", ".", "join", "(", "subject", ".", "path", ",", "'surf'", ")", "files", "=", "[", "]", "note", "(", "'Exporting files...'", ")", "for", "h", "in", "six", ".", "iterkeys", "(", "registrations", ")", ":", "hemi", "=", "subject", ".", "hemis", "[", "h", "]", "reg", "=", "registrations", "[", "h", "]", "note", "(", "'Extracting %s predicted mesh...'", "%", "h", ".", "upper", "(", ")", ")", "pmesh", "=", "reg", "[", "'predicted_mesh'", "]", "for", "(", "pname", ",", "tag", ")", "in", "zip", "(", "[", "'polar_angle'", ",", "'eccentricity'", ",", "'visual_area'", ",", "'radius'", "]", ",", "[", "angle_tag", ",", "eccen_tag", ",", "label_tag", ",", "radius_tag", "]", ")", ":", "flnm", "=", "export", "(", "os", ".", "path", ".", "join", "(", "path", ",", "h", "+", "'.'", "+", "tag", ")", ",", "pmesh", ".", "prop", "(", "pname", ")", ")", "files", ".", "append", "(", "flnm", ")", "# last do the registration itself", "if", "registration_name", "and", "not", "no_reg_export", ":", "flnm", "=", "os", ".", "path", ".", "join", "(", "path", ",", "h", "+", "'.'", "+", "registration_name", "+", "'.sphere.reg'", ")", "fsio", ".", "write_geometry", "(", "flnm", ",", "pmesh", ".", "coordinates", ".", "T", ",", "pmesh", ".", "tess", ".", "faces", ".", "T", ")", "return", "{", "'surface_files'", ":", "tuple", "(", "files", ")", "}" ]
save_surface_files is the calculator that saves the registration data out as surface files, which are put back in the registration as the value 'surface_files'.
[ "save_surface_files", "is", "the", "calculator", "that", "saves", "the", "registration", "data", "out", "as", "surface", "files", "which", "are", "put", "back", "in", "the", "registration", "as", "the", "value", "surface_files", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/register_retinotopy.py#L468-L515
-1
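The exporter here is a closure chosen once per format and then applied to each property; surface properties are written as degenerate 1 x 1 x N volumes. A minimal self-contained sketch of the NIfTI branch (the file name below, and the use of np.integer in place of the record's np.dtype(int).type check, are illustrative):

```python
import numpy as np
import nibabel as nib

def make_exporter(surface_format):
    # Dispatch on the requested format, returning a writer closure.
    surface_format = surface_format.lower()
    if surface_format in ('nifti', 'nii', 'niigz', 'nii.gz'):
        ext = 'nii' if surface_format == 'nii' else 'nii.gz'
        def export(flnm, p):
            flnm = flnm + '.' + ext
            dt = np.int32 if np.issubdtype(p.dtype, np.integer) else np.float32
            # a 1 x 1 x N "volume" holding one value per surface vertex
            img = nib.Nifti1Image(np.asarray([[p]], dtype=dt), np.eye(4))
            img.to_filename(flnm)
            return flnm
        return export
    raise ValueError('unsupported surface file-format: %s' % surface_format)

export = make_exporter('nii.gz')
# export('lh.angle', np.random.rand(163842))  # would write lh.angle.nii.gz
```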
251,740
noahbenson/neuropythy
neuropythy/commands/register_retinotopy.py
save_volume_files
def save_volume_files(note, error, registrations, subject, no_vol_export, volume_format, volume_path, angle_tag, eccen_tag, label_tag, radius_tag): ''' save_volume_files is the calculator that saves the registration data out as volume files, which are put back in the registration as the value 'volume_files'. ''' if no_vol_export: return {'volume_files': ()} volume_format = volume_format.lower() # make an exporter for properties: if volume_format in ['mgh', 'mgz', 'auto', 'automatic', 'default']: volume_format = 'mgh' if volume_format == 'mgh' else 'mgz' def export(flnm, d): flnm = flnm + '.' + volume_format dt = np.int32 if np.issubdtype(d.dtype, np.dtype(int).type) else np.float32 img = fsmgh.MGHImage(np.asarray(d, dtype=dt), subject.voxel_to_native_matrix) img.to_filename(flnm) return flnm elif volume_format in ['nifti', 'nii', 'niigz', 'nii.gz']: volume_format = 'nii' if volume_format == 'nii' else 'nii.gz' def export(flnm, p): flnm = flnm + '.' + volume_format dt = np.int32 if np.issubdtype(p.dtype, np.dtype(int).type) else np.float32 img = nib.Nifti1Image(np.asarray(p, dtype=dt), subject.voxel_to_native_matrix) img.to_filename(flnm) return flnm else: error('Could not understand volume file-format %s' % volume_format) path = volume_path if volume_path else os.path.join(subject.path, 'mri') files = [] note('Extracting predicted meshes for volume export...') hemis = [registrations[h]['predicted_mesh'] if h in registrations else None for h in ['lh', 'rh']] for (pname,tag) in zip(['polar_angle', 'eccentricity', 'visual_area', 'radius'], [angle_tag, eccen_tag, label_tag, radius_tag]): # we have to make the volume first... dat = tuple([None if h is None else h.prop(pname) for h in hemis]) (mtd,dt) = ('nearest',np.int32) if pname == 'visual_area' else ('linear',np.float32) note('Constructing %s image...' % pname) img = subject.cortex_to_image(dat, method=mtd, dtype=dt) flnm = export(os.path.join(path, tag), img) files.append(flnm) return {'volume_files': tuple(files)}
python
def save_volume_files(note, error, registrations, subject, no_vol_export, volume_format, volume_path, angle_tag, eccen_tag, label_tag, radius_tag): ''' save_volume_files is the calculator that saves the registration data out as volume files, which are put back in the registration as the value 'volume_files'. ''' if no_vol_export: return {'volume_files': ()} volume_format = volume_format.lower() # make an exporter for properties: if volume_format in ['mgh', 'mgz', 'auto', 'automatic', 'default']: volume_format = 'mgh' if volume_format == 'mgh' else 'mgz' def export(flnm, d): flnm = flnm + '.' + volume_format dt = np.int32 if np.issubdtype(d.dtype, np.dtype(int).type) else np.float32 img = fsmgh.MGHImage(np.asarray(d, dtype=dt), subject.voxel_to_native_matrix) img.to_filename(flnm) return flnm elif volume_format in ['nifti', 'nii', 'niigz', 'nii.gz']: volume_format = 'nii' if volume_format == 'nii' else 'nii.gz' def export(flnm, p): flnm = flnm + '.' + volume_format dt = np.int32 if np.issubdtype(p.dtype, np.dtype(int).type) else np.float32 img = nib.Nifti1Image(np.asarray(p, dtype=dt), subject.voxel_to_native_matrix) img.to_filename(flnm) return flnm else: error('Could not understand volume file-format %s' % volume_format) path = volume_path if volume_path else os.path.join(subject.path, 'mri') files = [] note('Extracting predicted meshes for volume export...') hemis = [registrations[h]['predicted_mesh'] if h in registrations else None for h in ['lh', 'rh']] for (pname,tag) in zip(['polar_angle', 'eccentricity', 'visual_area', 'radius'], [angle_tag, eccen_tag, label_tag, radius_tag]): # we have to make the volume first... dat = tuple([None if h is None else h.prop(pname) for h in hemis]) (mtd,dt) = ('nearest',np.int32) if pname == 'visual_area' else ('linear',np.float32) note('Constructing %s image...' % pname) img = subject.cortex_to_image(dat, method=mtd, dtype=dt) flnm = export(os.path.join(path, tag), img) files.append(flnm) return {'volume_files': tuple(files)}
[ "def", "save_volume_files", "(", "note", ",", "error", ",", "registrations", ",", "subject", ",", "no_vol_export", ",", "volume_format", ",", "volume_path", ",", "angle_tag", ",", "eccen_tag", ",", "label_tag", ",", "radius_tag", ")", ":", "if", "no_vol_export", ":", "return", "{", "'volume_files'", ":", "(", ")", "}", "volume_format", "=", "volume_format", ".", "lower", "(", ")", "# make an exporter for properties:", "if", "volume_format", "in", "[", "'mgh'", ",", "'mgz'", ",", "'auto'", ",", "'automatic'", ",", "'default'", "]", ":", "volume_format", "=", "'mgh'", "if", "volume_format", "==", "'mgh'", "else", "'mgz'", "def", "export", "(", "flnm", ",", "d", ")", ":", "flnm", "=", "flnm", "+", "'.'", "+", "volume_format", "dt", "=", "np", ".", "int32", "if", "np", ".", "issubdtype", "(", "d", ".", "dtype", ",", "np", ".", "dtype", "(", "int", ")", ".", "type", ")", "else", "np", ".", "float32", "img", "=", "fsmgh", ".", "MGHImage", "(", "np", ".", "asarray", "(", "d", ",", "dtype", "=", "dt", ")", ",", "subject", ".", "voxel_to_native_matrix", ")", "img", ".", "to_filename", "(", "flnm", ")", "return", "flnm", "elif", "volume_format", "in", "[", "'nifti'", ",", "'nii'", ",", "'niigz'", ",", "'nii.gz'", "]", ":", "volume_format", "=", "'nii'", "if", "volume_format", "==", "'nii'", "else", "'nii.gz'", "def", "export", "(", "flnm", ",", "p", ")", ":", "flnm", "=", "flnm", "+", "'.'", "+", "volume_format", "dt", "=", "np", ".", "int32", "if", "np", ".", "issubdtype", "(", "p", ".", "dtype", ",", "np", ".", "dtype", "(", "int", ")", ".", "type", ")", "else", "np", ".", "float32", "img", "=", "nib", ".", "Nifti1Image", "(", "np", ".", "asarray", "(", "p", ",", "dtype", "=", "dt", ")", ",", "subject", ".", "voxel_to_native_matrix", ")", "img", ".", "to_filename", "(", "flnm", ")", "return", "flnm", "else", ":", "error", "(", "'Could not understand volume file-format %s'", "%", "volume_format", ")", "path", "=", "volume_path", "if", "volume_path", "else", "os", ".", "path", ".", "join", "(", "subject", ".", "path", ",", "'mri'", ")", "files", "=", "[", "]", "note", "(", "'Extracting predicted meshes for volume export...'", ")", "hemis", "=", "[", "registrations", "[", "h", "]", "[", "'predicted_mesh'", "]", "if", "h", "in", "registrations", "else", "None", "for", "h", "in", "[", "'lh'", ",", "'rh'", "]", "]", "for", "(", "pname", ",", "tag", ")", "in", "zip", "(", "[", "'polar_angle'", ",", "'eccentricity'", ",", "'visual_area'", ",", "'radius'", "]", ",", "[", "angle_tag", ",", "eccen_tag", ",", "label_tag", ",", "radius_tag", "]", ")", ":", "# we have to make the volume first...", "dat", "=", "tuple", "(", "[", "None", "if", "h", "is", "None", "else", "h", ".", "prop", "(", "pname", ")", "for", "h", "in", "hemis", "]", ")", "(", "mtd", ",", "dt", ")", "=", "(", "'nearest'", ",", "np", ".", "int32", ")", "if", "pname", "==", "'visual_area'", "else", "(", "'linear'", ",", "np", ".", "float32", ")", "note", "(", "'Constructing %s image...'", "%", "pname", ")", "img", "=", "subject", ".", "cortex_to_image", "(", "dat", ",", "method", "=", "mtd", ",", "dtype", "=", "dt", ")", "flnm", "=", "export", "(", "os", ".", "path", ".", "join", "(", "path", ",", "tag", ")", ",", "img", ")", "files", ".", "append", "(", "flnm", ")", "return", "{", "'volume_files'", ":", "tuple", "(", "files", ")", "}" ]
save_volume_files is the calculator that saves the registration data out as volume files, which are put back in the registration as the value 'volume_files'.
[ "save_volume_files", "is", "the", "calculator", "that", "saves", "the", "registration", "data", "out", "as", "volume", "files", "which", "are", "put", "back", "in", "the", "registration", "as", "the", "value", "volume_files", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/commands/register_retinotopy.py#L517-L559
-1
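Two idioms from this record are worth isolating: missing hemispheres become None in the per-hemisphere data tuple, and the interpolation method and dtype are chosen per property (nearest-neighbor for discrete visual-area labels, linear otherwise). A runnable sketch with hypothetical values:

```python
import numpy as np

# Hypothetical: only the left hemisphere was registered.
registrations = {'lh': {'predicted_mesh': 'lh-mesh'}}
hemis = [registrations[h]['predicted_mesh'] if h in registrations else None
         for h in ['lh', 'rh']]
print(hemis)  # ['lh-mesh', None]

for pname in ['polar_angle', 'eccentricity', 'visual_area', 'radius']:
    # discrete labels get nearest-neighbor; continuous maps get linear
    (mtd, dt) = ('nearest', np.int32) if pname == 'visual_area' else ('linear', np.float32)
    print(pname, mtd, dt.__name__)
```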
251,741
noahbenson/neuropythy
neuropythy/vision/retinotopy.py
calc_empirical_retinotopy
def calc_empirical_retinotopy(cortex, polar_angle=None, eccentricity=None, pRF_radius=None, weight=None, eccentricity_range=None, weight_min=0, invert_rh_angle=False, partial_voluming_correction=False): ''' calc_empirical_retinotopy computes the value empirical_retinotopy, which is an itable object storing the retinotopy data for the registration. Required afferent parameters: @ cortex Must be the cortex object that is to be registered to the model of retinotopy. Optional afferent parameters: @ polar_angle May be an array of polar angle values or a polar angle property name; if None (the default), attempts to auto-detect an empirical polar angle property. @ eccentricity May be an array of eccentricity values or an eccentricity property name; if None (the default), attempts to auto-detect an empirical eccentricity property. @ pRF_radius May be an array of receptive field radius values or the property name for such an array; if None (the default), attempts to auto-detect an empirical radius property. @ weight May be an array of weight values or a weight property name; if None (the default), attempts to auto-detect an empirical weight property, such as variance_explained. @ eccentricity_range May be a maximum eccentricity value or a (min, max) eccentricity range to be used in the registration; if None, then no clipping is done. @ weight_min May be given to indicate that weight values below this value should not be included in the registration; the default is 0. @ partial_voluming_correction May be set to True (default is False) to indicate that partial voluming correction should be used to adjust the weights. @ invert_rh_angle May be set to True (default is False) to indicate that the right hemisphere has its polar angle stored with opposite sign to the model polar angle. Efferent values: @ empirical_retinotopy Will be a pimms itable of the empirical retinotopy data to be used in the registration; the table's keys will be 'polar_angle', 'eccentricity', and 'weight'; values that should be excluded for any reason will have 0 weight and undefined angles. ''' data = {} # the map we build up in this function n = cortex.vertex_count (emin,emax) = (-np.inf,np.inf) if eccentricity_range is None else \ (0,eccentricity_range) if pimms.is_number(eccentricity_range) else \ eccentricity_range # Step 1: get our properties straight ########################################################## (ang, ecc, rad, wgt) = [ np.array(extract_retinotopy_argument(cortex, name, arg, default='empirical')) for (name, arg) in [ ('polar_angle', polar_angle), ('eccentricity', eccentricity), ('radius', pRF_radius), ('weight', np.full(n, weight) if pimms.is_number(weight) else weight)]] if wgt is None: wgt = np.ones(len(ecc)) bad = np.logical_not(np.isfinite(np.prod([ang, ecc, wgt], axis=0))) ecc[bad] = 0 wgt[bad] = 0 if rad is not None: rad[bad] = 0 # do partial voluming correction if requested if partial_voluming_correction: wgt = wgt * (1 - cortex.partial_voluming_factor) # now trim and finalize bad = bad | (wgt <= weight_min) | (ecc < emin) | (ecc > emax) wgt[bad] = 0 ang[bad] = 0 ecc[bad] = 0 for x in [ang, ecc, wgt, rad]: if x is not None: x.setflags(write=False) # that's it! dat = dict(polar_angle=ang, eccentricity=ecc, weight=wgt) if rad is not None: dat['radius'] = rad return (pimms.itable(dat),)
python
def calc_empirical_retinotopy(cortex, polar_angle=None, eccentricity=None, pRF_radius=None, weight=None, eccentricity_range=None, weight_min=0, invert_rh_angle=False, partial_voluming_correction=False): ''' calc_empirical_retinotopy computes the value empirical_retinotopy, which is an itable object storing the retinotopy data for the registration. Required afferent parameters: @ cortex Must be the cortex object that is to be registered to the model of retinotopy. Optional afferent parameters: @ polar_angle May be an array of polar angle values or a polar angle property name; if None (the default), attempts to auto-detect an empirical polar angle property. @ eccentricity May be an array of eccentricity values or an eccentricity property name; if None (the default), attempts to auto-detect an empirical eccentricity property. @ pRF_radius May be an array of receptive field radius values or the property name for such an array; if None (the default), attempts to auto-detect an empirical radius property. @ weight May be an array of weight values or a weight property name; if None (the default), attempts to auto-detect an empirical weight property, such as variance_explained. @ eccentricity_range May be a maximum eccentricity value or a (min, max) eccentricity range to be used in the registration; if None, then no clipping is done. @ weight_min May be given to indicate that weight values below this value should not be included in the registration; the default is 0. @ partial_voluming_correction May be set to True (default is False) to indicate that partial voluming correction should be used to adjust the weights. @ invert_rh_angle May be set to True (default is False) to indicate that the right hemisphere has its polar angle stored with opposite sign to the model polar angle. Efferent values: @ empirical_retinotopy Will be a pimms itable of the empirical retinotopy data to be used in the registration; the table's keys will be 'polar_angle', 'eccentricity', and 'weight'; values that should be excluded for any reason will have 0 weight and undefined angles. ''' data = {} # the map we build up in this function n = cortex.vertex_count (emin,emax) = (-np.inf,np.inf) if eccentricity_range is None else \ (0,eccentricity_range) if pimms.is_number(eccentricity_range) else \ eccentricity_range # Step 1: get our properties straight ########################################################## (ang, ecc, rad, wgt) = [ np.array(extract_retinotopy_argument(cortex, name, arg, default='empirical')) for (name, arg) in [ ('polar_angle', polar_angle), ('eccentricity', eccentricity), ('radius', pRF_radius), ('weight', np.full(n, weight) if pimms.is_number(weight) else weight)]] if wgt is None: wgt = np.ones(len(ecc)) bad = np.logical_not(np.isfinite(np.prod([ang, ecc, wgt], axis=0))) ecc[bad] = 0 wgt[bad] = 0 if rad is not None: rad[bad] = 0 # do partial voluming correction if requested if partial_voluming_correction: wgt = wgt * (1 - cortex.partial_voluming_factor) # now trim and finalize bad = bad | (wgt <= weight_min) | (ecc < emin) | (ecc > emax) wgt[bad] = 0 ang[bad] = 0 ecc[bad] = 0 for x in [ang, ecc, wgt, rad]: if x is not None: x.setflags(write=False) # that's it! dat = dict(polar_angle=ang, eccentricity=ecc, weight=wgt) if rad is not None: dat['radius'] = rad return (pimms.itable(dat),)
[ "def", "calc_empirical_retinotopy", "(", "cortex", ",", "polar_angle", "=", "None", ",", "eccentricity", "=", "None", ",", "pRF_radius", "=", "None", ",", "weight", "=", "None", ",", "eccentricity_range", "=", "None", ",", "weight_min", "=", "0", ",", "invert_rh_angle", "=", "False", ",", "partial_voluming_correction", "=", "False", ")", ":", "data", "=", "{", "}", "# the map we build up in this function", "n", "=", "cortex", ".", "vertex_count", "(", "emin", ",", "emax", ")", "=", "(", "-", "np", ".", "inf", ",", "np", ".", "inf", ")", "if", "eccentricity_range", "is", "None", "else", "(", "0", ",", "eccentricity_range", ")", "if", "pimms", ".", "is_number", "(", "eccentricity_range", ")", "else", "eccentricity_range", "# Step 1: get our properties straight ##########################################################", "(", "ang", ",", "ecc", ",", "rad", ",", "wgt", ")", "=", "[", "np", ".", "array", "(", "extract_retinotopy_argument", "(", "cortex", ",", "name", ",", "arg", ",", "default", "=", "'empirical'", ")", ")", "for", "(", "name", ",", "arg", ")", "in", "[", "(", "'polar_angle'", ",", "polar_angle", ")", ",", "(", "'eccentricity'", ",", "eccentricity", ")", ",", "(", "'radius'", ",", "pRF_radius", ")", ",", "(", "'weight'", ",", "np", ".", "full", "(", "n", ",", "weight", ")", "if", "pimms", ".", "is_number", "(", "weight", ")", "else", "weight", ")", "]", "]", "if", "wgt", "is", "None", ":", "wgt", "=", "np", ".", "ones", "(", "len", "(", "ecc", ")", ")", "bad", "=", "np", ".", "logical_not", "(", "np", ".", "isfinite", "(", "np", ".", "prod", "(", "[", "ang", ",", "ecc", ",", "wgt", "]", ",", "axis", "=", "0", ")", ")", ")", "ecc", "[", "bad", "]", "=", "0", "wgt", "[", "bad", "]", "=", "0", "if", "rad", "is", "not", "None", ":", "rad", "[", "bad", "]", "=", "0", "# do partial voluming correction if requested", "if", "partial_voluming_correction", ":", "wgt", "=", "wgt", "*", "(", "1", "-", "cortex", ".", "partial_voluming_factor", ")", "# now trim and finalize", "bad", "=", "bad", "|", "(", "wgt", "<=", "weight_min", ")", "|", "(", "ecc", "<", "emin", ")", "|", "(", "ecc", ">", "emax", ")", "wgt", "[", "bad", "]", "=", "0", "ang", "[", "bad", "]", "=", "0", "ecc", "[", "bad", "]", "=", "0", "for", "x", "in", "[", "ang", ",", "ecc", ",", "wgt", ",", "rad", "]", ":", "if", "x", "is", "not", "None", ":", "x", ".", "setflags", "(", "write", "=", "False", ")", "# that's it!", "dat", "=", "dict", "(", "polar_angle", "=", "ang", ",", "eccentricity", "=", "ecc", ",", "weight", "=", "wgt", ")", "if", "rad", "is", "not", "None", ":", "dat", "[", "'radius'", "]", "=", "rad", "return", "(", "pimms", ".", "itable", "(", "dat", ")", ",", ")" ]
calc_empirical_retinotopy computes the value empirical_retinotopy, which is an itable object storing the retinotopy data for the registration. Required afferent parameters: @ cortex Must be the cortex object that is to be registered to the model of retinotopy. Optional afferent parameters: @ polar_angle May be an array of polar angle values or a polar angle property name; if None (the default), attempts to auto-detect an empirical polar angle property. @ eccentricity May be an array of eccentricity values or an eccentricity property name; if None (the default), attempts to auto-detect an empirical eccentricity property. @ pRF_radius May be an array of receptive field radius values or the property name for such an array; if None (the default), attempts to auto-detect an empirical radius property. @ weight May be an array of weight values or a weight property name; if None (the default), attempts to auto-detect an empirical weight property, such as variance_explained. @ eccentricity_range May be a maximum eccentricity value or a (min, max) eccentricity range to be used in the registration; if None, then no clipping is done. @ weight_min May be given to indicate that weight values below this value should not be included in the registration; the default is 0. @ partial_voluming_correction May be set to True (default is False) to indicate that partial voluming correction should be used to adjust the weights. @ invert_rh_angle May be set to True (default is False) to indicate that the right hemisphere has its polar angle stored with opposite sign to the model polar angle. Efferent values: @ empirical_retinotopy Will be a pimms itable of the empirical retinotopy data to be used in the registration; the table's keys will be 'polar_angle', 'eccentricity', and 'weight'; values that should be excluded for any reason will have 0 weight and undefined angles.
[ "calc_empirical_retinotopy", "computes", "the", "value", "empirical_retinotopy", "which", "is", "an", "itable", "object", "storing", "the", "retinotopy", "data", "for", "the", "registration", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L1051-L1117
-1
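The eccentricity_range argument is normalized to a (min, max) pair (None means no clipping, a bare number means (0, max)), after which non-finite or out-of-range vertices get zero weight. A small sketch of that trimming logic with hypothetical data (np.isscalar stands in for the record's pimms.is_number):

```python
import numpy as np

def normalize_eccen_range(eccentricity_range):
    # None -> no clipping; a single number -> (0, max); a pair -> as given
    if eccentricity_range is None:
        return (-np.inf, np.inf)
    if np.isscalar(eccentricity_range):
        return (0, eccentricity_range)
    return tuple(eccentricity_range)

ang = np.array([45.0, 90.0, np.nan, 10.0])
ecc = np.array([2.0, 15.0, 3.0, 5.0])
wgt = np.array([0.9, 0.8, 0.7, 0.01])

(emin, emax) = normalize_eccen_range(12)  # hypothetical 12-degree cutoff
bad = ~np.isfinite(ang * ecc * wgt) | (wgt <= 0.05) | (ecc < emin) | (ecc > emax)
wgt[bad] = 0; ang[bad] = 0; ecc[bad] = 0
print(wgt)  # [0.9 0.  0.  0. ]
```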
251,742
noahbenson/neuropythy
neuropythy/vision/retinotopy.py
calc_model
def calc_model(cortex, model_argument, model_hemi=Ellipsis, radius=np.pi/3): ''' calc_model loads the appropriate model object given the model argument, which may given the name of the model or a model object itself. Required afferent parameters: @ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that can be loaded. Optional afferent parameters: @ model_hemi May be used to specify the hemisphere of the model; this is usually only used when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if left at the default value (Ellipsis), then it will use the hemisphere of the cortex param. Provided efferent values: @ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered. ''' if pimms.is_str(model_argument): h = cortex.chirality if model_hemi is Ellipsis else \ None if model_hemi is None else \ model_hemi model = retinotopy_model(model_argument, hemi=h, radius=radius) else: model = model_argument if not isinstance(model, RegisteredRetinotopyModel): raise ValueError('model must be a RegisteredRetinotopyModel') return model
python
def calc_model(cortex, model_argument, model_hemi=Ellipsis, radius=np.pi/3): ''' calc_model loads the appropriate model object given the model argument, which may given the name of the model or a model object itself. Required afferent parameters: @ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that can be loaded. Optional afferent parameters: @ model_hemi May be used to specify the hemisphere of the model; this is usually only used when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if left at the default value (Ellipsis), then it will use the hemisphere of the cortex param. Provided efferent values: @ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered. ''' if pimms.is_str(model_argument): h = cortex.chirality if model_hemi is Ellipsis else \ None if model_hemi is None else \ model_hemi model = retinotopy_model(model_argument, hemi=h, radius=radius) else: model = model_argument if not isinstance(model, RegisteredRetinotopyModel): raise ValueError('model must be a RegisteredRetinotopyModel') return model
[ "def", "calc_model", "(", "cortex", ",", "model_argument", ",", "model_hemi", "=", "Ellipsis", ",", "radius", "=", "np", ".", "pi", "/", "3", ")", ":", "if", "pimms", ".", "is_str", "(", "model_argument", ")", ":", "h", "=", "cortex", ".", "chirality", "if", "model_hemi", "is", "Ellipsis", "else", "None", "if", "model_hemi", "is", "None", "else", "model_hemi", "model", "=", "retinotopy_model", "(", "model_argument", ",", "hemi", "=", "h", ",", "radius", "=", "radius", ")", "else", ":", "model", "=", "model_argument", "if", "not", "isinstance", "(", "model", ",", "RegisteredRetinotopyModel", ")", ":", "raise", "ValueError", "(", "'model must be a RegisteredRetinotopyModel'", ")", "return", "model" ]
calc_model loads the appropriate model object given the model argument, which may be given as the name of the model or a model object itself. Required afferent parameters: @ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that can be loaded. Optional afferent parameters: @ model_hemi May be used to specify the hemisphere of the model; this is usually only used when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if left at the default value (Ellipsis), then it will use the hemisphere of the cortex param. Provided efferent values: @ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered.
[ "calc_model", "loads", "the", "appropriate", "model", "object", "given", "the", "model", "argument", "which", "may", "given", "the", "name", "of", "the", "model", "or", "a", "model", "object", "itself", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L1119-L1145
-1
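The hemisphere resolution above uses Ellipsis as a "not given" sentinel distinct from None (which selects the fsaverage_sym 'sym' model). A sketch of that ternary, extracted from the record:

```python
def resolve_model_hemi(cortex_chirality, model_hemi=Ellipsis):
    # Ellipsis -> fall back to the cortex's own chirality;
    # None -> the symmetric (fsaverage_sym) model; otherwise use as given.
    return (cortex_chirality if model_hemi is Ellipsis else
            None if model_hemi is None else
            model_hemi)

assert resolve_model_hemi('lh') == 'lh'
assert resolve_model_hemi('lh', None) is None
assert resolve_model_hemi('lh', 'rh') == 'rh'
```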
251,743
noahbenson/neuropythy
neuropythy/vision/retinotopy.py
calc_anchors
def calc_anchors(preregistration_map, model, model_hemi, scale=1, sigma=Ellipsis, radius_weight=0, field_sign_weight=0, invert_rh_field_sign=False): ''' calc_anchors is a calculator that creates a set of anchor instructions for a registration. Required afferent parameters: @ invert_rh_field_sign May be set to True (default is False) to indicate that the right hemisphere's field signs will be incorrect relative to the model; this generally should be used whenever invert_rh_angle is also set to True. ''' wgts = preregistration_map.prop('weight') rads = preregistration_map.prop('radius') if np.isclose(radius_weight, 0): radius_weight = 0 ancs = retinotopy_anchors(preregistration_map, model, polar_angle='polar_angle', eccentricity='eccentricity', radius='radius', weight=wgts, weight_min=0, # taken care of already radius_weight=radius_weight, field_sign_weight=field_sign_weight, scale=scale, invert_field_sign=(model_hemi == 'rh' and invert_rh_field_sign), **({} if sigma is Ellipsis else {'sigma':sigma})) return ancs
python
def calc_anchors(preregistration_map, model, model_hemi, scale=1, sigma=Ellipsis, radius_weight=0, field_sign_weight=0, invert_rh_field_sign=False): ''' calc_anchors is a calculator that creates a set of anchor instructions for a registration. Required afferent parameters: @ invert_rh_field_sign May be set to True (default is False) to indicate that the right hemisphere's field signs will be incorrect relative to the model; this generally should be used whenever invert_rh_angle is also set to True. ''' wgts = preregistration_map.prop('weight') rads = preregistration_map.prop('radius') if np.isclose(radius_weight, 0): radius_weight = 0 ancs = retinotopy_anchors(preregistration_map, model, polar_angle='polar_angle', eccentricity='eccentricity', radius='radius', weight=wgts, weight_min=0, # taken care of already radius_weight=radius_weight, field_sign_weight=field_sign_weight, scale=scale, invert_field_sign=(model_hemi == 'rh' and invert_rh_field_sign), **({} if sigma is Ellipsis else {'sigma':sigma})) return ancs
[ "def", "calc_anchors", "(", "preregistration_map", ",", "model", ",", "model_hemi", ",", "scale", "=", "1", ",", "sigma", "=", "Ellipsis", ",", "radius_weight", "=", "0", ",", "field_sign_weight", "=", "0", ",", "invert_rh_field_sign", "=", "False", ")", ":", "wgts", "=", "preregistration_map", ".", "prop", "(", "'weight'", ")", "rads", "=", "preregistration_map", ".", "prop", "(", "'radius'", ")", "if", "np", ".", "isclose", "(", "radius_weight", ",", "0", ")", ":", "radius_weight", "=", "0", "ancs", "=", "retinotopy_anchors", "(", "preregistration_map", ",", "model", ",", "polar_angle", "=", "'polar_angle'", ",", "eccentricity", "=", "'eccentricity'", ",", "radius", "=", "'radius'", ",", "weight", "=", "wgts", ",", "weight_min", "=", "0", ",", "# taken care of already", "radius_weight", "=", "radius_weight", ",", "field_sign_weight", "=", "field_sign_weight", ",", "scale", "=", "scale", ",", "invert_field_sign", "=", "(", "model_hemi", "==", "'rh'", "and", "invert_rh_field_sign", ")", ",", "*", "*", "(", "{", "}", "if", "sigma", "is", "Ellipsis", "else", "{", "'sigma'", ":", "sigma", "}", ")", ")", "return", "ancs" ]
calc_anchors is a calculator that creates a set of anchor instructions for a registration. Required afferent parameters: @ invert_rh_field_sign May be set to True (default is False) to indicate that the right hemisphere's field signs will be incorrect relative to the model; this generally should be used whenever invert_rh_angle is also set to True.
[ "calc_anchors", "is", "a", "calculator", "that", "creates", "a", "set", "of", "anchor", "instructions", "for", "a", "registration", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L1212-L1236
-1
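calc_anchors conditionally inverts the field sign for right-hemisphere models and forwards sigma only when the caller supplied one, again using Ellipsis as the "not given" sentinel. A sketch of that keyword-assembly pattern:

```python
def anchor_kwargs(model_hemi, invert_rh_field_sign=False, sigma=Ellipsis):
    # invert only when both conditions hold, as in the record above
    kw = {'invert_field_sign': (model_hemi == 'rh' and invert_rh_field_sign)}
    if sigma is not Ellipsis:
        kw['sigma'] = sigma  # forwarded only when explicitly provided
    return kw

print(anchor_kwargs('rh', invert_rh_field_sign=True))  # {'invert_field_sign': True}
print(anchor_kwargs('lh', invert_rh_field_sign=True))  # {'invert_field_sign': False}
print(anchor_kwargs('lh', sigma=0.5))                  # includes 'sigma': 0.5
```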
251,744
noahbenson/neuropythy
neuropythy/vision/retinotopy.py
calc_registration
def calc_registration(preregistration_map, anchors, max_steps=2000, max_step_size=0.05, method='random'): ''' calc_registration is a calculator that creates the registration coordinates. ''' # if max steps is a tuple (max, stride) then a trajectory is saved into # the registered_map meta-data pmap = preregistration_map if is_tuple(max_steps) or is_list(max_steps): (max_steps, stride) = max_steps traj = [preregistration_map.coordinates] x = preregistration_map.coordinates for s in np.arange(0, max_steps, stride): x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], initial_coordinates=x, method=method, max_steps=stride, max_step_size=max_step_size) traj.append(x) pmap = pmap.with_meta(trajectory=np.asarray(traj)) else: x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], method=method, max_steps=max_steps, max_step_size=max_step_size) return pmap.copy(coordinates=x)
python
def calc_registration(preregistration_map, anchors, max_steps=2000, max_step_size=0.05, method='random'): ''' calc_registration is a calculator that creates the registration coordinates. ''' # if max steps is a tuple (max, stride) then a trajectory is saved into # the registered_map meta-data pmap = preregistration_map if is_tuple(max_steps) or is_list(max_steps): (max_steps, stride) = max_steps traj = [preregistration_map.coordinates] x = preregistration_map.coordinates for s in np.arange(0, max_steps, stride): x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], initial_coordinates=x, method=method, max_steps=stride, max_step_size=max_step_size) traj.append(x) pmap = pmap.with_meta(trajectory=np.asarray(traj)) else: x = mesh_register( preregistration_map, [['edge', 'harmonic', 'scale', 1.0], ['angle', 'infinite-well', 'scale', 1.0], ['perimeter', 'harmonic'], anchors], method=method, max_steps=max_steps, max_step_size=max_step_size) return pmap.copy(coordinates=x)
[ "def", "calc_registration", "(", "preregistration_map", ",", "anchors", ",", "max_steps", "=", "2000", ",", "max_step_size", "=", "0.05", ",", "method", "=", "'random'", ")", ":", "# if max steps is a tuple (max, stride) then a trajectory is saved into", "# the registered_map meta-data", "pmap", "=", "preregistration_map", "if", "is_tuple", "(", "max_steps", ")", "or", "is_list", "(", "max_steps", ")", ":", "(", "max_steps", ",", "stride", ")", "=", "max_steps", "traj", "=", "[", "preregistration_map", ".", "coordinates", "]", "x", "=", "preregistration_map", ".", "coordinates", "for", "s", "in", "np", ".", "arange", "(", "0", ",", "max_steps", ",", "stride", ")", ":", "x", "=", "mesh_register", "(", "preregistration_map", ",", "[", "[", "'edge'", ",", "'harmonic'", ",", "'scale'", ",", "1.0", "]", ",", "[", "'angle'", ",", "'infinite-well'", ",", "'scale'", ",", "1.0", "]", ",", "[", "'perimeter'", ",", "'harmonic'", "]", ",", "anchors", "]", ",", "initial_coordinates", "=", "x", ",", "method", "=", "method", ",", "max_steps", "=", "stride", ",", "max_step_size", "=", "max_step_size", ")", "traj", ".", "append", "(", "x", ")", "pmap", "=", "pmap", ".", "with_meta", "(", "trajectory", "=", "np", ".", "asarray", "(", "traj", ")", ")", "else", ":", "x", "=", "mesh_register", "(", "preregistration_map", ",", "[", "[", "'edge'", ",", "'harmonic'", ",", "'scale'", ",", "1.0", "]", ",", "[", "'angle'", ",", "'infinite-well'", ",", "'scale'", ",", "1.0", "]", ",", "[", "'perimeter'", ",", "'harmonic'", "]", ",", "anchors", "]", ",", "method", "=", "method", ",", "max_steps", "=", "max_steps", ",", "max_step_size", "=", "max_step_size", ")", "return", "pmap", ".", "copy", "(", "coordinates", "=", "x", ")" ]
calc_registration is a calculator that creates the registration coordinates.
[ "calc_registration", "is", "a", "calculator", "that", "creates", "the", "registration", "coordinates", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L1239-L1274
-1
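When max_steps is a (total, stride) pair, the registration is run stride steps at a time and the coordinates are snapshotted into a trajectory stored in the map's meta-data. A toy sketch of that loop, with a stand-in for mesh_register:

```python
import numpy as np

def run_with_trajectory(x0, step_fn, max_steps, stride):
    # Run the optimizer stride steps at a time, snapshotting coordinates,
    # mirroring the (max_steps, stride) branch of calc_registration.
    traj = [x0]
    x = x0
    for _ in np.arange(0, max_steps, stride):
        x = step_fn(x, stride)  # stand-in for mesh_register(max_steps=stride, ...)
        traj.append(x)
    return np.asarray(traj)

# Toy step function: exponentially decay the coordinates.
traj = run_with_trajectory(np.ones((2, 4)), lambda x, n: 0.9 ** n * x, 100, 25)
print(traj.shape)  # (5, 2, 4): the initial snapshot plus 4 strides of 25 steps
```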
251,745
noahbenson/neuropythy
neuropythy/vision/retinotopy.py
calc_prediction
def calc_prediction(registered_map, preregistration_mesh, native_mesh, model): ''' calc_registration_prediction is a pimms calculator that creates the both the prediction and the registration_prediction, both of which are pimms itables including the fields 'polar_angle', 'eccentricity', and 'visual_area'. The registration_prediction data describe the vertices for the registered_map, not necessarily of the native_mesh, while the prediction describes the native mesh. Provided efferent values: @ registered_mesh Will be a mesh object that is equivalent to the preregistration_mesh but with the coordinates and predicted fields (from the registration) filled in. Note that this mesh is still in the resampled configuration is resampling was performed. @ registration_prediction Will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be 0 and other values will be undefined (but are typically 0). The registration_prediction describes the values on the registrered_mesh. @ prediction will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be 0 and other values will be undefined (but are typically 0). The prediction describes the values on the native_mesh and the predicted_mesh. ''' # invert the map projection to make the registration map into a mesh coords3d = np.array(preregistration_mesh.coordinates) idcs = registered_map.labels coords3d[:,idcs] = registered_map.meta('projection').inverse(registered_map.coordinates) rmesh = preregistration_mesh.copy(coordinates=coords3d) # go ahead and get the model predictions... d = model.cortex_to_angle(registered_map.coordinates) id2n = model.area_id_to_name (ang, ecc) = d[0:2] lbl = np.asarray(d[2], dtype=np.int) rad = np.asarray([predict_pRF_radius(e, id2n[l]) if l > 0 else 0 for (e,l) in zip(ecc,lbl)]) d = {'polar_angle':ang, 'eccentricity':ecc, 'visual_area':lbl, 'radius':rad} # okay, put these on the mesh rpred = {} for (k,v) in six.iteritems(d): v.setflags(write=False) tmp = np.zeros(rmesh.vertex_count, dtype=v.dtype) tmp[registered_map.labels] = v tmp.setflags(write=False) rpred[k] = tmp rpred = pyr.pmap(rpred) rmesh = rmesh.with_prop(rpred) # next, do all of this for the native mesh.. if native_mesh is preregistration_mesh: pred = rpred pmesh = rmesh else: # we need to address the native coordinates in the prereg coordinates then unaddress them # in the registered coordinates; this will let us make a native-registered-map and repeat # the exercise above addr = preregistration_mesh.address(native_mesh.coordinates) natreg_mesh = native_mesh.copy(coordinates=rmesh.unaddress(addr)) d = model.cortex_to_angle(natreg_mesh) (ang,ecc) = d[0:2] lbl = np.asarray(d[2], dtype=np.int) rad = np.asarray([predict_pRF_radius(e, id2n[l]) if l > 0 else 0 for (e,l) in zip(ecc,lbl)]) pred = pyr.m(polar_angle=ang, eccentricity=ecc, radius=rad, visual_area=lbl) pmesh = natreg_mesh.with_prop(pred) return {'registered_mesh' : rmesh, 'registration_prediction': rpred, 'prediction' : pred, 'predicted_mesh' : pmesh}
python
def calc_prediction(registered_map, preregistration_mesh, native_mesh, model): ''' calc_registration_prediction is a pimms calculator that creates the both the prediction and the registration_prediction, both of which are pimms itables including the fields 'polar_angle', 'eccentricity', and 'visual_area'. The registration_prediction data describe the vertices for the registered_map, not necessarily of the native_mesh, while the prediction describes the native mesh. Provided efferent values: @ registered_mesh Will be a mesh object that is equivalent to the preregistration_mesh but with the coordinates and predicted fields (from the registration) filled in. Note that this mesh is still in the resampled configuration is resampling was performed. @ registration_prediction Will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be 0 and other values will be undefined (but are typically 0). The registration_prediction describes the values on the registrered_mesh. @ prediction will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be 0 and other values will be undefined (but are typically 0). The prediction describes the values on the native_mesh and the predicted_mesh. ''' # invert the map projection to make the registration map into a mesh coords3d = np.array(preregistration_mesh.coordinates) idcs = registered_map.labels coords3d[:,idcs] = registered_map.meta('projection').inverse(registered_map.coordinates) rmesh = preregistration_mesh.copy(coordinates=coords3d) # go ahead and get the model predictions... d = model.cortex_to_angle(registered_map.coordinates) id2n = model.area_id_to_name (ang, ecc) = d[0:2] lbl = np.asarray(d[2], dtype=np.int) rad = np.asarray([predict_pRF_radius(e, id2n[l]) if l > 0 else 0 for (e,l) in zip(ecc,lbl)]) d = {'polar_angle':ang, 'eccentricity':ecc, 'visual_area':lbl, 'radius':rad} # okay, put these on the mesh rpred = {} for (k,v) in six.iteritems(d): v.setflags(write=False) tmp = np.zeros(rmesh.vertex_count, dtype=v.dtype) tmp[registered_map.labels] = v tmp.setflags(write=False) rpred[k] = tmp rpred = pyr.pmap(rpred) rmesh = rmesh.with_prop(rpred) # next, do all of this for the native mesh.. if native_mesh is preregistration_mesh: pred = rpred pmesh = rmesh else: # we need to address the native coordinates in the prereg coordinates then unaddress them # in the registered coordinates; this will let us make a native-registered-map and repeat # the exercise above addr = preregistration_mesh.address(native_mesh.coordinates) natreg_mesh = native_mesh.copy(coordinates=rmesh.unaddress(addr)) d = model.cortex_to_angle(natreg_mesh) (ang,ecc) = d[0:2] lbl = np.asarray(d[2], dtype=np.int) rad = np.asarray([predict_pRF_radius(e, id2n[l]) if l > 0 else 0 for (e,l) in zip(ecc,lbl)]) pred = pyr.m(polar_angle=ang, eccentricity=ecc, radius=rad, visual_area=lbl) pmesh = natreg_mesh.with_prop(pred) return {'registered_mesh' : rmesh, 'registration_prediction': rpred, 'prediction' : pred, 'predicted_mesh' : pmesh}
[ "def", "calc_prediction", "(", "registered_map", ",", "preregistration_mesh", ",", "native_mesh", ",", "model", ")", ":", "# invert the map projection to make the registration map into a mesh", "coords3d", "=", "np", ".", "array", "(", "preregistration_mesh", ".", "coordinates", ")", "idcs", "=", "registered_map", ".", "labels", "coords3d", "[", ":", ",", "idcs", "]", "=", "registered_map", ".", "meta", "(", "'projection'", ")", ".", "inverse", "(", "registered_map", ".", "coordinates", ")", "rmesh", "=", "preregistration_mesh", ".", "copy", "(", "coordinates", "=", "coords3d", ")", "# go ahead and get the model predictions...", "d", "=", "model", ".", "cortex_to_angle", "(", "registered_map", ".", "coordinates", ")", "id2n", "=", "model", ".", "area_id_to_name", "(", "ang", ",", "ecc", ")", "=", "d", "[", "0", ":", "2", "]", "lbl", "=", "np", ".", "asarray", "(", "d", "[", "2", "]", ",", "dtype", "=", "np", ".", "int", ")", "rad", "=", "np", ".", "asarray", "(", "[", "predict_pRF_radius", "(", "e", ",", "id2n", "[", "l", "]", ")", "if", "l", ">", "0", "else", "0", "for", "(", "e", ",", "l", ")", "in", "zip", "(", "ecc", ",", "lbl", ")", "]", ")", "d", "=", "{", "'polar_angle'", ":", "ang", ",", "'eccentricity'", ":", "ecc", ",", "'visual_area'", ":", "lbl", ",", "'radius'", ":", "rad", "}", "# okay, put these on the mesh", "rpred", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "d", ")", ":", "v", ".", "setflags", "(", "write", "=", "False", ")", "tmp", "=", "np", ".", "zeros", "(", "rmesh", ".", "vertex_count", ",", "dtype", "=", "v", ".", "dtype", ")", "tmp", "[", "registered_map", ".", "labels", "]", "=", "v", "tmp", ".", "setflags", "(", "write", "=", "False", ")", "rpred", "[", "k", "]", "=", "tmp", "rpred", "=", "pyr", ".", "pmap", "(", "rpred", ")", "rmesh", "=", "rmesh", ".", "with_prop", "(", "rpred", ")", "# next, do all of this for the native mesh..", "if", "native_mesh", "is", "preregistration_mesh", ":", "pred", "=", "rpred", "pmesh", "=", "rmesh", "else", ":", "# we need to address the native coordinates in the prereg coordinates then unaddress them", "# in the registered coordinates; this will let us make a native-registered-map and repeat", "# the exercise above", "addr", "=", "preregistration_mesh", ".", "address", "(", "native_mesh", ".", "coordinates", ")", "natreg_mesh", "=", "native_mesh", ".", "copy", "(", "coordinates", "=", "rmesh", ".", "unaddress", "(", "addr", ")", ")", "d", "=", "model", ".", "cortex_to_angle", "(", "natreg_mesh", ")", "(", "ang", ",", "ecc", ")", "=", "d", "[", "0", ":", "2", "]", "lbl", "=", "np", ".", "asarray", "(", "d", "[", "2", "]", ",", "dtype", "=", "np", ".", "int", ")", "rad", "=", "np", ".", "asarray", "(", "[", "predict_pRF_radius", "(", "e", ",", "id2n", "[", "l", "]", ")", "if", "l", ">", "0", "else", "0", "for", "(", "e", ",", "l", ")", "in", "zip", "(", "ecc", ",", "lbl", ")", "]", ")", "pred", "=", "pyr", ".", "m", "(", "polar_angle", "=", "ang", ",", "eccentricity", "=", "ecc", ",", "radius", "=", "rad", ",", "visual_area", "=", "lbl", ")", "pmesh", "=", "natreg_mesh", ".", "with_prop", "(", "pred", ")", "return", "{", "'registered_mesh'", ":", "rmesh", ",", "'registration_prediction'", ":", "rpred", ",", "'prediction'", ":", "pred", ",", "'predicted_mesh'", ":", "pmesh", "}" ]
calc_registration_prediction is a pimms calculator that creates both the prediction and the registration_prediction, both of which are pimms itables including the fields 'polar_angle', 'eccentricity', and 'visual_area'. The registration_prediction data describe the vertices for the registered_map, not necessarily of the native_mesh, while the prediction describes the native mesh. Provided efferent values: @ registered_mesh Will be a mesh object that is equivalent to the preregistration_mesh but with the coordinates and predicted fields (from the registration) filled in. Note that this mesh is still in the resampled configuration if resampling was performed. @ registration_prediction Will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be 0 and other values will be undefined (but are typically 0). The registration_prediction describes the values on the registered_mesh. @ prediction Will be a pimms ITable object with columns 'polar_angle', 'eccentricity', and 'visual_area'. For values outside of the model region, visual_area will be 0 and other values will be undefined (but are typically 0). The prediction describes the values on the native_mesh and the predicted_mesh.
[ "calc_registration_prediction", "is", "a", "pimms", "calculator", "that", "creates", "the", "both", "the", "prediction", "and", "the", "registration_prediction", "both", "of", "which", "are", "pimms", "itables", "including", "the", "fields", "polar_angle", "eccentricity", "and", "visual_area", ".", "The", "registration_prediction", "data", "describe", "the", "vertices", "for", "the", "registered_map", "not", "necessarily", "of", "the", "native_mesh", "while", "the", "prediction", "describes", "the", "native", "mesh", "." ]
b588889f6db36ddb9602ae4a72c1c0d3f41586b2
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/vision/retinotopy.py#L1276-L1338
-1
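The predictions are computed on the (possibly resampled) registered map and then scattered back onto full-mesh arrays via the map's labels, which hold the full-mesh index of each map vertex; everything outside the map stays zero. A small sketch with hypothetical sizes:

```python
import numpy as np

vertex_count = 8
labels = np.array([1, 3, 4, 6])                   # map vertex -> full-mesh vertex
polar_angle = np.array([90.0, 45.0, 0.0, -45.0])  # predicted on the map

full = np.zeros(vertex_count, dtype=polar_angle.dtype)
full[labels] = polar_angle
full.setflags(write=False)  # predictions are stored as immutable arrays
print(full)  # [  0.  90.   0.  45.   0.   0. -45.   0.]
```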
251,746
barnumbirr/coinmarketcap
coinmarketcap/core.py
Market.ticker
def ticker(self, currency="", **kwargs): """ This endpoint displays cryptocurrency ticker data in order of rank. The maximum number of results per call is 100. Pagination is possible by using the start and limit parameters. GET /ticker/ Optional parameters: (int) start - return results from rank [start] and above (default is 1) (int) limit - return a maximum of [limit] results (default is 100; max is 100) (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" GET /ticker/{id} Optional parameters: (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" """ params = {} params.update(kwargs) # see https://github.com/barnumbirr/coinmarketcap/pull/28 if currency: currency = str(currency) + '/' response = self.__request('ticker/' + currency, params) return response
python
def ticker(self, currency="", **kwargs): """ This endpoint displays cryptocurrency ticker data in order of rank. The maximum number of results per call is 100. Pagination is possible by using the start and limit parameters. GET /ticker/ Optional parameters: (int) start - return results from rank [start] and above (default is 1) (int) limit - return a maximum of [limit] results (default is 100; max is 100) (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" GET /ticker/{id} Optional parameters: (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH" "XRP", "LTC", and "BCH" """ params = {} params.update(kwargs) # see https://github.com/barnumbirr/coinmarketcap/pull/28 if currency: currency = str(currency) + '/' response = self.__request('ticker/' + currency, params) return response
[ "def", "ticker", "(", "self", ",", "currency", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "}", "params", ".", "update", "(", "kwargs", ")", "# see https://github.com/barnumbirr/coinmarketcap/pull/28", "if", "currency", ":", "currency", "=", "str", "(", "currency", ")", "+", "'/'", "response", "=", "self", ".", "__request", "(", "'ticker/'", "+", "currency", ",", "params", ")", "return", "response" ]
This endpoint displays cryptocurrency ticker data in order of rank. The maximum number of results per call is 100. Pagination is possible by using the start and limit parameters. GET /ticker/ Optional parameters: (int) start - return results from rank [start] and above (default is 1) (int) limit - return a maximum of [limit] results (default is 100; max is 100) (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH", "XRP", "LTC", and "BCH" GET /ticker/{id} Optional parameters: (string) convert - return pricing info in terms of another currency. Valid fiat currency values are: "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR" Valid cryptocurrency values are: "BTC", "ETH", "XRP", "LTC", and "BCH"
[ "This", "endpoint", "displays", "cryptocurrency", "ticker", "data", "in", "order", "of", "rank", ".", "The", "maximum", "number", "of", "results", "per", "call", "is", "100", ".", "Pagination", "is", "possible", "by", "using", "the", "start", "and", "limit", "parameters", "." ]
d1d76a73bc48a64a4c2883dd28c6199bfbd3ebc6
https://github.com/barnumbirr/coinmarketcap/blob/d1d76a73bc48a64a4c2883dd28c6199bfbd3ebc6/coinmarketcap/core.py#L59-L96
-1
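A minimal usage sketch for the Market.ticker record above. It assumes the package installs under the name coinmarketcap and exposes Market at the top level (consistent with the coinmarketcap/core.py path); the public endpoint itself has since been retired by CoinMarketCap, so treat this purely as an illustration of the parameter handling.

from coinmarketcap import Market

market = Market()

# GET /ticker/ with pagination and currency conversion; start, limit and
# convert travel through **kwargs into the request parameters.
top10 = market.ticker(start=1, limit=10, convert='EUR')

# GET /ticker/{id}; the method appends '/' to build 'ticker/bitcoin/'.
btc = market.ticker('bitcoin', convert='EUR')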
251,747
HHammond/PrettyPandas
prettypandas/formatters.py
_surpress_formatting_errors
def _surpress_formatting_errors(fn): """ I know this is dangerous and the wrong way to solve the problem, but when using both row and column summaries it's easier to just swallow errors so users can format their tables how they need. """ @wraps(fn) def inner(*args, **kwargs): try: return fn(*args, **kwargs) except ValueError: return "" return inner
python
def _surpress_formatting_errors(fn): """ I know this is dangerous and the wrong way to solve the problem, but when using both row and column summaries it's easier to just swallow errors so users can format their tables how they need. """ @wraps(fn) def inner(*args, **kwargs): try: return fn(*args, **kwargs) except ValueError: return "" return inner
[ "def", "_surpress_formatting_errors", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "ValueError", ":", "return", "\"\"", "return", "inner" ]
I know this is dangerous and the wrong way to solve the problem, but when using both row and column summaries it's easier to just swallow errors so users can format their tables how they need.
[ "I", "know", "this", "is", "dangerous", "and", "the", "wrong", "way", "to", "solve", "the", "problem", "but", "when", "using", "both", "row", "and", "columns", "summaries", "it", "s", "easier", "to", "just", "swallow", "errors", "so", "users", "can", "format", "their", "tables", "how", "they", "need", "." ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/formatters.py#L12-L24
-1
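A self-contained sketch of how the decorator behaves, re-declared locally so it runs without the package installed; the misspelled name is kept exactly as it appears in the source.

from functools import wraps

def _surpress_formatting_errors(fn):
    # identical logic to the record above: any ValueError becomes ''
    @wraps(fn)
    def inner(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except ValueError:
            return ""
    return inner

@_surpress_formatting_errors
def strict_float(v):
    return "{:.2f}".format(float(v))

print(strict_float("3.14159"))  # -> '3.14'
print(strict_float("n/a"))      # -> ''   (the ValueError from float() is swallowed)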
251,748
HHammond/PrettyPandas
prettypandas/formatters.py
_format_numer
def _format_numer(number_format, prefix='', suffix=''): """Format a number to a string.""" @_surpress_formatting_errors def inner(v): if isinstance(v, Number): return ("{{}}{{:{}}}{{}}" .format(number_format) .format(prefix, v, suffix)) else: raise TypeError("Numeric type required.") return inner
python
def _format_numer(number_format, prefix='', suffix=''): """Format a number to a string.""" @_surpress_formatting_errors def inner(v): if isinstance(v, Number): return ("{{}}{{:{}}}{{}}" .format(number_format) .format(prefix, v, suffix)) else: raise TypeError("Numeric type required.") return inner
[ "def", "_format_numer", "(", "number_format", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ")", ":", "@", "_surpress_formatting_errors", "def", "inner", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "Number", ")", ":", "return", "(", "\"{{}}{{:{}}}{{}}\"", ".", "format", "(", "number_format", ")", ".", "format", "(", "prefix", ",", "v", ",", "suffix", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Numberic type required.\"", ")", "return", "inner" ]
Format a number to a string.
[ "Format", "a", "number", "to", "a", "string", "." ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/formatters.py#L27-L37
-1
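The double str.format call is the subtle part of this record: the first call bakes the format spec into a template, the second fills in prefix, value and suffix. A renamed local stand-in (make_formatter is not part of the library) shows the same two-stage trick:

from numbers import Number

def make_formatter(number_format, prefix='', suffix=''):
    def inner(v):
        if not isinstance(v, Number):
            raise TypeError("Numeric type required.")
        # stage 1: '{{}}{{:{}}}{{}}'.format('.2f') -> '{}{:.2f}{}'
        # stage 2: fill prefix, value and suffix into that template
        return "{{}}{{:{}}}{{}}".format(number_format).format(prefix, v, suffix)
    return inner

fmt = make_formatter(".2f", suffix=" kg")
print(fmt(12.3456))  # -> '12.35 kg'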
251,749
HHammond/PrettyPandas
prettypandas/formatters.py
as_percent
def as_percent(precision=2, **kwargs): """Convert number to percentage string. Parameters: ----------- :param v: numerical value to be converted :param precision: int decimal places to round to """ if not isinstance(precision, Integral): raise TypeError("Precision must be an integer.") return _surpress_formatting_errors( _format_numer(".{}%".format(precision)) )
python
def as_percent(precision=2, **kwargs): """Convert number to percentage string. Parameters: ----------- :param v: numerical value to be converted :param precision: int decimal places to round to """ if not isinstance(precision, Integral): raise TypeError("Precision must be an integer.") return _surpress_formatting_errors( _format_numer(".{}%".format(precision)) )
[ "def", "as_percent", "(", "precision", "=", "2", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "precision", ",", "Integral", ")", ":", "raise", "TypeError", "(", "\"Precision must be an integer.\"", ")", "return", "_surpress_formatting_errors", "(", "_format_numer", "(", "\".{}%\"", ".", "format", "(", "precision", ")", ")", ")" ]
Convert number to percentage string. Parameters: ----------- :param v: numerical value to be converted :param precision: int decimal places to round to
[ "Convert", "number", "to", "percentage", "string", "." ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/formatters.py#L40-L54
-1
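Concretely, as_percent(2) wraps the format spec '.2%', and Python's '%' spec multiplies by 100 before printing, so inputs are fractions rather than pre-scaled percentages. The prettypandas.formatters import path is taken from the path field of this record, but treat it as an assumption:

print("{:.2%}".format(0.1234))   # -> '12.34%'
print("{:.0%}".format(0.5))      # -> '50%'

# the same, through the library function (import path assumed):
from prettypandas.formatters import as_percent
print(as_percent(2)(0.1234))     # -> '12.34%'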
251,750
HHammond/PrettyPandas
prettypandas/formatters.py
as_unit
def as_unit(unit, precision=2, location='suffix'): """Convert value to unit. Parameters: ----------- :param v: numerical value :param unit: string of unit :param precision: int decimal places to round to :param location: 'prefix' or 'suffix' representing where the unit falls relative to the value """ if not isinstance(precision, Integral): raise TypeError("Precision must be an integer.") if location == 'prefix': formatter = partial(_format_numer, prefix=unit) elif location == 'suffix': formatter = partial(_format_numer, suffix=unit) else: raise ValueError("location must be either 'prefix' or 'suffix'.") return _surpress_formatting_errors( formatter("0.{}f".format(precision)) )
python
def as_unit(unit, precision=2, location='suffix'): """Convert value to unit. Parameters: ----------- :param v: numerical value :param unit: string of unit :param precision: int decimal places to round to :param location: 'prefix' or 'suffix' representing where the unit falls relative to the value """ if not isinstance(precision, Integral): raise TypeError("Precision must be an integer.") if location == 'prefix': formatter = partial(_format_numer, prefix=unit) elif location == 'suffix': formatter = partial(_format_numer, suffix=unit) else: raise ValueError("location must be either 'prefix' or 'suffix'.") return _surpress_formatting_errors( formatter("0.{}f".format(precision)) )
[ "def", "as_unit", "(", "unit", ",", "precision", "=", "2", ",", "location", "=", "'suffix'", ")", ":", "if", "not", "isinstance", "(", "precision", ",", "Integral", ")", ":", "raise", "TypeError", "(", "\"Precision must be an integer.\"", ")", "if", "location", "==", "'prefix'", ":", "formatter", "=", "partial", "(", "_format_numer", ",", "prefix", "=", "unit", ")", "elif", "location", "==", "'suffix'", ":", "formatter", "=", "partial", "(", "_format_numer", ",", "suffix", "=", "unit", ")", "else", ":", "raise", "ValueError", "(", "\"location must be either 'prefix' or 'suffix'.\"", ")", "return", "_surpress_formatting_errors", "(", "formatter", "(", "\"0.{}f\"", ".", "format", "(", "precision", ")", ")", ")" ]
Convert value to unit. Parameters: ----------- :param v: numerical value :param unit: string of unit :param precision: int decimal places to round to :param location: 'prefix' or 'suffix' representing where the unit falls relative to the value
[ "Convert", "value", "to", "unit", "." ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/formatters.py#L57-L82
-1
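A usage sketch for as_unit, again assuming the prettypandas.formatters import path. Note that no separator is inserted between the number and the unit, so include a space in the unit string if you want one:

from prettypandas.formatters import as_unit

cm = as_unit(' cm', precision=1)                      # suffix by default
eur = as_unit('EUR ', precision=2, location='prefix')
print(cm(12.34))   # -> '12.3 cm'
print(eur(5))      # -> 'EUR 5.00'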
251,751
HHammond/PrettyPandas
prettypandas/summarizer.py
Aggregate.apply
def apply(self, df): """Compute aggregate over DataFrame""" if self.subset: if _axis_is_rows(self.axis): df = df[self.subset] if _axis_is_cols(self.axis): df = df.loc[self.subset] result = df.agg(self.func, axis=self.axis, *self.args, **self.kwargs) result.name = self.title return result
python
def apply(self, df): """Compute aggregate over DataFrame""" if self.subset: if _axis_is_rows(self.axis): df = df[self.subset] if _axis_is_cols(self.axis): df = df.loc[self.subset] result = df.agg(self.func, axis=self.axis, *self.args, **self.kwargs) result.name = self.title return result
[ "def", "apply", "(", "self", ",", "df", ")", ":", "if", "self", ".", "subset", ":", "if", "_axis_is_rows", "(", "self", ".", "axis", ")", ":", "df", "=", "df", "[", "self", ".", "subset", "]", "if", "_axis_is_cols", "(", "self", ".", "axis", ")", ":", "df", "=", "df", ".", "loc", "[", "self", ".", "subset", "]", "result", "=", "df", ".", "agg", "(", "self", ".", "func", ",", "axis", "=", "self", ".", "axis", ",", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")", "result", ".", "name", "=", "self", ".", "title", "return", "result" ]
Compute aggregate over DataFrame
[ "Compute", "aggregate", "over", "DataFrame" ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L53-L64
-1
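The Aggregate constructor is not shown in this record (its shape is inferred from the summary() record further down), so here is the same behavior reproduced with plain pandas: a row summary (axis=0) restricted to a subset of columns, with the title becoming the name of the resulting row:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

# sketch of Aggregate('Total', 'sum', subset=['a'], axis=0).apply(df)
result = df[['a']].agg('sum', axis=0)
result.name = 'Total'   # the title becomes the summary row's label
print(result)           # a    6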
251,752
HHammond/PrettyPandas
prettypandas/summarizer.py
Formatter.apply
def apply(self, styler): """Apply formatter over Pandas Styler""" return styler.format(self.formatter, *self.args, **self.kwargs)
python
def apply(self, styler): """Apply formatter over Pandas Styler""" return styler.format(self.formatter, *self.args, **self.kwargs)
[ "def", "apply", "(", "self", ",", "styler", ")", ":", "return", "styler", ".", "format", "(", "self", ".", "formatter", ",", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")" ]
Apply formatter over Pandas Styler
[ "Apply", "Summary", "over", "Pandas", "Styler" ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L85-L87
-1
251,753
HHammond/PrettyPandas
prettypandas/summarizer.py
PrettyPandas._apply_summaries
def _apply_summaries(self): """Add all summary rows and columns.""" def as_frame(r): if isinstance(r, pd.Series): return r.to_frame() else: return r df = self.data if df.index.nlevels > 1: raise ValueError( "You cannot currently have both summary rows and columns on a " "MultiIndex." ) _df = df if self.summary_rows: rows = pd.concat([agg.apply(_df) for agg in self._cleaned_summary_rows], axis=1).T df = pd.concat([df, as_frame(rows)], axis=0) if self.summary_cols: cols = pd.concat([agg.apply(_df) for agg in self._cleaned_summary_cols], axis=1) df = pd.concat([df, as_frame(cols)], axis=1) return df
python
def _apply_summaries(self): """Add all summary rows and columns.""" def as_frame(r): if isinstance(r, pd.Series): return r.to_frame() else: return r df = self.data if df.index.nlevels > 1: raise ValueError( "You cannot currently have both summary rows and columns on a " "MultiIndex." ) _df = df if self.summary_rows: rows = pd.concat([agg.apply(_df) for agg in self._cleaned_summary_rows], axis=1).T df = pd.concat([df, as_frame(rows)], axis=0) if self.summary_cols: cols = pd.concat([agg.apply(_df) for agg in self._cleaned_summary_cols], axis=1) df = pd.concat([df, as_frame(cols)], axis=1) return df
[ "def", "_apply_summaries", "(", "self", ")", ":", "def", "as_frame", "(", "r", ")", ":", "if", "isinstance", "(", "r", ",", "pd", ".", "Series", ")", ":", "return", "r", ".", "to_frame", "(", ")", "else", ":", "return", "r", "df", "=", "self", ".", "data", "if", "df", ".", "index", ".", "nlevels", ">", "1", ":", "raise", "ValueError", "(", "\"You cannot currently have both summary rows and columns on a \"", "\"MultiIndex.\"", ")", "_df", "=", "df", "if", "self", ".", "summary_rows", ":", "rows", "=", "pd", ".", "concat", "(", "[", "agg", ".", "apply", "(", "_df", ")", "for", "agg", "in", "self", ".", "_cleaned_summary_rows", "]", ",", "axis", "=", "1", ")", ".", "T", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "as_frame", "(", "rows", ")", "]", ",", "axis", "=", "0", ")", "if", "self", ".", "summary_cols", ":", "cols", "=", "pd", ".", "concat", "(", "[", "agg", ".", "apply", "(", "_df", ")", "for", "agg", "in", "self", ".", "_cleaned_summary_cols", "]", ",", "axis", "=", "1", ")", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "as_frame", "(", "cols", ")", "]", ",", "axis", "=", "1", ")", "return", "df" ]
Add all summary rows and columns.
[ "Add", "all", "summary", "rows", "and", "columns", "." ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L162-L190
-1
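A standalone sketch of the concatenation step for row summaries, assuming a single aggregate: each aggregate yields a named Series, pd.concat(..., axis=1).T lines them up as rows, and the result is stacked under the original frame:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

totals = df.agg('sum', axis=0)
totals.name = 'Total'

rows = pd.concat([totals], axis=1).T   # 1 x 2 frame labelled 'Total'
out = pd.concat([df, rows], axis=0)
print(out)
#        a  b
# 0      1  3
# 1      2  4
# Total  3  7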
251,754
HHammond/PrettyPandas
prettypandas/summarizer.py
PrettyPandas.style
def style(self): """Add summaries and convert to Pandas Styler""" row_titles = [a.title for a in self._cleaned_summary_rows] col_titles = [a.title for a in self._cleaned_summary_cols] row_ix = pd.IndexSlice[row_titles, :] col_ix = pd.IndexSlice[:, col_titles] def handle_na(df): df.loc[col_ix] = df.loc[col_ix].fillna('') df.loc[row_ix] = df.loc[row_ix].fillna('') return df styler = ( self .frame .pipe(handle_na) .style .applymap(lambda r: 'font-weight: 900', subset=row_ix) .applymap(lambda r: 'font-weight: 900', subset=col_ix) ) for formatter in self.formatters: styler = formatter.apply(styler) return styler
python
def style(self): """Add summaries and convert to Pandas Styler""" row_titles = [a.title for a in self._cleaned_summary_rows] col_titles = [a.title for a in self._cleaned_summary_cols] row_ix = pd.IndexSlice[row_titles, :] col_ix = pd.IndexSlice[:, col_titles] def handle_na(df): df.loc[col_ix] = df.loc[col_ix].fillna('') df.loc[row_ix] = df.loc[row_ix].fillna('') return df styler = ( self .frame .pipe(handle_na) .style .applymap(lambda r: 'font-weight: 900', subset=row_ix) .applymap(lambda r: 'font-weight: 900', subset=col_ix) ) for formatter in self.formatters: styler = formatter.apply(styler) return styler
[ "def", "style", "(", "self", ")", ":", "row_titles", "=", "[", "a", ".", "title", "for", "a", "in", "self", ".", "_cleaned_summary_rows", "]", "col_titles", "=", "[", "a", ".", "title", "for", "a", "in", "self", ".", "_cleaned_summary_cols", "]", "row_ix", "=", "pd", ".", "IndexSlice", "[", "row_titles", ",", ":", "]", "col_ix", "=", "pd", ".", "IndexSlice", "[", ":", ",", "col_titles", "]", "def", "handle_na", "(", "df", ")", ":", "df", ".", "loc", "[", "col_ix", "]", "=", "df", ".", "loc", "[", "col_ix", "]", ".", "fillna", "(", "''", ")", "df", ".", "loc", "[", "row_ix", "]", "=", "df", ".", "loc", "[", "row_ix", "]", ".", "fillna", "(", "''", ")", "return", "df", "styler", "=", "(", "self", ".", "frame", ".", "pipe", "(", "handle_na", ")", ".", "style", ".", "applymap", "(", "lambda", "r", ":", "'font-weight: 900'", ",", "subset", "=", "row_ix", ")", ".", "applymap", "(", "lambda", "r", ":", "'font-weight: 900'", ",", "subset", "=", "col_ix", ")", ")", "for", "formatter", "in", "self", ".", "formatters", ":", "styler", "=", "formatter", ".", "apply", "(", "styler", ")", "return", "styler" ]
Add summaries and convert to Pandas Styler
[ "Add", "summaries", "and", "convert", "to", "Pandas", "Styler" ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L202-L226
-1
251,755
HHammond/PrettyPandas
prettypandas/summarizer.py
PrettyPandas.summary
def summary(self, func=methodcaller('sum'), title='Total', axis=0, subset=None, *args, **kwargs): """Add a summary row or column to the dataframe. Parameters ---------- :param func: function to be used for a summary. :param title: Title for this summary row or column. :param axis: Same as numpy and pandas axis argument. A value of None will cause the summary to be applied to both rows and columns. :param args: Positional arguments passed to all the functions. :param kwargs: Keyword arguments passed to all the functions. The results of summary can be chained together. """ if axis is None: return ( self .summary( func=func, title=title, axis=0, subset=subset, *args, **kwargs ) .summary( func=func, title=title, axis=1, subset=subset, *args, **kwargs ) ) else: agg = Aggregate(title, func, subset=subset, axis=axis, *args, **kwargs) return self._add_summary(agg)
python
def summary(self, func=methodcaller('sum'), title='Total', axis=0, subset=None, *args, **kwargs): """Add a summary row or column to the dataframe. Parameters ---------- :param func: function to be used for a summary. :param title: Title for this summary row or column. :param axis: Same as numpy and pandas axis argument. A value of None will cause the summary to be applied to both rows and columns. :param args: Positional arguments passed to all the functions. :param kwargs: Keyword arguments passed to all the functions. The results of summary can be chained together. """ if axis is None: return ( self .summary( func=func, title=title, axis=0, subset=subset, *args, **kwargs ) .summary( func=func, title=title, axis=1, subset=subset, *args, **kwargs ) ) else: agg = Aggregate(title, func, subset=subset, axis=axis, *args, **kwargs) return self._add_summary(agg)
[ "def", "summary", "(", "self", ",", "func", "=", "methodcaller", "(", "'sum'", ")", ",", "title", "=", "'Total'", ",", "axis", "=", "0", ",", "subset", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "is", "None", ":", "return", "(", "self", ".", "summary", "(", "func", "=", "func", ",", "title", "=", "title", ",", "axis", "=", "0", ",", "subset", "=", "subset", ",", "*", "args", ",", "*", "*", "kwargs", ")", ".", "summary", "(", "func", "=", "func", ",", "title", "=", "title", ",", "axis", "=", "1", ",", "subset", "=", "subset", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "else", ":", "agg", "=", "Aggregate", "(", "title", ",", "func", ",", "subset", "=", "subset", ",", "axis", "=", "axis", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_add_summary", "(", "agg", ")" ]
Add a summary row or column to the dataframe. Parameters ---------- :param func: function to be used for a summary. :param title: Title for this summary row or column. :param axis: Same as numpy and pandas axis argument. A value of None will cause the summary to be applied to both rows and columns. :param args: Positional arguments passed to all the functions. :param kwargs: Keyword arguments passed to all the functions. The results of summary can be chained together.
[ "Add", "multiple", "summary", "rows", "or", "columns", "to", "the", "dataframe", "." ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L240-L285
-1
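A usage sketch of the chaining this docstring describes. The top-level import and the PrettyPandas(df) constructor shape are assumptions (neither is visible in this record), and style is called as a plain method because that is how it is defined in the record above:

import pandas as pd
from operator import methodcaller
from prettypandas import PrettyPandas   # import path assumed

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

table = (
    PrettyPandas(df)
    .summary(methodcaller('sum'), title='Total', axis=0)   # adds a 'Total' row
    .summary(methodcaller('mean'), title='Mean', axis=1)   # adds a 'Mean' column
)
styler = table.style()   # pandas Styler with the summaries shown in bold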
251,756
HHammond/PrettyPandas
prettypandas/summarizer.py
PrettyPandas.as_percent
def as_percent(self, precision=2, *args, **kwargs): """Format subset as percentages :param precision: Decimal precision :param subset: Pandas subset """ f = Formatter(as_percent(precision), args, kwargs) return self._add_formatter(f)
python
def as_percent(self, precision=2, *args, **kwargs): """Format subset as percentages :param precision: Decimal precision :param subset: Pandas subset """ f = Formatter(as_percent(precision), args, kwargs) return self._add_formatter(f)
[ "def", "as_percent", "(", "self", ",", "precision", "=", "2", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "f", "=", "Formatter", "(", "as_percent", "(", "precision", ")", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_add_formatter", "(", "f", ")" ]
Format subset as percentages :param precision: Decimal precision :param subset: Pandas subset
[ "Format", "subset", "as", "percentages" ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L335-L342
-1
251,757
HHammond/PrettyPandas
prettypandas/summarizer.py
PrettyPandas.as_currency
def as_currency(self, currency='USD', locale=LOCALE_OBJ, *args, **kwargs): """Format subset as currency :param currency: Currency :param locale: Babel locale for currency formatting :param subset: Pandas subset """ f = Formatter( as_currency(currency=currency, locale=locale), args, kwargs ) return self._add_formatter(f)
python
def as_currency(self, currency='USD', locale=LOCALE_OBJ, *args, **kwargs): """Format subset as currency :param currency: Currency :param locale: Babel locale for currency formatting :param subset: Pandas subset """ f = Formatter( as_currency(currency=currency, locale=locale), args, kwargs ) return self._add_formatter(f)
[ "def", "as_currency", "(", "self", ",", "currency", "=", "'USD'", ",", "locale", "=", "LOCALE_OBJ", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "f", "=", "Formatter", "(", "as_currency", "(", "currency", "=", "currency", ",", "locale", "=", "locale", ")", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_add_formatter", "(", "f", ")" ]
Format subset as currency :param currency: Currency :param locale: Babel locale for currency formatting :param subset: Pandas subset
[ "Format", "subset", "as", "currency" ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L344-L356
-1
251,758
HHammond/PrettyPandas
prettypandas/summarizer.py
PrettyPandas.as_unit
def as_unit(self, unit, location='suffix', *args, **kwargs): """Format subset with units :param unit: string to use as unit :param location: prefix or suffix :param subset: Pandas subset """ f = Formatter( as_unit(unit, location=location), args, kwargs ) return self._add_formatter(f)
python
def as_unit(self, unit, location='suffix', *args, **kwargs): """Format subset with units :param unit: string to use as unit :param location: prefix or suffix :param subset: Pandas subset """ f = Formatter( as_unit(unit, location=location), args, kwargs ) return self._add_formatter(f)
[ "def", "as_unit", "(", "self", ",", "unit", ",", "location", "=", "'suffix'", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "f", "=", "Formatter", "(", "as_unit", "(", "unit", ",", "location", "=", "location", ")", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_add_formatter", "(", "f", ")" ]
Format subset with units :param unit: string to use as unit :param location: prefix or suffix :param subset: Pandas subset
[ "Format", "subset", "as", "with", "units" ]
99a814ffc3aa61f66eaf902afaa4b7802518d33a
https://github.com/HHammond/PrettyPandas/blob/99a814ffc3aa61f66eaf902afaa4b7802518d33a/prettypandas/summarizer.py#L358-L370
-1
251,759
TUT-ARG/sed_eval
sed_eval/sound_event.py
EventBasedMetrics.validate_onset
def validate_onset(reference_event, estimated_event, t_collar=0.200): """Validate estimated event based on event onset Parameters ---------- reference_event : dict Reference event. estimated_event: dict Estimated event. t_collar : float > 0, seconds Time collar with which the estimated onset has to be in order to be considered a valid estimation. Default value 0.2 Returns ------- bool """ # Detect field naming style used and validate onset if 'event_onset' in reference_event and 'event_onset' in estimated_event: return math.fabs(reference_event['event_onset'] - estimated_event['event_onset']) <= t_collar elif 'onset' in reference_event and 'onset' in estimated_event: return math.fabs(reference_event['onset'] - estimated_event['onset']) <= t_collar
python
def validate_onset(reference_event, estimated_event, t_collar=0.200): """Validate estimated event based on event onset Parameters ---------- reference_event : dict Reference event. estimated_event: dict Estimated event. t_collar : float > 0, seconds Time collar with which the estimated onset has to be in order to be considered a valid estimation. Default value 0.2 Returns ------- bool """ # Detect field naming style used and validate onset if 'event_onset' in reference_event and 'event_onset' in estimated_event: return math.fabs(reference_event['event_onset'] - estimated_event['event_onset']) <= t_collar elif 'onset' in reference_event and 'onset' in estimated_event: return math.fabs(reference_event['onset'] - estimated_event['onset']) <= t_collar
[ "def", "validate_onset", "(", "reference_event", ",", "estimated_event", ",", "t_collar", "=", "0.200", ")", ":", "# Detect field naming style used and validate onset", "if", "'event_onset'", "in", "reference_event", "and", "'event_onset'", "in", "estimated_event", ":", "return", "math", ".", "fabs", "(", "reference_event", "[", "'event_onset'", "]", "-", "estimated_event", "[", "'event_onset'", "]", ")", "<=", "t_collar", "elif", "'onset'", "in", "reference_event", "and", "'onset'", "in", "estimated_event", ":", "return", "math", ".", "fabs", "(", "reference_event", "[", "'onset'", "]", "-", "estimated_event", "[", "'onset'", "]", ")", "<=", "t_collar" ]
Validate estimated event based on event onset Parameters ---------- reference_event : dict Reference event. estimated_event: dict Estimated event. t_collar : float > 0, seconds Time collar with which the estimated onset has to be in order to be considered a valid estimation. Default value 0.2 Returns ------- bool
[ "Validate", "estimated", "event", "based", "on", "event", "onset" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/sound_event.py#L1604-L1630
-1
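A worked check of the onset condition, mirroring the comparison in the record above:

import math

ref = {'event_onset': 10.00, 'event_offset': 12.00}
est = {'event_onset': 10.15, 'event_offset': 12.90}

# |10.00 - 10.15| = 0.15 <= 0.2, so the estimated onset is accepted
print(math.fabs(ref['event_onset'] - est['event_onset']) <= 0.200)  # True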
251,760
TUT-ARG/sed_eval
sed_eval/sound_event.py
EventBasedMetrics.validate_offset
def validate_offset(reference_event, estimated_event, t_collar=0.200, percentage_of_length=0.5): """Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be considered a valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be considered a valid estimation. Default value 0.5 Returns ------- bool """ # Detect field naming style used and validate offset if 'event_offset' in reference_event and 'event_offset' in estimated_event: annotated_length = reference_event['event_offset'] - reference_event['event_onset'] return math.fabs(reference_event['event_offset'] - estimated_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length) elif 'offset' in reference_event and 'offset' in estimated_event: annotated_length = reference_event['offset'] - reference_event['onset'] return math.fabs(reference_event['offset'] - estimated_event['offset']) <= max(t_collar, percentage_of_length * annotated_length)
python
def validate_offset(reference_event, estimated_event, t_collar=0.200, percentage_of_length=0.5): """Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be considered a valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be considered a valid estimation. Default value 0.5 Returns ------- bool """ # Detect field naming style used and validate offset if 'event_offset' in reference_event and 'event_offset' in estimated_event: annotated_length = reference_event['event_offset'] - reference_event['event_onset'] return math.fabs(reference_event['event_offset'] - estimated_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length) elif 'offset' in reference_event and 'offset' in estimated_event: annotated_length = reference_event['offset'] - reference_event['onset'] return math.fabs(reference_event['offset'] - estimated_event['offset']) <= max(t_collar, percentage_of_length * annotated_length)
[ "def", "validate_offset", "(", "reference_event", ",", "estimated_event", ",", "t_collar", "=", "0.200", ",", "percentage_of_length", "=", "0.5", ")", ":", "# Detect field naming style used and validate onset", "if", "'event_offset'", "in", "reference_event", "and", "'event_offset'", "in", "estimated_event", ":", "annotated_length", "=", "reference_event", "[", "'event_offset'", "]", "-", "reference_event", "[", "'event_onset'", "]", "return", "math", ".", "fabs", "(", "reference_event", "[", "'event_offset'", "]", "-", "estimated_event", "[", "'event_offset'", "]", ")", "<=", "max", "(", "t_collar", ",", "percentage_of_length", "*", "annotated_length", ")", "elif", "'offset'", "in", "reference_event", "and", "'offset'", "in", "estimated_event", ":", "annotated_length", "=", "reference_event", "[", "'offset'", "]", "-", "reference_event", "[", "'onset'", "]", "return", "math", ".", "fabs", "(", "reference_event", "[", "'offset'", "]", "-", "estimated_event", "[", "'offset'", "]", ")", "<=", "max", "(", "t_collar", ",", "percentage_of_length", "*", "annotated_length", ")" ]
Validate estimated event based on event offset Parameters ---------- reference_event : dict Reference event. estimated_event : dict Estimated event. t_collar : float > 0, seconds First condition, Time collar with which the estimated offset has to be in order to be considered a valid estimation. Default value 0.2 percentage_of_length : float in [0, 1] Second condition, percentage of the length within which the estimated offset has to be in order to be considered a valid estimation. Default value 0.5 Returns ------- bool
[ "Validate", "estimated", "event", "based", "on", "event", "offset" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/sound_event.py#L1633-L1668
-1
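A worked check of the offset condition. The collar widens with the reference event's length, so for long events the percentage term dominates:

import math

ref = {'event_onset': 10.0, 'event_offset': 14.0}   # 4 s reference event
est = {'event_onset': 10.1, 'event_offset': 15.5}

t_collar, percentage_of_length = 0.200, 0.5
annotated_length = ref['event_offset'] - ref['event_onset']       # 4.0
allowed = max(t_collar, percentage_of_length * annotated_length)  # 2.0

# |14.0 - 15.5| = 1.5 <= 2.0: accepted despite missing the 0.2 s collar
print(math.fabs(ref['event_offset'] - est['event_offset']) <= allowed)  # True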
251,761
TUT-ARG/sed_eval
sed_eval/io.py
load_event_list
def load_event_list(filename, **kwargs): """Load event list from csv formatted text-file Supported formats (see more `dcase_util.containers.MetaDataContainer.load()` method): - [event onset (float >= 0)][delimiter][event offset (float >= 0)] - [event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][label] - [filename][delimiter][event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][event label] - [filename][delimiter][scene_label][delimiter][event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][event label] - [filename] Supported delimiters: ``,``, ``;``, ``tab`` Example of event list file:: 21.64715 23.00552 alert 36.91184 38.27021 alert 69.72575 71.09029 alert 63.53990 64.89827 alert 84.25553 84.83920 alert 20.92974 21.82661 clearthroat 28.39992 29.29679 clearthroat 80.47837 81.95937 clearthroat 44.48363 45.96463 clearthroat 78.13073 79.05953 clearthroat 15.17031 16.27235 cough 20.54931 21.65135 cough 27.79964 28.90168 cough 75.45959 76.32490 cough 70.81708 71.91912 cough 21.23203 22.55902 doorslam 7.546220 9.014880 doorslam 34.11303 35.04183 doorslam 45.86001 47.32867 doorslam Parameters ---------- filename : str Path to the csv-file Returns ------- list of dict Event list """ return dcase_util.containers.MetaDataContainer().load(filename=filename, **kwargs)
python
def load_event_list(filename, **kwargs): """Load event list from csv formatted text-file Supported formats (see more `dcase_util.containers.MetaDataContainer.load()` method): - [event onset (float >= 0)][delimiter][event offset (float >= 0)] - [event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][label] - [filename][delimiter][event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][event label] - [filename][delimiter][scene_label][delimiter][event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][event label] - [filename] Supported delimiters: ``,``, ``;``, ``tab`` Example of event list file:: 21.64715 23.00552 alert 36.91184 38.27021 alert 69.72575 71.09029 alert 63.53990 64.89827 alert 84.25553 84.83920 alert 20.92974 21.82661 clearthroat 28.39992 29.29679 clearthroat 80.47837 81.95937 clearthroat 44.48363 45.96463 clearthroat 78.13073 79.05953 clearthroat 15.17031 16.27235 cough 20.54931 21.65135 cough 27.79964 28.90168 cough 75.45959 76.32490 cough 70.81708 71.91912 cough 21.23203 22.55902 doorslam 7.546220 9.014880 doorslam 34.11303 35.04183 doorslam 45.86001 47.32867 doorslam Parameters ---------- filename : str Path to the csv-file Returns ------- list of dict Event list """ return dcase_util.containers.MetaDataContainer().load(filename=filename, **kwargs)
[ "def", "load_event_list", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "return", "dcase_util", ".", "containers", ".", "MetaDataContainer", "(", ")", ".", "load", "(", "filename", "=", "filename", ",", "*", "*", "kwargs", ")" ]
Load event list from csv formatted text-file Supported formats (see more `dcase_util.containers.MetaDataContainer.load()` method): - [event onset (float >= 0)][delimiter][event offset (float >= 0)] - [event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][label] - [filename][delimiter][event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][event label] - [filename][delimiter][scene_label][delimiter][event onset (float >= 0)][delimiter][event offset (float >= 0)][delimiter][event label] - [filename] Supported delimiters: ``,``, ``;``, ``tab`` Example of event list file:: 21.64715 23.00552 alert 36.91184 38.27021 alert 69.72575 71.09029 alert 63.53990 64.89827 alert 84.25553 84.83920 alert 20.92974 21.82661 clearthroat 28.39992 29.29679 clearthroat 80.47837 81.95937 clearthroat 44.48363 45.96463 clearthroat 78.13073 79.05953 clearthroat 15.17031 16.27235 cough 20.54931 21.65135 cough 27.79964 28.90168 cough 75.45959 76.32490 cough 70.81708 71.91912 cough 21.23203 22.55902 doorslam 7.546220 9.014880 doorslam 34.11303 35.04183 doorslam 45.86001 47.32867 doorslam Parameters ---------- filename : str Path to the csv-file Returns ------- list of dict Event list
[ "Load", "event", "list", "from", "csv", "formatted", "text", "-", "file" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/io.py#L22-L70
-1
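A usage sketch: write one of the tab-delimited formats listed above and load it back. sed_eval.io is imported explicitly as a submodule, which should work regardless of what the package's __init__ re-exports:

import sed_eval.io

# [event onset][tab][event offset][tab][event label]
with open('estimated.txt', 'w') as f:
    f.write('21.64715\t23.00552\talert\n')
    f.write('36.91184\t38.27021\talert\n')

events = sed_eval.io.load_event_list('estimated.txt')
print(len(events))   # 2 event dicts (a dcase_util MetaDataContainer)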
251,762
TUT-ARG/sed_eval
sed_eval/io.py
load_scene_list
def load_scene_list(filename, **kwargs): """Load scene list from csv formatted text-file Supported formats (see more `dcase_util.containers.MetaDataContainer.load()` method): - [filename][delimiter][scene label] - [filename][delimiter][segment start (float >= 0)][delimiter][segment stop (float >= 0)][delimiter][scene label] Supported delimiters: ``,``, ``;``, ``tab`` Example of scene list file:: scenes_stereo/supermarket09.wav supermarket scenes_stereo/tubestation10.wav tubestation scenes_stereo/quietstreet08.wav quietstreet scenes_stereo/restaurant05.wav restaurant scenes_stereo/busystreet05.wav busystreet scenes_stereo/openairmarket04.wav openairmarket scenes_stereo/quietstreet01.wav quietstreet scenes_stereo/supermarket05.wav supermarket scenes_stereo/openairmarket01.wav openairmarket Parameters ---------- filename : str Path to the csv-file Returns ------- list of dict Scene list """ return dcase_util.containers.MetaDataContainer().load(filename=filename, **kwargs)
python
def load_scene_list(filename, **kwargs): """Load scene list from csv formatted text-file Supported formats (see more `dcase_util.containers.MetaDataContainer.load()` method): - [filename][delimiter][scene label] - [filename][delimiter][segment start (float >= 0)][delimiter][segment stop (float >= 0)][delimiter][scene label] Supported delimiters: ``,``, ``;``, ``tab`` Example of scene list file:: scenes_stereo/supermarket09.wav supermarket scenes_stereo/tubestation10.wav tubestation scenes_stereo/quietstreet08.wav quietstreet scenes_stereo/restaurant05.wav restaurant scenes_stereo/busystreet05.wav busystreet scenes_stereo/openairmarket04.wav openairmarket scenes_stereo/quietstreet01.wav quietstreet scenes_stereo/supermarket05.wav supermarket scenes_stereo/openairmarket01.wav openairmarket Parameters ---------- filename : str Path to the csv-file Returns ------- list of dict Scene list """ return dcase_util.containers.MetaDataContainer().load(filename=filename, **kwargs)
[ "def", "load_scene_list", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "return", "dcase_util", ".", "containers", ".", "MetaDataContainer", "(", ")", ".", "load", "(", "filename", "=", "filename", ",", "*", "*", "kwargs", ")" ]
Load scene list from csv formatted text-file Supported formats (see more `dcase_util.containers.MetaDataContainer.load()` method): - [filename][delimiter][scene label] - [filename][delimiter][segment start (float >= 0)][delimiter][segment stop (float >= 0)][delimiter][scene label] Supported delimiters: ``,``, ``;``, ``tab`` Example of scene list file:: scenes_stereo/supermarket09.wav supermarket scenes_stereo/tubestation10.wav tubestation scenes_stereo/quietstreet08.wav quietstreet scenes_stereo/restaurant05.wav restaurant scenes_stereo/busystreet05.wav busystreet scenes_stereo/openairmarket04.wav openairmarket scenes_stereo/quietstreet01.wav quietstreet scenes_stereo/supermarket05.wav supermarket scenes_stereo/openairmarket01.wav openairmarket Parameters ---------- filename : str Path to the csv-file Returns ------- list of dict Scene list
[ "Load", "scene", "list", "from", "csv", "formatted", "text", "-", "file" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/io.py#L73-L107
-1
251,763
TUT-ARG/sed_eval
sed_eval/io.py
load_file_pair_list
def load_file_pair_list(filename): """Load file pair list from csv formatted text-file Format is [reference_file][delimiter][estimated_file] Supported delimiters: ``,``, ``;``, ``tab`` Example of file-list:: office_snr0_high_v2.txt office_snr0_high_v2_detected.txt office_snr0_med_v2.txt office_snr0_med_v2_detected.txt Parameters ---------- filename : str Path to the csv-file Returns ------- file_list: list File pair dicts in a list """ data = [] input_file = open(filename, 'rt') try: dialect = csv.Sniffer().sniff(input_file.readline(), [',', ';', '\t']) except csv.Error: raise ValueError('Unknown delimiter in file [{file}].'.format(file=filename)) input_file.seek(0) for row in csv.reader(input_file, dialect): if len(row) == 2: data.append( { 'reference_file': row[0], 'estimated_file': row[1] } ) else: raise ValueError('Unknown file pair list format in file [{file}].'.format(file=filename)) input_file.close() return data
python
def load_file_pair_list(filename): """Load file pair list from csv formatted text-file Format is [reference_file][delimiter][estimated_file] Supported delimiters: ``,``, ``;``, ``tab`` Example of file-list:: office_snr0_high_v2.txt office_snr0_high_v2_detected.txt office_snr0_med_v2.txt office_snr0_med_v2_detected.txt Parameters ---------- filename : str Path to the csv-file Returns ------- file_list: list File pair dicts in a list """ data = [] input_file = open(filename, 'rt') try: dialect = csv.Sniffer().sniff(input_file.readline(), [',', ';', '\t']) except csv.Error: raise ValueError('Unknown delimiter in file [{file}].'.format(file=filename)) input_file.seek(0) for row in csv.reader(input_file, dialect): if len(row) == 2: data.append( { 'reference_file': row[0], 'estimated_file': row[1] } ) else: raise ValueError('Unknown file pair list format in file [{file}].'.format(file=filename)) input_file.close() return data
[ "def", "load_file_pair_list", "(", "filename", ")", ":", "data", "=", "[", "]", "input_file", "=", "open", "(", "filename", ",", "'rt'", ")", "try", ":", "dialect", "=", "csv", ".", "Sniffer", "(", ")", ".", "sniff", "(", "input_file", ".", "readline", "(", ")", ",", "[", "','", ",", "';'", ",", "'\\t'", "]", ")", "except", "csv", ".", "Error", ":", "raise", "ValueError", "(", "'Unknown delimiter in file [{file}].'", ".", "format", "(", "file", "=", "filename", ")", ")", "input_file", ".", "seek", "(", "0", ")", "for", "row", "in", "csv", ".", "reader", "(", "input_file", ",", "dialect", ")", ":", "if", "len", "(", "row", ")", "==", "2", ":", "data", ".", "append", "(", "{", "'reference_file'", ":", "row", "[", "0", "]", ",", "'estimated_file'", ":", "row", "[", "1", "]", "}", ")", "else", ":", "raise", "ValueError", "(", "'Unknown file pair list format in file [{file}].'", ".", "format", "(", "file", "=", "filename", ")", ")", "input_file", ".", "close", "(", ")", "return", "data" ]
Load file pair list from csv formatted text-file Format is [reference_file][delimiter][estimated_file] Supported delimiters: ``,``, ``;``, ``tab`` Example of file-list:: office_snr0_high_v2.txt office_snr0_high_v2_detected.txt office_snr0_med_v2.txt office_snr0_med_v2_detected.txt Parameters ---------- filename : str Path to the csv-file Returns ------- file_list: list File pair dicts in a list
[ "Load", "file", "pair", "list", "csv", "formatted", "text", "-", "file" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/io.py#L110-L160
-1
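A usage sketch for the file-pair loader; the sniffer accepts comma, semicolon or tab delimiters, tab is used here:

import sed_eval.io

with open('file_pairs.txt', 'w') as f:
    f.write('office_snr0_high_v2.txt\toffice_snr0_high_v2_detected.txt\n')
    f.write('office_snr0_med_v2.txt\toffice_snr0_med_v2_detected.txt\n')

pairs = sed_eval.io.load_file_pair_list('file_pairs.txt')
print(pairs[0]['reference_file'])   # office_snr0_high_v2.txt
print(pairs[0]['estimated_file'])   # office_snr0_high_v2_detected.txt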
251,764
TUT-ARG/sed_eval
sed_eval/scene.py
SceneClassificationMetrics.class_wise_accuracy
def class_wise_accuracy(self, scene_label): """Class-wise accuracy Returns ------- dict results in a dictionary format """ if len(self.accuracies_per_class.shape) == 2: return { 'accuracy': float(numpy.mean(self.accuracies_per_class[:, self.scene_label_list.index(scene_label)])) } else: return { 'accuracy': float(numpy.mean(self.accuracies_per_class[self.scene_label_list.index(scene_label)])) }
python
def class_wise_accuracy(self, scene_label): """Class-wise accuracy Returns ------- dict results in a dictionary format """ if len(self.accuracies_per_class.shape) == 2: return { 'accuracy': float(numpy.mean(self.accuracies_per_class[:, self.scene_label_list.index(scene_label)])) } else: return { 'accuracy': float(numpy.mean(self.accuracies_per_class[self.scene_label_list.index(scene_label)])) }
[ "def", "class_wise_accuracy", "(", "self", ",", "scene_label", ")", ":", "if", "len", "(", "self", ".", "accuracies_per_class", ".", "shape", ")", "==", "2", ":", "return", "{", "'accuracy'", ":", "float", "(", "numpy", ".", "mean", "(", "self", ".", "accuracies_per_class", "[", ":", ",", "self", ".", "scene_label_list", ".", "index", "(", "scene_label", ")", "]", ")", ")", "}", "else", ":", "return", "{", "'accuracy'", ":", "float", "(", "numpy", ".", "mean", "(", "self", ".", "accuracies_per_class", "[", "self", ".", "scene_label_list", ".", "index", "(", "scene_label", ")", "]", ")", ")", "}" ]
Class-wise accuracy Returns ------- dict results in a dictionary format
[ "Class", "-", "wise", "accuracy" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/scene.py#L466-L484
-1
251,765
TUT-ARG/sed_eval
sed_eval/util/scene_list.py
unique_scene_labels
def unique_scene_labels(scene_list): """Find the unique scene labels Parameters ---------- scene_list : list, shape=(n,) A list containing scene dicts Returns ------- labels: list, shape=(n,) Unique labels in alphabetical order """ if isinstance(scene_list, dcase_util.containers.MetaDataContainer): return scene_list.unique_scene_labels else: labels = [] for item in scene_list: if 'scene_label' in item and item['scene_label'] not in labels: labels.append(item['scene_label']) labels.sort() return labels
python
def unique_scene_labels(scene_list): """Find the unique scene labels Parameters ---------- scene_list : list, shape=(n,) A list containing scene dicts Returns ------- labels: list, shape=(n,) Unique labels in alphabetical order """ if isinstance(scene_list, dcase_util.containers.MetaDataContainer): return scene_list.unique_scene_labels else: labels = [] for item in scene_list: if 'scene_label' in item and item['scene_label'] not in labels: labels.append(item['scene_label']) labels.sort() return labels
[ "def", "unique_scene_labels", "(", "scene_list", ")", ":", "if", "isinstance", "(", "scene_list", ",", "dcase_util", ".", "containers", ".", "MetaDataContainer", ")", ":", "return", "scene_list", ".", "unique_scene_labels", "else", ":", "labels", "=", "[", "]", "for", "item", "in", "scene_list", ":", "if", "'scene_label'", "in", "item", "and", "item", "[", "'scene_label'", "]", "not", "in", "labels", ":", "labels", ".", "append", "(", "item", "[", "'scene_label'", "]", ")", "labels", ".", "sort", "(", ")", "return", "labels" ]
Find the unique scene labels Parameters ---------- scene_list : list, shape=(n,) A list containing scene dicts Returns ------- labels: list, shape=(n,) Unique labels in alphabetical order
[ "Find", "the", "unique", "scene", "labels" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/scene_list.py#L11-L35
-1
251,766
TUT-ARG/sed_eval
sed_eval/util/event_roll.py
event_list_to_event_roll
def event_list_to_event_roll(source_event_list, event_label_list=None, time_resolution=0.01): """Convert event list into event roll, binary activity matrix Parameters ---------- source_event_list : list, shape=(n,) A list containing event dicts event_label_list : list, shape=(k,) or None A list containing unique labels in alphabetical order (Default value = None) time_resolution : float > 0 Time resolution in seconds of the event roll (Default value = 0.01) Returns ------- event_roll: np.ndarray, shape=(m,k) Event roll """ if isinstance(source_event_list, dcase_util.containers.MetaDataContainer): max_offset_value = source_event_list.max_offset if event_label_list is None: event_label_list = source_event_list.unique_event_labels elif isinstance(source_event_list, list): max_offset_value = event_list.max_event_offset(source_event_list) if event_label_list is None: event_label_list = event_list.unique_event_labels(source_event_list) else: raise ValueError('Unknown source_event_list type.') # Initialize event roll event_roll = numpy.zeros((int(math.ceil(max_offset_value * 1 / time_resolution)), len(event_label_list))) # Fill-in event_roll for event in source_event_list: pos = event_label_list.index(event['event_label']) if 'event_onset' in event and 'event_offset' in event: event_onset = event['event_onset'] event_offset = event['event_offset'] elif 'onset' in event and 'offset' in event: event_onset = event['onset'] event_offset = event['offset'] onset = int(math.floor(event_onset * 1 / float(time_resolution))) offset = int(math.ceil(event_offset * 1 / float(time_resolution))) event_roll[onset:offset, pos] = 1 return event_roll
python
def event_list_to_event_roll(source_event_list, event_label_list=None, time_resolution=0.01): """Convert event list into event roll, binary activity matrix Parameters ---------- source_event_list : list, shape=(n,) A list containing event dicts event_label_list : list, shape=(k,) or None A list containing unique labels in alphabetical order (Default value = None) time_resolution : float > 0 Time resolution in seconds of the event roll (Default value = 0.01) Returns ------- event_roll: np.ndarray, shape=(m,k) Event roll """ if isinstance(source_event_list, dcase_util.containers.MetaDataContainer): max_offset_value = source_event_list.max_offset if event_label_list is None: event_label_list = source_event_list.unique_event_labels elif isinstance(source_event_list, list): max_offset_value = event_list.max_event_offset(source_event_list) if event_label_list is None: event_label_list = event_list.unique_event_labels(source_event_list) else: raise ValueError('Unknown source_event_list type.') # Initialize event roll event_roll = numpy.zeros((int(math.ceil(max_offset_value * 1 / time_resolution)), len(event_label_list))) # Fill-in event_roll for event in source_event_list: pos = event_label_list.index(event['event_label']) if 'event_onset' in event and 'event_offset' in event: event_onset = event['event_onset'] event_offset = event['event_offset'] elif 'onset' in event and 'offset' in event: event_onset = event['onset'] event_offset = event['offset'] onset = int(math.floor(event_onset * 1 / float(time_resolution))) offset = int(math.ceil(event_offset * 1 / float(time_resolution))) event_roll[onset:offset, pos] = 1 return event_roll
[ "def", "event_list_to_event_roll", "(", "source_event_list", ",", "event_label_list", "=", "None", ",", "time_resolution", "=", "0.01", ")", ":", "if", "isinstance", "(", "source_event_list", ",", "dcase_util", ".", "containers", ".", "MetaDataContainer", ")", ":", "max_offset_value", "=", "source_event_list", ".", "max_offset", "if", "event_label_list", "is", "None", ":", "event_label_list", "=", "source_event_list", ".", "unique_event_labels", "elif", "isinstance", "(", "source_event_list", ",", "list", ")", ":", "max_offset_value", "=", "event_list", ".", "max_event_offset", "(", "source_event_list", ")", "if", "event_label_list", "is", "None", ":", "event_label_list", "=", "event_list", ".", "unique_event_labels", "(", "source_event_list", ")", "else", ":", "raise", "ValueError", "(", "'Unknown source_event_list type.'", ")", "# Initialize event roll", "event_roll", "=", "numpy", ".", "zeros", "(", "(", "int", "(", "math", ".", "ceil", "(", "max_offset_value", "*", "1", "/", "time_resolution", ")", ")", ",", "len", "(", "event_label_list", ")", ")", ")", "# Fill-in event_roll", "for", "event", "in", "source_event_list", ":", "pos", "=", "event_label_list", ".", "index", "(", "event", "[", "'event_label'", "]", ")", "if", "'event_onset'", "in", "event", "and", "'event_offset'", "in", "event", ":", "event_onset", "=", "event", "[", "'event_onset'", "]", "event_offset", "=", "event", "[", "'event_offset'", "]", "elif", "'onset'", "in", "event", "and", "'offset'", "in", "event", ":", "event_onset", "=", "event", "[", "'onset'", "]", "event_offset", "=", "event", "[", "'offset'", "]", "onset", "=", "int", "(", "math", ".", "floor", "(", "event_onset", "*", "1", "/", "float", "(", "time_resolution", ")", ")", ")", "offset", "=", "int", "(", "math", ".", "ceil", "(", "event_offset", "*", "1", "/", "float", "(", "time_resolution", ")", ")", ")", "event_roll", "[", "onset", ":", "offset", ",", "pos", "]", "=", "1", "return", "event_roll" ]
Convert event list into event roll, binary activity matrix Parameters ---------- source_event_list : list, shape=(n,) A list containing event dicts event_label_list : list, shape=(k,) or None A list containing unique labels in alphabetical order (Default value = None) time_resolution : float > 0 Time resolution in seconds of the event roll (Default value = 0.01) Returns ------- event_roll: np.ndarray, shape=(m,k) Event roll
[ "Convert", "event", "list", "into", "event", "roll", "binary", "activity", "matrix" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/event_roll.py#L13-L71
-1
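A usage sketch with plain event dicts (the list branch above), plus the index arithmetic the function performs per event; the sed_eval.util.event_roll import path follows the path field of this record:

import math
import sed_eval.util.event_roll as event_roll

events = [
    {'event_onset': 0.10, 'event_offset': 0.30, 'event_label': 'alert'},
    {'event_onset': 0.25, 'event_offset': 0.52, 'event_label': 'cough'},
]
roll = event_roll.event_list_to_event_roll(events, time_resolution=0.01)
print(roll.shape)   # (52, 2): ceil(0.52 / 0.01) frames x 2 sorted labels

# per-event frame indices, as computed inside the function:
print(int(math.floor(0.25 / 0.01)), int(math.ceil(0.52 / 0.01)))  # 25 52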
251,767
TUT-ARG/sed_eval
sed_eval/util/event_roll.py
pad_event_roll
def pad_event_roll(event_roll, length): """Pad event roll's length to given length Parameters ---------- event_roll: np.ndarray, shape=(m,k) Event roll length : int Target length to pad to Returns ------- event_roll: np.ndarray, shape=(m,k) Padded event roll """ if length > event_roll.shape[0]: padding = numpy.zeros((length-event_roll.shape[0], event_roll.shape[1])) event_roll = numpy.vstack((event_roll, padding)) return event_roll
python
def pad_event_roll(event_roll, length): """Pad event roll's length to given length Parameters ---------- event_roll: np.ndarray, shape=(m,k) Event roll length : int Target length to pad to Returns ------- event_roll: np.ndarray, shape=(m,k) Padded event roll """ if length > event_roll.shape[0]: padding = numpy.zeros((length-event_roll.shape[0], event_roll.shape[1])) event_roll = numpy.vstack((event_roll, padding)) return event_roll
[ "def", "pad_event_roll", "(", "event_roll", ",", "length", ")", ":", "if", "length", ">", "event_roll", ".", "shape", "[", "0", "]", ":", "padding", "=", "numpy", ".", "zeros", "(", "(", "length", "-", "event_roll", ".", "shape", "[", "0", "]", ",", "event_roll", ".", "shape", "[", "1", "]", ")", ")", "event_roll", "=", "numpy", ".", "vstack", "(", "(", "event_roll", ",", "padding", ")", ")", "return", "event_roll" ]
Pad event roll's length to given length Parameters ---------- event_roll: np.ndarray, shape=(m,k) Event roll length : int Target length to pad to Returns ------- event_roll: np.ndarray, shape=(m,k) Padded event roll
[ "Pad", "event", "roll", "s", "length", "to", "given", "length" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/event_roll.py#L74-L96
-1
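A quick check of the padding semantics: the function only ever grows the roll, so asking for a shorter length is a no-op:

import numpy
import sed_eval.util.event_roll as event_roll

roll = numpy.ones((10, 3))
print(event_roll.pad_event_roll(roll, length=15).shape)  # (15, 3), rows 10-14 zero
print(event_roll.pad_event_roll(roll, length=5).shape)   # (10, 3), never truncates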
251,768
TUT-ARG/sed_eval
sed_eval/util/event_roll.py
match_event_roll_lengths
def match_event_roll_lengths(event_roll_a, event_roll_b, length=None): """Fix the length of two event rolls Parameters ---------- event_roll_a: np.ndarray, shape=(m1,k) Event roll A event_roll_b: np.ndarray, shape=(m2,k) Event roll B length: int, optional Length of the event roll, if none given, shorter event roll is padded to match longer one. Returns ------- event_roll_a: np.ndarray, shape=(max(m1,m2),k) Padded event roll A event_roll_b: np.ndarray, shape=(max(m1,m2),k) Padded event roll B """ # Fix durations of both event_rolls to be equal if length is None: length = max(event_roll_b.shape[0], event_roll_a.shape[0]) else: length = int(length) if length < event_roll_a.shape[0]: event_roll_a = event_roll_a[0:length, :] else: event_roll_a = pad_event_roll( event_roll=event_roll_a, length=length ) if length < event_roll_b.shape[0]: event_roll_b = event_roll_b[0:length, :] else: event_roll_b = pad_event_roll( event_roll=event_roll_b, length=length ) return event_roll_a, event_roll_b
python
def match_event_roll_lengths(event_roll_a, event_roll_b, length=None): """Fix the length of two event rolls Parameters ---------- event_roll_a: np.ndarray, shape=(m1,k) Event roll A event_roll_b: np.ndarray, shape=(m2,k) Event roll B length: int, optional Length of the event roll, if none given, shorter event roll is padded to match longer one. Returns ------- event_roll_a: np.ndarray, shape=(max(m1,m2),k) Padded event roll A event_roll_b: np.ndarray, shape=(max(m1,m2),k) Padded event roll B """ # Fix durations of both event_rolls to be equal if length is None: length = max(event_roll_b.shape[0], event_roll_a.shape[0]) else: length = int(length) if length < event_roll_a.shape[0]: event_roll_a = event_roll_a[0:length, :] else: event_roll_a = pad_event_roll( event_roll=event_roll_a, length=length ) if length < event_roll_b.shape[0]: event_roll_b = event_roll_b[0:length, :] else: event_roll_b = pad_event_roll( event_roll=event_roll_b, length=length ) return event_roll_a, event_roll_b
[ "def", "match_event_roll_lengths", "(", "event_roll_a", ",", "event_roll_b", ",", "length", "=", "None", ")", ":", "# Fix durations of both event_rolls to be equal", "if", "length", "is", "None", ":", "length", "=", "max", "(", "event_roll_b", ".", "shape", "[", "0", "]", ",", "event_roll_a", ".", "shape", "[", "0", "]", ")", "else", ":", "length", "=", "int", "(", "length", ")", "if", "length", "<", "event_roll_a", ".", "shape", "[", "0", "]", ":", "event_roll_a", "=", "event_roll_a", "[", "0", ":", "length", ",", ":", "]", "else", ":", "event_roll_a", "=", "pad_event_roll", "(", "event_roll", "=", "event_roll_a", ",", "length", "=", "length", ")", "if", "length", "<", "event_roll_b", ".", "shape", "[", "0", "]", ":", "event_roll_b", "=", "event_roll_b", "[", "0", ":", "length", ",", ":", "]", "else", ":", "event_roll_b", "=", "pad_event_roll", "(", "event_roll", "=", "event_roll_b", ",", "length", "=", "length", ")", "return", "event_roll_a", ",", "event_roll_b" ]
Fix the length of two event rolls Parameters ---------- event_roll_a: np.ndarray, shape=(m1,k) Event roll A event_roll_b: np.ndarray, shape=(m2,k) Event roll B length: int, optional Length of the event rolls; if none is given, the shorter event roll is padded to match the longer one. Returns ------- event_roll_a: np.ndarray, shape=(max(m1,m2),k) Padded event roll A event_roll_b: np.ndarray, shape=(max(m1,m2),k) Padded event roll B
[ "Fix", "the", "length", "of", "two", "event", "rolls" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/event_roll.py#L99-L148
-1
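To make the truncate-or-pad behaviour of ``match_event_roll_lengths`` concrete (same import assumption as above): without ``length`` the shorter roll is padded; with an explicit ``length`` both rolls are cut or padded to exactly that many frames.

```python
import numpy
from sed_eval.util.event_roll import match_event_roll_lengths

a, b = numpy.ones((10, 3)), numpy.ones((6, 3))
a2, b2 = match_event_roll_lengths(a, b)            # b is zero-padded to 10 frames
assert a2.shape == b2.shape == (10, 3)
a3, b3 = match_event_roll_lengths(a, b, length=8)  # a is truncated, b is padded
assert a3.shape == b3.shape == (8, 3)
```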
251,769
TUT-ARG/sed_eval
sed_eval/util/event_list.py
filter_event_list
def filter_event_list(event_list, scene_label=None, event_label=None, filename=None): """Filter event list based on given fields Parameters ---------- event_list : list, shape=(n,) A list containing event dicts scene_label : str Scene label event_label : str Event label filename : str Filename Returns ------- event_list: list, shape=(n,) A list containing event dicts """ return dcase_util.containers.MetaDataContainer(event_list).filter( filename=filename, scene_label=scene_label, event_label=event_label )
python
def filter_event_list(event_list, scene_label=None, event_label=None, filename=None): """Filter event list based on given fields Parameters ---------- event_list : list, shape=(n,) A list containing event dicts scene_label : str Scene label event_label : str Event label filename : str Filename Returns ------- event_list: list, shape=(n,) A list containing event dicts """ return dcase_util.containers.MetaDataContainer(event_list).filter( filename=filename, scene_label=scene_label, event_label=event_label )
[ "def", "filter_event_list", "(", "event_list", ",", "scene_label", "=", "None", ",", "event_label", "=", "None", ",", "filename", "=", "None", ")", ":", "return", "dcase_util", ".", "containers", ".", "MetaDataContainer", "(", "event_list", ")", ".", "filter", "(", "filename", "=", "filename", ",", "scene_label", "=", "scene_label", ",", "event_label", "=", "event_label", ")" ]
Filter event list based on given fields Parameters ---------- event_list : list, shape=(n,) A list containing event dicts scene_label : str Scene label event_label : str Event label filename : str Filename Returns ------- event_list: list, shape=(n,) A list containing event dicts
[ "Filter", "event", "list", "based", "on", "given", "fields" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/event_list.py#L15-L43
-1
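``filter_event_list`` is a thin wrapper over ``dcase_util.containers.MetaDataContainer.filter``; any combination of the three fields can be given, and fields left as ``None`` are ignored. A hypothetical call (field values invented for illustration, import path assumed from the record):

```python
from sed_eval.util.event_list import filter_event_list

event_list = [
    {'filename': 'a.wav', 'event_label': 'dog bark', 'onset': 0.5, 'offset': 1.2},
    {'filename': 'b.wav', 'event_label': 'car', 'onset': 0.0, 'offset': 0.8},
]
# Keep only 'dog bark' events; filename/scene_label stay unfiltered (None).
dog_events = filter_event_list(event_list, event_label='dog bark')
```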
251,770
TUT-ARG/sed_eval
sed_eval/util/event_list.py
unique_files
def unique_files(event_list): """Find the unique files Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique filenames in alphabetical order """ if isinstance(event_list, dcase_util.containers.MetaDataContainer): return event_list.unique_files else: files = {} for event in event_list: if 'file' in event: files[event['file']] = event['file'] elif 'filename' in event: files[event['filename']] = event['filename'] files = list(files.keys()) files.sort() return files
python
def unique_files(event_list): """Find the unique files Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique filenames in alphabetical order """ if isinstance(event_list, dcase_util.containers.MetaDataContainer): return event_list.unique_files else: files = {} for event in event_list: if 'file' in event: files[event['file']] = event['file'] elif 'filename' in event: files[event['filename']] = event['filename'] files = list(files.keys()) files.sort() return files
[ "def", "unique_files", "(", "event_list", ")", ":", "if", "isinstance", "(", "event_list", ",", "dcase_util", ".", "containers", ".", "MetaDataContainer", ")", ":", "return", "event_list", ".", "unique_files", "else", ":", "files", "=", "{", "}", "for", "event", "in", "event_list", ":", "if", "'file'", "in", "event", ":", "files", "[", "event", "[", "'file'", "]", "]", "=", "event", "[", "'file'", "]", "elif", "'filename'", "in", "event", ":", "files", "[", "event", "[", "'filename'", "]", "]", "=", "event", "[", "'filename'", "]", "files", "=", "list", "(", "files", ".", "keys", "(", ")", ")", "files", ".", "sort", "(", ")", "return", "files" ]
Find the unique files Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique filenames in alphabetical order
[ "Find", "the", "unique", "files" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/event_list.py#L46-L75
-1
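Note that for plain dict lists ``unique_files`` accepts both the ``file`` and ``filename`` keys and returns a sorted, deduplicated list:

```python
from sed_eval.util.event_list import unique_files  # path assumed from the record

events = [{'file': 'b.wav'}, {'filename': 'a.wav'}, {'file': 'b.wav'}]
assert unique_files(events) == ['a.wav', 'b.wav']  # deduplicated, alphabetical
```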
251,771
TUT-ARG/sed_eval
sed_eval/util/event_list.py
unique_event_labels
def unique_event_labels(event_list): """Find the unique event labels Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique labels in alphabetical order """ if isinstance(event_list, dcase_util.containers.MetaDataContainer): return event_list.unique_event_labels else: labels = [] for event in event_list: if 'event_label' in event and event['event_label'] not in labels: labels.append(event['event_label']) labels.sort() return labels
python
def unique_event_labels(event_list): """Find the unique event labels Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique labels in alphabetical order """ if isinstance(event_list, dcase_util.containers.MetaDataContainer): return event_list.unique_event_labels else: labels = [] for event in event_list: if 'event_label' in event and event['event_label'] not in labels: labels.append(event['event_label']) labels.sort() return labels
[ "def", "unique_event_labels", "(", "event_list", ")", ":", "if", "isinstance", "(", "event_list", ",", "dcase_util", ".", "containers", ".", "MetaDataContainer", ")", ":", "return", "event_list", ".", "unique_event_labels", "else", ":", "labels", "=", "[", "]", "for", "event", "in", "event_list", ":", "if", "'event_label'", "in", "event", "and", "event", "[", "'event_label'", "]", "not", "in", "labels", ":", "labels", ".", "append", "(", "event", "[", "'event_label'", "]", ")", "labels", ".", "sort", "(", ")", "return", "labels" ]
Find the unique event labels Parameters ---------- event_list : list or dcase_util.containers.MetaDataContainer A list containing event dicts Returns ------- list Unique labels in alphabetical order
[ "Find", "the", "unique", "event", "labels" ]
0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/util/event_list.py#L78-L103
-1
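``unique_event_labels`` behaves the same way for labels, deduplicating while preserving alphabetical order:

```python
from sed_eval.util.event_list import unique_event_labels  # path assumed from the record

events = [{'event_label': 'dog'}, {'event_label': 'cat'}, {'event_label': 'dog'}]
assert unique_event_labels(events) == ['cat', 'dog']
```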
251,772
rasguanabana/ytfs
ytfs/actions.py
YTActions.__getChannelId
def __getChannelId(self): """ Obtain channel id for channel name, if present in ``self.search_params``. """ if not self.search_params.get("channelId"): return api_fixed_url = "https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "forUsername": self.search_params["channelId"]}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id'] return # got it except IndexError: pass # try searching now... api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "q": self.search_params['channelId']}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id']['channelId'] except IndexError: del self.search_params["channelId"]
python
def __getChannelId(self): """ Obtain channel id for channel name, if present in ``self.search_params``. """ if not self.search_params.get("channelId"): return api_fixed_url = "https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "forUsername": self.search_params["channelId"]}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id'] return # got it except IndexError: pass # try searching now... api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "q": self.search_params['channelId']}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id']['channelId'] except IndexError: del self.search_params["channelId"]
[ "def", "__getChannelId", "(", "self", ")", ":", "if", "not", "self", ".", "search_params", ".", "get", "(", "\"channelId\"", ")", ":", "return", "api_fixed_url", "=", "\"https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&\"", "url", "=", "api_fixed_url", "+", "urlencode", "(", "{", "\"key\"", ":", "self", ".", "api_key", ",", "\"forUsername\"", ":", "self", ".", "search_params", "[", "\"channelId\"", "]", "}", ")", "get", "=", "requests", ".", "get", "(", "url", ")", ".", "json", "(", ")", "try", ":", "self", ".", "search_params", "[", "\"channelId\"", "]", "=", "get", "[", "'items'", "]", "[", "0", "]", "[", "'id'", "]", "return", "# got it", "except", "IndexError", ":", "pass", "# try searching now...", "api_fixed_url", "=", "\"https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&fields=items%2Fid&\"", "url", "=", "api_fixed_url", "+", "urlencode", "(", "{", "\"key\"", ":", "self", ".", "api_key", ",", "\"q\"", ":", "self", ".", "search_params", "[", "'channelId'", "]", "}", ")", "get", "=", "requests", ".", "get", "(", "url", ")", ".", "json", "(", ")", "try", ":", "self", ".", "search_params", "[", "\"channelId\"", "]", "=", "get", "[", "'items'", "]", "[", "0", "]", "[", "'id'", "]", "[", "'channelId'", "]", "except", "IndexError", ":", "del", "self", ".", "search_params", "[", "\"channelId\"", "]" ]
Obtain channel id for channel name, if present in ``self.search_params``.
[ "Obtain", "channel", "id", "for", "channel", "name", "if", "present", "in", "self", ".", "search_params", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/actions.py#L113-L141
-1
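Since ``__getChannelId`` is name-mangled and mutates ``self.search_params`` in place, the two-step lookup it performs is easier to see as a standalone sketch: first an exact legacy-username lookup on the ``channels`` endpoint, then a free-text fallback on the ``search`` endpoint. The function below is hypothetical; a valid YouTube Data API v3 key is assumed.

```python
import requests
from urllib.parse import urlencode

def resolve_channel_id(api_key, name):
    # Step 1: exact legacy-username lookup.
    url = ("https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1"
           "&fields=items%2Fid&" + urlencode({"key": api_key, "forUsername": name}))
    items = requests.get(url).json().get('items', [])
    if items:
        return items[0]['id']
    # Step 2: fall back to a full-text channel search.
    url = ("https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel"
           "&fields=items%2Fid&" + urlencode({"key": api_key, "q": name}))
    items = requests.get(url).json().get('items', [])
    return items[0]['id']['channelId'] if items else None
```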
251,773
rasguanabana/ytfs
ytfs/actions.py
YTActions.__searchParser
def __searchParser(self, query): """ Parse `query` for advanced search options. Parameters ---------- query : str Search query to parse. Besides a search query, user can specify additional search parameters and YTFS specific options. Syntax: Additional search parameters: ``option:value``. if `value` contains spaces, then surround it with parentheses; available parameters: `channel`, `max`, `before`, `after`, `order`. YTFS options: specify options between ``[`` and ``]``; Available options: `a`, `v`, `f`, `P`, `s`, `m`. If an option takes a parameter, then specify it beetween parentheses. Examples: ``channel:foo search query``, ``my favourite music [a]``, ``channel:(the famous funny cats channel) [vf(240)P] funny cats max:20``. Invalid parameters/options are ignored. Returns ------- params : tuple Tuple: 0 - dictionary of url GET parameters; 1 - dictionary of YTStor options. """ ret = dict() parse_params = True buf = "" ptr = "" p_avail = ("channel", "max", "before", "after", "order") opts = dict() par_open = False translate = { 'a': 'audio', 'v': 'video', 'f': 'format', 's': 'stream', 'P': 'stream', 'm': 'metadata', 'max': 'maxResults', 'channel': 'channelId', 'before': 'publishedBefore', 'after': 'publishedAfter', 'order': 'order', '': 'q' } for i in query+' ': if parse_params: if not par_open: if i == ' ': # flush buf try: if ret.get(translate[ptr]): ret[ translate[ptr] ] += ' ' else: ret[ translate[ptr] ] = '' ret[ translate[ptr] ] += buf except KeyError: pass ptr = "" buf = "" elif i == ':' and buf in p_avail: ptr = buf buf = "" elif not buf and i == '[': # buf must be empty parse_params = False ptr = "" elif i != '(': buf += i elif not (par_open == 1 and i == ')'): buf += i if i == '(': par_open += 1 if par_open > 0 and i == ')': par_open -= 1 else: if i == ']': parse_params = True par_open = False ptr = "" buf = "" elif ptr and not par_open and i == '(': par_open = True elif par_open: if i == ')': try: opts[ translate[ptr] ] = buf except KeyError: pass par_open = False buf = "" else: buf += i elif i.isalpha(): ptr = i try: opts[ translate[ptr] ] = not i.isupper() except KeyError: pass return (ret, opts)
python
def __searchParser(self, query): """ Parse `query` for advanced search options. Parameters ---------- query : str Search query to parse. Besides a search query, user can specify additional search parameters and YTFS specific options. Syntax: Additional search parameters: ``option:value``. if `value` contains spaces, then surround it with parentheses; available parameters: `channel`, `max`, `before`, `after`, `order`. YTFS options: specify options between ``[`` and ``]``; Available options: `a`, `v`, `f`, `P`, `s`, `m`. If an option takes a parameter, then specify it beetween parentheses. Examples: ``channel:foo search query``, ``my favourite music [a]``, ``channel:(the famous funny cats channel) [vf(240)P] funny cats max:20``. Invalid parameters/options are ignored. Returns ------- params : tuple Tuple: 0 - dictionary of url GET parameters; 1 - dictionary of YTStor options. """ ret = dict() parse_params = True buf = "" ptr = "" p_avail = ("channel", "max", "before", "after", "order") opts = dict() par_open = False translate = { 'a': 'audio', 'v': 'video', 'f': 'format', 's': 'stream', 'P': 'stream', 'm': 'metadata', 'max': 'maxResults', 'channel': 'channelId', 'before': 'publishedBefore', 'after': 'publishedAfter', 'order': 'order', '': 'q' } for i in query+' ': if parse_params: if not par_open: if i == ' ': # flush buf try: if ret.get(translate[ptr]): ret[ translate[ptr] ] += ' ' else: ret[ translate[ptr] ] = '' ret[ translate[ptr] ] += buf except KeyError: pass ptr = "" buf = "" elif i == ':' and buf in p_avail: ptr = buf buf = "" elif not buf and i == '[': # buf must be empty parse_params = False ptr = "" elif i != '(': buf += i elif not (par_open == 1 and i == ')'): buf += i if i == '(': par_open += 1 if par_open > 0 and i == ')': par_open -= 1 else: if i == ']': parse_params = True par_open = False ptr = "" buf = "" elif ptr and not par_open and i == '(': par_open = True elif par_open: if i == ')': try: opts[ translate[ptr] ] = buf except KeyError: pass par_open = False buf = "" else: buf += i elif i.isalpha(): ptr = i try: opts[ translate[ptr] ] = not i.isupper() except KeyError: pass return (ret, opts)
[ "def", "__searchParser", "(", "self", ",", "query", ")", ":", "ret", "=", "dict", "(", ")", "parse_params", "=", "True", "buf", "=", "\"\"", "ptr", "=", "\"\"", "p_avail", "=", "(", "\"channel\"", ",", "\"max\"", ",", "\"before\"", ",", "\"after\"", ",", "\"order\"", ")", "opts", "=", "dict", "(", ")", "par_open", "=", "False", "translate", "=", "{", "'a'", ":", "'audio'", ",", "'v'", ":", "'video'", ",", "'f'", ":", "'format'", ",", "'s'", ":", "'stream'", ",", "'P'", ":", "'stream'", ",", "'m'", ":", "'metadata'", ",", "'max'", ":", "'maxResults'", ",", "'channel'", ":", "'channelId'", ",", "'before'", ":", "'publishedBefore'", ",", "'after'", ":", "'publishedAfter'", ",", "'order'", ":", "'order'", ",", "''", ":", "'q'", "}", "for", "i", "in", "query", "+", "' '", ":", "if", "parse_params", ":", "if", "not", "par_open", ":", "if", "i", "==", "' '", ":", "# flush buf", "try", ":", "if", "ret", ".", "get", "(", "translate", "[", "ptr", "]", ")", ":", "ret", "[", "translate", "[", "ptr", "]", "]", "+=", "' '", "else", ":", "ret", "[", "translate", "[", "ptr", "]", "]", "=", "''", "ret", "[", "translate", "[", "ptr", "]", "]", "+=", "buf", "except", "KeyError", ":", "pass", "ptr", "=", "\"\"", "buf", "=", "\"\"", "elif", "i", "==", "':'", "and", "buf", "in", "p_avail", ":", "ptr", "=", "buf", "buf", "=", "\"\"", "elif", "not", "buf", "and", "i", "==", "'['", ":", "# buf must be empty", "parse_params", "=", "False", "ptr", "=", "\"\"", "elif", "i", "!=", "'('", ":", "buf", "+=", "i", "elif", "not", "(", "par_open", "==", "1", "and", "i", "==", "')'", ")", ":", "buf", "+=", "i", "if", "i", "==", "'('", ":", "par_open", "+=", "1", "if", "par_open", ">", "0", "and", "i", "==", "')'", ":", "par_open", "-=", "1", "else", ":", "if", "i", "==", "']'", ":", "parse_params", "=", "True", "par_open", "=", "False", "ptr", "=", "\"\"", "buf", "=", "\"\"", "elif", "ptr", "and", "not", "par_open", "and", "i", "==", "'('", ":", "par_open", "=", "True", "elif", "par_open", ":", "if", "i", "==", "')'", ":", "try", ":", "opts", "[", "translate", "[", "ptr", "]", "]", "=", "buf", "except", "KeyError", ":", "pass", "par_open", "=", "False", "buf", "=", "\"\"", "else", ":", "buf", "+=", "i", "elif", "i", ".", "isalpha", "(", ")", ":", "ptr", "=", "i", "try", ":", "opts", "[", "translate", "[", "ptr", "]", "]", "=", "not", "i", ".", "isupper", "(", ")", "except", "KeyError", ":", "pass", "return", "(", "ret", ",", "opts", ")" ]
Parse `query` for advanced search options. Parameters ---------- query : str Search query to parse. Besides a search query, the user can specify additional search parameters and YTFS-specific options. Syntax: Additional search parameters: ``option:value``. If `value` contains spaces, then surround it with parentheses; available parameters: `channel`, `max`, `before`, `after`, `order`. YTFS options: specify options between ``[`` and ``]``; available options: `a`, `v`, `f`, `P`, `s`, `m`. If an option takes a parameter, then specify it between parentheses. Examples: ``channel:foo search query``, ``my favourite music [a]``, ``channel:(the famous funny cats channel) [vf(240)P] funny cats max:20``. Invalid parameters/options are ignored. Returns ------- params : tuple Tuple: 0 - dictionary of url GET parameters; 1 - dictionary of YTStor options.
[ "Parse", "query", "for", "advanced", "search", "options", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/actions.py#L143-L274
-1
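Because ``__searchParser`` is private and its state machine is hard to read flattened, a hypothetical walk-through of the grammar may help. The expected outputs below are derived by hand from the ``translate`` table in the record above; exact whitespace handling may differ slightly.

```python
# "channel:(funny cats) kittens max:20 [a]"
#   search params -> {'channelId': 'funny cats', 'q': 'kittens', 'maxResults': '20'}
#   YTStor opts   -> {'audio': True}
#
# "[vf(240)P] skate"
#   search params -> {'q': 'skate'}
#   YTStor opts   -> {'video': True, 'format': '240', 'stream': False}
#   # 'P' is uppercase, so opts['stream'] = not 'P'.isupper() = False
```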
251,774
rasguanabana/ytfs
ytfs/actions.py
YTActions.__search
def __search(self, pt=""): """ Method responsible for searching using YouTube API. Parameters ---------- pt : str Token of search results page. If ``None``, then the first page is downloaded. Returns ------- results : dict Parsed JSON returned by YouTube API. """ if not self.search_params.get('q') and not self.search_params.get('channelId'): return {'items': []} # no valid query - no results. api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&fields=items(id%2Ckind%2Csnippet)%2CnextPageToken%2CprevPageToken&" d = {"key": self.api_key, "pageToken": pt} d.update(self.search_params) url = api_fixed_url + urlencode(d) try: get = requests.get(url) except requests.exceptions.ConnectionError: raise ConnectionError if get.status_code != 200: return {'items': []} # no valid query - no results. return get.json()
python
def __search(self, pt=""): """ Method responsible for searching using YouTube API. Parameters ---------- pt : str Token of search results page. If ``None``, then the first page is downloaded. Returns ------- results : dict Parsed JSON returned by YouTube API. """ if not self.search_params.get('q') and not self.search_params.get('channelId'): return {'items': []} # no valid query - no results. api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&fields=items(id%2Ckind%2Csnippet)%2CnextPageToken%2CprevPageToken&" d = {"key": self.api_key, "pageToken": pt} d.update(self.search_params) url = api_fixed_url + urlencode(d) try: get = requests.get(url) except requests.exceptions.ConnectionError: raise ConnectionError if get.status_code != 200: return {'items': []} # no valid query - no results. return get.json()
[ "def", "__search", "(", "self", ",", "pt", "=", "\"\"", ")", ":", "if", "not", "self", ".", "search_params", ".", "get", "(", "'q'", ")", "and", "not", "self", ".", "search_params", ".", "get", "(", "'channelId'", ")", ":", "return", "{", "'items'", ":", "[", "]", "}", "# no valid query - no results.", "api_fixed_url", "=", "\"https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&fields=items(id%2Ckind%2Csnippet)%2CnextPageToken%2CprevPageToken&\"", "d", "=", "{", "\"key\"", ":", "self", ".", "api_key", ",", "\"pageToken\"", ":", "pt", "}", "d", ".", "update", "(", "self", ".", "search_params", ")", "url", "=", "api_fixed_url", "+", "urlencode", "(", "d", ")", "try", ":", "get", "=", "requests", ".", "get", "(", "url", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "raise", "ConnectionError", "if", "get", ".", "status_code", "!=", "200", ":", "return", "{", "'items'", ":", "[", "]", "}", "# no valid query - no results.", "return", "get", ".", "json", "(", ")" ]
Method responsible for searching using the YouTube API. Parameters ---------- pt : str Token of the search results page. If empty, the first page is downloaded. Returns ------- results : dict Parsed JSON returned by the YouTube API.
[ "Method", "responsible", "for", "searching", "using", "YouTube", "API", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/actions.py#L276-L309
-1
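The request ``__search`` builds is just the fixed ``search`` endpoint URL plus the urlencoded union of the API key, the page token and the parsed search parameters. A minimal sketch with a placeholder key and invented query values:

```python
from urllib.parse import urlencode

api_fixed_url = ("https://www.googleapis.com/youtube/v3/search?part=snippet&type=video"
                 "&fields=items(id%2Ckind%2Csnippet)%2CnextPageToken%2CprevPageToken&")
d = {"key": "<API_KEY>", "pageToken": ""}          # empty token -> first page
d.update({"q": "funny cats", "maxResults": "20"})  # parsed search params win on conflict
url = api_fixed_url + urlencode(d)
# requests.get(url).json() would yield {'items': [...], 'nextPageToken': ...}
```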
251,775
rasguanabana/ytfs
ytfs/actions.py
YTActions.clean
def clean(self): """Clear the data. For each ``YTStor`` object present in this object ``clean`` method is executed.""" for s in self.visible_files.values(): s.clean() for s in [sub[1][x] for sub in self.avail_files.values() for x in sub[1]]: # Double list comprehensions aren't very s.clean()
python
def clean(self): """Clear the data. For each ``YTStor`` object present in this object ``clean`` method is executed.""" for s in self.visible_files.values(): s.clean() for s in [sub[1][x] for sub in self.avail_files.values() for x in sub[1]]: # Double list comprehensions aren't very s.clean()
[ "def", "clean", "(", "self", ")", ":", "for", "s", "in", "self", ".", "visible_files", ".", "values", "(", ")", ":", "s", ".", "clean", "(", ")", "for", "s", "in", "[", "sub", "[", "1", "]", "[", "x", "]", "for", "sub", "in", "self", ".", "avail_files", ".", "values", "(", ")", "for", "x", "in", "sub", "[", "1", "]", "]", ":", "# Double list comprehensions aren't very", "s", ".", "clean", "(", ")" ]
Clear the data. For each ``YTStor`` object present in this object, the ``clean`` method is executed.
[ "Clear", "the", "data", ".", "For", "each", "YTStor", "object", "present", "in", "this", "object", "clean", "method", "is", "executed", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/actions.py#L477-L484
-1
251,776
rasguanabana/ytfs
ytfs/stor.py
YTStor.obtainInfo
def obtainInfo(self): """ Method for obtaining information about the movie. """ try: info = self.ytdl.extract_info(self.yid, download=False) except youtube_dl.utils.DownloadError: raise ConnectionError if not self.preferences['stream']: self.url = (info['requested_formats'][0]['url'], info['requested_formats'][1]['url']) return True # else: for f in info['formats']: if 'filesize' not in f or not f['filesize']: f['filesize'] = float('inf') # next line won't fail, infinity, because unknown filesize is the least preferred # - for easy sorting - we'll get best quality and lowest filsize aud = {(-int(f['abr']), f['filesize'], f['url']) for f in info['formats'] if f.get('abr') and not f.get('height')} vid = {(-int(f['height']), f['filesize'], f['url']) for f in info['formats'] if not f.get('abr') and f.get('height')} full= {(-int(f['height']), f['filesize'], f['url']) for f in info['formats'] if f.get('abr') and f.get('height')} try: _f = int( self.preferences.get('format') ) # if valid format is present, then choose closes value _k = lambda x: abs(x[0] + _f) # +, because x[0] is negative except (ValueError, TypeError): _k = lambda d: d if self.preferences['audio'] and self.preferences['video']: fm = sorted(full, key=_k) elif self.preferences['audio']: fm = sorted(aud, key=_k) elif self.preferences['video']: fm = sorted(vid, key=_k) filesize = 0 i = -1 try: while filesize == 0: # some videos are problematic, we will try to find format with non-zero filesize i += 1 self.url = fm[i][2] if fm[i][1] == float('inf'): filesize = int(self.r_session.head(self.url).headers['content-length']) else: filesize = int(fm[i][1]) except IndexError: # finding filesize failed for every format self.url = (info['requested_formats'][0]['url'], info['requested_formats'][1]['url']) self.preferences['stream'] = False # hopefully non-stream download will work return True self.filesize = filesize return True
python
def obtainInfo(self): """ Method for obtaining information about the movie. """ try: info = self.ytdl.extract_info(self.yid, download=False) except youtube_dl.utils.DownloadError: raise ConnectionError if not self.preferences['stream']: self.url = (info['requested_formats'][0]['url'], info['requested_formats'][1]['url']) return True # else: for f in info['formats']: if 'filesize' not in f or not f['filesize']: f['filesize'] = float('inf') # next line won't fail, infinity, because unknown filesize is the least preferred # - for easy sorting - we'll get best quality and lowest filsize aud = {(-int(f['abr']), f['filesize'], f['url']) for f in info['formats'] if f.get('abr') and not f.get('height')} vid = {(-int(f['height']), f['filesize'], f['url']) for f in info['formats'] if not f.get('abr') and f.get('height')} full= {(-int(f['height']), f['filesize'], f['url']) for f in info['formats'] if f.get('abr') and f.get('height')} try: _f = int( self.preferences.get('format') ) # if valid format is present, then choose closes value _k = lambda x: abs(x[0] + _f) # +, because x[0] is negative except (ValueError, TypeError): _k = lambda d: d if self.preferences['audio'] and self.preferences['video']: fm = sorted(full, key=_k) elif self.preferences['audio']: fm = sorted(aud, key=_k) elif self.preferences['video']: fm = sorted(vid, key=_k) filesize = 0 i = -1 try: while filesize == 0: # some videos are problematic, we will try to find format with non-zero filesize i += 1 self.url = fm[i][2] if fm[i][1] == float('inf'): filesize = int(self.r_session.head(self.url).headers['content-length']) else: filesize = int(fm[i][1]) except IndexError: # finding filesize failed for every format self.url = (info['requested_formats'][0]['url'], info['requested_formats'][1]['url']) self.preferences['stream'] = False # hopefully non-stream download will work return True self.filesize = filesize return True
[ "def", "obtainInfo", "(", "self", ")", ":", "try", ":", "info", "=", "self", ".", "ytdl", ".", "extract_info", "(", "self", ".", "yid", ",", "download", "=", "False", ")", "except", "youtube_dl", ".", "utils", ".", "DownloadError", ":", "raise", "ConnectionError", "if", "not", "self", ".", "preferences", "[", "'stream'", "]", ":", "self", ".", "url", "=", "(", "info", "[", "'requested_formats'", "]", "[", "0", "]", "[", "'url'", "]", ",", "info", "[", "'requested_formats'", "]", "[", "1", "]", "[", "'url'", "]", ")", "return", "True", "# else:", "for", "f", "in", "info", "[", "'formats'", "]", ":", "if", "'filesize'", "not", "in", "f", "or", "not", "f", "[", "'filesize'", "]", ":", "f", "[", "'filesize'", "]", "=", "float", "(", "'inf'", ")", "# next line won't fail, infinity, because unknown filesize is the least preferred", "# - for easy sorting - we'll get best quality and lowest filsize", "aud", "=", "{", "(", "-", "int", "(", "f", "[", "'abr'", "]", ")", ",", "f", "[", "'filesize'", "]", ",", "f", "[", "'url'", "]", ")", "for", "f", "in", "info", "[", "'formats'", "]", "if", "f", ".", "get", "(", "'abr'", ")", "and", "not", "f", ".", "get", "(", "'height'", ")", "}", "vid", "=", "{", "(", "-", "int", "(", "f", "[", "'height'", "]", ")", ",", "f", "[", "'filesize'", "]", ",", "f", "[", "'url'", "]", ")", "for", "f", "in", "info", "[", "'formats'", "]", "if", "not", "f", ".", "get", "(", "'abr'", ")", "and", "f", ".", "get", "(", "'height'", ")", "}", "full", "=", "{", "(", "-", "int", "(", "f", "[", "'height'", "]", ")", ",", "f", "[", "'filesize'", "]", ",", "f", "[", "'url'", "]", ")", "for", "f", "in", "info", "[", "'formats'", "]", "if", "f", ".", "get", "(", "'abr'", ")", "and", "f", ".", "get", "(", "'height'", ")", "}", "try", ":", "_f", "=", "int", "(", "self", ".", "preferences", ".", "get", "(", "'format'", ")", ")", "# if valid format is present, then choose closes value", "_k", "=", "lambda", "x", ":", "abs", "(", "x", "[", "0", "]", "+", "_f", ")", "# +, because x[0] is negative", "except", "(", "ValueError", ",", "TypeError", ")", ":", "_k", "=", "lambda", "d", ":", "d", "if", "self", ".", "preferences", "[", "'audio'", "]", "and", "self", ".", "preferences", "[", "'video'", "]", ":", "fm", "=", "sorted", "(", "full", ",", "key", "=", "_k", ")", "elif", "self", ".", "preferences", "[", "'audio'", "]", ":", "fm", "=", "sorted", "(", "aud", ",", "key", "=", "_k", ")", "elif", "self", ".", "preferences", "[", "'video'", "]", ":", "fm", "=", "sorted", "(", "vid", ",", "key", "=", "_k", ")", "filesize", "=", "0", "i", "=", "-", "1", "try", ":", "while", "filesize", "==", "0", ":", "# some videos are problematic, we will try to find format with non-zero filesize", "i", "+=", "1", "self", ".", "url", "=", "fm", "[", "i", "]", "[", "2", "]", "if", "fm", "[", "i", "]", "[", "1", "]", "==", "float", "(", "'inf'", ")", ":", "filesize", "=", "int", "(", "self", ".", "r_session", ".", "head", "(", "self", ".", "url", ")", ".", "headers", "[", "'content-length'", "]", ")", "else", ":", "filesize", "=", "int", "(", "fm", "[", "i", "]", "[", "1", "]", ")", "except", "IndexError", ":", "# finding filesize failed for every format", "self", ".", "url", "=", "(", "info", "[", "'requested_formats'", "]", "[", "0", "]", "[", "'url'", "]", ",", "info", "[", "'requested_formats'", "]", "[", "1", "]", "[", "'url'", "]", ")", "self", ".", "preferences", "[", "'stream'", "]", "=", "False", "# hopefully non-stream download will work", "return", "True", "self", ".", "filesize", "=", "filesize", 
"return", "True" ]
Method for obtaining information about the movie.
[ "Method", "for", "obtaining", "information", "about", "the", "movie", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/stor.py#L205-L264
-1
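The format selection above leans on one trick worth spelling out: quality (``abr`` or ``height``) is stored negated, so plain ascending tuple sort yields "best quality first, then smallest file within a quality tier", and unknown sizes (set to ``inf``) sort last within their tier. A tiny self-contained check with invented values:

```python
formats = [
    (-720, 50_000_000, 'url-a'),
    (-360, float('inf'), 'url-b'),   # unknown size, lower quality
    (-720, 30_000_000, 'url-c'),
]
# Default tuple sort: highest quality first, smallest file within a tier.
assert sorted(formats)[0] == (-720, 30_000_000, 'url-c')
```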
251,777
rasguanabana/ytfs
ytfs/stor.py
YTStor.registerHandler
def registerHandler(self, fh): # Do I even need that? possible FIXME. """ Register new file descriptor. Parameters ---------- fh : int File descriptor. """ self.fds.add(fh) self.atime = int(time()) # update access time self.lock.acquire() try: if (0, self.filesize) not in self.avail and self.preferences['stream'] is False: Downloader.fetch(self, None, fh) # lock forces other threads to wait, so fetch will perform just once. except requests.exceptions.ConnectionError: raise ConnectionError finally: self.lock.release()
python
def registerHandler(self, fh): # Do I even need that? possible FIXME. """ Register new file descriptor. Parameters ---------- fh : int File descriptor. """ self.fds.add(fh) self.atime = int(time()) # update access time self.lock.acquire() try: if (0, self.filesize) not in self.avail and self.preferences['stream'] is False: Downloader.fetch(self, None, fh) # lock forces other threads to wait, so fetch will perform just once. except requests.exceptions.ConnectionError: raise ConnectionError finally: self.lock.release()
[ "def", "registerHandler", "(", "self", ",", "fh", ")", ":", "# Do I even need that? possible FIXME.", "self", ".", "fds", ".", "add", "(", "fh", ")", "self", ".", "atime", "=", "int", "(", "time", "(", ")", ")", "# update access time", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "if", "(", "0", ",", "self", ".", "filesize", ")", "not", "in", "self", ".", "avail", "and", "self", ".", "preferences", "[", "'stream'", "]", "is", "False", ":", "Downloader", ".", "fetch", "(", "self", ",", "None", ",", "fh", ")", "# lock forces other threads to wait, so fetch will perform just once.", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "raise", "ConnectionError", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
Register a new file descriptor. Parameters ---------- fh : int File descriptor.
[ "Register", "new", "file", "descriptor", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/stor.py#L266-L290
-1
251,778
rasguanabana/ytfs
ytfs/stor.py
YTStor.read
def read(self, offset, length, fh): """ Read data. Method returns data instantly, if they're avaialable and in ``self.safe_range``. Otherwise data is downloaded and then returned. Parameters ---------- offset : int Read offset length : int Length of data to read. fh : int File descriptor. """ current = (offset, offset + length) safe = [ current[0] - ( 8 * length ), current[1] + ( 16 * length ) ] if safe[0] < 0: safe[0] = 0 if safe[1] > self.filesize: safe[1] = self.filesize safe = tuple(safe) self.lock.acquire() try: dl = range_t({safe}) - self.avail for r in dl.toset(): try: Downloader.fetch(self, r, fh) # download is, let's say, atomic thanks to lock except requests.exceptions.ConnectionError: raise ConnectionError if self.disk + 1 < len(self.avail): self.data.rollover() self.disk += 1 finally: self.lock.release() self.data.seek(offset) return self.data.read(length)
python
def read(self, offset, length, fh): """ Read data. Method returns data instantly, if they're avaialable and in ``self.safe_range``. Otherwise data is downloaded and then returned. Parameters ---------- offset : int Read offset length : int Length of data to read. fh : int File descriptor. """ current = (offset, offset + length) safe = [ current[0] - ( 8 * length ), current[1] + ( 16 * length ) ] if safe[0] < 0: safe[0] = 0 if safe[1] > self.filesize: safe[1] = self.filesize safe = tuple(safe) self.lock.acquire() try: dl = range_t({safe}) - self.avail for r in dl.toset(): try: Downloader.fetch(self, r, fh) # download is, let's say, atomic thanks to lock except requests.exceptions.ConnectionError: raise ConnectionError if self.disk + 1 < len(self.avail): self.data.rollover() self.disk += 1 finally: self.lock.release() self.data.seek(offset) return self.data.read(length)
[ "def", "read", "(", "self", ",", "offset", ",", "length", ",", "fh", ")", ":", "current", "=", "(", "offset", ",", "offset", "+", "length", ")", "safe", "=", "[", "current", "[", "0", "]", "-", "(", "8", "*", "length", ")", ",", "current", "[", "1", "]", "+", "(", "16", "*", "length", ")", "]", "if", "safe", "[", "0", "]", "<", "0", ":", "safe", "[", "0", "]", "=", "0", "if", "safe", "[", "1", "]", ">", "self", ".", "filesize", ":", "safe", "[", "1", "]", "=", "self", ".", "filesize", "safe", "=", "tuple", "(", "safe", ")", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "dl", "=", "range_t", "(", "{", "safe", "}", ")", "-", "self", ".", "avail", "for", "r", "in", "dl", ".", "toset", "(", ")", ":", "try", ":", "Downloader", ".", "fetch", "(", "self", ",", "r", ",", "fh", ")", "# download is, let's say, atomic thanks to lock", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "raise", "ConnectionError", "if", "self", ".", "disk", "+", "1", "<", "len", "(", "self", ".", "avail", ")", ":", "self", ".", "data", ".", "rollover", "(", ")", "self", ".", "disk", "+=", "1", "finally", ":", "self", ".", "lock", ".", "release", "(", ")", "self", ".", "data", ".", "seek", "(", "offset", ")", "return", "self", ".", "data", ".", "read", "(", "length", ")" ]
Read data. Method returns data instantly if it is available and in ``self.safe_range``. Otherwise data is downloaded and then returned. Parameters ---------- offset : int Read offset length : int Length of data to read. fh : int File descriptor.
[ "Read", "data", ".", "Method", "returns", "data", "instantly", "if", "they", "re", "avaialable", "and", "in", "self", ".", "safe_range", ".", "Otherwise", "data", "is", "downloaded", "and", "then", "returned", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/stor.py#L292-L335
-1
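The prefetch window in ``read`` is easy to mis-read in flattened form: it extends the requested range by eight read-lengths backwards and sixteen forwards, clamped to the file boundaries. A standalone re-statement of just that arithmetic:

```python
def safe_window(offset, length, filesize):
    # (offset, offset + length) widened by 8*length back and 16*length forward.
    lo = max(0, offset - 8 * length)
    hi = min(filesize, (offset + length) + 16 * length)
    return lo, hi

assert safe_window(4096, 4096, 10**6) == (0, 73728)  # clamped at the start
```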
251,779
rasguanabana/ytfs
ytfs/stor.py
YTStor.clean
def clean(self): """ Clear data. Explicitly close ``self.data`` if object is unused. """ self.closing = True # schedule for closing. if not self.fds: self.data.close()
python
def clean(self): """ Clear data. Explicitly close ``self.data`` if object is unused. """ self.closing = True # schedule for closing. if not self.fds: self.data.close()
[ "def", "clean", "(", "self", ")", ":", "self", ".", "closing", "=", "True", "# schedule for closing.", "if", "not", "self", ".", "fds", ":", "self", ".", "data", ".", "close", "(", ")" ]
Clear data. Explicitly close ``self.data`` if the object is unused.
[ "Clear", "data", ".", "Explicitly", "close", "self", ".", "data", "if", "object", "is", "unused", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/stor.py#L337-L346
-1
251,780
rasguanabana/ytfs
ytfs/stor.py
YTStor.unregisterHandler
def unregisterHandler(self, fh): """ Unregister a file descriptor. Clean data, if such operation has been scheduled. Parameters ---------- fh : int File descriptor. """ try: self.fds.remove(fh) except KeyError: pass self.lock.acquire() try: self.data.rollover() # rollover data on close. finally: self.lock.release() if self.closing and not self.fds: self.data.close()
python
def unregisterHandler(self, fh): """ Unregister a file descriptor. Clean data, if such operation has been scheduled. Parameters ---------- fh : int File descriptor. """ try: self.fds.remove(fh) except KeyError: pass self.lock.acquire() try: self.data.rollover() # rollover data on close. finally: self.lock.release() if self.closing and not self.fds: self.data.close()
[ "def", "unregisterHandler", "(", "self", ",", "fh", ")", ":", "try", ":", "self", ".", "fds", ".", "remove", "(", "fh", ")", "except", "KeyError", ":", "pass", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "data", ".", "rollover", "(", ")", "# rollover data on close.", "finally", ":", "self", ".", "lock", ".", "release", "(", ")", "if", "self", ".", "closing", "and", "not", "self", ".", "fds", ":", "self", ".", "data", ".", "close", "(", ")" ]
Unregister a file descriptor. Clean up data if such an operation has been scheduled. Parameters ---------- fh : int File descriptor.
[ "Unregister", "a", "file", "descriptor", ".", "Clean", "data", "if", "such", "operation", "has", "been", "scheduled", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/stor.py#L348-L374
-1
251,781
rasguanabana/ytfs
ytfs/ytfs.py
fd_dict.push
def push(self, yts): """ Search for, add and return new file descriptor. Parameters ---------- yts : YTStor-obj or None ``YTStor`` object for which we want to allocate a descriptor or ``None``, if we allocate descriptor for a control file. Returns ------- k : int File descriptor. """ if not isinstance(yts, (YTStor, YTMetaStor, type(None))): raise TypeError("Expected YTStor object, YTMetaStor object or None.") k = 0 while k in self.keys(): k += 1 self[k] = yts return k
python
def push(self, yts): """ Search for, add and return new file descriptor. Parameters ---------- yts : YTStor-obj or None ``YTStor`` object for which we want to allocate a descriptor or ``None``, if we allocate descriptor for a control file. Returns ------- k : int File descriptor. """ if not isinstance(yts, (YTStor, YTMetaStor, type(None))): raise TypeError("Expected YTStor object, YTMetaStor object or None.") k = 0 while k in self.keys(): k += 1 self[k] = yts return k
[ "def", "push", "(", "self", ",", "yts", ")", ":", "if", "not", "isinstance", "(", "yts", ",", "(", "YTStor", ",", "YTMetaStor", ",", "type", "(", "None", ")", ")", ")", ":", "raise", "TypeError", "(", "\"Expected YTStor object, YTMetaStor object or None.\"", ")", "k", "=", "0", "while", "k", "in", "self", ".", "keys", "(", ")", ":", "k", "+=", "1", "self", "[", "k", "]", "=", "yts", "return", "k" ]
Search for, add and return a new file descriptor. Parameters ---------- yts : YTStor-obj or None ``YTStor`` object for which we want to allocate a descriptor, or ``None`` if we allocate a descriptor for a control file. Returns ------- k : int File descriptor.
[ "Search", "for", "add", "and", "return", "new", "file", "descriptor", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L70-L95
-1
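``fd_dict.push`` allocates the lowest free non-negative integer, so released descriptors are reused first, much like POSIX file descriptors. A minimal standalone re-implementation of just the allocation strategy (the type check on the stored object is omitted here):

```python
class FDTable(dict):
    def push(self, obj):
        k = 0
        while k in self:        # linear scan for the lowest free key
            k += 1
        self[k] = obj
        return k

t = FDTable()
assert t.push('a') == 0 and t.push('b') == 1
del t[0]
assert t.push('c') == 0         # the freed descriptor is reused first
```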
251,782
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.__pathToTuple
def __pathToTuple(self, path): """ Convert directory or file path to its tuple identifier. Parameters ---------- path : str Path to convert. It can look like /, /directory, /directory/ or /directory/filename. Returns ------- tup_id : tuple Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename` will be ``None``. Raises ------ YTFS.PathConvertError When invalid path is given. """ if not path or path.count('/') > 2: raise YTFS.PathConvertError("Bad path given") # empty or too deep path try: split = path.split('/') except (AttributeError, TypeError): raise TypeError("Path has to be string") #path is not a string if split[0]: raise YTFS.PathConvertError("Path needs to start with '/'") # path doesn't start with '/'. del split[0] try: if not split[-1]: split.pop() # given path ended with '/'. except IndexError: raise YTFS.PathConvertError("Bad path given") # at least one element in split should exist at the moment if len(split) > 2: raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2") # should happen due to first check, but ... try: d = split[0] except IndexError: d = None try: f = split[1] except IndexError: f = None if not d and f: raise YTFS.PathConvertError("Bad path given") # filename is present, but directory is not #sheeeeeeiiit return (d, f)
python
def __pathToTuple(self, path): """ Convert directory or file path to its tuple identifier. Parameters ---------- path : str Path to convert. It can look like /, /directory, /directory/ or /directory/filename. Returns ------- tup_id : tuple Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename` will be ``None``. Raises ------ YTFS.PathConvertError When invalid path is given. """ if not path or path.count('/') > 2: raise YTFS.PathConvertError("Bad path given") # empty or too deep path try: split = path.split('/') except (AttributeError, TypeError): raise TypeError("Path has to be string") #path is not a string if split[0]: raise YTFS.PathConvertError("Path needs to start with '/'") # path doesn't start with '/'. del split[0] try: if not split[-1]: split.pop() # given path ended with '/'. except IndexError: raise YTFS.PathConvertError("Bad path given") # at least one element in split should exist at the moment if len(split) > 2: raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2") # should happen due to first check, but ... try: d = split[0] except IndexError: d = None try: f = split[1] except IndexError: f = None if not d and f: raise YTFS.PathConvertError("Bad path given") # filename is present, but directory is not #sheeeeeeiiit return (d, f)
[ "def", "__pathToTuple", "(", "self", ",", "path", ")", ":", "if", "not", "path", "or", "path", ".", "count", "(", "'/'", ")", ">", "2", ":", "raise", "YTFS", ".", "PathConvertError", "(", "\"Bad path given\"", ")", "# empty or too deep path", "try", ":", "split", "=", "path", ".", "split", "(", "'/'", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "raise", "TypeError", "(", "\"Path has to be string\"", ")", "#path is not a string", "if", "split", "[", "0", "]", ":", "raise", "YTFS", ".", "PathConvertError", "(", "\"Path needs to start with '/'\"", ")", "# path doesn't start with '/'.", "del", "split", "[", "0", "]", "try", ":", "if", "not", "split", "[", "-", "1", "]", ":", "split", ".", "pop", "(", ")", "# given path ended with '/'.", "except", "IndexError", ":", "raise", "YTFS", ".", "PathConvertError", "(", "\"Bad path given\"", ")", "# at least one element in split should exist at the moment", "if", "len", "(", "split", ")", ">", "2", ":", "raise", "YTFS", ".", "PathConvertError", "(", "\"Path is too deep. Max allowed level is 2\"", ")", "# should happen due to first check, but ...", "try", ":", "d", "=", "split", "[", "0", "]", "except", "IndexError", ":", "d", "=", "None", "try", ":", "f", "=", "split", "[", "1", "]", "except", "IndexError", ":", "f", "=", "None", "if", "not", "d", "and", "f", ":", "raise", "YTFS", ".", "PathConvertError", "(", "\"Bad path given\"", ")", "# filename is present, but directory is not #sheeeeeeiiit", "return", "(", "d", ",", "f", ")" ]
Convert directory or file path to its tuple identifier. Parameters ---------- path : str Path to convert. It can look like /, /directory, /directory/ or /directory/filename. Returns ------- tup_id : tuple Two-element tuple identifier of a directory/file in (`directory`, `filename`) format. If the path leads to the main directory, then both fields of the tuple will be ``None``. If the path leads to a directory, then the `filename` field will be ``None``. Raises ------ YTFS.PathConvertError When an invalid path is given.
[ "Convert", "directory", "or", "file", "path", "to", "its", "tuple", "identifier", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L222-L277
-1
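The path grammar accepted by ``__pathToTuple`` is small enough to enumerate; the mapping below follows the rules in the record above (directory and file names are invented for illustration):

```python
# '/'                   -> (None, None)            main directory
# '/some search'        -> ('some search', None)   search directory
# '/some search/'       -> ('some search', None)   trailing slash allowed
# '/some search/a.mp4'  -> ('some search', 'a.mp4')
# 'no/leading/slash'    -> YTFS.PathConvertError   must start with '/'
# '/a/b/c'              -> YTFS.PathConvertError   deeper than two levels
```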
251,783
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.__exists
def __exists(self, p): """ Check if file of given path exists. Parameters ---------- p : str or tuple Path or tuple identifier. Returns ------- exists : bool ``True``, if file exists. Otherwise ``False``. """ try: p = self.__pathToTuple(p) except TypeError: pass return ((not p[0] and not p[1]) or (p[0] in self.searches and not p[1]) or (p[0] in self.searches and p[1] in self.searches[p[0]]))
python
def __exists(self, p): """ Check if file of given path exists. Parameters ---------- p : str or tuple Path or tuple identifier. Returns ------- exists : bool ``True``, if file exists. Otherwise ``False``. """ try: p = self.__pathToTuple(p) except TypeError: pass return ((not p[0] and not p[1]) or (p[0] in self.searches and not p[1]) or (p[0] in self.searches and p[1] in self.searches[p[0]]))
[ "def", "__exists", "(", "self", ",", "p", ")", ":", "try", ":", "p", "=", "self", ".", "__pathToTuple", "(", "p", ")", "except", "TypeError", ":", "pass", "return", "(", "(", "not", "p", "[", "0", "]", "and", "not", "p", "[", "1", "]", ")", "or", "(", "p", "[", "0", "]", "in", "self", ".", "searches", "and", "not", "p", "[", "1", "]", ")", "or", "(", "p", "[", "0", "]", "in", "self", ".", "searches", "and", "p", "[", "1", "]", "in", "self", ".", "searches", "[", "p", "[", "0", "]", "]", ")", ")" ]
Check if a file at the given path exists. Parameters ---------- p : str or tuple Path or tuple identifier. Returns ------- exists : bool ``True`` if the file exists, otherwise ``False``.
[ "Check", "if", "file", "of", "given", "path", "exists", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L279-L301
-1
251,784
rasguanabana/ytfs
ytfs/ytfs.py
YTFS._pathdec
def _pathdec(method): """ Decorator that replaces string `path` argument with its tuple identifier. Parameters ---------- method : function Function/method to decorate. Returns ------- mod : function Function/method after decarotion. """ @wraps(method) # functools.wraps makes docs autogeneration easy and proper for decorated functions. def mod(self, path, *args): try: return method(self, self.__pathToTuple(path), *args) except YTFS.PathConvertError: raise FuseOSError(errno.EINVAL) return mod
python
def _pathdec(method): """ Decorator that replaces string `path` argument with its tuple identifier. Parameters ---------- method : function Function/method to decorate. Returns ------- mod : function Function/method after decarotion. """ @wraps(method) # functools.wraps makes docs autogeneration easy and proper for decorated functions. def mod(self, path, *args): try: return method(self, self.__pathToTuple(path), *args) except YTFS.PathConvertError: raise FuseOSError(errno.EINVAL) return mod
[ "def", "_pathdec", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "# functools.wraps makes docs autogeneration easy and proper for decorated functions.", "def", "mod", "(", "self", ",", "path", ",", "*", "args", ")", ":", "try", ":", "return", "method", "(", "self", ",", "self", ".", "__pathToTuple", "(", "path", ")", ",", "*", "args", ")", "except", "YTFS", ".", "PathConvertError", ":", "raise", "FuseOSError", "(", "errno", ".", "EINVAL", ")", "return", "mod" ]
Decorator that replaces the string `path` argument with its tuple identifier. Parameters ---------- method : function Function/method to decorate. Returns ------- mod : function Function/method after decoration.
[ "Decorator", "that", "replaces", "string", "path", "argument", "with", "its", "tuple", "identifier", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L303-L328
-1
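The same decorator shape is reusable outside YTFS: convert an argument up front and map conversion failures to an errno-style error, with ``functools.wraps`` preserving the wrapped method's metadata for documentation tools. A generic, hypothetical variant (names invented):

```python
import errno
from functools import wraps

def converting(convert, error):
    """Decorate a method so its `path` argument is converted before the call."""
    def decorator(method):
        @wraps(method)                       # keep name/docstring of the original
        def mod(self, path, *args):
            try:
                converted = convert(path)
            except ValueError:
                raise error(errno.EINVAL)    # invalid path -> EINVAL, as in YTFS
            return method(self, converted, *args)
        return mod
    return decorator
```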
251,785
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.getattr
def getattr(self, tid, fh=None): """ File attributes. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. fh : int File descriptor. Unnecessary, therefore ignored. Returns ------- st : dict Dictionary that contains file attributes. See: ``man 2 stat``. """ if not self.__exists(tid): raise FuseOSError(errno.ENOENT) pt = self.PathType.get(tid) st = deepcopy(self.st) st['st_atime'] = int(time()) st['st_mtime'] = st['st_atime'] st['st_ctime'] = st['st_atime'] if pt is self.PathType.file: st['st_mode'] = stat.S_IFREG | 0o444 st['st_nlink'] = 1 st['st_size'] = self.searches[ tid[0] ][ tid[1] ].filesize st['st_ctime'] = self.searches[ tid[0] ][ tid[1] ].ctime st['st_mtime'] = st['st_ctime'] st['st_atime'] = self.searches[ tid[0] ][ tid[1] ].atime elif pt is self.PathType.ctrl: st['st_mode'] = stat.S_IFREG | 0o774 st['st_nlink'] = 1 st['st_size'] = len(self.__sh_script) elif pt is self.PathType.main: st['st_mode'] = stat.S_IFDIR | 0o774 st['st_blocks'] = math.ceil(st['st_size'] / st['st_blksize']) return st
python
def getattr(self, tid, fh=None): """ File attributes. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. fh : int File descriptor. Unnecessary, therefore ignored. Returns ------- st : dict Dictionary that contains file attributes. See: ``man 2 stat``. """ if not self.__exists(tid): raise FuseOSError(errno.ENOENT) pt = self.PathType.get(tid) st = deepcopy(self.st) st['st_atime'] = int(time()) st['st_mtime'] = st['st_atime'] st['st_ctime'] = st['st_atime'] if pt is self.PathType.file: st['st_mode'] = stat.S_IFREG | 0o444 st['st_nlink'] = 1 st['st_size'] = self.searches[ tid[0] ][ tid[1] ].filesize st['st_ctime'] = self.searches[ tid[0] ][ tid[1] ].ctime st['st_mtime'] = st['st_ctime'] st['st_atime'] = self.searches[ tid[0] ][ tid[1] ].atime elif pt is self.PathType.ctrl: st['st_mode'] = stat.S_IFREG | 0o774 st['st_nlink'] = 1 st['st_size'] = len(self.__sh_script) elif pt is self.PathType.main: st['st_mode'] = stat.S_IFDIR | 0o774 st['st_blocks'] = math.ceil(st['st_size'] / st['st_blksize']) return st
[ "def", "getattr", "(", "self", ",", "tid", ",", "fh", "=", "None", ")", ":", "if", "not", "self", ".", "__exists", "(", "tid", ")", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "pt", "=", "self", ".", "PathType", ".", "get", "(", "tid", ")", "st", "=", "deepcopy", "(", "self", ".", "st", ")", "st", "[", "'st_atime'", "]", "=", "int", "(", "time", "(", ")", ")", "st", "[", "'st_mtime'", "]", "=", "st", "[", "'st_atime'", "]", "st", "[", "'st_ctime'", "]", "=", "st", "[", "'st_atime'", "]", "if", "pt", "is", "self", ".", "PathType", ".", "file", ":", "st", "[", "'st_mode'", "]", "=", "stat", ".", "S_IFREG", "|", "0o444", "st", "[", "'st_nlink'", "]", "=", "1", "st", "[", "'st_size'", "]", "=", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", "[", "tid", "[", "1", "]", "]", ".", "filesize", "st", "[", "'st_ctime'", "]", "=", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", "[", "tid", "[", "1", "]", "]", ".", "ctime", "st", "[", "'st_mtime'", "]", "=", "st", "[", "'st_ctime'", "]", "st", "[", "'st_atime'", "]", "=", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", "[", "tid", "[", "1", "]", "]", ".", "atime", "elif", "pt", "is", "self", ".", "PathType", ".", "ctrl", ":", "st", "[", "'st_mode'", "]", "=", "stat", ".", "S_IFREG", "|", "0o774", "st", "[", "'st_nlink'", "]", "=", "1", "st", "[", "'st_size'", "]", "=", "len", "(", "self", ".", "__sh_script", ")", "elif", "pt", "is", "self", ".", "PathType", ".", "main", ":", "st", "[", "'st_mode'", "]", "=", "stat", ".", "S_IFDIR", "|", "0o774", "st", "[", "'st_blocks'", "]", "=", "math", ".", "ceil", "(", "st", "[", "'st_size'", "]", "/", "st", "[", "'st_blksize'", "]", ")", "return", "st" ]
File attributes. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. fh : int File descriptor. Unnecessary, therefore ignored. Returns ------- st : dict Dictionary that contains file attributes. See: ``man 2 stat``.
[ "File", "attributes", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L331-L380
-1
251,786
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.readdir
def readdir(self, tid, fh): """ Read directory contents. Lists visible elements of ``YTActions`` object. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. fh : int File descriptor. Ommited in the function body. Returns ------- list List of filenames, wich will be shown as directory content. """ ret = [] pt = self.PathType.get(tid) try: if pt is self.PathType.main: ret = list(self.searches) elif pt is self.PathType.subdir: ret = list(self.searches[tid[0]]) elif pt is self.PathType.file: raise FuseOSError(errno.ENOTDIR) else: raise FuseOSError(errno.ENOENT) except KeyError: raise FuseOSError(errno.ENOENT) return ['.', '..'] + ret
python
def readdir(self, tid, fh): """ Read directory contents. Lists visible elements of ``YTActions`` object. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. fh : int File descriptor. Omitted in the function body. Returns ------- list List of filenames, which will be shown as directory content. """ ret = [] pt = self.PathType.get(tid) try: if pt is self.PathType.main: ret = list(self.searches) elif pt is self.PathType.subdir: ret = list(self.searches[tid[0]]) elif pt is self.PathType.file: raise FuseOSError(errno.ENOTDIR) else: raise FuseOSError(errno.ENOENT) except KeyError: raise FuseOSError(errno.ENOENT) return ['.', '..'] + ret
[ "def", "readdir", "(", "self", ",", "tid", ",", "fh", ")", ":", "ret", "=", "[", "]", "pt", "=", "self", ".", "PathType", ".", "get", "(", "tid", ")", "try", ":", "if", "pt", "is", "self", ".", "PathType", ".", "main", ":", "ret", "=", "list", "(", "self", ".", "searches", ")", "elif", "pt", "is", "self", ".", "PathType", ".", "subdir", ":", "ret", "=", "list", "(", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", ")", "elif", "pt", "is", "self", ".", "PathType", ".", "file", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOTDIR", ")", "else", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "except", "KeyError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "return", "[", "'.'", ",", "'..'", "]", "+", "ret" ]
Read directory contents. Lists visible elements of ``YTActions`` object. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. fh : int File descriptor. Omitted in the function body. Returns ------- list List of filenames, which will be shown as directory content.
[ "Read", "directory", "contents", ".", "Lists", "visible", "elements", "of", "YTActions", "object", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L383-L419
-1
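The '.' and '..' entries are prepended by hand because FUSE does not supply them. A sketch of the same listing logic over a plain nested dict standing in for self.searches (names here are illustrative, not the module's API):

import errno

searches = {'cats': {'video1.mp4': object(), 'video2.mp4': object()}}

def readdir(search=None):
    # Top level lists the searches; one level down lists their results.
    if search is None:
        entries = list(searches)
    elif search in searches:
        entries = list(searches[search])
    else:
        raise OSError(errno.ENOENT, 'no such search')
    return ['.', '..'] + entries

print(readdir())          # ['.', '..', 'cats']
print(readdir('cats'))    # ['.', '..', 'video1.mp4', 'video2.mp4']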
251,787
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.mkdir
def mkdir(self, tid, mode): """ Directory creation. Search is performed. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. mode : int Ignored. """ pt = self.PathType.get(tid) if pt is self.PathType.invalid or pt is self.PathType.file: raise FuseOSError(errno.EPERM) if self.__exists(tid): raise FuseOSError(errno.EEXIST) try: dir_ent = YTActions(tid[0]) dir_ent.updateResults() except ConnectionError: raise FuseOSError(errno.ENETDOWN) self.searches[tid[0]] = dir_ent # now adding directory entry is legit, nothing failed. return 0
python
def mkdir(self, tid, mode): """ Directory creation. Search is performed. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. mode : int Ignored. """ pt = self.PathType.get(tid) if pt is self.PathType.invalid or pt is self.PathType.file: raise FuseOSError(errno.EPERM) if self.__exists(tid): raise FuseOSError(errno.EEXIST) try: dir_ent = YTActions(tid[0]) dir_ent.updateResults() except ConnectionError: raise FuseOSError(errno.ENETDOWN) self.searches[tid[0]] = dir_ent # now adding directory entry is legit, nothing failed. return 0
[ "def", "mkdir", "(", "self", ",", "tid", ",", "mode", ")", ":", "pt", "=", "self", ".", "PathType", ".", "get", "(", "tid", ")", "if", "pt", "is", "self", ".", "PathType", ".", "invalid", "or", "pt", "is", "self", ".", "PathType", ".", "file", ":", "raise", "FuseOSError", "(", "errno", ".", "EPERM", ")", "if", "self", ".", "__exists", "(", "tid", ")", ":", "raise", "FuseOSError", "(", "errno", ".", "EEXIST", ")", "try", ":", "dir_ent", "=", "YTActions", "(", "tid", "[", "0", "]", ")", "dir_ent", ".", "updateResults", "(", ")", "except", "ConnectionError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENETDOWN", ")", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", "=", "dir_ent", "# now adding directory entry is legit, nothing failed.", "return", "0" ]
Directory creation. Search is performed. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. mode : int Ignored.
[ "Directory", "creation", ".", "Search", "is", "performed", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L422-L451
-1
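Creating a directory is performing a search, and the entry is committed only after the query succeeds. A self-contained sketch of that order of operations, with run_search as a hypothetical stand-in for YTActions plus updateResults:

import errno

searches = {}

def run_search(query):
    # Stub standing in for the actual YouTube query.
    return {f'{query} result {i}.mp4': None for i in range(3)}

def mkdir(name):
    if name in searches:
        raise OSError(errno.EEXIST, 'search already exists')
    results = run_search(name)   # may raise on network failure
    searches[name] = results     # committed only once nothing failed
    return 0

mkdir('cats')
print(sorted(searches['cats']))
# ['cats result 0.mp4', 'cats result 1.mp4', 'cats result 2.mp4']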
251,788
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.rename
def rename(self, old, new): """ Directory renaming support. Needed because many file managers create directories with default names, which makes it impossible to perform a search without CLI. Name changes for files are not allowed, only for directories. Parameters ---------- old : str Old name. Converted to tuple identifier by ``_pathdec`` decorator. new : str New name. Converted to tuple identifier in actual function body. """ new = self.__pathToTuple(new) if not self.__exists(old): raise FuseOSError(errno.ENOENT) if self.PathType.get(old) is not self.PathType.subdir or self.PathType.get(new) is not self.PathType.subdir: raise FuseOSError(errno.EPERM) if self.__exists(new): raise FuseOSError(errno.EEXIST) try: new_dir_ent = YTActions(new[0]) new_dir_ent.updateResults() except ConnectionError: raise FuseOSError(errno.ENETDOWN) self.searches[new[0]] = new_dir_ent # as in mkdir try: del self.searches[old[0]] except KeyError: raise FuseOSError(errno.ENOENT) return 0
python
def rename(self, old, new): """ Directory renaming support. Needed because many file managers create directories with default names, which makes it impossible to perform a search without CLI. Name changes for files are not allowed, only for directories. Parameters ---------- old : str Old name. Converted to tuple identifier by ``_pathdec`` decorator. new : str New name. Converted to tuple identifier in actual function body. """ new = self.__pathToTuple(new) if not self.__exists(old): raise FuseOSError(errno.ENOENT) if self.PathType.get(old) is not self.PathType.subdir or self.PathType.get(new) is not self.PathType.subdir: raise FuseOSError(errno.EPERM) if self.__exists(new): raise FuseOSError(errno.EEXIST) try: new_dir_ent = YTActions(new[0]) new_dir_ent.updateResults() except ConnectionError: raise FuseOSError(errno.ENETDOWN) self.searches[new[0]] = new_dir_ent # as in mkdir try: del self.searches[old[0]] except KeyError: raise FuseOSError(errno.ENOENT) return 0
[ "def", "rename", "(", "self", ",", "old", ",", "new", ")", ":", "new", "=", "self", ".", "__pathToTuple", "(", "new", ")", "if", "not", "self", ".", "__exists", "(", "old", ")", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "if", "self", ".", "PathType", ".", "get", "(", "old", ")", "is", "not", "self", ".", "PathType", ".", "subdir", "or", "self", ".", "PathType", ".", "get", "(", "new", ")", "is", "not", "self", ".", "PathType", ".", "subdir", ":", "raise", "FuseOSError", "(", "errno", ".", "EPERM", ")", "if", "self", ".", "__exists", "(", "new", ")", ":", "raise", "FuseOSError", "(", "errno", ".", "EEXIST", ")", "try", ":", "new_dir_ent", "=", "YTActions", "(", "new", "[", "0", "]", ")", "new_dir_ent", ".", "updateResults", "(", ")", "except", "ConnectionError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENETDOWN", ")", "self", ".", "searches", "[", "new", "[", "0", "]", "]", "=", "new_dir_ent", "# as in mkdir", "try", ":", "del", "self", ".", "searches", "[", "old", "[", "0", "]", "]", "except", "KeyError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "return", "0" ]
Directory renaming support. Needed because many file managers create directories with default names, which makes it impossible to perform a search without CLI. Name changes for files are not allowed, only for directories. Parameters ---------- old : str Old name. Converted to tuple identifier by ``_pathdec`` decorator. new : str New name. Converted to tuple identifier in actual function body.
[ "Directory", "renaming", "support", ".", "Needed", "because", "many", "file", "managers", "create", "directories", "with", "default", "names", "wich", "makes", "it", "impossible", "to", "perform", "a", "search", "without", "CLI", ".", "Name", "changes", "for", "files", "are", "not", "allowed", "only", "for", "directories", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L454-L494
-1
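The rename is deliberately "commit the new search, then delete the old one": if the new query raises, the old directory stays intact. A sketch of that ordering (run_search again a hypothetical stand-in for YTActions):

import errno

searches = {'cats': {'cat video.mp4': None}}

def run_search(query):
    return {f'{query} result.mp4': None}

def rename(old, new):
    if old not in searches:
        raise OSError(errno.ENOENT, 'no such search')
    if new in searches:
        raise OSError(errno.EEXIST, 'target exists')
    # Commit first: a network failure here leaves the old entry untouched.
    searches[new] = run_search(new)
    del searches[old]
    return 0

rename('cats', 'dogs')
print(sorted(searches))   # ['dogs']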
251,789
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.rmdir
def rmdir(self, tid): """ Directory removal. ``YTActions`` object under `tid` is told to clean all data, and then it is deleted. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. """ pt = self.PathType.get(tid) if pt is self.PathType.main: raise FuseOSError(errno.EINVAL) elif pt is not self.PathType.subdir: raise FuseOSError(errno.ENOTDIR) try: self.searches[tid[0]].clean() del self.searches[tid[0]] except KeyError: raise FuseOSError(errno.ENOENT) return 0
python
def rmdir(self, tid): """ Directory removal. ``YTActions`` object under `tid` is told to clean all data, and then it is deleted. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. """ pt = self.PathType.get(tid) if pt is self.PathType.main: raise FuseOSError(errno.EINVAL) elif pt is not self.PathType.subdir: raise FuseOSError(errno.ENOTDIR) try: self.searches[tid[0]].clean() del self.searches[tid[0]] except KeyError: raise FuseOSError(errno.ENOENT) return 0
[ "def", "rmdir", "(", "self", ",", "tid", ")", ":", "pt", "=", "self", ".", "PathType", ".", "get", "(", "tid", ")", "if", "pt", "is", "self", ".", "PathType", ".", "main", ":", "raise", "FuseOSError", "(", "errno", ".", "EINVAL", ")", "elif", "pt", "is", "not", "self", ".", "PathType", ".", "subdir", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOTDIR", ")", "try", ":", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", ".", "clean", "(", ")", "del", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", "except", "KeyError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "return", "0" ]
Directory removal. ``YTActions`` object under `tid` is told to clean all data, and then it is deleted. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator.
[ "Directory", "removal", ".", "YTActions", "object", "under", "tid", "is", "told", "to", "clean", "all", "data", "and", "then", "it", "is", "deleted", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L497-L522
-1
251,790
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.open
def open(self, tid, flags): """ File open. ``YTStor`` object associated with this file is initialised and written to ``self.fds``. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. flags : int File open mode. Read-only access is allowed. Returns ------- int New file descriptor """ pt = self.PathType.get(tid) if pt is not self.PathType.file and pt is not self.PathType.ctrl: raise FuseOSError(errno.EINVAL) if pt is not self.PathType.ctrl and (flags & os.O_WRONLY or flags & os.O_RDWR): raise FuseOSError(errno.EPERM) if not self.__exists(tid): raise FuseOSError(errno.ENOENT) try: yts = self.searches[tid[0]][tid[1]] except KeyError: return self.fds.push(None) # for control file no association is needed. try: obI = yts.obtainInfo() # network may fail except ConnectionError: raise FuseOSError(errno.ENETDOWN) if obI: fh = self.fds.push(yts) try: yts.registerHandler(fh) except ConnectionError: raise FuseOSError(errno.ENETDOWN) return fh else: raise FuseOSError(errno.EINVAL)
python
def open(self, tid, flags): """ File open. ``YTStor`` object associated with this file is initialised and written to ``self.fds``. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. flags : int File open mode. Read-only access is allowed. Returns ------- int New file descriptor """ pt = self.PathType.get(tid) if pt is not self.PathType.file and pt is not self.PathType.ctrl: raise FuseOSError(errno.EINVAL) if pt is not self.PathType.ctrl and (flags & os.O_WRONLY or flags & os.O_RDWR): raise FuseOSError(errno.EPERM) if not self.__exists(tid): raise FuseOSError(errno.ENOENT) try: yts = self.searches[tid[0]][tid[1]] except KeyError: return self.fds.push(None) # for control file no association is needed. try: obI = yts.obtainInfo() # network may fail except ConnectionError: raise FuseOSError(errno.ENETDOWN) if obI: fh = self.fds.push(yts) try: yts.registerHandler(fh) except ConnectionError: raise FuseOSError(errno.ENETDOWN) return fh else: raise FuseOSError(errno.EINVAL)
[ "def", "open", "(", "self", ",", "tid", ",", "flags", ")", ":", "pt", "=", "self", ".", "PathType", ".", "get", "(", "tid", ")", "if", "pt", "is", "not", "self", ".", "PathType", ".", "file", "and", "pt", "is", "not", "self", ".", "PathType", ".", "ctrl", ":", "raise", "FuseOSError", "(", "errno", ".", "EINVAL", ")", "if", "pt", "is", "not", "self", ".", "PathType", ".", "ctrl", "and", "(", "flags", "&", "os", ".", "O_WRONLY", "or", "flags", "&", "os", ".", "O_RDWR", ")", ":", "raise", "FuseOSError", "(", "errno", ".", "EPERM", ")", "if", "not", "self", ".", "__exists", "(", "tid", ")", ":", "raise", "FuseOSError", "(", "errno", ".", "ENOENT", ")", "try", ":", "yts", "=", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", "[", "tid", "[", "1", "]", "]", "except", "KeyError", ":", "return", "self", ".", "fds", ".", "push", "(", "None", ")", "# for control file no association is needed.", "try", ":", "obI", "=", "yts", ".", "obtainInfo", "(", ")", "# network may fail", "except", "ConnectionError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENETDOWN", ")", "if", "obI", ":", "fh", "=", "self", ".", "fds", ".", "push", "(", "yts", ")", "try", ":", "yts", ".", "registerHandler", "(", "fh", ")", "except", "ConnectionError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENETDOWN", ")", "return", "fh", "else", ":", "raise", "FuseOSError", "(", "errno", ".", "EINVAL", ")" ]
File open. ``YTStor`` object associated with this file is initialised and written to ``self.fds``. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. flags : int File open mode. Read-only access is allowed. Returns ------- int New file descriptor
[ "File", "open", ".", "YTStor", "object", "associated", "with", "this", "file", "is", "initialised", "and", "written", "to", "self", ".", "fds", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L539-L589
-1
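self.fds acts as a per-mount descriptor table: push stores an object and hands back a small integer, and control files are stored as None because they need no backing YTStor. An illustrative table with lowest-free-descriptor reuse (not the module's actual fds class):

class FdTable:
    """Hand out the lowest free integer descriptor, like a kernel fd table."""

    def __init__(self):
        self._fds = {}

    def push(self, obj):
        fd = 0
        while fd in self._fds:     # find the lowest unused descriptor
            fd += 1
        self._fds[fd] = obj
        return fd

    def __getitem__(self, fd):
        return self._fds[fd]

    def __delitem__(self, fd):
        del self._fds[fd]

fds = FdTable()
a = fds.push('stor-A')     # 0
b = fds.push(None)         # 1 (control file: no backing object)
del fds[a]
print(fds.push('stor-B'))  # 0 (the freed slot is reused)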
251,791
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.write
def write(self, tid, data, offset, fh): """ Write operation. Applicable only for control files - updateResults is called. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. data : bytes Ignored. offset : int Ignored. fh : int File descriptor. Returns ------- int Length of data written. """ if tid[1] == " next": d = True elif tid[1] == " prev": d = False else: raise FuseOSError(errno.EPERM) try: self.searches[tid[0]].updateResults(d) except KeyError: raise FuseOSError(errno.EINVAL) # something went wrong... except ConnectionError: raise FuseOSError(errno.ENETDOWN) return len(data)
python
def write(self, tid, data, offset, fh): """ Write operation. Applicable only for control files - updateResults is called. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. data : bytes Ignored. offset : int Ignored. fh : int File descriptor. Returns ------- int Length of data written. """ if tid[1] == " next": d = True elif tid[1] == " prev": d = False else: raise FuseOSError(errno.EPERM) try: self.searches[tid[0]].updateResults(d) except KeyError: raise FuseOSError(errno.EINVAL) # something went wrong... except ConnectionError: raise FuseOSError(errno.ENETDOWN) return len(data)
[ "def", "write", "(", "self", ",", "tid", ",", "data", ",", "offset", ",", "fh", ")", ":", "if", "tid", "[", "1", "]", "==", "\" next\"", ":", "d", "=", "True", "elif", "tid", "[", "1", "]", "==", "\" prev\"", ":", "d", "=", "False", "else", ":", "raise", "FuseOSError", "(", "errno", ".", "EPERM", ")", "try", ":", "self", ".", "searches", "[", "tid", "[", "0", "]", "]", ".", "updateResults", "(", "d", ")", "except", "KeyError", ":", "raise", "FuseOSError", "(", "errno", ".", "EINVAL", ")", "# sth went wrong...", "except", "ConnectionError", ":", "raise", "FuseOSError", "(", "errno", ".", "ENETDOWN", ")", "return", "len", "(", "data", ")" ]
Write operation. Applicable only for control files - updateResults is called. Parameters ---------- tid : str Path to file. Original `path` argument is converted to tuple identifier by ``_pathdec`` decorator. data : bytes Ignored. offset : int Ignored. fh : int File descriptor. Returns ------- int Length of data written.
[ "Write", "operation", ".", "Applicable", "only", "for", "control", "files", "-", "updateResults", "is", "called", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L633-L669
-1
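The written bytes are discarded; a write to one of the two reserved control names merely flips pagination, and the leading space in " next"/" prev" keeps them from colliding with movie filenames. A sketch of the dispatch:

import errno

def control_write(filename, data, paginate):
    # paginate(True) fetches the next page of results, paginate(False)
    # the previous one; the payload itself is ignored.
    if filename == ' next':
        paginate(True)
    elif filename == ' prev':
        paginate(False)
    else:
        raise OSError(errno.EPERM, 'not a control file')
    return len(data)   # report everything as written

pages = []
control_write(' next', b'x', pages.append)
control_write(' prev', b'x', pages.append)
print(pages)   # [True, False]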
251,792
rasguanabana/ytfs
ytfs/ytfs.py
YTFS.release
def release(self, tid, fh): """ Close file. Descriptor is removed from ``self.fds``. Parameters ---------- tid : str Path to file. Ignored. fh : int File descriptor to release. """ try: try: self.fds[fh].unregisterHandler(fh) except AttributeError: pass del self.fds[fh] except KeyError: raise FuseOSError(errno.EBADF) return 0
python
def release(self, tid, fh): """ Close file. Descriptor is removed from ``self.fds``. Parameters ---------- tid : str Path to file. Ignored. fh : int File descriptor to release. """ try: try: self.fds[fh].unregisterHandler(fh) except AttributeError: pass del self.fds[fh] except KeyError: raise FuseOSError(errno.EBADF) return 0
[ "def", "release", "(", "self", ",", "tid", ",", "fh", ")", ":", "try", ":", "try", ":", "self", ".", "fds", "[", "fh", "]", ".", "unregisterHandler", "(", "fh", ")", "except", "AttributeError", ":", "pass", "del", "self", ".", "fds", "[", "fh", "]", "except", "KeyError", ":", "raise", "FuseOSError", "(", "errno", ".", "EBADF", ")", "return", "0" ]
Close file. Descriptor is removed from ``self.fds``. Parameters ---------- tid : str Path to file. Ignored. fh : int File descriptor to release.
[ "Close", "file", ".", "Descriptor", "is", "removed", "from", "self", ".", "fds", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/ytfs.py#L672-L697
-1
251,793
rasguanabana/ytfs
ytfs/range_t.py
range_t.__match_l
def __match_l(self, k, _set): """ Method for searching subranges from `_set` that overlap the `k` range. Parameters ---------- k : tuple or list or range Range for which we search overlapping subranges from `_set`. _set : set Subranges set. Returns ------- matched : set Set of subranges from `_set` that overlap `k`. """ return {r for r in _set if k[0] in range(*r) or k[1] in range(*r) or (k[0] < r[0] and k[1] >= r[1])}
python
def __match_l(self, k, _set): """ Method for searching subranges from `_set` that overlap the `k` range. Parameters ---------- k : tuple or list or range Range for which we search overlapping subranges from `_set`. _set : set Subranges set. Returns ------- matched : set Set of subranges from `_set` that overlap `k`. """ return {r for r in _set if k[0] in range(*r) or k[1] in range(*r) or (k[0] < r[0] and k[1] >= r[1])}
[ "def", "__match_l", "(", "self", ",", "k", ",", "_set", ")", ":", "return", "{", "r", "for", "r", "in", "_set", "if", "k", "[", "0", "]", "in", "range", "(", "*", "r", ")", "or", "k", "[", "1", "]", "in", "range", "(", "*", "r", ")", "or", "(", "k", "[", "0", "]", "<", "r", "[", "0", "]", "and", "k", "[", "1", "]", ">=", "r", "[", "1", "]", ")", "}" ]
Method for searching subranges from `_set` that overlap the `k` range. Parameters ---------- k : tuple or list or range Range for which we search overlapping subranges from `_set`. _set : set Subranges set. Returns ------- matched : set Set of subranges from `_set` that overlap `k`.
[ "Method", "for", "searching", "subranges", "from", "_set", "that", "overlap", "on", "k", "range", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/range_t.py#L39-L57
-1
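The set comprehension encodes three cases: k starts inside r, k ends inside r, or k covers r entirely. Because the pairs are half-open, a subrange beginning exactly at k's right endpoint is matched as well. The predicate pulled out for inspection:

def match_l(k, subranges):
    # r = (start, stop) is picked up when k starts inside it, ends inside
    # it, or completely covers it.
    return {r for r in subranges
            if k[0] in range(*r) or k[1] in range(*r)
            or (k[0] < r[0] and k[1] >= r[1])}

has = {(0, 5), (10, 15), (20, 25)}
print(sorted(match_l((3, 12), has)))   # [(0, 5), (10, 15)]   partial overlaps
print(sorted(match_l((9, 30), has)))   # [(10, 15), (20, 25)] both swallowed
print(sorted(match_l((3, 10), has)))   # [(0, 5), (10, 15)]   (10, 15) touches k's end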
251,794
rasguanabana/ytfs
ytfs/range_t.py
range_t.contains
def contains(self, val): """ Check if given value or range is present. Parameters ---------- val : int or tuple or list or range Range or integer being checked. Returns ------- retlen : int Total length of the subranges that overlap `val`. """ (start, end) = self.__val_convert(val) # conversion retlen = 0 for r in self.__has: if start < r[1] and end > r[0]: retlen += ((end < r[1] and end) or r[1]) - ((start > r[0] and start) or r[0]) return retlen
python
def contains(self, val): """ Check if given value or range is present. Parameters ---------- val : int or tuple or list or range Range or integer being checked. Returns ------- retlen : int Total length of the subranges that overlap `val`. """ (start, end) = self.__val_convert(val) # conversion retlen = 0 for r in self.__has: if start < r[1] and end > r[0]: retlen += ((end < r[1] and end) or r[1]) - ((start > r[0] and start) or r[0]) return retlen
[ "def", "contains", "(", "self", ",", "val", ")", ":", "(", "start", ",", "end", ")", "=", "self", ".", "__val_convert", "(", "val", ")", "# conversion", "retlen", "=", "0", "for", "r", "in", "self", ".", "__has", ":", "if", "start", "<", "r", "[", "1", "]", "and", "end", ">", "r", "[", "0", "]", ":", "retlen", "+=", "(", "(", "end", "<", "r", "[", "1", "]", "and", "end", ")", "or", "r", "[", "1", "]", ")", "-", "(", "(", "start", ">", "r", "[", "0", "]", "and", "start", ")", "or", "r", "[", "0", "]", ")", "return", "retlen" ]
Check if given value or range is present. Parameters ---------- val : int or tuple or list or range Range or integer being checked. Returns ------- retlen : int Total length of the subranges that overlap `val`.
[ "Check", "if", "given", "value", "or", "range", "is", "present", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/range_t.py#L118-L141
-1
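The `(cond and x) or y` pairs are the pre-conditional-expression idiom for min and max, so the loop sums the length of each subrange's intersection with the half-open query. The same computation restated with min/max:

def contains_len(start, end, subranges):
    # Sum, over every stored half-open subrange, the length of its
    # intersection with [start, end).
    total = 0
    for r0, r1 in subranges:
        if start < r1 and end > r0:   # any overlap at all
            total += min(end, r1) - max(start, r0)
    return total

has = {(0, 5), (10, 15)}
print(contains_len(3, 12, has))   # 4: [3, 5) gives 2 and [10, 12) gives 2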
251,795
rasguanabana/ytfs
ytfs/range_t.py
range_t.__add
def __add(self, val): """ Helper method for range addition. It is allowed to add only one compact subrange or ``range_t`` object at once. Parameters ---------- val : int or tuple or list or range Integer or range to add. Returns ------- __has : set ``self.__has`` extended by `val`. """ if not isinstance(val, range_t): # sanitize it val = {self.__val_convert(val)} # convert to a set for uniform handling. else: val = val.toset() __has = deepcopy(self.__has) # simply add to a set. __has.update(val) return __has
python
def __add(self, val): """ Helper method for range addition. It is allowed to add only one compact subrange or ``range_t`` object at once. Parameters ---------- val : int or tuple or list or range Integer or range to add. Returns ------- __has : set ``self.__has`` extended by `val`. """ if not isinstance(val, range_t): # sanitize it val = {self.__val_convert(val)} # convert to a set for uniform handling. else: val = val.toset() __has = deepcopy(self.__has) # simply add to a set. __has.update(val) return __has
[ "def", "__add", "(", "self", ",", "val", ")", ":", "if", "not", "isinstance", "(", "val", ",", "range_t", ")", ":", "#sanitize it", "val", "=", "{", "self", ".", "__val_convert", "(", "val", ")", "}", "# convert to a set, coz I like it that way.", "else", ":", "val", "=", "val", ".", "toset", "(", ")", "__has", "=", "deepcopy", "(", "self", ".", "__has", ")", "# simply add to a set.", "__has", ".", "update", "(", "val", ")", "return", "__has" ]
Helper method for range addition. It is allowed to add only one compact subrange or ``range_t`` object at once. Parameters ---------- val : int or tuple or list or range Integer or range to add. Returns ------- __has : set ``self.__has`` extended by `val`.
[ "Helper", "method", "for", "range", "addition", ".", "It", "is", "allowed", "to", "add", "only", "one", "compact", "subrange", "or", "range_t", "object", "at", "once", "." ]
67dd9536a1faea09c8394f697529124f78e77cfa
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/range_t.py#L195-L221
-1
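__add only unions raw tuples into a copy of the set; overlapping pieces are allowed to coexist, and merging them is left to the rest of the class. A sketch showing why a follow-up coalescing pass is needed (the merge logic here is illustrative, not the class's own):

from copy import deepcopy

def raw_add(has, val):
    out = deepcopy(has)       # union only; overlaps survive for now
    out.update({val})
    return out

def coalesce(has):
    # Merge touching or overlapping half-open subranges.
    merged = []
    for start, stop in sorted(has):
        if merged and start <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], stop))
        else:
            merged.append((start, stop))
    return set(merged)

h = raw_add({(0, 5)}, (3, 10))
print(sorted(h))              # [(0, 5), (3, 10)]  still two raw pieces
print(sorted(coalesce(h)))    # [(0, 10)]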
251,796
proycon/pynlpl
pynlpl/evaluation.py
AbstractExperiment.done
def done(self, warn=True): """Is the subprocess done?""" if not self.process: raise Exception("Not implemented yet or process not started yet, make sure to overload the done() method in your Experiment class") self.process.poll() if self.process.returncode is None: return False elif self.process.returncode > 0: raise ProcessFailed() else: self.endtime = datetime.datetime.now() return True
python
def done(self, warn=True): """Is the subprocess done?""" if not self.process: raise Exception("Not implemented yet or process not started yet, make sure to overload the done() method in your Experiment class") self.process.poll() if self.process.returncode is None: return False elif self.process.returncode > 0: raise ProcessFailed() else: self.endtime = datetime.datetime.now() return True
[ "def", "done", "(", "self", ",", "warn", "=", "True", ")", ":", "if", "not", "self", ".", "process", ":", "raise", "Exception", "(", "\"Not implemented yet or process not started yet, make sure to overload the done() method in your Experiment class\"", ")", "self", ".", "process", ".", "poll", "(", ")", "if", "self", ".", "process", ".", "returncode", "==", "None", ":", "return", "False", "elif", "self", ".", "process", ".", "returncode", ">", "0", ":", "raise", "ProcessFailed", "(", ")", "else", ":", "self", ".", "endtime", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "return", "True" ]
Is the subprocess done?
[ "Is", "the", "subprocess", "done?" ]
7707f69a91caaa6cde037f0d0379f1d42500a68b
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/evaluation.py#L453-L464
-1
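poll() is the non-blocking probe: it returns immediately and only fills in returncode once the child has exited, so done() can be called repeatedly from a scheduler loop. A self-contained version of the same pattern (POSIX sleep used only as a stand-in for a long-running experiment):

import datetime
import subprocess
import time

class ProcessFailed(Exception):
    pass

def done(process):
    process.poll()                 # non-blocking status refresh
    if process.returncode is None:
        return False               # still running
    if process.returncode > 0:
        raise ProcessFailed()      # non-zero exit is an error
    return True                    # clean exit (negative signal codes fall through, as above)

proc = subprocess.Popen(['sleep', '1'])
while not done(proc):
    time.sleep(0.1)                # crude polling loop
print('finished at', datetime.datetime.now())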
251,797
proycon/pynlpl
pynlpl/formats/giza.py
GizaSentenceAlignment.getalignedtarget
def getalignedtarget(self, index): """Returns target range only if source index aligns to a single consecutive range of target tokens.""" targetindices = [] target = None foundindex = -1 for sourceindex, targetindex in self.alignment: if sourceindex == index: targetindices.append(targetindex) if len(targetindices) > 1: for i in range(1,len(targetindices)): if abs(targetindices[i] - targetindices[i-1]) != 1: break # not consecutive foundindex = (min(targetindices), max(targetindices)) target = ' '.join(self.target[min(targetindices):max(targetindices)+1]) elif targetindices: foundindex = targetindices[0] target = self.target[foundindex] return target, foundindex
python
def getalignedtarget(self, index): """Returns target range only if source index aligns to a single consecutive range of target tokens.""" targetindices = [] target = None foundindex = -1 for sourceindex, targetindex in self.alignment: if sourceindex == index: targetindices.append(targetindex) if len(targetindices) > 1: for i in range(1,len(targetindices)): if abs(targetindices[i] - targetindices[i-1]) != 1: break # not consecutive foundindex = (min(targetindices), max(targetindices)) target = ' '.join(self.target[min(targetindices):max(targetindices)+1]) elif targetindices: foundindex = targetindices[0] target = self.target[foundindex] return target, foundindex
[ "def", "getalignedtarget", "(", "self", ",", "index", ")", ":", "targetindices", "=", "[", "]", "target", "=", "None", "foundindex", "=", "-", "1", "for", "sourceindex", ",", "targetindex", "in", "self", ".", "alignment", ":", "if", "sourceindex", "==", "index", ":", "targetindices", ".", "append", "(", "targetindex", ")", "if", "len", "(", "targetindices", ")", ">", "1", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "targetindices", ")", ")", ":", "if", "abs", "(", "targetindices", "[", "i", "]", "-", "targetindices", "[", "i", "-", "1", "]", ")", "!=", "1", ":", "break", "# not consecutive", "foundindex", "=", "(", "min", "(", "targetindices", ")", ",", "max", "(", "targetindices", ")", ")", "target", "=", "' '", ".", "join", "(", "self", ".", "target", "[", "min", "(", "targetindices", ")", ":", "max", "(", "targetindices", ")", "+", "1", "]", ")", "elif", "targetindices", ":", "foundindex", "=", "targetindices", "[", "0", "]", "target", "=", "self", ".", "target", "[", "foundindex", "]", "return", "target", ",", "foundindex" ]
Returns target range only if source index aligns to a single consecutive range of target tokens.
[ "Returns", "target", "range", "only", "if", "source", "index", "aligns", "to", "a", "single", "consecutive", "range", "of", "target", "tokens", "." ]
7707f69a91caaa6cde037f0d0379f1d42500a68b
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/giza.py#L108-L126
-1
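The method collects every target index aligned to one source index and, when there are several, joins the min..max span of target tokens. A condensed re-implementation for illustration (the original's scan for non-consecutive indices is omitted, and the sentence data is invented):

class Sent:
    def __init__(self, target, alignment):
        self.target = target           # target-side tokens
        self.alignment = alignment     # (source_idx, target_idx) pairs

    def getalignedtarget(self, index):
        targetindices = [t for s, t in self.alignment if s == index]
        if len(targetindices) > 1:
            span = (min(targetindices), max(targetindices))
            return ' '.join(self.target[span[0]:span[1] + 1]), span
        if targetindices:
            return self.target[targetindices[0]], targetindices[0]
        return None, -1

s = Sent(['het', 'witte', 'huis'], [(0, 0), (1, 1), (1, 2)])
print(s.getalignedtarget(0))   # ('het', 0)              single alignment
print(s.getalignedtarget(1))   # ('witte huis', (1, 2))  consecutive span joined
print(s.getalignedtarget(9))   # (None, -1)              unaligned source index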
251,798
proycon/pynlpl
pynlpl/formats/giza.py
WordAlignment.targetword
def targetword(self, index, targetwords, alignment): """Return the aligned targetword for a specified index in the source words""" if alignment[index]: return targetwords[alignment[index]] else: return None
python
def targetword(self, index, targetwords, alignment): """Return the aligned targetword for a specified index in the source words""" if alignment[index]: return targetwords[alignment[index]] else: return None
[ "def", "targetword", "(", "self", ",", "index", ",", "targetwords", ",", "alignment", ")", ":", "if", "alignment", "[", "index", "]", ":", "return", "targetwords", "[", "alignment", "[", "index", "]", "]", "else", ":", "return", "None" ]
Return the aligned targetword for a specified index in the source words
[ "Return", "the", "aligned", "targetword", "for", "a", "specified", "index", "in", "the", "source", "words" ]
7707f69a91caaa6cde037f0d0379f1d42500a68b
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/giza.py#L225-L230
-1
251,799
proycon/pynlpl
pynlpl/formats/giza.py
MultiWordAlignment.targetwords
def targetwords(self, index, targetwords, alignment): """Return the aligned targetwords for a specified index in the source words""" return [ targetwords[x] for x in alignment[index] ]
python
def targetwords(self, index, targetwords, alignment): """Return the aligned targetwords for a specified index in the source words""" return [ targetwords[x] for x in alignment[index] ]
[ "def", "targetwords", "(", "self", ",", "index", ",", "targetwords", ",", "alignment", ")", ":", "return", "[", "targetwords", "[", "x", "]", "for", "x", "in", "alignment", "[", "index", "]", "]" ]
Return the aligned targetwords for a specified index in the source words
[ "Return", "the", "aligned", "targetwords", "for", "a", "specified", "index", "in", "the", "source", "words" ]
7707f69a91caaa6cde037f0d0379f1d42500a68b
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/giza.py#L274-L276
-1
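Where WordAlignment maps each source index to a single target index, MultiWordAlignment maps it to a list, so the lookup is a plain comprehension. A toy invocation with invented data:

target = ['das', 'weisse', 'Haus']
alignment = [[0], [1, 2]]   # source token 1 aligns to two target tokens

def targetwords(index, targetwords_, alignment):
    # Each source index fans out to zero or more target tokens.
    return [targetwords_[x] for x in alignment[index]]

print(targetwords(0, target, alignment))   # ['das']
print(targetwords(1, target, alignment))   # ['weisse', 'Haus']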