hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a082499766e01b8416387c8b260647f328d736b5 | 374 | py | Python | src/config_writer.py | premkamal13/my-ai-assistant | 8c2e60cf74ae36d0ad1177430651f34a59230f52 | ["MIT"] | 1 | 2019-07-12T07:40:47.000Z | 2019-07-12T07:40:47.000Z | src/config_writer.py | premkamal13/my-ai-assistant | 8c2e60cf74ae36d0ad1177430651f34a59230f52 | ["MIT"] | null | null | null | src/config_writer.py | premkamal13/my-ai-assistant | 8c2e60cf74ae36d0ad1177430651f34a59230f52 | ["MIT"] | null | null | null |
import configparser
config = configparser.ConfigParser()
config['APP_INFO'] = {'ASSISTANT_NAME': 'Agamya','USER_NAME': 'Kamal'}
# replace the DEMO keyword with actual wolfram app id
# Note to @self: Use the gitignored version of this file to find id
config['APP_CREDS'] = {'WOLFRAM_APP_ID' : 'DEMO'}
with open('.config', 'w+') as configfile:
config.write(configfile)
| 34 | 70 | 0.724599 |
e53c57b84ad82d2d57ee3d7b59ec385d89330759 | 295 | py | Python | server/djangoapp/admin.py | findsemaine/agfzb-CloudAppDevelopment_Capstone | 56498093059d881ca9f4120f01a0a6ba1bf98115 | ["Apache-2.0"] | null | null | null | server/djangoapp/admin.py | findsemaine/agfzb-CloudAppDevelopment_Capstone | 56498093059d881ca9f4120f01a0a6ba1bf98115 | ["Apache-2.0"] | null | null | null | server/djangoapp/admin.py | findsemaine/agfzb-CloudAppDevelopment_Capstone | 56498093059d881ca9f4120f01a0a6ba1bf98115 | ["Apache-2.0"] | null | null | null |
from django.contrib import admin
from .models import CarMake, CarModel
class CarModelInline(admin.StackedInline):
model = CarModel
extra = 5
class CarMakeAdmin(admin.ModelAdmin):
inlines = [CarModelInline]
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel)
| 24.583333 | 42 | 0.776271 |
6f937596a34474eee812a1c703d7a57f6929d547 | 14,063 | py | Python | sdk/python/pulumi_azure_nextgen/servicefabric/v20200101preview/get_managed_cluster.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/servicefabric/v20200101preview/get_managed_cluster.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/servicefabric/v20200101preview/get_managed_cluster.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagedClusterResult',
'AwaitableGetManagedClusterResult',
'get_managed_cluster',
]
@pulumi.output_type
class GetManagedClusterResult:
"""
    The managed cluster resource
"""
def __init__(__self__, addon_features=None, admin_password=None, admin_user_name=None, azure_active_directory=None, client_connection_port=None, clients=None, cluster_certificate_thumbprint=None, cluster_code_version=None, cluster_id=None, cluster_state=None, dns_name=None, etag=None, fabric_settings=None, fqdn=None, http_gateway_connection_port=None, id=None, load_balancing_rules=None, location=None, name=None, provisioning_state=None, sku=None, tags=None, type=None):
if addon_features and not isinstance(addon_features, list):
raise TypeError("Expected argument 'addon_features' to be a list")
pulumi.set(__self__, "addon_features", addon_features)
if admin_password and not isinstance(admin_password, str):
raise TypeError("Expected argument 'admin_password' to be a str")
pulumi.set(__self__, "admin_password", admin_password)
if admin_user_name and not isinstance(admin_user_name, str):
raise TypeError("Expected argument 'admin_user_name' to be a str")
pulumi.set(__self__, "admin_user_name", admin_user_name)
if azure_active_directory and not isinstance(azure_active_directory, dict):
raise TypeError("Expected argument 'azure_active_directory' to be a dict")
pulumi.set(__self__, "azure_active_directory", azure_active_directory)
if client_connection_port and not isinstance(client_connection_port, int):
raise TypeError("Expected argument 'client_connection_port' to be a int")
pulumi.set(__self__, "client_connection_port", client_connection_port)
if clients and not isinstance(clients, list):
raise TypeError("Expected argument 'clients' to be a list")
pulumi.set(__self__, "clients", clients)
if cluster_certificate_thumbprint and not isinstance(cluster_certificate_thumbprint, str):
raise TypeError("Expected argument 'cluster_certificate_thumbprint' to be a str")
pulumi.set(__self__, "cluster_certificate_thumbprint", cluster_certificate_thumbprint)
if cluster_code_version and not isinstance(cluster_code_version, str):
raise TypeError("Expected argument 'cluster_code_version' to be a str")
pulumi.set(__self__, "cluster_code_version", cluster_code_version)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if cluster_state and not isinstance(cluster_state, str):
raise TypeError("Expected argument 'cluster_state' to be a str")
pulumi.set(__self__, "cluster_state", cluster_state)
if dns_name and not isinstance(dns_name, str):
raise TypeError("Expected argument 'dns_name' to be a str")
pulumi.set(__self__, "dns_name", dns_name)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fabric_settings and not isinstance(fabric_settings, list):
raise TypeError("Expected argument 'fabric_settings' to be a list")
pulumi.set(__self__, "fabric_settings", fabric_settings)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if http_gateway_connection_port and not isinstance(http_gateway_connection_port, int):
raise TypeError("Expected argument 'http_gateway_connection_port' to be a int")
pulumi.set(__self__, "http_gateway_connection_port", http_gateway_connection_port)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancing_rules and not isinstance(load_balancing_rules, list):
raise TypeError("Expected argument 'load_balancing_rules' to be a list")
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="addonFeatures")
def addon_features(self) -> Optional[Sequence[str]]:
"""
        List of add-on features to enable on the cluster.
"""
return pulumi.get(self, "addon_features")
@property
@pulumi.getter(name="adminPassword")
def admin_password(self) -> Optional[str]:
"""
vm admin user password.
"""
return pulumi.get(self, "admin_password")
@property
@pulumi.getter(name="adminUserName")
def admin_user_name(self) -> str:
"""
vm admin user name.
"""
return pulumi.get(self, "admin_user_name")
@property
@pulumi.getter(name="azureActiveDirectory")
def azure_active_directory(self) -> Optional['outputs.AzureActiveDirectoryResponse']:
"""
Azure active directory.
"""
return pulumi.get(self, "azure_active_directory")
@property
@pulumi.getter(name="clientConnectionPort")
def client_connection_port(self) -> Optional[int]:
"""
The port used for client connections to the cluster.
"""
return pulumi.get(self, "client_connection_port")
@property
@pulumi.getter
def clients(self) -> Optional[Sequence['outputs.ClientCertificateResponse']]:
"""
client certificates for the cluster.
"""
return pulumi.get(self, "clients")
@property
@pulumi.getter(name="clusterCertificateThumbprint")
def cluster_certificate_thumbprint(self) -> str:
"""
        The cluster certificate thumbprint used for node to node communication.
"""
return pulumi.get(self, "cluster_certificate_thumbprint")
@property
@pulumi.getter(name="clusterCodeVersion")
def cluster_code_version(self) -> Optional[str]:
"""
        The Service Fabric runtime version of the cluster. This property can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available Service Fabric versions for new clusters use [ClusterVersion API](./ClusterVersion.md). To get the list of available versions for existing clusters use **availableClusterVersions**.
"""
return pulumi.get(self, "cluster_code_version")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> str:
"""
A service generated unique identifier for the cluster resource.
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="clusterState")
def cluster_state(self) -> str:
"""
The current state of the cluster.
"""
return pulumi.get(self, "cluster_state")
@property
@pulumi.getter(name="dnsName")
def dns_name(self) -> str:
"""
The cluster dns name.
"""
return pulumi.get(self, "dns_name")
@property
@pulumi.getter
def etag(self) -> str:
"""
Azure resource etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="fabricSettings")
def fabric_settings(self) -> Optional[Sequence['outputs.SettingsSectionDescriptionResponse']]:
"""
The list of custom fabric settings to configure the cluster.
"""
return pulumi.get(self, "fabric_settings")
@property
@pulumi.getter
def fqdn(self) -> str:
"""
        The fully qualified domain name of the cluster.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="httpGatewayConnectionPort")
def http_gateway_connection_port(self) -> Optional[int]:
"""
The port used for http connections to the cluster.
"""
return pulumi.get(self, "http_gateway_connection_port")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Optional[Sequence['outputs.LoadBalancingRuleResponse']]:
"""
Describes load balancing rules.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def location(self) -> str:
"""
Azure resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the managed cluster resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the managed cluster
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetManagedClusterResult(GetManagedClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagedClusterResult(
addon_features=self.addon_features,
admin_password=self.admin_password,
admin_user_name=self.admin_user_name,
azure_active_directory=self.azure_active_directory,
client_connection_port=self.client_connection_port,
clients=self.clients,
cluster_certificate_thumbprint=self.cluster_certificate_thumbprint,
cluster_code_version=self.cluster_code_version,
cluster_id=self.cluster_id,
cluster_state=self.cluster_state,
dns_name=self.dns_name,
etag=self.etag,
fabric_settings=self.fabric_settings,
fqdn=self.fqdn,
http_gateway_connection_port=self.http_gateway_connection_port,
id=self.id,
load_balancing_rules=self.load_balancing_rules,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_managed_cluster(cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedClusterResult:
"""
    The managed cluster resource
:param str cluster_name: The name of the cluster resource.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicefabric/v20200101preview:getManagedCluster', __args__, opts=opts, typ=GetManagedClusterResult).value
return AwaitableGetManagedClusterResult(
addon_features=__ret__.addon_features,
admin_password=__ret__.admin_password,
admin_user_name=__ret__.admin_user_name,
azure_active_directory=__ret__.azure_active_directory,
client_connection_port=__ret__.client_connection_port,
clients=__ret__.clients,
cluster_certificate_thumbprint=__ret__.cluster_certificate_thumbprint,
cluster_code_version=__ret__.cluster_code_version,
cluster_id=__ret__.cluster_id,
cluster_state=__ret__.cluster_state,
dns_name=__ret__.dns_name,
etag=__ret__.etag,
fabric_settings=__ret__.fabric_settings,
fqdn=__ret__.fqdn,
http_gateway_connection_port=__ret__.http_gateway_connection_port,
id=__ret__.id,
load_balancing_rules=__ret__.load_balancing_rules,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
| 39.838527 | 477 | 0.66579 |
13421c8526cb0bc2abfffb443a3de65a7e65d089 | 295 | py | Python | cnns/nnlib/datasets/resize.py | adam-dziedzic/time-series-ml | 81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a | ["Apache-2.0"] | 1 | 2018-03-25T13:19:46.000Z | 2018-03-25T13:19:46.000Z | cnns/nnlib/datasets/resize.py | adam-dziedzic/time-series-ml | 81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a | ["Apache-2.0"] | null | null | null | cnns/nnlib/datasets/resize.py | adam-dziedzic/time-series-ml | 81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a | ["Apache-2.0"] | null | null | null |
import cv2
import glob
def resize_img(image_path, size):
img = cv2.imread(image_path)
    img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(image_path, img)
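# resize every image found under the tiny-224 directory tree in place to 224x224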
all_images = glob.glob('tiny-224/*/*/*/*')
for image in all_images:
resize_img(image, 224)
| 22.692308 | 70 | 0.691525 |
338b7764e3b4aa993e69e9770c4cb9a8b02cbf74 | 332 | py | Python | sanskrit_parser/generator/generator.py | avinashvarna/sanskrit_parser | 79338213128b29927fe2f06031379bb1e3864a83 | ["MIT"] | 54 | 2017-06-30T09:11:53.000Z | 2022-03-22T15:35:41.000Z | sanskrit_parser/generator/generator.py | avinashvarna/sanskrit_parser | 79338213128b29927fe2f06031379bb1e3864a83 | ["MIT"] | 159 | 2017-06-30T07:04:36.000Z | 2021-06-17T17:03:43.000Z | sanskrit_parser/generator/generator.py | avinashvarna/sanskrit_parser | 79338213128b29927fe2f06031379bb1e3864a83 | ["MIT"] | 18 | 2017-08-17T13:22:00.000Z | 2022-01-20T01:08:58.000Z |
# -*- coding: utf-8 -*-
"""
Intro
=====
Experimental
Sanskrit pada generator
Process
1. PrakRiti (dhatu / prAtipadika)
2. pratyaya addition
2.a strIpratyaya
2.b samAsa
3. prakRti / pratyaya transformations
4. aNga transformations
5. prakRti + pratyaya sandhi
6. pada sandhi
@author: Karthik Madathil (github: @kmadathil)
"""
| 15.809524 | 46 | 0.71988 |
8c9567c23bbf35dd4975851e8748ddb2077697ad | 2,554 | py | Python | project/tools/wrapper/__main__.py | GaetanLongree/MASI-TFE | f632e80d4e9772216821f9b00a3374e1c3903cb5 | ["MIT"] | null | null | null | project/tools/wrapper/__main__.py | GaetanLongree/MASI-TFE | f632e80d4e9772216821f9b00a3374e1c3903cb5 | ["MIT"] | null | null | null | project/tools/wrapper/__main__.py | GaetanLongree/MASI-TFE | f632e80d4e9772216821f9b00a3374e1c3903cb5 | ["MIT"] | null | null | null |
import getopt
import sys
import traceback
from . import debug, setup, cleanup, execution, runtime_info, workload_manager, module_handler, api_communicator
def main(argv):
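    # parse the -i <input file> option, then drive the job lifecycle:
    # setup, optional preprocessing modules, execution, optional postprocessing modules,
    # a final status update, and cleanup of local files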
try:
opts, args = getopt.getopt(argv, "i:")
except getopt.GetoptError:
debug.log("Error in the command parameters")
sys.exit(2)
input_file = None
for opt, arg in opts:
if opt == '-i':
input_file = arg
if input_file is not None:
try:
# Setup stage
setup.run(input_file)
# Module handling setup
handler = module_handler.ModuleHandler()
handler.from_dict(runtime_info.modules)
# modules execution
if runtime_info.modules is not None \
and 'preprocessing' in runtime_info.modules:
try:
preprocessing_output = handler.run('preprocessing', runtime_info.user_input)
runtime_info.__update_input__(preprocessing_output['input'])
runtime_info.__update_modules__(preprocessing_output['modules'])
except Exception as err_msg:
debug.log(err_msg)
api_communicator.notify_client(err_msg)
# Job Execution
execution.run()
# Wait for slurm to finish running the job
terminated_successfully = execution.wait()
if terminated_successfully \
and runtime_info.modules is not None \
and 'postprocessing' in runtime_info.modules:
# Postprocessing modules execution
try:
postprocessing_output = handler.run('postprocessing', runtime_info.user_input)
runtime_info.__update_input__(postprocessing_output['input'])
runtime_info.__update_modules__(postprocessing_output['modules'])
except Exception as err_msg:
debug.log(err_msg)
api_communicator.notify_client(err_msg)
# Last Status update of the job
api_communicator.update_job_status()
# Cleanup of local files
cleanup.run(runtime_info.get_all())
except BaseException as error:
# api_communicator.notify_client("Error caught: {}".format(error))
debug.log("Error caught: {}".format(error))
debug.log("Traceback : {}".format(traceback.format_exc()))
if __name__ == '__main__':
main(sys.argv[1:])
| 36.485714 | 112 | 0.59397 |
786b85b71bd8a8364dbc2d9f954d8be87afb261e | 852 | py | Python | microrpc/server.py | jasonjohnson/microrpc | 96bd3836c43a7fec5bf71b773d66f0f019d5f675 | ["MIT"] | null | null | null | microrpc/server.py | jasonjohnson/microrpc | 96bd3836c43a7fec5bf71b773d66f0f019d5f675 | ["MIT"] | null | null | null | microrpc/server.py | jasonjohnson/microrpc | 96bd3836c43a7fec5bf71b773d66f0f019d5f675 | ["MIT"] | null | null | null |
import pickle
from socketserver import BaseRequestHandler, TCPServer, ThreadingMixIn
class Handler(BaseRequestHandler):
def handle(self):
while True:
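            # a pickled request ends with the STOP opcode b'.', so keep reading until it appears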
buff = bytearray()
while not buff.endswith(b'.'):
buff += self.request.recv(256)
method, args, kwargs = pickle.loads(buff)
try:
result = self.server.methods[method](*args, **kwargs)
except Exception as e:
result = e
self.request.sendall(pickle.dumps(result))
class Server(ThreadingMixIn, TCPServer):
def __init__(self, host='0.0.0.0', port=9090):
super().__init__((host, port), Handler)
self.methods = {}
def rpc(self, func):
self.methods[func.__name__] = func
return func
def run(self):
self.serve_forever()
| 25.818182 | 70 | 0.584507 |
c3c1b93d1243f1ded99ea5049be502ae2ecc46f0 | 4,693 | py | Python | .history/awards/settings_20201129191246.py | Nyash-Mauro/awwards | aa8200a4d2a26a9d022f0ebda280379c1b3761c3 | ["MIT"] | null | null | null | .history/awards/settings_20201129191246.py | Nyash-Mauro/awwards | aa8200a4d2a26a9d022f0ebda280379c1b3761c3 | ["MIT"] | null | null | null | .history/awards/settings_20201129191246.py | Nyash-Mauro/awwards | aa8200a4d2a26a9d022f0ebda280379c1b3761c3 | ["MIT"] | null | null | null |
"""
Django settings for awards project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
import django_heroku
import dj_database_url
from decouple import config, Csv
from pathlib import Path
import cloudinary
import cloudinary.uploader
import cloudinary.api
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i2@2%*mlx=(oq$t1y*uu#9p=!ag@x=e@i%y@vdg!qct8nz054u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'award',
'corsheaders',
'bootstrap4',
'tinymce',
'star_ratings',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'awards.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'awards.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
cloudinary.config(
cloud_name="dapwcit3i",
api_key="917726294659896",
api_secret="PeLRKhRoA2E-r-5ykRTpuEBNcH4"
)
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE') == "dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
django_heroku.settings(locals())
STAR_RATINGS_RERATE = False
STAR_RATINGS_RANGE = 10
STAR_RATINGS_STAR_HEIGHT = 20
STAR_RATINGS_STAR_WIDTH = 20
| 25.644809 | 91 | 0.691242 |
9c3929081e0b239e8030339b99d88fd9478999ca | 3,117 | py | Python | mars/services/lifecycle/api/web.py | haijohn/mars | 672b3a33a70565f01b1a3f508908445491d85acf | ["Apache-2.0"] | 1 | 2021-06-10T02:43:01.000Z | 2021-06-10T02:43:01.000Z | mars/services/lifecycle/api/web.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | ["Apache-2.0"] | null | null | null | mars/services/lifecycle/api/web.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | ["Apache-2.0"] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from ....lib.aio import alru_cache
from ....utils import serialize_serializable, deserialize_serializable
from ...web import web_api, MarsServiceWebAPIHandler, MarsWebAPIClientMixin
from .core import AbstractLifecycleAPI
class LifecycleWebAPIHandler(MarsServiceWebAPIHandler):
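    # proxies lifecycle requests for a session to the Oscar lifecycle API on the supervisor that owns it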
_root_pattern = '/api/session/(?P<session_id>[^/]+)/lifecycle'
@alru_cache(cache_exceptions=False)
async def _get_cluster_api(self):
from ...cluster import ClusterAPI
return await ClusterAPI.create(self._supervisor_addr)
@alru_cache(cache_exceptions=False)
async def _get_oscar_lifecycle_api(self, session_id: str):
from .oscar import LifecycleAPI
cluster_api = await self._get_cluster_api()
[address] = await cluster_api.get_supervisors_by_keys([session_id])
return await LifecycleAPI.create(session_id, address)
@web_api('', method='post', arg_filter={'action': 'decref_tileables'})
async def decref_tileables(self, session_id: str):
tileable_keys = self.get_argument('tileable_keys').split(',')
oscar_api = await self._get_oscar_lifecycle_api(session_id)
await oscar_api.decref_tileables(tileable_keys)
@web_api('', method='get', arg_filter={'action': 'get_all_chunk_ref_counts'})
async def get_all_chunk_ref_counts(self, session_id: str):
oscar_api = await self._get_oscar_lifecycle_api(session_id)
res = await oscar_api.get_all_chunk_ref_counts()
self.write(serialize_serializable(res))
web_handlers = {
LifecycleWebAPIHandler.get_root_pattern(): LifecycleWebAPIHandler
}
class WebLifecycleAPI(AbstractLifecycleAPI, MarsWebAPIClientMixin):
def __init__(self, session_id: str, address: str):
self._session_id = session_id
self._address = address.rstrip('/')
async def decref_tileables(self, tileable_keys: List[str]):
path = f'{self._address}/api/session/{self._session_id}/lifecycle' \
f'?action=decref_tileables'
await self._request_url(
path, method='POST',
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body='tileable_keys=' + ','.join(tileable_keys)
)
async def get_all_chunk_ref_counts(self) -> Dict[str, int]:
path = f'{self._address}/api/session/{self._session_id}/lifecycle' \
f'?action=get_all_chunk_ref_counts'
res = await self._request_url(path)
return deserialize_serializable(res.body)
| 41.013158 | 81 | 0.72281 |
1dbd6d4ba9850c6c888e563cf8d747312616d463 | 10,009 | py | Python | pyion2json/test_pyion2json.py | crouchcd/pyion2json | 20d84c24052a877aa81f473a57cc9360adbabc15 | ["MIT"] | 3 | 2020-05-01T20:22:16.000Z | 2020-12-09T14:03:56.000Z | pyion2json/test_pyion2json.py | crouchcd/pyion2json | 20d84c24052a877aa81f473a57cc9360adbabc15 | ["MIT"] | null | null | null | pyion2json/test_pyion2json.py | crouchcd/pyion2json | 20d84c24052a877aa81f473a57cc9360adbabc15 | ["MIT"] | null | null | null |
import unittest
import json
import amazon.ion.simpleion as ion
from pyion2json import ion_cursor_to_json
_ION_NULLS = """[
null,
null.null, // Identical to unadorned null
null.bool,
null.int,
null.float,
null.decimal,
null.timestamp,
null.string,
null.symbol,
null.blob,
null.clob,
null.struct,
null.list,
null.sexp
]"""
_JSON_NULLS = json.dumps([
None
] * 14)
_ION_BOOLS = """[
true,
false
]"""
_JSON_BOOLS = json.dumps([
True,
False
])
_ION_INTS = """[
0, // Zero. Surprise!
-0, // ...the same value with a minus sign
123, // A normal int
-123, // Another negative int
0xBeef, // An int denoted in hexadecimal
0b0101, // An int denoted in binary
1_2_3, // An int with underscores
0xFA_CE, // An int denoted in hexadecimal with underscores
0b10_10_10, // An int denoted in binary with underscores
]"""
_JSON_INTS = json.dumps([
0,
-0,
123,
-123,
0xBeef,
0b0101,
123,
0xFACE,
0b101010
])
_ION_DECIMALS = """[
0.123,
-0.12d4,
0D0,
0.,
-0d0,
-0.,
-0d-1,
123_456.789_012
]"""
_JSON_DECIMALS = json.dumps([
0.123,
-0.12e4,
0e0,
0.,
-0e0,
-0.,
-0e-1,
123456.789012
])
_ION_TIMESTAMPS = """[
2007-02-23T12:14Z, // Seconds are optional, but local offset is not
2007-02-23T12:14:33.079-08:00, // A timestamp with millisecond precision and PST local time
2007-02-23T20:14:33.079Z, // The same instant in UTC ("zero" or "zulu")
2007-02-23T20:14:33.079+00:00, // The same instant, with explicit local offset
2007-02-23T20:14:33.079-00:00, // The same instant, with unknown local offset
2007-01-01T00:00-00:00, // Happy New Year in UTC, unknown local offset
2007-01-01, // The same instant, with days precision, unknown local offset
2007-01-01T, // The same value, different syntax.
2007-01T, // The same instant, with months precision, unknown local offset
2007T, // The same instant, with years precision, unknown local offset
2007-02-23, // A day, unknown local offset
2007-02-23T00:00Z, // The same instant, but more precise and in UTC
2007-02-23T00:00+00:00, // An equivalent format for the same value
2007-02-23T00:00:00-00:00
]"""
_JSON_TIMESTAMPS = json.dumps([
'2007-02-23 12:14:00+00:00',
'2007-02-23 12:14:33.079000-08:00',
'2007-02-23 20:14:33.079000+00:00',
'2007-02-23 20:14:33.079000+00:00',
'2007-02-23 20:14:33.079000',
'2007-01-01 00:00:00',
'2007-01-01 00:00:00',
'2007-01-01 00:00:00',
'2007-01-01 00:00:00',
'2007-01-01 00:00:00',
'2007-02-23 00:00:00',
'2007-02-23 00:00:00+00:00',
'2007-02-23 00:00:00+00:00',
'2007-02-23 00:00:00'
])
_ION_STRINGS = """[
"", // An empty string value
" my string ", // A normal string
"\\"", // Contains one double-quote character
"\uABCD", // Contains one unicode character
xml::"<e a='v'>c</e>", // String with type annotation 'xml'
( '''hello ''' // Sexp with one element
'''world!''' ),
("hello world!"), // The exact same sexp value
// This Ion value is a string containing three newlines. The serialized
// form's first newline is escaped into nothingness.
'''\
The first line of the string.
This is the second line of the string,
and this is the third line.
'''
]"""
_JSON_STRINGS = json.dumps([
'',
' my string ',
'"',
'\uABCD',
'<e a=\'v\'>c</e>',
['hello world!'],
['hello world!'],
'''\
The first line of the string.
This is the second line of the string,
and this is the third line.
'''
])
_ION_SYMBOLS = """[
'myVar2', // A symbol
myVar2, // The same symbol
myvar2, // A different symbol
'hi ho', // Symbol requiring quotes
'\\'ahoy\\'', // A symbol with embedded quotes
'' // The empty symbol
]"""
_JSON_SYMBOLS = json.dumps([
'myVar2',
'myVar2',
'myvar2',
'hi ho',
'\'ahoy\'',
''
])
_ION_BLOBS = """[
// A valid blob value with zero padding characters.
{{
+AB/
}},
// A valid blob value with one required padding character.
{{ VG8gaW5maW5pdHkuLi4gYW5kIGJleW9uZCE= }},
// A valid blob value with two required padding characters.
{{ dHdvIHBhZGRpbmcgY2hhcmFjdGVycw== }}
]"""
_JSON_BLOBS = json.dumps([
'+AB/',
'VG8gaW5maW5pdHkuLi4gYW5kIGJleW9uZCE=',
'dHdvIHBhZGRpbmcgY2hhcmFjdGVycw=='
])
_ION_CLOBS = """[
{{ "This is a CLOB of text." }},
shift_jis ::
{{
'''Another clob with user-defined encoding, '''
'''this time on multiple lines.'''
}}
]"""
_JSON_CLOBS = json.dumps([
'This is a CLOB of text.',
'Another clob with user-defined encoding, this time on multiple lines.'
])
_ION_STRUCTS = """[
{ }, // An empty struct value
{ first : "Tom" , last: "Riddle" }, // Structure with two fields
{"first":"Tom","last":"Riddle"}, // The same value with confusing style
{center:{x:1.0, y:12.5}, radius:3}, // Nested struct
{ x:1, }, // Trailing comma is legal in Ion (unlike JSON)
{ "":42 }, // A struct value containing a field with an empty name
{ field_name: annotation:: value }
]"""
_JSON_STRUCTS = json.dumps([
{},
{'first': 'Tom', 'last': 'Riddle'},
{'first': 'Tom', 'last': 'Riddle'},
{'center': {'x': 1.0, 'y': 12.5}, 'radius': 3},
{'x': 1, },
{'': 42},
{'field_name': 'value'}
])
_ION_LISTS = """[
[], // An empty list value
[1, 2, 3], // List of three ints
[ 1 , two ], // List of an int and a symbol
[a , [b]] // Nested list
]"""
_JSON_LISTS = json.dumps([
[],
[1, 2, 3],
[1, 'two'],
['a', ['b']],
])
_ION_SEXPS = """[
(), // An empty expression value
(cons 1 2), // S-expression of three values
([hello][there]), // S-expression containing two lists
(a+-b),
( 'a' '+-' 'b' ),
(a.b;),
( 'a' '.' 'b' ';')
]"""
_JSON_SEXPS = json.dumps([
[],
['cons', 1, 2],
[['hello'], ['there']],
['a', '+-', 'b'],
['a', '+-', 'b'],
['a', '.', 'b', ';'],
['a', '.', 'b', ';']
])
class TestPyIonToJson(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.maxDiff = None
def test_ion_null(self):
"""Check that Ion `null` types convert to JSON `null`
"""
ion_cursor = ion.loads(_ION_NULLS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_NULLS)
def test_ion_bool(self):
"""Check that Ion `bool` types convert to JSON `bool`
"""
ion_cursor = ion.loads(_ION_BOOLS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_BOOLS)
def test_ion_int(self):
"""Check that Ion `int` types convert to JSON `number`
"""
ion_cursor = ion.loads(_ION_INTS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_INTS)
def test_ion_decimal(self):
"""Check that Ion `decimal` types convert to JSON `number`
"""
ion_cursor = ion.loads(_ION_DECIMALS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_DECIMALS)
def test_ion_timestamp(self):
"""Check that Ion `timestamp` types convert to JSON `string`
"""
ion_cursor = ion.loads(_ION_TIMESTAMPS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_TIMESTAMPS)
def test_ion_string(self):
"""Check that Ion `string` types convert to JSON `string`
"""
ion_cursor = ion.loads(_ION_STRINGS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_STRINGS)
def test_ion_symbol(self):
"""Check that Ion `symbol` types convert to JSON `string`
"""
ion_cursor = ion.loads(_ION_SYMBOLS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_SYMBOLS)
def test_ion_blob(self):
"""Check that Ion `blob` types convert to JSON `string`
"""
ion_cursor = ion.loads(_ION_BLOBS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_BLOBS)
def test_ion_clob(self):
"""Check that Ion `clob` types convert to JSON `string`
"""
ion_cursor = ion.loads(_ION_CLOBS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_CLOBS)
def test_ion_struct(self):
"""Check that Ion `struct` types convert to JSON `object`
"""
ion_cursor = ion.loads(_ION_STRUCTS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_STRUCTS)
def test_ion_list(self):
"""Check that Ion `list` types convert to JSON `array`
"""
ion_cursor = ion.loads(_ION_LISTS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_LISTS)
def test_ion_sexp(self):
"""Check that Ion `sexp` types convert to JSON `array`
"""
ion_cursor = ion.loads(_ION_SEXPS)
json_rows = ion_cursor_to_json(ion_cursor)
self.assertEqual(json.dumps(json_rows), _JSON_SEXPS)
if __name__ == '__main__':
unittest.main()
| 29.438235 | 102 | 0.570187 |
9dff15a20ac27b9469cf8409889d721dd36f7686 | 534 | py | Python | event/migrations/0017_auto_20200814_2151.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | ["MIT"] | null | null | null | event/migrations/0017_auto_20200814_2151.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | ["MIT"] | 21 | 2019-02-14T02:47:34.000Z | 2022-01-23T02:22:54.000Z | event/migrations/0017_auto_20200814_2151.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | ["MIT"] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-15 01:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('team', '0017_auto_20200713_2157'),
('wagtailcore', '0052_pagelogentry'),
('event', '0016_auto_20200814_2141'),
]
operations = [
migrations.RenameModel(
old_name='RacerInline',
new_name='Racer',
),
migrations.RenameModel(
old_name='ResultInline',
new_name='Result',
),
]
| 22.25 | 47 | 0.576779 |
d1103e9f056445e64f726b4e069e026f211347c3 | 6,122 | py | Python | ppr-api/src/ppr_api/models/test_search.py | cameron-freshworks/ppr | 01d6f5d300c791aebad5e58bb4601e9be2ccfc46 | ["Apache-2.0"] | null | null | null | ppr-api/src/ppr_api/models/test_search.py | cameron-freshworks/ppr | 01d6f5d300c791aebad5e58bb4601e9be2ccfc46 | ["Apache-2.0"] | 2 | 2021-01-25T22:06:58.000Z | 2021-01-25T22:07:01.000Z | ppr-api/src/ppr_api/models/test_search.py | cameron-freshworks/ppr | 01d6f5d300c791aebad5e58bb4601e9be2ccfc46 | ["Apache-2.0"] | null | null | null |
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds model data and database operations for test searches."""
from __future__ import annotations
from typing import List
from .db import db
from .test_search_result import TestSearchResult
class TestSearch(db.Model):
"""This class maintains test searches detail information (for automated testing)."""
__tablename__ = 'test_searches'
id = db.Column('id', db.Integer, db.Sequence('test_searches_id_seq'), primary_key=True)
search_criteria = db.Column('search_criteria', db.Text, nullable=False)
run_time = db.Column('run_time', db.Float, nullable=False)
# parent keys
batch_id = db.Column('batch_id', db.Integer, db.ForeignKey('test_search_batches.id'), nullable=False, index=True)
# relationships - test_search_batch
search_batch = db.relationship('TestSearchBatch', foreign_keys=[batch_id],
back_populates='searches', cascade='all, delete', uselist=False)
# relationships - test_search_results
results = db.relationship('TestSearchResult', back_populates='search')
@property
def json(self) -> dict:
"""Return the search as a json object."""
search = {
'criteria': self.search_criteria,
'matchesExact': {
'avgIndexDiff': self.avg_index_diff(TestSearchResult.MatchType.EXACT.value),
'firstFailIndex': self.fail_index(TestSearchResult.MatchType.EXACT.value),
'missedMatches': self.missed_matches(TestSearchResult.MatchType.EXACT.value),
'resultsApi': self.get_results(
TestSearchResult.MatchType.EXACT.value, TestSearchResult.Source.API.value),
'resultsLegacy': self.get_results(
TestSearchResult.MatchType.EXACT.value, TestSearchResult.Source.LEGACY.value)
},
'matchesSimilar': {
'avgIndexDiff': self.avg_index_diff(TestSearchResult.MatchType.SIMILAR.value),
'firstFailIndex': self.fail_index(TestSearchResult.MatchType.SIMILAR.value),
'missedMatches': self.missed_matches(TestSearchResult.MatchType.SIMILAR.value),
'resultsApi': self.get_results(
TestSearchResult.MatchType.SIMILAR.value, TestSearchResult.Source.API.value),
'resultsLegacy': self.get_results(
TestSearchResult.MatchType.SIMILAR.value, TestSearchResult.Source.LEGACY.value)
},
'runTime': self.run_time,
}
search['matchesExact']['passed'] = (
len(search['matchesExact']['missedMatches']) == 0 and
search['matchesExact']['firstFailIndex'] == -1
)
search['matchesSimilar']['passed'] = (
len(search['matchesSimilar']['missedMatches']) == 0 and
search['matchesSimilar']['firstFailIndex'] == -1
)
return search
def avg_index_diff(self, match_type) -> float:
"""Return the average index diff between api/legacy results. Excludes missed results."""
api_results = self.get_results(match_type, TestSearchResult.Source.API.value)
total_diff = 0
total_paired_results = 0
for result in api_results:
if result['pairedIndex'] != -1:
total_diff += abs(result['index'] - result['pairedIndex'])
total_paired_results += 1
return (total_diff / total_paired_results) if total_paired_results else 0
def fail_index(self, match_type) -> int:
"""Return the first index that diffs between api/legacy results. Includes missed results."""
api_results = self.get_results(match_type, TestSearchResult.Source.API.value)
for result in api_results:
if result['pairedIndex'] != result['index']:
return result['index']
return -1
def get_results(self, match_type, source) -> List[dict]:
"""Return results list of this search with given match type and source."""
results = db.session.query(TestSearchResult).filter(
TestSearchResult.search_id == self.id,
TestSearchResult.match_type == match_type,
TestSearchResult.source == source
).order_by(TestSearchResult.index.asc())
results_json = []
for result in results:
results_json.append(result.json)
return results_json
def missed_matches(self, match_type) -> list:
"""Return the missed matches for the given match type."""
missed = []
for result in self.get_results(match_type, TestSearchResult.Source.LEGACY.value):
if result['pairedIndex'] == -1:
missed.append(result)
return missed
def save(self):
"""Render a search to the local cache."""
db.session.add(self)
db.session.commit()
@classmethod
def find_by_id(cls, search_id: int = None) -> TestSearch:
"""Return a search object by search ID."""
search = None
if search_id:
search = db.session.query(TestSearch).\
filter(TestSearch.id == search_id).one_or_none()
return search
@classmethod
def find_all_by_batch_id(cls, batch_id: int = None) -> List[TestSearch]:
"""Return a list of search objects by batch ID."""
searches = []
if batch_id:
searches = db.session.query(TestSearch).\
filter(TestSearch.batch_id == batch_id).all()
return searches
| 42.513889 | 117 | 0.645541 |
cf52179226a6fc57df431857bfc47f5c4335a01b | 20,066 | py | Python | custominstall.py | JustSch/custom-install | 9ffc854bfce41f05b80a8dc85dd8636d0939bd57 | ["MIT"] | null | null | null | custominstall.py | JustSch/custom-install | 9ffc854bfce41f05b80a8dc85dd8636d0939bd57 | ["MIT"] | null | null | null | custominstall.py | JustSch/custom-install | 9ffc854bfce41f05b80a8dc85dd8636d0939bd57 | ["MIT"] | null | null | null |
# This file is a part of custom-install.py.
#
# custom-install is copyright (c) 2019-2020 Ian Burgwin
# This file is licensed under The MIT License (MIT).
# You can find the full license text in LICENSE.md in the root of this project.
from argparse import ArgumentParser
from os import makedirs, scandir
from os.path import dirname, join
from random import randint
from hashlib import sha256
from sys import platform
from tempfile import TemporaryDirectory
from typing import BinaryIO, TYPE_CHECKING
import subprocess
if TYPE_CHECKING:
from os import PathLike
from typing import Union
from events import Events
from pyctr.crypto import CryptoEngine, Keyslot, load_seeddb
from pyctr.type.cia import CIAReader, CIASection
from pyctr.type.ncch import NCCHSection
from pyctr.util import roundup
# used to run the save3ds_fuse binary next to the script
script_dir: str = dirname(__file__)
# missing contents are replaced with 0xFFFFFFFF in the cmd file
CMD_MISSING = b'\xff\xff\xff\xff'
# the size of each file and directory in a title's contents are rounded up to this
TITLE_ALIGN_SIZE = 0x8000
# size to read at a time when copying files
READ_SIZE = 0x200000
# version for cifinish.bin
CIFINISH_VERSION = 3
# Placeholder for SDPathErrors
class SDPathError(Exception):
pass
class InvalidCIFinishError(Exception):
pass
def load_cifinish(path: 'Union[PathLike, bytes, str]'):
try:
with open(path, 'rb') as f:
header = f.read(0x10)
if header[0:8] != b'CIFINISH':
raise InvalidCIFinishError('CIFINISH magic not found')
version = int.from_bytes(header[0x8:0xC], 'little')
count = int.from_bytes(header[0xC:0x10], 'little')
data = {}
for _ in range(count):
if version == 1:
# ignoring the titlekey and common key index, since it's not useful in this scenario
raw_entry = f.read(0x30)
if len(raw_entry) != 0x30:
raise InvalidCIFinishError(f'title entry is not 0x30 (version {version})')
title_magic = raw_entry[0xA:0x10]
title_id = int.from_bytes(raw_entry[0:8], 'little')
has_seed = raw_entry[0x9]
seed = raw_entry[0x20:0x30]
elif version == 2:
# this is assuming the "wrong" version created by an earlier version of this script
# there wasn't a version of custom-install-finalize that really accepted this version
raw_entry = f.read(0x20)
if len(raw_entry) != 0x20:
raise InvalidCIFinishError(f'title entry is not 0x20 (version {version})')
title_magic = raw_entry[0:6]
title_id = int.from_bytes(raw_entry[0x6:0xE], 'little')
has_seed = raw_entry[0xE]
seed = raw_entry[0x10:0x20]
elif version == 3:
raw_entry = f.read(0x20)
if len(raw_entry) != 0x20:
raise InvalidCIFinishError(f'title entry is not 0x20 (version {version})')
title_magic = raw_entry[0:6]
title_id = int.from_bytes(raw_entry[0x8:0x10], 'little')
has_seed = raw_entry[0x6]
seed = raw_entry[0x10:0x20]
else:
raise InvalidCIFinishError(f'unknown version {version}')
if title_magic == b'TITLE\0':
data[title_id] = {'seed': seed if has_seed else None}
return data
except FileNotFoundError:
# allow the caller to easily create a new database in the same place where an existing one would be updated
return {}
def save_cifinish(path: 'Union[PathLike, bytes, str]', data: dict):
with open(path, 'wb') as out:
entries = sorted(data.items())
out.write(b'CIFINISH')
out.write(CIFINISH_VERSION.to_bytes(4, 'little'))
out.write(len(entries).to_bytes(4, 'little'))
for tid, data in entries:
finalize_entry_data = [
# magic
b'TITLE\0',
# has seed
bool(data['seed']).to_bytes(1, 'little'),
# padding
b'\0',
# title id
tid.to_bytes(8, 'little'),
# seed, if needed
(data['seed'] if data['seed'] else (b'\0' * 0x10))
]
out.write(b''.join(finalize_entry_data))
class CustomInstall:
cia: CIAReader
def __init__(self, boot9, seeddb, movable, cias, sd, cifinish_out=None, skip_contents=False):
self.event = Events()
self.log_lines = [] # Stores all info messages for user to view
self.crypto = CryptoEngine(boot9=boot9)
self.crypto.setup_sd_key_from_file(movable)
self.seeddb = seeddb
self.cias = cias
self.sd = sd
self.skip_contents = skip_contents
self.cifinish_out = cifinish_out
self.movable = movable
def copy_with_progress(self, src: BinaryIO, dst: BinaryIO, size: int, path: str):
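        # encrypt the data with the SD keyslot while copying, using a CTR IV derived from the file's path on the SD card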
left = size
cipher = self.crypto.create_ctr_cipher(Keyslot.SD, self.crypto.sd_path_to_iv(path))
while left > 0:
to_read = min(READ_SIZE, left)
data = cipher.encrypt(src.read(READ_SIZE))
dst.write(data)
left -= to_read
total_read = size - left
self.event.update_percentage((total_read / size) * 100, total_read / 1048576, size / 1048576)
def start(self):
crypto = self.crypto
# TODO: Move a lot of these into their own methods
self.log("Finding path to install to...")
[sd_path, id1s] = self.get_sd_path()
if len(id1s) > 1:
raise SDPathError(f'There are multiple id1 directories for id0 {crypto.id0.hex()}, '
f'please remove extra directories')
elif len(id1s) == 0:
raise SDPathError(f'Could not find a suitable id1 directory for id0 {crypto.id0.hex()}')
if self.cifinish_out:
cifinish_path = self.cifinish_out
else:
cifinish_path = join(self.sd, 'cifinish.bin')
sd_path = join(sd_path, id1s[0])
title_info_entries = {}
cifinish_data = load_cifinish(cifinish_path)
load_seeddb(self.seeddb)
# Now loop through all provided cia files
for c in self.cias:
self.log('Reading ' + c)
try:
cia = CIAReader(c)
except Exception as e:
self.log(f'Failed to load file: {type(e).__name__}: {e}')
continue
self.cia = cia
tid_parts = (cia.tmd.title_id[0:8], cia.tmd.title_id[8:16])
try:
self.log(f'Installing {cia.contents[0].exefs.icon.get_app_title().short_desc}...')
except:
self.log('Installing...')
sizes = [1] * 5
if cia.tmd.save_size:
# one for the data directory, one for the 00000001.sav file
sizes.extend((1, cia.tmd.save_size))
for record in cia.content_info:
sizes.append(record.size)
# this calculates the size to put in the Title Info Entry
title_size = sum(roundup(x, TITLE_ALIGN_SIZE) for x in sizes)
# checks if this is dlc, which has some differences
is_dlc = tid_parts[0] == '0004008c'
# this checks if it has a manual (index 1) and is not DLC
has_manual = (not is_dlc) and (1 in cia.contents)
# this gets the extdata id from the extheader, stored in the storage info area
try:
with cia.contents[0].open_raw_section(NCCHSection.ExtendedHeader) as e:
e.seek(0x200 + 0x30)
extdata_id = e.read(8)
except KeyError:
# not an executable title
extdata_id = b'\0' * 8
# cmd content id, starts with 1 for non-dlc contents
cmd_id = len(cia.content_info) if is_dlc else 1
cmd_filename = f'{cmd_id:08x}.cmd'
# get the title root where all the contents will be
title_root = join(sd_path, 'title', *tid_parts)
content_root = join(title_root, 'content')
# generate the path used for the IV
title_root_cmd = f'/title/{"/".join(tid_parts)}'
content_root_cmd = title_root_cmd + '/content'
if not self.skip_contents:
makedirs(join(content_root, 'cmd'), exist_ok=True)
if cia.tmd.save_size:
makedirs(join(title_root, 'data'), exist_ok=True)
if is_dlc:
# create the separate directories for every 256 contents
for x in range(((len(cia.content_info) - 1) // 256) + 1):
makedirs(join(content_root, f'{x:08x}'), exist_ok=True)
# maybe this will be changed in the future
tmd_id = 0
tmd_filename = f'{tmd_id:08x}.tmd'
# write the tmd
enc_path = content_root_cmd + '/' + tmd_filename
self.log(f'Writing {enc_path}...')
with cia.open_raw_section(CIASection.TitleMetadata) as s:
with open(join(content_root, tmd_filename), 'wb') as o:
self.copy_with_progress(s, o, cia.sections[CIASection.TitleMetadata].size, enc_path)
# write each content
for co in cia.content_info:
content_filename = co.id + '.app'
if is_dlc:
dir_index = format((co.cindex // 256), '08x')
enc_path = content_root_cmd + f'/{dir_index}/{content_filename}'
out_path = join(content_root, dir_index, content_filename)
else:
enc_path = content_root_cmd + '/' + content_filename
out_path = join(content_root, content_filename)
self.log(f'Writing {enc_path}...')
with cia.open_raw_section(co.cindex) as s, open(out_path, 'wb') as o:
self.copy_with_progress(s, o, co.size, enc_path)
# generate a blank save
if cia.tmd.save_size:
enc_path = title_root_cmd + '/data/00000001.sav'
out_path = join(title_root, 'data', '00000001.sav')
cipher = crypto.create_ctr_cipher(Keyslot.SD, crypto.sd_path_to_iv(enc_path))
# in a new save, the first 0x20 are all 00s. the rest can be random
data = cipher.encrypt(b'\0' * 0x20)
self.log(f'Generating blank save at {enc_path}...')
with open(out_path, 'wb') as o:
o.write(data)
o.write(b'\0' * (cia.tmd.save_size - 0x20))
# generate and write cmd
enc_path = content_root_cmd + '/cmd/' + cmd_filename
out_path = join(content_root, 'cmd', cmd_filename)
self.log(f'Generating {enc_path}')
highest_index = 0
content_ids = {}
for record in cia.content_info:
highest_index = record.cindex
with cia.open_raw_section(record.cindex) as s:
s.seek(0x100)
cmac_data = s.read(0x100)
id_bytes = bytes.fromhex(record.id)[::-1]
cmac_data += record.cindex.to_bytes(4, 'little') + id_bytes
cmac_ncch = crypto.create_cmac_object(Keyslot.CMACSDNAND)
cmac_ncch.update(sha256(cmac_data).digest())
content_ids[record.cindex] = (id_bytes, cmac_ncch.digest())
# add content IDs up to the last one
ids_by_index = [CMD_MISSING] * (highest_index + 1)
installed_ids = []
cmacs = []
for x in range(len(ids_by_index)):
try:
info = content_ids[x]
except KeyError:
# "MISSING CONTENT!"
# The 3DS does generate a cmac for missing contents, but I don't know how it works.
# It doesn't matter anyway, the title seems to be fully functional.
cmacs.append(bytes.fromhex('4D495353494E4720434F4E54454E5421'))
else:
ids_by_index[x] = info[0]
cmacs.append(info[1])
installed_ids.append(info[0])
installed_ids.sort(key=lambda x: int.from_bytes(x, 'little'))
final = (cmd_id.to_bytes(4, 'little')
+ len(ids_by_index).to_bytes(4, 'little')
+ len(installed_ids).to_bytes(4, 'little')
+ (1).to_bytes(4, 'little'))
cmac_cmd_header = crypto.create_cmac_object(Keyslot.CMACSDNAND)
cmac_cmd_header.update(final)
final += cmac_cmd_header.digest()
final += b''.join(ids_by_index)
final += b''.join(installed_ids)
final += b''.join(cmacs)
cipher = crypto.create_ctr_cipher(Keyslot.SD, crypto.sd_path_to_iv(enc_path))
self.log(f'Writing {enc_path}')
with open(out_path, 'wb') as o:
o.write(cipher.encrypt(final))
# this starts building the title info entry
title_info_entry_data = [
# title size
title_size.to_bytes(8, 'little'),
# title type, seems to usually be 0x40
0x40.to_bytes(4, 'little'),
# title version
int(cia.tmd.title_version).to_bytes(2, 'little'),
# ncch version
cia.contents[0].version.to_bytes(2, 'little'),
# flags_0, only checking if there is a manual
(1 if has_manual else 0).to_bytes(4, 'little'),
# tmd content id, always starting with 0
(0).to_bytes(4, 'little'),
# cmd content id
cmd_id.to_bytes(4, 'little'),
# flags_1, only checking save data
(1 if cia.tmd.save_size else 0).to_bytes(4, 'little'),
# extdataid low
extdata_id[0:4],
# reserved
b'\0' * 4,
# flags_2, only using a common value
0x100000000.to_bytes(8, 'little'),
# product code
cia.contents[0].product_code.encode('ascii').ljust(0x10, b'\0'),
# reserved
b'\0' * 0x10,
# unknown
randint(0, 0xFFFFFFFF).to_bytes(4, 'little'),
# reserved
b'\0' * 0x2c
]
title_info_entries[cia.tmd.title_id] = b''.join(title_info_entry_data)
cifinish_data[int(cia.tmd.title_id, 16)] = {'seed': (get_seed(cia.contents[0].program_id) if cia.contents[0].flags.uses_seed else None)}
# This is saved regardless if any titles were installed, so the file can be upgraded just in case.
save_cifinish(cifinish_path, cifinish_data)
if title_info_entries:
with TemporaryDirectory(suffix='-custom-install') as tempdir:
# set up the common arguments for the two times we call save3ds_fuse
save3ds_fuse_common_args = [
join(script_dir, 'bin', platform, 'save3ds_fuse'),
'-b', crypto.b9_path,
'-m', self.movable,
'--sd', self.sd,
'--db', 'sdtitle',
tempdir
]
# extract the title database to add our own entry to
self.log('Extracting Title Database...')
subprocess.run(save3ds_fuse_common_args + ['-x'])
for title_id, entry in title_info_entries.items():
# write the title info entry to the temp directory
with open(join(tempdir, title_id), 'wb') as o:
o.write(entry)
# import the directory, now including our title
self.log('Importing into Title Database...')
subprocess.run(save3ds_fuse_common_args + ['-i'])
self.log('FINAL STEP:\nRun custom-install-finalize through homebrew launcher.')
self.log('This will install a ticket and seed if required.')
else:
self.log('Did not install any titles.', 2)
def get_sd_path(self):
sd_path = join(self.sd, 'Nintendo 3DS', self.crypto.id0.hex())
id1s = []
for d in scandir(sd_path):
if d.is_dir() and len(d.name) == 32:
try:
# check if the name can be converted to hex
# I'm not sure what the 3DS does if there is a folder that is not a 32-char hex string.
bytes.fromhex(d.name)
except ValueError:
continue
else:
id1s.append(d.name)
return [sd_path, id1s]
def log(self, message, mtype=0, errorname=None, end='\n'):
"""Logs an Message with a type. Format is similar to python errors
There are 3 types of errors, indexed accordingly
type 0 = Message
type 1 = Warning
type 2 = Error
optionally, errorname can be a custom name as a string to identify errors easily
"""
if errorname:
errorname += ": "
else:
# No errorname provided
errorname = ""
types = [
"", # Type 0
"Warning: ", # Type 1
"Error: " # Type 2
]
# Example: "Warning: UninformativeError: An error occured, try again.""
msg_with_type = types[mtype] + errorname + str(message)
self.log_lines.append(msg_with_type)
self.event.on_log_msg(msg_with_type, end=end)
return msg_with_type
if __name__ == "__main__":
parser = ArgumentParser(description='Manually install a CIA to the SD card for a Nintendo 3DS system.')
parser.add_argument('cia', help='CIA files', nargs='+')
parser.add_argument('-m', '--movable', help='movable.sed file', required=True)
parser.add_argument('-b', '--boot9', help='boot9 file')
parser.add_argument('-s', '--seeddb', help='seeddb file')
parser.add_argument('--sd', help='path to SD root', required=True)
parser.add_argument('--skip-contents', help="don't add contents, only add title info entry", action='store_true')
parser.add_argument('--cifinish-out', help='path for cifinish.bin file, defaults to (SD root)/cifinish.bin')
args = parser.parse_args()
installer = CustomInstall(boot9=args.boot9,
seeddb=args.seeddb,
cias=args.cia,
movable=args.movable,
sd=args.sd,
cifinish_out=args.cifinish_out,
skip_contents=(args.skip_contents or False))
def log_handle(msg, end='\n'):
print(msg, end=end)
def percent_handle(total_percent, total_read, size):
installer.log(f' {total_percent:>5.1f}% {total_read:>.1f} MiB / {size:.1f} MiB\r', end='')
installer.event.on_log_msg += log_handle
installer.event.update_percentage += percent_handle
installer.start()
| 41.203285 | 148 | 0.547144 |
5e886667e495d30bf864d224193f954c5d8267bd | 1,145 | py | Python | returns/pointfree/cond.py | thecoblack/returns | ad76d4c5282ce53213cad57dc550e5b4565e2b48 | ["BSD-2-Clause"] | null | null | null | returns/pointfree/cond.py | thecoblack/returns | ad76d4c5282ce53213cad57dc550e5b4565e2b48 | ["BSD-2-Clause"] | null | null | null | returns/pointfree/cond.py | thecoblack/returns | ad76d4c5282ce53213cad57dc550e5b4565e2b48 | ["BSD-2-Clause"] | null | null | null |
from typing import Callable, Type, TypeVar
from returns.interfaces.specific.result import ResultLikeN
from returns.methods.cond import internal_cond
from returns.primitives.hkt import Kind2, Kinded, kinded
_ValueType = TypeVar('_ValueType')
_ErrorType = TypeVar('_ErrorType')
_ResultKind = TypeVar('_ResultKind', bound=ResultLikeN)
def cond(
container_type: Type[_ResultKind],
success_value: _ValueType,
error_value: _ErrorType,
) -> Kinded[Callable[[bool], Kind2[_ResultKind, _ValueType, _ErrorType]]]:
"""
Help us to reduce the boilerplate when choosing paths with ``ResultLikeN``.
.. code:: python
>>> from returns.pointfree import cond
>>> from returns.result import Failure, Result, Success
>>> assert cond(Result, 'success', 'failure')(True) == Success('success')
>>> assert cond(Result, 'success', 'failure')(False) == Failure('failure')
"""
@kinded
def factory(
is_success: bool,
) -> Kind2[_ResultKind, _ValueType, _ErrorType]:
return internal_cond(
container_type, is_success, success_value, error_value,
)
return factory
| 30.131579 | 80 | 0.69345 |
d3cda9d52e5f426b1c6f178552630f4c1c4408a4 | 114,138 | py | Python | PPP/global_variables.py | nostrumbiodiscovery/PPP | 7e5c948e5a5a8fd05f7024f9eb4513e85b6ac54d | ["Apache-2.0"] | null | null | null | PPP/global_variables.py | nostrumbiodiscovery/PPP | 7e5c948e5a5a8fd05f7024f9eb4513e85b6ac54d | ["Apache-2.0"] | null | null | null | PPP/global_variables.py | nostrumbiodiscovery/PPP | 7e5c948e5a5a8fd05f7024f9eb4513e85b6ac54d | ["Apache-2.0"] | 1 | 2020-10-01T13:54:07.000Z | 2020-10-01T13:54:07.000Z |
default_supported_aminoacids = ["ALA", "ARG", "ASH", "ASN", "ASP", "CYS", "CYT", "GLH",
"GLN", "GLU", "GLY", "HID", "HIE", "HIS", "HIP", "ILE",
"LEU", "LYS", "LYN", "MET", "PHE", "PRO", "PTR", "SER", "TRP",
"THR", "TYR", "VAL", "ACE", "NMA"]
aminoacids_3letter = ['ALA', 'ARG', 'ASH', 'ASN', 'ASP', 'CYS', 'GLH', 'GLN',
'GLU', 'GLY', 'HID', 'HIE', 'HIS', 'HIP', 'ILE', 'LEU',
'LYS', 'LYN', 'MET', 'PHE', 'PRO', 'SER', 'TRP', 'THR',
'TYR', 'VAL']
aminoacids_1letter = ["A", "R", "D", "N", "D", "C", "E", "Q", "E", "G", "H", "H",
"H", "H", "I", "L", "K", "K", "M", "F", "P", "S", "W", "T",
"Y", "V"]
protein_atomnames_dictionary = {
"END": [["H1", "1H", "HN1", "HN1", "HT1"], ["H2", "2H", "HN2", "HN2", "HT2"],
["H3", "3H", "HN3", "H", "HN3", "HT3"], ["OXT", "O1", "OT2", "OXT", "OXT"]],
"ALA": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB1', '1HB', '2HB', 'HB1', 'HB1', 'HB1', 'HB1'], ['HB2', '2HB', 'HB2', 'HB2', 'HB2', 'HB2', 'HB2'],
['HB3', '3HB', 'HB3', 'HB3', 'HB3', 'HB3', 'HB3'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"ARG": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', 'HG1', 'HG2', '2HG', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['HD2', '1HD', 'HD1', 'HD1', 'HD2', 'HD2', '2HD'], ['HD3', '2HD', 'HD2', 'HD2', 'HD1', 'HD1', 'HD3'],
['HE', 'HE', 'HNE', 'HE', 'HE', 'HE', 'HE'], ['HH11', '1HH1', 'HN11', 'HH11', 'HH11', 'HH11', 'HH11'],
['HH12', '2HH1', 'HN12', 'HH12', 'HH12', 'HH12', 'HH12'],
['HH21', '1HH2', 'HN21', 'HH21', 'HH21', 'HH21', 'HH21'],
['HH22', '2HH2', 'HN22', 'HH22', 'HH22', 'HH22', 'HH22'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'],
['CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['NE', 'NE', 'NE', 'NE', 'NE', 'NE', 'NE'], ['NH1', 'NH1', 'NH1', 'NH1', 'NH1', 'NH1', 'NH1'],
['NH2', 'NH2', 'NH2', 'NH2', 'NH2', 'NH2', 'NH2'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"ASH": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD2', 'HD2', 'HD2'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['OD1', 'OD1', 'OD1', 'OD1', 'OD1', 'OD1', 'OD1'], ['OD2', 'OD2', 'OD2', 'OD2', 'OD2', 'OD2', 'OD2']],
"ASN": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD21', '1HD2', 'HN21', 'HD21', 'HD1', 'HD21', 'HD21'],
['HD22', '2HD2', 'HN22', 'HD22', 'HD2', 'HD22', 'HD22'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['ND2', 'ND2', 'ND2', 'ND2', 'ND2', 'ND2', 'ND2'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['OD1', 'OD1', 'OD1', 'OD1', 'OD1', 'OD1', 'OD1']],
"ASP": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['OD1', 'OD1', 'OD1', 'OD1', 'OD1', 'OD1', 'OD1'], ['OD2', 'OD2', 'OD2', 'OD2', 'OD2', 'OD2', 'OD2']],
"CYS": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG', 'HG', 'HSG', 'HG', 'HG', 'HG', 'HG'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['SG', 'SG', 'SG', 'SG', 'SG', 'SG', 'SG']],
"CYT": [['H', 'H', 'HN', 'HN', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O'], ['SG', 'SG', 'SG', 'SG', 'SG', 'SG', 'SG']],
"GLN": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', 'HB1', '2HB', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', 'HG1', '2HG', 'HG2', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['HE21', '1HE2', 'HN21', 'HE1', 'HE21', 'HE21', 'HE21'],
['HE22', '2HE2', 'HN22', 'HE2', 'HE22', 'HE22', 'HE22'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2'],
['O', 'O', 'O', 'O', 'O', 'O', 'O'], ['OE1', 'OE1', 'OE1', 'OE1', 'OE1', 'OE1', 'OE1']],
"GLY": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA2', '1HA', '2HA', 'HA1', 'HA2', 'HA2', 'HA1'],
['HA3', '2HA', 'HA2', 'HA2', 'HA1', 'HA1', 'HA2'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"GLU": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', 'HG1', 'HG2', '2HG', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O'], ['OE1', 'OE1', 'OE1', 'OE1', 'OE1', 'OE1', 'OE1'],
['OE2', 'OE2', 'OE2', 'OE2', 'OE2', 'OE2', 'OE2']],
"GLH": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', '2HG', 'HG2', 'HG2', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['HE2', 'HE2', 'HE2'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O'], ['OE1', 'OE1', 'OE1', 'OE1', 'OE1', 'OE1', 'OE1'],
['OE2', 'OE2', 'OE2', 'OE2', 'OE2', 'OE2', 'OE2']],
"HIE": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN1'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2'], ['HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'],
['HE2', 'HNE2', 'HE2', 'HE2'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'],
['CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['ND1', 'ND1', 'ND1', 'ND1', 'ND1', 'ND1', 'ND1'], ['NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2'],
['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"HID": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN1'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HND1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2'],
['HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'],
['CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['ND1', 'ND1', 'ND1', 'ND1', 'ND1', 'ND1', 'ND1'], ['NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2'],
['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"HIP": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN1'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HND1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2'],
['HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'], ['HE2', 'HNE2', 'HE2', 'HE2'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'], ['CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['ND1', 'ND1', 'ND1', 'ND1', 'ND1', 'ND1', 'ND1'],
['NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2', 'NE2'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"ILE": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB', 'HB', 'HB', 'HB', 'HB3', 'HB', 'HB'], ['HG12', '1HG1', '2HG1', 'HG11', 'HG12', 'HG12', 'HG12'],
['HG13', '2HG1', 'HG12', 'HG12', 'HG11', 'HG11', 'HG13'],
['HG21', '1HG2', 'HG21', 'HG21', 'HG21', 'HG21', 'HG21'],
['HG22', '2HG2', 'HG22', 'HG22', 'HG22', 'HG22', 'HG22'],
['HG23', '3HG2', 'HG23', 'HG23', 'HG23', 'HG23', 'HG23'],
['HD11', '1HD1', 'HD11', 'HD11', 'HD11', 'HD11', 'HD11'],
['HD12', '2HD1', 'HD12', 'HD12', 'HD12', 'HD12', 'HD12'],
['HD13', '3HD1', 'HD13', 'HD13', 'HD13', 'HD13', 'HD13'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG1', 'CG1', 'CG1', 'CG1', 'CG1', 'CG1', 'CG1'], ['CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2'],
['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"LYN": [['H', 'H', 'HN', 'H2', 'HN', 'H1', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', 'HG1', 'HG2', 'HG2', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['HD2', '1HD', 'HD1', 'HD1', 'HD2', 'HD2', 'HD2'], ['HD3', '2HD', 'HD2', 'HD2', 'HD1', 'HD1', 'HD3'],
['HE2', '1HE', 'HE1', 'HE1', 'HE2', 'HE2', 'HE2'], ['HE3', '2HE', 'HE2', 'HE2', 'HE1', 'HE1', 'HE3'],
['HZ1', '1HZ', 'HNZ1', 'HZ1', 'HZ1', 'HZ1', 'H11'], ['HZ2', '2HZ', 'HNZ2', 'HZ2', 'HZ2', 'HZ2', 'H12'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'], ['CE', 'CE', 'CE', 'CE', 'CE', 'CE', 'CE'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['NZ', 'NZ', 'N1', 'NZ', 'NZ', 'NZ', 'NZ'],
['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"LYS": [['H', 'H', 'HN', 'H2', 'HN', 'H1', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', 'HG1', '2HG', 'HG2', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['HD2', '1HD', 'HD1', 'HD2', '2HD'], ['HD3', '2HD', 'HD2', 'HD2', 'HD1', 'HD1', 'HD3'],
['HE2', '1HE', 'HE1', '2HE', 'HE2'], ['HE3', '2HE', 'HE2', 'HE2', 'HE1', 'HE1', 'HE3'],
['HZ1', '1HZ', 'HNZ1', 'HZ1', 'HZ1', 'HZ1', 'H11'], ['HZ2', '2HZ', 'HNZ2', 'HZ2', 'HZ2', 'HZ2', 'H12'],
['HZ3', '3HZ', 'HNZ3', 'HZ', 'HZ3', 'HZ3', 'HZ3'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'],
['CE', 'CE', 'CE', 'CE', 'CE', 'CE', 'CE'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['NZ', 'NZ', 'N1', 'NZ', 'NZ', 'NZ', 'NZ'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"LEU": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG', 'HG', 'HG', 'HG', 'HG', 'HG', 'HG'], ['HD11', '1HD1', 'HD11', 'HD11', 'HD11', 'HD11', 'HD11'],
['HD12', '2HD1', 'HD12', 'HD12', 'HD12', 'HD12', 'HD12'],
['HD13', '3HD1', 'HD13', 'HD13', 'HD13', 'HD13', 'HD13'],
['HD21', '1HD2', 'HD21', 'HD21', 'HD21', 'HD21', 'HD21'],
['HD22', '2HD2', 'HD22', 'HD22', 'HD22', 'HD22', 'HD22'],
['HD23', '3HD2', 'HD23', 'HD23', 'HD23', 'HD23', 'HD23'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'],
['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"MET": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG2', '1HG', 'HG1', 'HG1', '2HG', 'HG2', 'HG2'], ['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'],
['HE1', '1HE', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'], ['HE2', '2HE', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2'],
['HE3', '3HE', 'HE3', 'HE3', 'HE3', 'HE3', 'HE3'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CE', 'CE', 'CE', 'CE', 'CE', 'CE', 'CE'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['SD', 'SD', 'SD', 'SD', 'SD', 'SD', 'SD']],
"PHE": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2'],
['HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'], ['HE2', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2'],
['HZ', 'HZ', 'HZ', 'HZ', 'HZ', 'HZ', 'HZ'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'],
['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'], ['CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1'],
['CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2'], ['CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"PRO": [['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'], ['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'],
['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'], ['HG2', '1HG', '2HG', 'HG1', 'HG2', 'HG2', 'HG2'],
['HG3', '2HG', 'HG2', 'HG2', 'HG1', 'HG1', 'HG3'], ['HD2', '1HD', '2HD', 'HD1', 'HD2', 'HD2', 'HD2'],
['HD3', '2HD', 'HD1', 'HD2', 'HD1', 'HD1', 'HD3'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD', 'CD', 'CD', 'CD', 'CD', 'CD', 'CD'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"SER": [['H', 'H', 'HN', 'HN', 'H2', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HG', 'HG', 'HOG', 'HG', 'HG', 'HG', 'HG'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['OG', 'OG', 'OG', 'OG', 'OG', 'OG', 'OG']],
"THR": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB', 'HB', 'HB', 'HB', 'HB1', 'HB3', 'HB'], ['HG1', 'HG1', 'HOG1', 'HG1', 'HG1', 'HG1', 'HG1'],
['HG21', '1HG2', 'HG21', 'HG21', 'HG21', 'HG21', 'HG21'],
['HG22', '2HG2', 'HG22', 'HG22', 'HG22', 'HG22', 'HG22'],
['HG23', '3HG2', 'HG23', 'HG23', 'HG23', 'HG23', 'HG23'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['O', 'O', 'O', 'O', 'O', 'O', 'O'], ['OG1', 'OG1', 'OG1', 'OG1', 'OG1', 'OG1', 'OG1']],
"TRP": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HE1', 'HE1', 'HNE1', 'HE1', 'HE1', 'HE1', 'HE1'],
['HE3', 'HE3', 'HE3', 'HE3', 'HE3', 'HE3', 'HE3'], ['HZ2', 'HZ2', 'HZ2', 'HZ2', 'HZ2', 'HZ2', 'HZ2'],
['HZ3', 'HZ3', 'HZ3', 'HZ3', 'HZ3', 'HZ3', 'HZ3'], ['HH2', 'HH2', 'HH2', 'HH2', 'HH2', 'HH2', 'HH2'],
['C', 'C', 'C', 'C', 'C', 'C', 'C'], ['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'],
['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'], ['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'],
['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'], ['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'],
['CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2'], ['CE3', 'CE3', 'CE3', 'CE3', 'CE3', 'CE3', 'CE3'],
['CZ2', 'CZ2', 'CZ2', 'CZ2', 'CZ2', 'CZ2', 'CZ2'], ['CZ3', 'CZ3', 'CZ3', 'CZ3', 'CZ3', 'CZ3', 'CZ3'],
['CH2', 'CH2', 'CH2', 'CH2', 'CH2', 'CH2', 'CH2'], ['N', 'N', 'N', 'N', 'N', 'N', 'N'],
['NE1', 'NE1', 'NE1', 'NE1', 'NE1', 'NE1', 'NE1'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"TYR": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB2', '1HB', '2HB', 'HB1', 'HB2', 'HB2', 'HB2'], ['HB3', '2HB', 'HB2', 'HB2', 'HB1', 'HB1', 'HB3'],
['HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1', 'HD1'], ['HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2', 'HD2'],
['HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1', 'HE1'], ['HE2', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2', 'HE2'],
['HH', 'HH', 'HOH', 'HH', 'HH', 'HH', 'HH'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG', 'CG', 'CG', 'CG', 'CG', 'CG', 'CG'], ['CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1', 'CD1'],
['CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2', 'CD2'], ['CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1', 'CE1'],
['CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2', 'CE2'], ['CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ', 'CZ'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
['OH', 'OH', 'OH', 'OH', 'OH', 'OH', 'OH']],
"VAL": [['H', 'H', 'HN', 'H2', 'HN', 'H', 'HN'], ['HA', 'HA', 'HA', 'HA', 'HA', 'HA', 'HA'],
['HB', 'HB', 'HB', 'HB3', 'HB', 'HB', 'HB'], ['HG11', '1HG1', 'HG11', 'HG11', 'HG11', 'HG11', 'HG11'],
['HG12', '2HG1', 'HG12', 'HG12', 'HG12', 'HG12', 'HG12'],
['HG13', '3HG1', 'HG13', 'HG13', 'HG13', 'HG13', 'HG13'],
['HG21', '1HG2', 'HG21', 'HG21', 'HG21', 'HG21', 'HG21'],
['HG22', '2HG2', 'HG22', 'HG22', 'HG22', 'HG22', 'HG22'],
['HG23', '3HG2', 'HG23', 'HG23', 'HG23', 'HG23', 'HG23'], ['C', 'C', 'C', 'C', 'C', 'C', 'C'],
['CA', 'CA', 'CA', 'CA', 'CA', 'CA', 'CA'], ['CB', 'CB', 'CB', 'CB', 'CB', 'CB', 'CB'],
['CG1', 'CG1', 'CG1', 'CG1', 'CG1', 'CG1', 'CG1'], ['CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2', 'CG2'],
['N', 'N', 'N', 'N', 'N', 'N', 'N'], ['O', 'O', 'O', 'O', 'O', 'O', 'O']],
"HOH": [['1HW', 'H1', 'H1 1'], ['2HW', 'H2', 'H1 2'], ['OW', 'O', 'O1']],
"ACE": [['CA', 'CH3'], ['HH31', 'HA1', 'H', '1H', 'H1'], ['HH32', 'HA2', '2H', 'H2'], ['HH33', 'HA3', '3H', 'H3'],
['C', 'C'], ['O', 'O']],
"NMA": [['HH31', '1HA', 'HA1'], ['HH32', '2HA', 'HA2'], ['HH33', '3HA', 'HA3'], ['CA', 'C'], ['H', 'H'], ['N']],
"PTR": [['H', 'H', 'HN', 'H2', 'HN2', 'H', 'HN'], ['N', 'N', 'N'], ['CA', 'CA', 'CA'], ['HA', 'HA', 'HA'], ['O1P', 'O1P', 'O1P'], ['O2P', 'O2P', 'O2P'],
['O3P', 'O3P', 'O3P'], ['P', 'P', 'P'], ['OH', 'OH', 'OH'], ['CZ', 'CZ', 'CZ'], ['C', 'C', 'C'],
['O', 'O', 'O'], ['CB', 'CB', 'CB'], ['CG', 'CG', 'CG'], ['HB2', 'HB2', 'HB2'], ['HB3', 'HB3', 'HB3'],
['CD1', 'CD1', 'CD1'], ['CD2', 'CD2', 'CD2'], ['HD1', 'HD1', 'HD1'], ['HD2', 'HD2', 'HD2'],
['CE2', 'CE2', 'CE2'], ['CE1', 'CE1', 'CE1'], ['HE2', 'HE2', 'HE2'], ['HE1', 'HE1', 'HE1'],
['H1', 'HN1'], ['H3', 'HN3']],
}
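# Hypothetical usage sketch, not part of the original module. It assumes the
# first name in each synonym list above is the canonical one, which matches the
# layout of the dictionary but is an assumption here, not a documented rule.
def _canonical_atom_name(residue, atom_name):
    """Map any known synonym of an atom name to its first-listed form."""
    for synonyms in protein_atomnames_dictionary.get(residue, []):
        if atom_name in synonyms:
            return synonyms[0]
    return None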
supported_metals = ["MN", "MG", "ZN", "CA", "CU", "FE", "NI", "CO", "PB"]
coordination_geometries = {
'octahedric': [[90, 180], 6],
'tetrahedric': [[109.5], 4]
}
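# Hypothetical usage sketch, not part of the original module: each entry above
# pairs the ideal ligand-metal-ligand angles (degrees) with the expected number
# of coordinating atoms, so a measured metal site could be checked like this.
def _matches_geometry(observed_angles, geometry_name, tolerance=15.0):
    """True if every observed angle is within tolerance of an ideal angle."""
    ideal_angles, _expected_ligands = coordination_geometries[geometry_name]
    return all(
        min(abs(angle - ideal) for ideal in ideal_angles) <= tolerance
        for angle in observed_angles
    )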
default_mutations_maps = {
'ALA-ARG': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-ALA': ['disappear', 1, 0], 'ALA-ARG': ['appear', 0, 1]},
],
'ALA-ASH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['OD1', 'OD2', 'HD2'],
{'ALA-ASH': ['appear', 0, 1], 'ASH-ALA': ['disappear', 1, 0]},
],
'ALA-ASN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['OD1', 'ND2', 'HD21', 'HD22'],
{'ASN-ALA': ['disappear', 1, 0], 'ALA-ASN': ['appear', 0, 1]},
],
'ALA-ASP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['OD1', 'OD2'],
{'ALA-ASP': ['appear', 0, 1], 'ASP-ALA': ['disappear', 1, 0]},
],
'ALA-CYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'SG']],
['HG'],
{'CYS-ALA': ['disappear', 1, 0], 'ALA-CYS': ['appear', 0, 1]},
],
'ALA-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'OE1', 'OE2', 'HE2'],
{'GLH-ALA': ['disappear', 1, 0], 'ALA-GLH': ['appear', 0, 1]},
],
'ALA-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'ALA-GLN': ['appear', 0, 1], 'GLN-ALA': ['disappear', 1, 0]},
],
'ALA-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'OE1', 'OE2'],
{'ALA-GLU': ['appear', 0, 1], 'GLU-ALA': ['disappear', 1, 0]},
],
'ALA-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB1', 'HB2', 'HB3'),
{'GLY-ALA': ['appear', 1, 0], 'ALA-GLY': ['disappear', 0, 1]},
],
'ALA-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['ND1', 'CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1'],
{'HID-ALA': ['disappear', 1, 0], 'ALA-HID': ['appear', 0, 1]},
],
'ALA-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['ND1', 'CD2', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'ALA-HIE': ['appear', 0, 1], 'HIE-ALA': ['disappear', 1, 0]},
],
'ALA-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['ND1', 'CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'HIP-ALA': ['disappear', 1, 0], 'ALA-HIP': ['appear', 0, 1]},
],
'ALA-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB1', 'HB'], ['HB2', 'CG1'], ['HB3', 'CG2']],
['HG12', 'HG13', 'CD1', 'HG21', 'HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'ILE-ALA': ['disappear', 1, 0], 'ALA-ILE': ['appear', 0, 1]},
],
'ALA-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG', 'CD1', 'CD2', 'HD11', 'HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'ALA-LEU': ['appear', 0, 1], 'LEU-ALA': ['disappear', 1, 0]},
],
'ALA-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-ALA': ['disappear', 1, 0], 'ALA-LYN': ['appear', 0, 1]},
],
'ALA-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'ALA-LYS': ['appear', 0, 1], 'LYS-ALA': ['disappear', 1, 0]},
],
'ALA-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['HG2', 'HG3', 'SD', 'CE', 'HE1', 'HE2', 'HE3'],
{'MET-ALA': ['disappear', 1, 0], 'ALA-MET': ['appear', 0, 1]},
],
'ALA-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['CD1', 'CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'ALA-PHE': ['appear', 0, 1], 'PHE-ALA': ['disappear', 1, 0]},
],
'ALA-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG'], ['H', 'CD']],
['HG2', 'HG3', 'HD2', 'HD3'],
{'ALA-PRO': ['appear', 0, 1], 'PRO-ALA': ['disappear', 1, 0]},
],
'ALA-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'OG']],
['HG'],
{'ALA-SER': ['appear', 0, 1], 'SER-ALA': ['disappear', 1, 0]},
],
'ALA-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB1', 'HB'], ['HB2', 'OG1'], ['HB3', 'CG2']],
['HG1', 'HG21', 'HG22', 'HG23'],
{'ALA-THR': ['appear', 0, 1], 'THR-ALA': ['disappear', 1, 0]},
],
'ALA-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['CD1', 'CD2', 'HD1', 'NE1', 'CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'ALA-TRP': ['appear', 0, 1], 'TRP-ALA': ['disappear', 1, 0]},
],
'ALA-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['HB1', 'CG']],
['CD1', 'CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'ALA-TYR': ['appear', 0, 1], 'TYR-ALA': ['disappear', 1, 0]},
],
'ALA-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB1', 'HB'], ['HB2', 'CG1'], ['HB3', 'CG2']],
['HG11', 'HG12', 'HG13', 'HG21', 'HG22', 'HG23'],
{'ALA-VAL': ['appear', 0, 1], 'VAL-ALA': ['disappear', 1, 0]},
],
'ARG-ASH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HG2', 'OD1'], ['HG3', 'OD2']],
['CD', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-ASH': ['disappear', 0, 1], 'ASH-ARG': ['appear', 1, 0]},
],
'ARG-ASN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'OD1'], ['HG3', 'ND2'], ['CD', 'HD21'], ['HD2', 'HD22']],
['HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ASN-ARG': ['appear', 1, 0], 'ARG-ASN': ['disappear', 0, 1]},
],
'ARG-ASP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'OD1'], ['HG3', 'OD2']],
['CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-ASP': ['disappear', 0, 1], 'ASP-ARG': ['appear', 1, 0]},
],
'ARG-CYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'SG'], ['HG2', 'HG']],
['HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'CYS-ARG': ['appear', 1, 0], 'ARG-CYS': ['disappear', 0, 1]},
],
'ARG-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HD2', 'OE1'], ['HD3', 'OE2'], ['NE', 'HE2']],
['HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'GLH-ARG': ['appear', 1, 0], 'ARG-GLH': ['disappear', 0, 1]},
],
'ARG-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HD2', 'OE1'], ['HD3', 'NE2'], ['NE', 'HE21'], ['HE', 'HE22']],
['CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-GLN': ['disappear', 0, 1], 'GLN-ARG': ['appear', 1, 0]},
],
'ARG-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HD2', 'OE1'], ['HD3', 'OE2']],
['NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-GLU': ['disappear', 0, 1], 'GLU-ARG': ['appear', 1, 0]},
],
'ARG-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'HH11', 'HH12', 'NH2', 'HH21',
'HH22'),
{'GLY-ARG': ['appear', 1, 0], 'ARG-GLY': ['disappear', 0, 1]},
],
'ARG-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['NE', 'NE2'], ['HE', 'HE1']],
['CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-HID': ['disappear', 0, 1], 'HID-ARG': ['appear', 1, 0]},
],
'ARG-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'CE1'], ['HD3', 'NE2'], ['NE', 'HE1'], ['HE', 'HE2']],
['CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-HIE': ['disappear', 0, 1], 'HIE-ARG': ['appear', 1, 0]},
],
'ARG-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['NE', 'NE2'], ['HE', 'HE1'],
['CZ', 'HE2']],
['NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'HIP-ARG': ['appear', 1, 0], 'ARG-HIP': ['disappear', 0, 1]},
],
'ARG-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG12'], ['HG3', 'HG13'], ['CD', 'CD1'], ['HD2', 'HG21'],
['HD3', 'HG22'], ['NE', 'HG23'], ['HE', 'HD11'], ['CZ', 'HD12'], ['NH1', 'HD13']],
['NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ILE-ARG': ['appear', 1, 0], 'ARG-ILE': ['disappear', 0, 1]},
],
'ARG-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG'], ['HG3', 'CD1'], ['CD', 'CD2'], ['HD2', 'HD11'], ['HD3', 'HD12'], ['NE', 'HD13'], ['HE', 'HD21'],
['CZ', 'HD22'], ['NH1', 'HD23']],
['NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-LEU': ['disappear', 0, 1], 'LEU-ARG': ['appear', 1, 0]},
],
'ARG-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HD3', 'HD3'], ['HD2', 'HD2'], ['NE', 'CE'], ['HE', 'HE2'], ['CZ', 'HE3'], ['NH1', 'NZ'], ['NH2', 'HZ1'],
['HH11', 'HZ2']],
['HH12', 'HH21', 'HH22'],
{'LYN-ARG': ['appear', 1, 0], 'ARG-LYN': ['disappear', 0, 1]},
],
'ARG-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HD3', 'HD3'], ['HD2', 'HD2'], ['NE', 'CE'], ['HE', 'HE2'], ['CZ', 'HE3'], ['NH1', 'NZ'], ['NH2', 'HZ1'],
['HH11', 'HZ2'], ['HH12', 'HZ3']],
['HH21', 'HH22'],
{'ARG-LYS': ['disappear', 0, 1], 'LYS-ARG': ['appear', 1, 0]},
],
'ARG-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'SD'], ['HD2', 'CE'], ['HD3', 'HE1'], ['NE', 'HE2'], ['HE', 'HE3']],
['CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'MET-ARG': ['appear', 1, 0], 'ARG-MET': ['disappear', 0, 1]},
],
'ARG-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD2', 'HD2'], ['HD3', 'CE1'], ['NE', 'CE2'], ['HE', 'HE1'],
['CZ', 'CZ'], ['NH1', 'HE2'], ['NH2', 'HZ']],
['HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-PHE': ['disappear', 0, 1], 'PHE-ARG': ['appear', 1, 0]},
],
'ARG-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['HD3', 'HD3'],
['HD2', 'HD2']],
['H', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-PRO': ['disappear', 0, 1], 'PRO-ARG': ['appear', 1, 0]},
],
'ARG-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'HD2', 'HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-SER': ['disappear', 0, 1], 'SER-ARG': ['appear', 1, 0]},
],
'ARG-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['HD2', 'HG23']],
['HD3', 'NE', 'HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-THR': ['disappear', 0, 1], 'THR-ARG': ['appear', 1, 0]},
],
'ARG-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD2', 'NE1'], ['HD3', 'CE3'], ['NE', 'CE2'], ['HE', 'HE1'],
['CZ', 'HE3'], ['NH1', 'CZ3'], ['NH2', 'CZ2'], ['HH11', 'HZ3'], ['HH12', 'HZ2'], ['HH21', 'CH2'],
['HH22', 'HH2']],
[],
{'ARG-TRP': ['', 0, 1], 'TRP-ARG': ['', 1, 0]},
],
'ARG-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CZ', 'CZ'], ['HD2', 'HD2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['NE', 'CE2'],
['HE', 'HE1'], ['NH1', 'HE2'], ['NH2', 'OH'], ['HH11', 'HH']],
['HH12', 'HH21', 'HH22'],
{'ARG-TYR': ['disappear', 0, 1], 'TYR-ARG': ['appear', 1, 0]},
],
'ARG-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['HD2', 'HG21'], ['HD3', 'HG22'], ['NE', 'HG23']],
['HE', 'CZ', 'NH1', 'NH2', 'HH11', 'HH12', 'HH21', 'HH22'],
{'ARG-VAL': ['disappear', 0, 1], 'VAL-ARG': ['appear', 1, 0]},
],
'ASH-ASN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'OD1'], ['OD2', 'ND2'], ['HD2', 'HD21']],
['HD22'],
{'ASH-ASN': ['appear', 0, 1], 'ASN-ASH': ['disappear', 1, 0]},
],
'ASH-ASP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'OD1', 'OD2'],
[],
['HD2'],
{'ASP-ASH': ['appear', 1, 0], 'ASH-ASP': ['disappear', 0, 1]},
],
'ASH-CYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'SG'], ['OD1', 'HG']],
['OD2', 'HD2'],
{'ASH-CYS': ['disappear', 0, 1], 'CYS-ASH': ['appear', 1, 0]},
],
'ASH-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3'], ['HD2', 'CD']],
['OE1', 'OE2', 'HE2'],
{'ASH-GLH': ['appear', 0, 1], 'GLH-ASH': ['disappear', 1, 0]},
],
'ASH-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3'], ['HD2', 'CD']],
['OE1', 'NE2', 'HE21', 'HE22'],
{'ASH-GLN': ['appear', 0, 1], 'GLN-ASH': ['disappear', 1, 0]},
],
'ASH-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3'], ['HD2', 'CD']],
['OE1', 'OE2'],
{'GLU-ASH': ['disappear', 1, 0], 'ASH-GLU': ['appear', 0, 1]},
],
'ASH-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'CG', 'OD1', 'OD2', 'HD2'),
{'ASH-GLY': ['disappear', 0, 1], 'GLY-ASH': ['appear', 1, 0]},
],
'ASH-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'ND1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'NE2', 'HE1'],
{'HID-ASH': ['disappear', 1, 0], 'ASH-HID': ['appear', 0, 1]},
],
'ASH-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'ND1'], ['OD2', 'CD2']],
['CE1', 'NE2', 'HE1', 'HE2'],
{'HIE-ASH': ['disappear', 1, 0], 'ASH-HIE': ['appear', 0, 1]},
],
'ASH-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'ND1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'NE2', 'HE1', 'HE2'],
{'ASH-HIP': ['appear', 0, 1], 'HIP-ASH': ['disappear', 1, 0]},
],
'ASH-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['OD1', 'HG12'], ['OD2', 'HG13'], ['HD2', 'CD1']],
['HG21', 'HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'ASH-ILE': ['appear', 0, 1], 'ILE-ASH': ['disappear', 1, 0]},
],
'ASH-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG'], ['OD2', 'CD1'], ['HD2', 'CD2']],
['HD11', 'HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'LEU-ASH': ['disappear', 1, 0], 'ASH-LEU': ['appear', 0, 1]},
],
'ASH-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'ASH-LYN': ['appear', 0, 1], 'LYN-ASH': ['disappear', 1, 0]},
],
'ASH-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-ASH': ['disappear', 1, 0], 'ASH-LYS': ['appear', 0, 1]},
],
'ASH-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3'], ['HD2', 'SD']],
['CE', 'HE1', 'HE2', 'HE3'],
{'ASH-MET': ['appear', 0, 1], 'MET-ASH': ['disappear', 1, 0]},
],
'ASH-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'CD1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'ASH-PHE': ['appear', 0, 1], 'PHE-ASH': ['disappear', 1, 0]},
],
'ASH-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['OD1', 'HG2'], ['OD2', 'HG3'], ['H', 'CD'], ['HD2', 'HD2']],
['HD3'],
{'PRO-ASH': ['disappear', 1, 0], 'ASH-PRO': ['appear', 0, 1]},
],
'ASH-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['OD1', 'HG']],
['OD2', 'HD2'],
{'SER-ASH': ['appear', 1, 0], 'ASH-SER': ['disappear', 0, 1]},
],
'ASH-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['OD1', 'HG1'], ['OD2', 'HG21'], ['HD2', 'HG22']],
['HG23'],
{'THR-ASH': ['disappear', 1, 0], 'ASH-THR': ['appear', 0, 1]},
],
'ASH-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['OD2', 'CD2'], ['HD2', 'HD1']],
['NE1', 'CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-ASH': ['disappear', 1, 0], 'ASH-TRP': ['appear', 0, 1]},
],
'ASH-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['OD1', 'CD1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'ASH-TYR': ['appear', 0, 1], 'TYR-ASH': ['disappear', 1, 0]},
],
'ASH-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['OD1', 'HG11'], ['OD2', 'HG12'], ['HD2', 'HG13']],
['HG21', 'HG22', 'HG23'],
{'ASH-VAL': ['appear', 0, 1], 'VAL-ASH': ['disappear', 1, 0]},
],
'ASN-ASP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'OD1'], ['ND2', 'OD2']],
['HD21', 'HD22'],
{'ASN-ASP': ['disappear', 0, 1], 'ASP-ASN': ['appear', 1, 0]},
],
'ASN-CYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'SG'], ['OD1', 'HG']],
['ND2', 'HD21', 'HD22'],
{'ASN-CYS': ['disappear', 0, 1], 'CYS-ASN': ['appear', 1, 0]},
],
'ASN-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['ND2', 'HG3'], ['HD21', 'CD'], ['HD22', 'OE1']],
['OE2', 'HE2'],
{'ASN-GLH': ['appear', 0, 1], 'GLH-ASN': ['disappear', 1, 0]},
],
'ASN-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['ND2', 'HG3'], ['HD21', 'CD'], ['HD22', 'OE1']],
['NE2', 'HE21', 'HE22'],
{'GLN-ASN': ['disappear', 1, 0], 'ASN-GLN': ['appear', 0, 1]},
],
'ASN-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['ND2', 'HG3'], ['HD21', 'CD'], ['HD22', 'OE1']],
['OE2'],
{'GLU-ASN': ['disappear', 1, 0], 'ASN-GLU': ['appear', 0, 1]},
],
'ASN-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'CG', 'OD1', 'ND2', 'HD21', 'HD22'),
{'ASN-GLY': ['disappear', 0, 1], 'GLY-ASN': ['appear', 1, 0]},
],
'ASN-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'ND1'], ['ND2', 'CD2'], ['HD21', 'HD1'], ['HD22', 'CE1']],
['HD2', 'NE2', 'HE1'],
{'ASN-HID': ['appear', 0, 1], 'HID-ASN': ['disappear', 1, 0]},
],
'ASN-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'ND1'], ['ND2', 'CD2'], ['HD21', 'CE1'], ['HD22', 'HD2']],
['NE2', 'HE1', 'HE2'],
{'ASN-HIE': ['appear', 0, 1], 'HIE-ASN': ['disappear', 1, 0]},
],
'ASN-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'ND1'], ['ND2', 'CD2'], ['HD21', 'HD1'], ['HD22', 'CE1']],
['HD2', 'NE2', 'HE1', 'HE2'],
{'ASN-HIP': ['appear', 0, 1], 'HIP-ASN': ['disappear', 1, 0]},
],
'ASN-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['OD1', 'HG12'], ['ND2', 'HG13'], ['HD21', 'CD1'],
['HD22', 'HG21']],
['HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'ASN-ILE': ['appear', 0, 1], 'ILE-ASN': ['disappear', 1, 0]},
],
'ASN-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD22', 'HD22'], ['HD21', 'HD21'], ['OD1', 'HG'], ['ND2', 'CD1']],
['CD2', 'HD11', 'HD12', 'HD13', 'HD23'],
{'ASN-LEU': ['appear', 0, 1], 'LEU-ASN': ['disappear', 1, 0]},
],
'ASN-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['ND2', 'HG3'], ['HD21', 'CD'], ['HD22', 'HD2']],
['HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-ASN': ['disappear', 1, 0], 'ASN-LYN': ['appear', 0, 1]},
],
'ASN-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['ND2', 'HG3'], ['HD21', 'CD'], ['HD22', 'HD2']],
['HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'ASN-LYS': ['appear', 0, 1], 'LYS-ASN': ['disappear', 1, 0]},
],
'ASN-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['ND2', 'HG3'], ['HD21', 'SD'], ['HD22', 'CE']],
['HE1', 'HE2', 'HE3'],
{'MET-ASN': ['disappear', 1, 0], 'ASN-MET': ['appear', 0, 1]},
],
'ASN-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['ND2', 'CD2'], ['HD21', 'HD1'], ['HD22', 'CE1']],
['HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'PHE-ASN': ['disappear', 1, 0], 'ASN-PHE': ['appear', 0, 1]},
],
'ASN-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['OD1', 'HG2'], ['ND2', 'HG3'], ['H', 'CD'], ['HD21', 'HD2'],
['HD22', 'HD3']],
[],
{'PRO-ASN': ['', 1, 0], 'ASN-PRO': ['', 0, 1]},
],
'ASN-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['OD1', 'HG']],
['ND2', 'HD21', 'HD22'],
{'ASN-SER': ['disappear', 0, 1], 'SER-ASN': ['appear', 1, 0]},
],
'ASN-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['OD1', 'HG1'], ['ND2', 'HG21'], ['HD21', 'HG22'],
['HD22', 'HG23']],
[],
{'THR-ASN': ['', 1, 0], 'ASN-THR': ['', 0, 1]},
],
'ASN-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['ND2', 'CD2'], ['HD21', 'HD1'], ['HD22', 'NE1']],
['CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-ASN': ['disappear', 1, 0], 'ASN-TRP': ['appear', 0, 1]},
],
'ASN-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['ND2', 'CD2'], ['HD21', 'HD1'], ['HD22', 'CE1']],
['HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'ASN-TYR': ['appear', 0, 1], 'TYR-ASN': ['disappear', 1, 0]},
],
'ASN-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['OD1', 'HG11'], ['ND2', 'HG12'], ['HD21', 'HG13'],
['HD22', 'HG21']],
['HG22', 'HG23'],
{'ASN-VAL': ['appear', 0, 1], 'VAL-ASN': ['disappear', 1, 0]},
],
'ASP-CYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'SG'], ['OD1', 'HG']],
['OD2'],
{'ASP-CYS': ['disappear', 0, 1], 'CYS-ASP': ['appear', 1, 0]},
],
'ASP-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'OE1', 'OE2', 'HE2'],
{'ASP-GLH': ['appear', 0, 1], 'GLH-ASP': ['disappear', 1, 0]},
],
'ASP-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'ASP-GLN': ['appear', 0, 1], 'GLN-ASP': ['disappear', 1, 0]},
],
'ASP-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'OE1', 'OE2'],
{'GLU-ASP': ['disappear', 1, 0], 'ASP-GLU': ['appear', 0, 1]},
],
'ASP-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'CG', 'OD1', 'OD2'),
{'ASP-GLY': ['disappear', 0, 1], 'GLY-ASP': ['appear', 1, 0]},
],
'ASP-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'ND1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'HD2', 'NE2', 'HE1'],
{'HID-ASP': ['disappear', 1, 0], 'ASP-HID': ['appear', 0, 1]},
],
'ASP-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'ND1'], ['OD2', 'CD2']],
['CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'HIE-ASP': ['disappear', 1, 0], 'ASP-HIE': ['appear', 0, 1]},
],
'ASP-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'ND1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'ASP-HIP': ['appear', 0, 1], 'HIP-ASP': ['disappear', 1, 0]},
],
'ASP-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['OD1', 'HG12'], ['OD2', 'HG13']],
['CD1', 'HG21', 'HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'ASP-ILE': ['appear', 0, 1], 'ILE-ASP': ['disappear', 1, 0]},
],
'ASP-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG'], ['OD2', 'CD1']],
['CD2', 'HD11', 'HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'LEU-ASP': ['disappear', 1, 0], 'ASP-LEU': ['appear', 0, 1]},
],
'ASP-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'ASP-LYN': ['appear', 0, 1], 'LYN-ASP': ['disappear', 1, 0]},
],
'ASP-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3']],
['CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-ASP': ['disappear', 1, 0], 'ASP-LYS': ['appear', 0, 1]},
],
'ASP-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'HG2'], ['OD2', 'HG3']],
['SD', 'CE', 'HE1', 'HE2', 'HE3'],
{'ASP-MET': ['appear', 0, 1], 'MET-ASP': ['disappear', 1, 0]},
],
'ASP-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'ASP-PHE': ['appear', 0, 1], 'PHE-ASP': ['disappear', 1, 0]},
],
'ASP-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['OD1', 'HG2'], ['OD2', 'HG3'], ['H', 'CD']],
['HD2', 'HD3'],
{'PRO-ASP': ['disappear', 1, 0], 'ASP-PRO': ['appear', 0, 1]},
],
'ASP-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['OD1', 'HG']],
['OD2'],
{'SER-ASP': ['appear', 1, 0], 'ASP-SER': ['disappear', 0, 1]},
],
'ASP-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['OD1', 'HG1'], ['OD2', 'HG21']],
['HG22', 'HG23'],
{'THR-ASP': ['disappear', 1, 0], 'ASP-THR': ['appear', 0, 1]},
],
'ASP-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['OD2', 'CD2']],
['HD1', 'NE1', 'CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-ASP': ['disappear', 1, 0], 'ASP-TRP': ['appear', 0, 1]},
],
'ASP-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['OD1', 'CD1'], ['OD2', 'CD2']],
['HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'ASP-TYR': ['appear', 0, 1], 'TYR-ASP': ['disappear', 1, 0]},
],
'ASP-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['OD1', 'HG11'], ['OD2', 'HG12']],
['HG13', 'HG21', 'HG22', 'HG23'],
{'ASP-VAL': ['appear', 0, 1], 'VAL-ASP': ['disappear', 1, 0]},
],
'CYS-GLH': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2']],
['HG3', 'CD', 'OE1', 'OE2', 'HE2'],
{'CYS-GLH': ['appear', 0, 1], 'GLH-CYS': ['disappear', 1, 0]},
],
'CYS-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2']],
['HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'GLN-CYS': ['disappear', 1, 0], 'CYS-GLN': ['appear', 0, 1]},
],
'CYS-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2']],
['HG3', 'CD', 'OE1', 'OE2'],
{'CYS-GLU': ['appear', 0, 1], 'GLU-CYS': ['disappear', 1, 0]},
],
'CYS-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'SG', 'HG'),
{'CYS-GLY': ['disappear', 0, 1], 'GLY-CYS': ['appear', 1, 0]},
],
'CYS-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'ND1']],
['CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1'],
{'HID-CYS': ['disappear', 1, 0], 'CYS-HID': ['appear', 0, 1]},
],
'CYS-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'ND1']],
['CD2', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'CYS-HIE': ['appear', 0, 1], 'HIE-CYS': ['disappear', 1, 0]},
],
'CYS-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'ND1']],
['CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'CYS-HIP': ['appear', 0, 1], 'HIP-CYS': ['disappear', 1, 0]},
],
'CYS-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['SG', 'CG2'], ['HG', 'HG12']],
['HG13', 'CD1', 'HG21', 'HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'CYS-ILE': ['appear', 0, 1], 'ILE-CYS': ['disappear', 1, 0]},
],
'CYS-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HG', 'HG'], ['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG']],
['CD1', 'CD2', 'HD11', 'HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'CYS-LEU': ['appear', 0, 1], 'LEU-CYS': ['disappear', 1, 0]},
],
'CYS-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2']],
['HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'CYS-LYN': ['appear', 0, 1], 'LYN-CYS': ['disappear', 1, 0]},
],
'CYS-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2']],
['HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-CYS': ['disappear', 1, 0], 'CYS-LYS': ['appear', 0, 1]},
],
'CYS-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2']],
['HG3', 'SD', 'CE', 'HE1', 'HE2', 'HE3'],
{'CYS-MET': ['appear', 0, 1], 'MET-CYS': ['disappear', 1, 0]},
],
'CYS-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'CD1']],
['CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'PHE-CYS': ['disappear', 1, 0], 'CYS-PHE': ['appear', 0, 1]},
],
'CYS-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'HG2'], ['H', 'CD']],
['HG3', 'HD2', 'HD3'],
{'CYS-PRO': ['appear', 0, 1], 'PRO-CYS': ['disappear', 1, 0]},
],
'CYS-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HG', 'HG'], ['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'OG']],
[],
{'CYS-SER': ['', 0, 1], 'SER-CYS': ['', 1, 0]},
],
'CYS-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['SG', 'CG2'], ['HG', 'HG1']],
['HG21', 'HG22', 'HG23'],
{'CYS-THR': ['appear', 0, 1], 'THR-CYS': ['disappear', 1, 0]},
],
'CYS-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'CD1']],
['CD2', 'HD1', 'NE1', 'CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'CYS-TRP': ['appear', 0, 1], 'TRP-CYS': ['disappear', 1, 0]},
],
'CYS-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['SG', 'CG'], ['HG', 'CD1']],
['CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'TYR-CYS': ['disappear', 1, 0], 'CYS-TYR': ['appear', 0, 1]},
],
'CYS-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['SG', 'CG2'], ['HG', 'HG11']],
['HG12', 'HG13', 'HG21', 'HG22', 'HG23'],
{'VAL-CYS': ['disappear', 1, 0], 'CYS-VAL': ['appear', 0, 1]},
],
'GLH-GLN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'OE1'], ['OE2', 'NE2'], ['HE2', 'HE21']],
['HE22'],
{'GLN-GLH': ['disappear', 1, 0], 'GLH-GLN': ['appear', 0, 1]},
],
'GLH-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2'],
[],
['HE2'],
{'GLU-GLH': ['appear', 1, 0], 'GLH-GLU': ['disappear', 0, 1]},
],
'GLH-GLY': [
['N', 'H', 'CA', 'C', 'O'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2', 'HE2'),
{'GLH-GLY': ['disappear', 0, 1], 'GLY-GLH': ['appear', 1, 0]},
],
'GLH-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2'], ['HE2', 'NE2']],
['HE1'],
{'HID-GLH': ['disappear', 1, 0], 'GLH-HID': ['appear', 0, 1]},
],
'GLH-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE2', 'HE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'CE1'], ['OE1', 'HD2'], ['OE2', 'NE2']],
['HE1'],
{'HIE-GLH': ['disappear', 1, 0], 'GLH-HIE': ['appear', 0, 1]},
],
'GLH-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE2', 'HE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['NE2', 'HE1'],
{'GLH-HIP': ['appear', 0, 1], 'HIP-GLH': ['disappear', 1, 0]},
],
'GLH-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG12'], ['HG3', 'HG13'], ['CD', 'CD1'], ['OE1', 'HG21'],
['OE2', 'HG22'], ['HE2', 'HG23']],
['HD11', 'HD12', 'HD13'],
{'ILE-GLH': ['disappear', 1, 0], 'GLH-ILE': ['appear', 0, 1]},
],
'GLH-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG'], ['HG3', 'CD1'], ['CD', 'CD2'], ['OE1', 'HD11'], ['OE2', 'HD12'], ['HE2', 'HD13']],
['HD21', 'HD22', 'HD23'],
{'LEU-GLH': ['disappear', 1, 0], 'GLH-LEU': ['appear', 0, 1]},
],
'GLH-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HE2', 'HE2'], ['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'GLH-LYN': ['appear', 0, 1], 'LYN-GLH': ['disappear', 1, 0]},
],
'GLH-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['HE2', 'HE2'], ['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-GLH': ['disappear', 1, 0], 'GLH-LYS': ['appear', 0, 1]},
],
'GLH-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['HE2', 'HE2'], ['CD', 'SD'], ['OE1', 'CE'], ['OE2', 'HE1']],
['HE3'],
{'GLH-MET': ['appear', 0, 1], 'MET-GLH': ['disappear', 1, 0]},
],
'GLH-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['CE2', 'HE1', 'CZ', 'HZ'],
{'PHE-GLH': ['disappear', 1, 0], 'GLH-PHE': ['appear', 0, 1]},
],
'GLH-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['OE1', 'HD2'],
['OE2', 'HD3']],
['H', 'HE2'],
{'GLH-PRO': ['disappear', 0, 1], 'PRO-GLH': ['appear', 1, 0]},
],
'GLH-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'OE1', 'OE2', 'HE2'],
{'SER-GLH': ['appear', 1, 0], 'GLH-SER': ['disappear', 0, 1]},
],
'GLH-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['OE1', 'HG23']],
['OE2', 'HE2'],
{'THR-GLH': ['appear', 1, 0], 'GLH-THR': ['disappear', 0, 1]},
],
'GLH-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'NE1'], ['OE2', 'CE3'], ['HE2', 'CE2']],
['HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'GLH-TRP': ['appear', 0, 1], 'TRP-GLH': ['disappear', 1, 0]},
],
'GLH-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['CE2', 'HE1', 'CZ', 'OH', 'HH'],
{'TYR-GLH': ['disappear', 1, 0], 'GLH-TYR': ['appear', 0, 1]},
],
'GLH-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['OE1', 'HG21'], ['OE2', 'HG22'], ['HE2', 'HG23']],
[],
{'VAL-GLH': ['', 1, 0], 'GLH-VAL': ['', 0, 1]},
],
'GLN-GLU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'OE1'], ['NE2', 'OE2']],
['HE21', 'HE22'],
{'GLU-GLN': ['appear', 1, 0], 'GLN-GLU': ['disappear', 0, 1]},
],
'GLN-GLY': [
['N', 'H', 'CA', 'C', 'O'],
[['HA', 'HA2'], ['CB', 'HA3']],
['HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'GLY-GLN': ['appear', 1, 0], 'GLN-GLY': ['disappear', 0, 1]},
],
'GLN-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['NE2', 'NE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['HE21', 'HD2'],
['HE22', 'HE1']],
[],
{'GLN-HID': ['', 0, 1], 'HID-GLN': ['', 1, 0]},
],
'GLN-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['NE2', 'NE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'CE1'], ['OE1', 'HD2'], ['HE21', 'HE1'],
['HE22', 'HE2']],
[],
{'GLN-HIE': ['', 0, 1], 'HIE-GLN': ['', 1, 0]},
],
'GLN-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['NE2', 'NE2'], ['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['HE21', 'HD2'],
['HE22', 'HE1']],
['HE2'],
{'HIP-GLN': ['disappear', 1, 0], 'GLN-HIP': ['appear', 0, 1]},
],
'GLN-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG12'], ['HG3', 'HG13'], ['CD', 'CD1'], ['OE1', 'HG21'],
['NE2', 'HG22'], ['HE21', 'HG23'], ['HE22', 'HD11']],
['HD12', 'HD13'],
{'GLN-ILE': ['appear', 0, 1], 'ILE-GLN': ['disappear', 1, 0]},
],
'GLN-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG'], ['HG3', 'CD1'], ['CD', 'CD2'], ['OE1', 'HD11'], ['NE2', 'HD12'], ['HE21', 'HD13'],
['HE22', 'HD21']],
['HD22', 'HD23'],
{'GLN-LEU': ['appear', 0, 1], 'LEU-GLN': ['disappear', 1, 0]},
],
'GLN-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['NE2', 'HD3'], ['HE21', 'CE'], ['HE22', 'HE2']],
['HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-GLN': ['disappear', 1, 0], 'GLN-LYN': ['appear', 0, 1]},
],
'GLN-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['NE2', 'HD3'], ['HE21', 'CE'], ['HE22', 'HE2']],
['HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'GLN-LYS': ['appear', 0, 1], 'LYS-GLN': ['disappear', 1, 0]},
],
'GLN-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'SD'], ['OE1', 'CE'], ['NE2', 'HE1'], ['HE21', 'HE2'], ['HE22', 'HE3']],
[],
{'MET-GLN': ['', 1, 0], 'GLN-MET': ['', 0, 1]},
],
'GLN-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['NE2', 'HD2'], ['HE21', 'CE2'],
['HE22', 'HE1']],
['CZ', 'HE2', 'HZ'],
{'PHE-GLN': ['disappear', 1, 0], 'GLN-PHE': ['appear', 0, 1]},
],
'GLN-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['OE1', 'HD2'],
['NE2', 'HD3']],
['H', 'HE21', 'HE22'],
{'GLN-PRO': ['disappear', 0, 1], 'PRO-GLN': ['appear', 1, 0]},
],
'GLN-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'OE1', 'NE2', 'HE21', 'HE22'],
{'GLN-SER': ['disappear', 0, 1], 'SER-GLN': ['appear', 1, 0]},
],
'GLN-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['OE1', 'HG23']],
['NE2', 'HE21', 'HE22'],
{'THR-GLN': ['appear', 1, 0], 'GLN-THR': ['disappear', 0, 1]},
],
'GLN-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'NE1'], ['NE2', 'CE3'], ['HE21', 'CE2'],
['HE22', 'HE1']],
['HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-GLN': ['disappear', 1, 0], 'GLN-TRP': ['appear', 0, 1]},
],
'GLN-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['NE2', 'HD2'], ['HE21', 'CE2'],
['HE22', 'HE1']],
['CZ', 'HE2', 'OH', 'HH'],
{'GLN-TYR': ['appear', 0, 1], 'TYR-GLN': ['disappear', 1, 0]},
],
'GLN-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['OE1', 'HG21'], ['NE2', 'HG22'], ['HE21', 'HG23']],
['HE22'],
{'GLN-VAL': ['disappear', 0, 1], 'VAL-GLN': ['appear', 1, 0]},
],
'GLU-GLY': [
['N', 'H', 'CA', 'O', 'C'],
[['HA', 'HA2'], ['CB', 'HA3']],
('HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'OE1', 'OE2'),
{'GLU-GLY': ['disappear', 0, 1], 'GLY-GLU': ['appear', 1, 0]},
],
'GLU-HID': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['NE2', 'HE1'],
{'GLU-HID': ['appear', 0, 1], 'HID-GLU': ['disappear', 1, 0]},
],
'GLU-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'CE1'], ['OE1', 'HD2'], ['OE2', 'NE2']],
['HE1', 'HE2'],
{'GLU-HIE': ['appear', 0, 1], 'HIE-GLU': ['disappear', 1, 0]},
],
'GLU-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'ND1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['NE2', 'HE1', 'HE2'],
{'GLU-HIP': ['appear', 0, 1], 'HIP-GLU': ['disappear', 1, 0]},
],
'GLU-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG12'], ['HG3', 'HG13'], ['CD', 'CD1'], ['OE1', 'HG21'],
['OE2', 'HG22']],
['HG23', 'HD11', 'HD12', 'HD13'],
{'GLU-ILE': ['appear', 0, 1], 'ILE-GLU': ['disappear', 1, 0]},
],
'GLU-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG'], ['HG3', 'CD1'], ['CD', 'CD2'], ['OE1', 'HD11'], ['OE2', 'HD12']],
['HD13', 'HD21', 'HD22', 'HD23'],
{'GLU-LEU': ['appear', 0, 1], 'LEU-GLU': ['disappear', 1, 0]},
],
'GLU-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-GLU': ['disappear', 1, 0], 'GLU-LYN': ['appear', 0, 1]},
],
'GLU-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD'],
[['OE1', 'HD2'], ['OE2', 'HD3']],
['CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'GLU-LYS': ['appear', 0, 1], 'LYS-GLU': ['disappear', 1, 0]},
],
'GLU-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'SD'], ['OE1', 'CE'], ['OE2', 'HE1']],
['HE2', 'HE3'],
{'MET-GLU': ['disappear', 1, 0], 'GLU-MET': ['appear', 0, 1]},
],
'GLU-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'PHE-GLU': ['disappear', 1, 0], 'GLU-PHE': ['appear', 0, 1]},
],
'GLU-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['OE1', 'HD2'],
['OE2', 'HD3']],
['H'],
{'PRO-GLU': ['appear', 1, 0], 'GLU-PRO': ['disappear', 0, 1]},
],
'GLU-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'OE1', 'OE2'],
{'GLU-SER': ['disappear', 0, 1], 'SER-GLU': ['appear', 1, 0]},
],
'GLU-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['OE1', 'HG23']],
['OE2'],
{'THR-GLU': ['appear', 1, 0], 'GLU-THR': ['disappear', 0, 1]},
],
'GLU-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'NE1'], ['OE2', 'CE3']],
['CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-GLU': ['disappear', 1, 0], 'GLU-TRP': ['appear', 0, 1]},
],
'GLU-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['OE1', 'CE1'], ['OE2', 'HD2']],
['CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'GLU-TYR': ['appear', 0, 1], 'TYR-GLU': ['disappear', 1, 0]},
],
'GLU-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['OE1', 'HG21'], ['OE2', 'HG22']],
['HG23'],
{'GLU-VAL': ['appear', 0, 1], 'VAL-GLU': ['disappear', 1, 0]},
],
'GLY-HID': [
['N', 'H', 'CA', 'C', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CD2', 'HD2', 'CE1', 'HE1', 'NE2'),
{'GLY-HID': ['appear', 0, 1], 'HID-GLY': ['disappear', 1, 0]},
],
'GLY-HIE': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'ND1', 'CD2', 'HD2', 'CE1', 'HE1', 'NE2', 'HE2'),
{'HIE-GLY': ['disappear', 1, 0], 'GLY-HIE': ['appear', 0, 1]},
],
'GLY-HIP': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'ND1', 'HD1', 'CD2', 'HD2', 'CE1', 'HE1', 'NE2', 'HE2'),
{'GLY-HIP': ['appear', 0, 1], 'HIP-GLY': ['disappear', 1, 0]},
],
'GLY-ILE': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB', 'CG1', 'HG12', 'HG13', 'CG2', 'HG21', 'HG22', 'HG23', 'CD1', 'HD11', 'HD12', 'HD13'),
{'ILE-GLY': ['disappear', 1, 0], 'GLY-ILE': ['appear', 0, 1]},
],
'GLY-LEU': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'HG', 'CD1', 'HD11', 'HD12', 'HD13', 'CD2', 'HD21', 'HD22', 'HD23'),
{'LEU-GLY': ['disappear', 1, 0], 'GLY-LEU': ['appear', 0, 1]},
],
'GLY-LYN': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'),
{'GLY-LYN': ['appear', 0, 1], 'LYN-GLY': ['disappear', 1, 0]},
],
'GLY-LYS': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'),
{'LYS-GLY': ['disappear', 1, 0], 'GLY-LYS': ['appear', 0, 1]},
],
'GLY-MET': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'HG2', 'HG3', 'SD', 'CE', 'HE1', 'HE2', 'HE3'),
{'GLY-MET': ['appear', 0, 1], 'MET-GLY': ['disappear', 1, 0]},
],
'GLY-PHE': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CD2', 'HD2', 'CE1', 'HE1', 'CE2', 'HE2', 'CZ', 'HZ'),
{'GLY-PHE': ['appear', 0, 1], 'PHE-GLY': ['disappear', 1, 0]},
],
'GLY-PRO': [
['C', 'N', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB'], ['H', 'CD']],
['HB2', 'HB3', 'CG', 'HG2', 'HG3', 'HD2', 'HD3'],
{'GLY-PRO': ['appear', 0, 1], 'PRO-GLY': ['disappear', 1, 0]}
],
'GLY-SER': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'OG', 'HG'),
{'SER-GLY': ['disappear', 1, 0], 'GLY-SER': ['appear', 0, 1]},
],
'GLY-THR': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB', 'OG1', 'HG1', 'CG2', 'HG21', 'HG22', 'HG23'),
{'GLY-THR': ['appear', 0, 1], 'THR-GLY': ['disappear', 1, 0]},
],
'GLY-TRP': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CD2', 'NE1', 'HE1', 'CE3', 'HE3', 'CE2', 'CZ3', 'HZ3', 'CZ2', 'HZ2', 'CH2',
'HH2'),
{'GLY-TRP': ['appear', 0, 1], 'TRP-GLY': ['disappear', 1, 0]},
],
'GLY-TYR': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CD2', 'HD2', 'CE1', 'HE1', 'CE2', 'HE2', 'CZ', 'OH', 'HH'),
{'TYR-GLY': ['disappear', 1, 0], 'GLY-TYR': ['appear', 0, 1]},
],
'GLY-VAL': [
['C', 'N', 'H', 'CA', 'O'],
[['HA2', 'HA'], ['HA3', 'CB']],
('HB', 'CG1', 'HG11', 'HG12', 'HG13', 'CG2', 'HG21', 'HG22', 'HG23'),
{'VAL-GLY': ['disappear', 1, 0], 'GLY-VAL': ['appear', 0, 1]},
],
'HID-HIE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'CD2', 'HD2', 'CE1', 'HE1', 'NE2'],
[['HD1', 'HE2']],
[],
{'HID-HIE': ['', 0, 1], 'HIE-HID': ['', 1, 0]},
],
'HID-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'ND1', 'CD2', 'HD2', 'CE1', 'HE1', 'NE2', 'HD1'],
[],
['HE2'],
        {'HID-HIP': ['appear', 0, 1], 'HIP-HID': ['disappear', 1, 0]},
],
'HID-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['ND1', 'HG12'], ['CD2', 'HG13'], ['HD1', 'CD1'],
['CE1', 'HG21'], ['HD2', 'HG22'], ['NE2', 'HG23'], ['HE1', 'HD11']],
['HD12', 'HD13'],
{'ILE-HID': ['disappear', 1, 0], 'HID-ILE': ['appear', 0, 1]},
],
'HID-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD2', 'CD2'], ['ND1', 'HG'], ['HD1', 'CD1'], ['CE1', 'HD11'], ['HD2', 'HD12'], ['NE2', 'HD13'],
['HE1', 'HD21']],
['HD22', 'HD23'],
{'HID-LEU': ['appear', 0, 1], 'LEU-HID': ['disappear', 1, 0]},
],
'HID-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['HD1', 'CD'], ['CE1', 'HD3'], ['NE2', 'CE'], ['HE1', 'HE2']],
['HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-HID': ['disappear', 1, 0], 'HID-LYN': ['appear', 0, 1]},
],
'HID-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['HD1', 'CD'], ['CE1', 'HD3'], ['NE2', 'CE'], ['HE1', 'HE2']],
['HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'HID-LYS': ['appear', 0, 1], 'LYS-HID': ['disappear', 1, 0]},
],
'HID-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE1', 'HE1'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['HD1', 'SD'], ['CE1', 'CE'], ['HD2', 'HE2'], ['NE2', 'HE3']],
[],
{'HID-MET': ['', 0, 1], 'MET-HID': ['', 1, 0]},
],
'HID-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['HD2', 'HD2'], ['CE1', 'CE1'], ['HE1', 'HE1'],
['NE2', 'CE2']],
['HE2', 'CZ', 'HZ'],
{'HID-PHE': ['appear', 0, 1], 'PHE-HID': ['disappear', 1, 0]},
],
'HID-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'CD'], ['HD2', 'HD2'],
['HE1', 'HD3']],
['H', 'HD1', 'NE2'],
{'HID-PRO': ['disappear', 0, 1], 'PRO-HID': ['appear', 1, 0]},
],
'HID-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['ND1', 'HG']],
['CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1'],
{'HID-SER': ['disappear', 0, 1], 'SER-HID': ['appear', 1, 0]},
],
'HID-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['ND1', 'HG1'], ['CD2', 'HG21'], ['HD1', 'HG22'],
['CE1', 'HG23']],
['HD2', 'NE2', 'HE1'],
{'HID-THR': ['disappear', 0, 1], 'THR-HID': ['appear', 1, 0]},
],
'HID-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['CE1', 'NE1'], ['NE2', 'CE3'], ['HD2', 'CE2'],
['HE1', 'HE1']],
['CZ3', 'HZ3', 'CZ2', 'HZ2', 'CH2', 'HH2', 'HE3'],
{'HID-TRP': ['appear', 0, 1], 'TRP-HID': ['disappear', 1, 0]},
],
'HID-TYR': [
['N', 'CA', 'HA', 'C', 'H', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['HD2', 'HD2'], ['CE1', 'CE1'], ['HE1', 'HE1'],
['NE2', 'CE2']],
['HE2', 'CZ', 'OH', 'HH'],
        {'HID-TYR': ['appear', 0, 1], 'TYR-HID': ['disappear', 1, 0]},
],
'HID-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['ND1', 'HG11'], ['CD2', 'HG12'], ['HD1', 'HG13'],
['CE1', 'HG21'], ['HD2', 'HG22'], ['NE2', 'HG23']],
['HE1'],
{'HID-VAL': ['disappear', 0, 1], 'VAL-HID': ['appear', 1, 0]},
],
'HIE-HIP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'HB2', 'HB3', 'CG', 'CB', 'ND1', 'CD2', 'HD2', 'CE1', 'HE1', 'NE2', 'HE2'],
[],
['HD1'],
        {'HIE-HIP': ['appear', 0, 1], 'HIP-HIE': ['disappear', 1, 0]},
],
'HIE-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['ND1', 'HG12'], ['CD2', 'HG13'], ['CE1', 'CD1'],
['HD2', 'HG21'], ['NE2', 'HG22'], ['HE1', 'HG23'], ['HE2', 'HD11']],
['HD12', 'HD13'],
{'HIE-ILE': ['appear', 0, 1], 'ILE-HIE': ['disappear', 1, 0]},
],
'HIE-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD2', 'CD2'], ['ND1', 'HG'], ['CE1', 'CD1'], ['HD2', 'HD11'], ['NE2', 'HD12'], ['HE1', 'HD13'],
['HE2', 'HD21']],
['HD22', 'HD23'],
{'HIE-LEU': ['appear', 0, 1], 'LEU-HIE': ['disappear', 1, 0]},
],
'HIE-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'CD'], ['NE2', 'HD3'], ['HE1', 'CE']],
['HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-HIE': ['disappear', 1, 0], 'HIE-LYN': ['appear', 0, 1]},
],
'HIE-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'CD'], ['NE2', 'HD3'], ['HE1', 'CE']],
['HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'HIE-LYS': ['appear', 0, 1], 'LYS-HIE': ['disappear', 1, 0]},
],
'HIE-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE1', 'HE1'], ['HE2', 'HE2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'SD'], ['HD2', 'CE'], ['NE2', 'HE3']],
[],
{'MET-HIE': ['', 1, 0], 'HIE-MET': ['', 0, 1]},
],
'HIE-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HE2', 'HE2'], ['CD2', 'CD2'], ['HD2', 'HD2'], ['CE1', 'CE1'], ['HE1', 'HE1'],
['NE2', 'CE2']],
['HD1', 'CZ', 'HZ'],
{'HIE-PHE': ['appear', 0, 1], 'PHE-HIE': ['disappear', 1, 0]},
],
'HIE-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'CD'], ['HD2', 'HD2'],
['HE1', 'HD3']],
['H', 'NE2', 'HE2'],
{'HIE-PRO': ['disappear', 0, 1], 'PRO-HIE': ['appear', 1, 0]},
],
'HIE-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['ND1', 'HG']],
['CD2', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'HIE-SER': ['disappear', 0, 1], 'SER-HIE': ['appear', 1, 0]},
],
'HIE-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['ND1', 'HG1'], ['CD2', 'HG21'], ['CE1', 'HG22'],
['HD2', 'HG23']],
['NE2', 'HE1', 'HE2'],
{'THR-HIE': ['appear', 1, 0], 'HIE-THR': ['disappear', 0, 1]},
],
'HIE-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['CD2', 'CD2'], ['CE1', 'NE1'], ['NE2', 'CE3'], ['HE2', 'HE3'], ['HD2', 'CE2'],
['HE1', 'HE1']],
['CZ3', 'HZ3', 'HD1', 'CZ2', 'HZ2', 'CH2', 'HH2'],
{'HIE-TRP': ['appear', 0, 1], 'TRP-HIE': ['disappear', 1, 0]},
],
'HIE-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HE2', 'HE2'], ['CD2', 'CD2'], ['HD2', 'HD2'], ['CE1', 'CE1'], ['HE1', 'HE1'],
['NE2', 'CE2']],
['HD1', 'CZ', 'OH', 'HH'],
{'HIE-TYR': ['appear', 0, 1], 'TYR-HIE': ['disappear', 1, 0]},
],
'HIE-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['ND1', 'HG11'], ['CD2', 'HG12'], ['CE1', 'HG13'],
['HD2', 'HG21'], ['NE2', 'HG22'], ['HE1', 'HG23']],
['HE2'],
{'VAL-HIE': ['appear', 1, 0], 'HIE-VAL': ['disappear', 0, 1]},
],
'HIP-ILE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['ND1', 'HG12'], ['CD2', 'HG13'], ['HD1', 'CD1'],
['CE1', 'HG21'], ['HD2', 'HG22'], ['NE2', 'HG23'], ['HE1', 'HD11'], ['HE2', 'HD12']],
['HD13'],
{'ILE-HIP': ['disappear', 1, 0], 'HIP-ILE': ['appear', 0, 1]},
],
'HIP-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD2', 'CD2'], ['ND1', 'HG'], ['HD1', 'CD1'], ['CE1', 'HD11'], ['HD2', 'HD12'], ['NE2', 'HD13'],
['HE1', 'HD21'], ['HE2', 'HD22']],
['HD23'],
{'HIP-LEU': ['appear', 0, 1], 'LEU-HIP': ['disappear', 1, 0]},
],
'HIP-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['HD1', 'CD'], ['CE1', 'HD3'], ['NE2', 'CE'],
['HE1', 'HE3']],
['NZ', 'HZ1', 'HZ2'],
{'HIP-LYN': ['appear', 0, 1], 'LYN-HIP': ['disappear', 1, 0]},
],
'HIP-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['HD1', 'CD'], ['CE1', 'HD3'], ['NE2', 'CE'],
['HE1', 'HE3']],
['NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-HIP': ['disappear', 1, 0], 'HIP-LYS': ['appear', 0, 1]},
],
'HIP-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HE1', 'HE1'], ['HE2', 'HE2'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['HD1', 'SD'], ['CE1', 'CE'], ['HD2', 'HE3']],
['NE2'],
{'HIP-MET': ['disappear', 0, 1], 'MET-HIP': ['appear', 1, 0]},
],
'HIP-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['HD2', 'HD2'], ['CE1', 'CE1'], ['HE1', 'HE1'], ['HE2', 'HE2'],
['NE2', 'CE2']],
['CZ', 'HZ'],
{'HIP-PHE': ['appear', 0, 1], 'PHE-HIP': ['disappear', 1, 0]},
],
'HIP-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['ND1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'CD'], ['HD2', 'HD2'],
['HE1', 'HD3']],
['H', 'NE2', 'HD1', 'HE2'],
{'HIP-PRO': ['disappear', 0, 1], 'PRO-HIP': ['appear', 1, 0]},
],
'HIP-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['ND1', 'HG']],
['CD2', 'HD1', 'CE1', 'HD2', 'NE2', 'HE1', 'HE2'],
{'HIP-SER': ['disappear', 0, 1], 'SER-HIP': ['appear', 1, 0]},
],
'HIP-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['ND1', 'HG1'], ['CD2', 'HG21'], ['HD1', 'HG22'],
['CE1', 'HG23']],
['HD2', 'NE2', 'HE1', 'HE2'],
{'HIP-THR': ['disappear', 0, 1], 'THR-HIP': ['appear', 1, 0]},
],
'HIP-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['CE1', 'NE1'], ['NE2', 'CE3'], ['HE2', 'HE3'], ['HD2', 'CE2'],
['HE1', 'HE1']],
['CZ3', 'HZ3', 'CZ2', 'HZ2', 'CH2', 'HH2'],
{'HIP-TRP': ['appear', 0, 1], 'TRP-HIP': ['disappear', 1, 0]},
],
'HIP-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['ND1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['HD2', 'HD2'], ['CE1', 'CE1'], ['HE1', 'HE1'], ['NE2', 'CE2'],
['HE2', 'HE2']],
['CZ', 'OH', 'HH'],
{'HIP-TYR': ['appear', 0, 1], 'TYR-HIP': ['disappear', 1, 0]},
],
'HIP-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['ND1', 'HG11'], ['CD2', 'HG12'], ['HD1', 'HG13'],
['CE1', 'HG21'], ['HD2', 'HG22'], ['NE2', 'HG23']],
['HE1', 'HE2'],
{'VAL-HIP': ['appear', 1, 0], 'HIP-VAL': ['disappear', 0, 1]},
],
'ILE-LEU': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['CD1', 'CD1'], ['HG13', 'CD2'], ['HG12', 'HG'],
['HG21', 'HD21'], ['HG22', 'HD22'], ['HD11', 'HD11'], ['HD12', 'HD12'], ['HD13', 'HD13'], ['HG23', 'HD23']],
[],
{'LEU-ILE': ['', 1, 0], 'ILE-LEU': ['', 0, 1]},
],
'ILE-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['HG12', 'HG2'], ['HG13', 'HG3'], ['CD1', 'CD'], ['HG21', 'HD2'],
['HG22', 'HD3'], ['HG23', 'CE'], ['HD11', 'HE2'], ['HD12', 'HE3'], ['HD13', 'NZ']],
['HZ1', 'HZ2'],
{'ILE-LYN': ['appear', 0, 1], 'LYN-ILE': ['disappear', 1, 0]},
],
'ILE-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['HG12', 'HG2'], ['HG13', 'HG3'], ['CD1', 'CD'], ['HG21', 'HD2'],
['HG22', 'HD3'], ['HG23', 'CE'], ['HD11', 'HE2'], ['HD12', 'HE3'], ['HD13', 'NZ']],
['HZ1', 'HZ2', 'HZ3'],
{'LYS-ILE': ['disappear', 1, 0], 'ILE-LYS': ['appear', 0, 1]},
],
'ILE-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['HG12', 'HG2'], ['HG13', 'HG3'], ['CD1', 'SD'], ['HG21', 'CE'],
['HG22', 'HE1'], ['HG23', 'HE2'], ['HD11', 'HE3']],
['HD12', 'HD13'],
{'ILE-MET': ['disappear', 0, 1], 'MET-ILE': ['appear', 1, 0]},
],
'ILE-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['CD1', 'CD1'], ['HG12', 'CD2'], ['HG23', 'CE2'],
['HG13', 'HD1'], ['HG21', 'CE1'], ['HG22', 'HD2'], ['HD11', 'HE1'], ['HD12', 'CZ'], ['HD13', 'HE2']],
['HZ'],
{'PHE-ILE': ['disappear', 1, 0], 'ILE-PHE': ['appear', 0, 1]},
],
'ILE-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['HG12', 'HG2'], ['HG13', 'HG3'], ['CD1', 'CD'], ['HG21', 'HD2'],
['HG22', 'HD3']],
['H', 'HG23', 'HD11', 'HD12', 'HD13'],
{'ILE-PRO': ['disappear', 0, 1], 'PRO-ILE': ['appear', 1, 0]},
],
'ILE-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'OG'], ['HG12', 'HG']],
['HG13', 'CD1', 'HG21', 'HG22', 'HG23', 'HD11', 'HD12', 'HD13'],
{'SER-ILE': ['appear', 1, 0], 'ILE-SER': ['disappear', 0, 1]},
],
'ILE-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HG21', 'HG21'], ['HB', 'HB'], ['HG23', 'HG23'], ['CG2', 'CG2'], ['HG22', 'HG22'], ['CG1', 'OG1'],
['HG12', 'HG1']],
['HG13', 'CD1', 'HD11', 'HD12', 'HD13'],
{'THR-ILE': ['appear', 1, 0], 'ILE-THR': ['disappear', 0, 1]},
],
'ILE-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['CD1', 'CD1'], ['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['HG12', 'CD2'], ['HG13', 'HD1'],
['HG21', 'NE1'], ['HG22', 'CE3'], ['HG23', 'CE2'], ['HD11', 'HE1'], ['HD12', 'HE3'], ['HD13', 'CZ3']],
['CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-ILE': ['disappear', 1, 0], 'ILE-TRP': ['appear', 0, 1]},
],
'ILE-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['CG1', 'HB3'], ['CG2', 'CG'], ['CD1', 'CD1'], ['HG12', 'CD2'], ['HG13', 'HD1'],
['HG21', 'CE1'], ['HG22', 'HD2'], ['HG23', 'CE2'], ['HD11', 'HE1'], ['HD12', 'CZ'], ['HD13', 'HE2']],
['OH', 'HH'],
{'TYR-ILE': ['disappear', 1, 0], 'ILE-TYR': ['appear', 0, 1]},
],
'ILE-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['CG2', 'CG2'], ['HB', 'HB'], ['CG1', 'CG1'], ['HG12', 'HG12'], ['HG13', 'HG13'], ['HG21', 'HG21'],
['HG23', 'HG23'], ['HG22', 'HG22'], ['CD1', 'HG11']],
['HD11', 'HD12', 'HD13'],
{'VAL-ILE': ['appear', 1, 0], 'ILE-VAL': ['disappear', 0, 1]},
],
'LEU-LYN': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG', 'HG2'], ['CD1', 'HG3'], ['CD2', 'CD'], ['HD11', 'HD2'], ['HD12', 'HD3'], ['HD13', 'CE'],
['HD21', 'HE2'], ['HD22', 'HE3'], ['HD23', 'NZ']],
['HZ1', 'HZ2'],
{'LYN-LEU': ['disappear', 1, 0], 'LEU-LYN': ['appear', 0, 1]},
],
'LEU-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG', 'HG2'], ['CD1', 'HG3'], ['CD2', 'CD'], ['HD11', 'HD2'], ['HD12', 'HD3'], ['HD13', 'CE'],
['HD21', 'HE2'], ['HD22', 'HE3'], ['HD23', 'NZ']],
['HZ1', 'HZ2', 'HZ3'],
{'LEU-LYS': ['appear', 0, 1], 'LYS-LEU': ['disappear', 1, 0]},
],
'LEU-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG', 'HG2'], ['CD1', 'HG3'], ['CD2', 'SD'], ['HD11', 'CE'], ['HD12', 'HE1'], ['HD13', 'HE2'],
['HD21', 'HE3']],
['HD22', 'HD23'],
{'MET-LEU': ['appear', 1, 0], 'LEU-MET': ['disappear', 0, 1]},
],
'LEU-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD1', 'CD1'], ['CD2', 'CD2'], ['HG', 'HD1'], ['HD11', 'CE1'], ['HD12', 'HD2'], ['HD13', 'CE2'],
['HD21', 'HE1'], ['HD22', 'CZ'], ['HD23', 'HE2']],
['HZ'],
{'PHE-LEU': ['disappear', 1, 0], 'LEU-PHE': ['appear', 0, 1]},
],
'LEU-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG', 'HG2'], ['CD1', 'HG3'], ['HD11', 'HD2'], ['HD12', 'HD3'],
['HD13', 'CD']],
['H', 'CD2', 'HD21', 'HD22', 'HD23'],
{'PRO-LEU': ['appear', 1, 0], 'LEU-PRO': ['disappear', 0, 1]},
],
'LEU-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG', 'HG']],
['CD1', 'CD2', 'HD11', 'HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'LEU-SER': ['disappear', 0, 1], 'SER-LEU': ['appear', 1, 0]},
],
'LEU-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG', 'HG1'], ['CD1', 'HG21'], ['CD2', 'HG22'],
['HD11', 'HG23']],
['HD12', 'HD13', 'HD21', 'HD22', 'HD23'],
{'THR-LEU': ['appear', 1, 0], 'LEU-THR': ['disappear', 0, 1]},
],
'LEU-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD1', 'CD1'], ['CD2', 'CD2'], ['HG', 'HD1'], ['HD11', 'NE1'], ['HD12', 'CE3'], ['HD13', 'CE2'],
['HD21', 'HE1'], ['HD22', 'HE3'], ['HD23', 'CZ3']],
['CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-LEU': ['disappear', 1, 0], 'LEU-TRP': ['appear', 0, 1]},
],
'LEU-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD1', 'CD1'], ['CD2', 'CD2'], ['HG', 'HD1'], ['HD11', 'CE1'], ['HD12', 'HD2'], ['HD13', 'CE2'],
['HD21', 'HE1'], ['HD22', 'CZ'], ['HD23', 'HE2']],
['OH', 'HH'],
{'TYR-LEU': ['disappear', 1, 0], 'LEU-TYR': ['appear', 0, 1]},
],
'LEU-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG', 'HG11'], ['CD1', 'HG12'], ['CD2', 'HG13'],
['HD11', 'HG21'], ['HD12', 'HG22'], ['HD13', 'HG23']],
['HD21', 'HD22', 'HD23'],
{'VAL-LEU': ['appear', 1, 0], 'LEU-VAL': ['disappear', 0, 1]},
],
'LYN-LYS': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'HG2', 'HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3',
'NZ', 'HZ1', 'HZ2'],
[],
['HZ3'],
{'LYN-LYS': ['appear', 0, 1], 'LYS-LYN': ['disappear', 1, 0]},
],
'LYN-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CE', 'CE'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['HE2', 'HE2'], ['HE3', 'HE3'], ['CD', 'SD'], ['HD2', 'HE1']],
['HD3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-MET': ['disappear', 0, 1], 'MET-LYN': ['appear', 1, 0]},
],
'LYN-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['CE', 'CE2'],
['HE3', 'HE1'], ['NZ', 'CZ'], ['HZ1', 'HZ']],
['HZ2'],
{'LYN-PHE': ['disappear', 0, 1], 'PHE-LYN': ['appear', 1, 0]},
],
'LYN-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['HD3', 'HD3'],
['HD2', 'HD2']],
['H', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'PRO-LYN': ['appear', 1, 0], 'LYN-PRO': ['disappear', 0, 1]},
],
'LYN-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'SER-LYN': ['appear', 1, 0], 'LYN-SER': ['disappear', 0, 1]},
],
'LYN-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['HD2', 'HG23']],
['HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'LYN-THR': ['disappear', 0, 1], 'THR-LYN': ['appear', 1, 0]},
],
'LYN-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HZ2', 'HZ2'], ['HE3', 'HE3'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD2', 'NE1'], ['HD3', 'CE3'],
['CE', 'CE2'], ['HE2', 'HE1'], ['NZ', 'CZ3'], ['HZ1', 'CZ2']],
['HZ3', 'CH2', 'HH2'],
{'TRP-LYN': ['disappear', 1, 0], 'LYN-TRP': ['appear', 0, 1]},
],
'LYN-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['CE', 'CE2'],
['HE3', 'HE1'], ['NZ', 'CZ'], ['HZ1', 'OH'], ['HZ2', 'HH']],
[],
{'TYR-LYN': ['', 1, 0], 'LYN-TYR': ['', 0, 1]},
],
'LYN-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['HD2', 'HG21'], ['HD3', 'HG22'], ['CE', 'HG23']],
['HE2', 'HE3', 'NZ', 'HZ1', 'HZ2'],
{'VAL-LYN': ['appear', 1, 0], 'LYN-VAL': ['disappear', 0, 1]},
],
'LYS-MET': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD', 'SD'], ['CE', 'CE'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['HE2', 'HE2'], ['HE3', 'HE3'], ['HD2', 'HE1']],
['HD3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'MET-LYS': ['appear', 1, 0], 'LYS-MET': ['disappear', 0, 1]},
],
'LYS-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['CE', 'CE2'],
['HE3', 'HE1'], ['NZ', 'CZ'], ['HZ1', 'HZ']],
['HZ2', 'HZ3'],
{'LYS-PHE': ['disappear', 0, 1], 'PHE-LYS': ['appear', 1, 0]},
],
'LYS-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['CD', 'CD'], ['HD3', 'HD3'],
['HD2', 'HD2']],
['H', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-PRO': ['disappear', 0, 1], 'PRO-LYS': ['appear', 1, 0]},
],
'LYS-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'CD', 'HD2', 'HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-SER': ['disappear', 0, 1], 'SER-LYS': ['appear', 1, 0]},
],
'LYS-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'HG22'],
['HD2', 'HG23']],
['HD3', 'CE', 'HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-THR': ['disappear', 0, 1], 'THR-LYS': ['appear', 1, 0]},
],
'LYS-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HZ2', 'HZ2'], ['HZ3', 'HZ3'], ['HE3', 'HE3'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD2', 'NE1'],
['HD3', 'CE3'], ['CE', 'CE2'], ['HE2', 'HE1'], ['NZ', 'CZ3'], ['HZ1', 'CZ2']],
['CH2', 'HH2'],
{'LYS-TRP': ['appear', 0, 1], 'TRP-LYS': ['disappear', 1, 0]},
],
'LYS-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HD2', 'HD2'], ['HE2', 'HE2'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'HD1'], ['HD3', 'CE1'], ['CE', 'CE2'],
['HE3', 'HE1'], ['NZ', 'CZ'], ['HZ1', 'OH'], ['HZ2', 'HH']],
['HZ3'],
{'LYS-TYR': ['disappear', 0, 1], 'TYR-LYS': ['appear', 1, 0]},
],
'LYS-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'HG13'],
['HD2', 'HG21'], ['HD3', 'HG22'], ['CE', 'HG23']],
['HE2', 'HE3', 'NZ', 'HZ1', 'HZ2', 'HZ3'],
{'LYS-VAL': ['disappear', 0, 1], 'VAL-LYS': ['appear', 1, 0]},
],
'MET-PHE': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['SD', 'HD1'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CE', 'CE1'], ['HE1', 'HE1'], ['HE2', 'HE2'], ['HE3', 'HD2']],
['CE2', 'CZ', 'HZ'],
{'MET-PHE': ['appear', 0, 1], 'PHE-MET': ['disappear', 1, 0]},
],
'MET-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'HG2'], ['HG3', 'HG3'], ['SD', 'CD'], ['CE', 'HD3'],
['HE1', 'HD2']],
['H', 'HE2', 'HE3'],
{'MET-PRO': ['disappear', 0, 1], 'PRO-MET': ['appear', 1, 0]},
],
'MET-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['HG2', 'HG']],
['HG3', 'SD', 'CE', 'HE1', 'HE2', 'HE3'],
{'SER-MET': ['appear', 1, 0], 'MET-SER': ['disappear', 0, 1]},
],
'MET-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['SD', 'HG22'], ['CE', 'HG23']],
['HE1', 'HE2', 'HE3'],
{'MET-THR': ['disappear', 0, 1], 'THR-MET': ['appear', 1, 0]},
],
'MET-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['SD', 'HD1'], ['CE', 'NE1'], ['HE2', 'CE3'], ['HE1', 'HE1'], ['HE3', 'HE3']],
['CE2', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'MET-TRP': ['appear', 0, 1], 'TRP-MET': ['disappear', 1, 0]},
],
'MET-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['HG2', 'CD1'], ['HG3', 'CD2'], ['SD', 'HD1'], ['CE', 'CE1'], ['HE3', 'HD2'], ['HE1', 'HE1'], ['HE2', 'CE2']],
['HE2', 'CZ', 'OH', 'HH'],
{'TYR-MET': ['disappear', 1, 0], 'MET-TYR': ['appear', 0, 1]},
],
'MET-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'CG1'], ['HB3', 'CG2'], ['CG', 'HB'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['SD', 'HG13'], ['CE', 'HG21'],
['HE1', 'HG22'], ['HE2', 'HG23']],
['HE3'],
{'VAL-MET': ['appear', 1, 0], 'MET-VAL': ['disappear', 0, 1]},
],
'PHE-PRO': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['CD1', 'HG2'], ['CD2', 'HG3'], ['CE1', 'CD'], ['HD2', 'HD2'],
['HD1', 'HD3']],
['H', 'HE1', 'CE2', 'HE2', 'CZ', 'HZ'],
{'PHE-PRO': ['disappear', 0, 1], 'PRO-PHE': ['appear', 1, 0]}
],
'PHE-SER': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['CG', 'OG'], ['CD1', 'HG']],
['CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'PHE-SER': ['disappear', 0, 1], 'SER-PHE': ['appear', 1, 0]},
],
'PHE-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['CD1', 'HG1'], ['CD2', 'HG21'], ['HD1', 'HG22'],
['CE1', 'HG23']],
['HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'HZ'],
{'THR-PHE': ['appear', 1, 0], 'PHE-THR': ['disappear', 0, 1]},
],
'PHE-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['HD2', 'NE1'], ['CE1', 'CE3'], ['HE1', 'HE3'], ['CE2', 'CE2'],
['HE2', 'HE1'], ['CZ', 'CZ3'], ['HZ', 'HZ3']],
['CZ2', 'HZ2', 'CH2', 'HH2'],
{'PHE-TRP': ['appear', 0, 1], 'TRP-PHE': ['disappear', 1, 0]}
],
'PHE-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG', 'CD1', 'HD1', 'CD2', 'HD2', 'CE1', 'HE1', 'CE2',
'HE2', 'CZ'],
[['HZ', 'OH']],
['HH'],
{'PHE-TYR': ['appear', 0, 1], 'TYR-PHE': ['disappear', 1, 0]}
],
'PHE-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['CD1', 'HG11'], ['CD2', 'HG12'], ['HD1', 'HG13'],
['CE1', 'HG21'], ['HD2', 'HG22'], ['CE2', 'HG23']],
['HE1', 'CZ', 'HE2', 'HZ'],
{'PHE-VAL': ['disappear', 0, 1], 'VAL-PHE': ['appear', 1, 0]},
],
'PRO-SER': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'OG'], ['HG2', 'HG'], ['CD', 'H']],
['HG3', 'HD2', 'HD3'],
{'PRO-SER': ['disappear', 0, 1], 'SER-PRO': ['appear', 1, 0]}
],
'PRO-THR': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['CG', 'CG2'], ['HG2', 'HG1'], ['HG3', 'HG21'], ['CD', 'H'], ['HD2', 'HG22'],
['HD3', 'HG23']],
[],
{'PRO-THR': ['', 0, 1], 'THR-PRO': ['', 1, 0]}
],
'PRO-TRP': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'H'], ['HD2', 'HD1'],
['HD3', 'NE1']],
['CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
        {'PRO-TRP': ['appear', 0, 1], 'TRP-PRO': ['disappear', 1, 0]}
],
'PRO-TYR': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB2'], ['HB3', 'HB3'], ['CG', 'CG'], ['HG2', 'CD1'], ['HG3', 'CD2'], ['CD', 'H'], ['HD2', 'HD1'],
['HD3', 'HD2']],
        ['CE1', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'PRO-TYR': ['appear', 0, 1], 'TYR-PRO': ['disappear', 1, 0]}
],
'PRO-VAL': [
['N', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['CG', 'CG1'], ['HB3', 'CG2'], ['HG2', 'HG11'], ['HG3', 'HG12'], ['CD', 'H'], ['HD2', 'HG13'],
['HD3', 'HG21']],
['HG22', 'HG23'],
{'PRO-VAL': ['appear', 0, 1], 'VAL-PRO': ['disappear', 1, 0]}
],
'SER-THR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'OG1'], ['OG', 'CG2'], ['HG', 'HG1']],
['HG21', 'HG22', 'HG23'],
{'THR-SER': ['disappear', 1, 0], 'SER-THR': ['appear', 0, 1]},
],
'SER-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['OG', 'CG'], ['HG', 'CD1']],
['CD2', 'HD1', 'NE1', 'CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-SER': ['disappear', 1, 0], 'SER-TRP': ['appear', 0, 1]},
],
'SER-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB3', 'HB3'], ['HB2', 'HB2'], ['OG', 'CG'], ['HG', 'CD1']],
['CD2', 'HD1', 'CE1', 'HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'SER-TYR': ['appear', 0, 1], 'TYR-SER': ['disappear', 1, 0]},
],
'SER-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['OG', 'CG2'], ['HG', 'HG11']],
['HG12', 'HG13', 'HG21', 'HG22', 'HG23'],
{'SER-VAL': ['appear', 0, 1], 'VAL-SER': ['disappear', 1, 0]},
],
'THR-TRP': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['OG1', 'HB3'], ['CG2', 'CG'], ['HG1', 'CD1'], ['HG21', 'CD2'], ['HG22', 'HD1'],
['HG23', 'NE1']],
['CE3', 'CE2', 'HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'TRP-THR': ['disappear', 1, 0], 'THR-TRP': ['appear', 0, 1]},
],
'THR-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB', 'HB2'], ['OG1', 'HB3'], ['CG2', 'CG'], ['HG1', 'CD1'], ['HG21', 'CD2'], ['HG22', 'HD1'],
['HG23', 'CE1']],
['HD2', 'CE2', 'HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'THR-TYR': ['appear', 0, 1], 'TYR-THR': ['disappear', 1, 0]},
],
'THR-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HG21', 'HG21'], ['HB', 'HB'], ['HG23', 'HG23'], ['CG2', 'CG2'], ['HG22', 'HG22'], ['OG1', 'CG1'],
['HG1', 'HG11']],
['HG12', 'HG13'],
{'THR-VAL': ['appear', 0, 1], 'VAL-THR': ['disappear', 1, 0]},
],
'TRP-TYR': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB', 'HB2', 'HB3', 'CG'],
[['CD1', 'CD1'], ['HD1', 'HD1'], ['CD2', 'CD2'], ['NE1', 'HD2'], ['CE3', 'CE1'], ['HE1', 'HE1'], ['CE2', 'CE2'],
['HE3', 'HE2'], ['CZ3', 'CZ'], ['HZ3', 'OH'], ['CZ2', 'HH']],
['HZ2', 'CH2', 'HH2'],
{'TRP-TYR': ['disappear', 0, 1], 'TYR-TRP': ['appear', 1, 0]}
],
'TRP-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['CD1', 'HG11'], ['CD2', 'HG12'], ['HD1', 'HG13'],
['NE1', 'HG21'], ['CE3', 'HG22'], ['CE2', 'HG23']],
['HE1', 'HE3', 'CZ3', 'CZ2', 'HZ3', 'HZ2', 'CH2', 'HH2'],
{'VAL-TRP': ['appear', 1, 0], 'TRP-VAL': ['disappear', 0, 1]},
],
'TYR-VAL': [
['N', 'H', 'CA', 'HA', 'C', 'O', 'CB'],
[['HB2', 'HB'], ['HB3', 'CG1'], ['CG', 'CG2'], ['CD1', 'HG11'], ['CD2', 'HG12'], ['HD1', 'HG13'],
['CE1', 'HG21'], ['HD2', 'HG22'], ['CE2', 'HG23']],
['HE1', 'CZ', 'HE2', 'OH', 'HH'],
{'TYR-VAL': ['disappear', 0, 1], 'VAL-TYR': ['appear', 1, 0]},
],
}
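# Editor's note (illustrative sketch, not part of the original data): every entry in the
# mapping above follows the same four-part layout. Assuming the dictionary is bound to the
# hypothetical name `mutation_map` (its real name is declared where the literal opens,
# earlier in this module), a lookup reads as:
#
#     common, pairs, extra, directions = mutation_map['CYS-SER']
#     # common     -> atoms present unchanged in both residues
#     # pairs      -> [atom_in_first, atom_in_second] renames applied during the morph
#     # extra      -> atoms that appear or disappear, as flagged below
#     # directions -> {'CYS-SER': [flag, 0, 1], 'SER-CYS': [flag, 1, 0]}, where the flag is
#     #               'appear', 'disappear', or '' and the indices give the from/to residue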
input_keywords = {
"all": ["ARG", "HIE", "HID", "HIP", "LYS", "ASP", "GLU", "SER", "THR",
"ASN", "GLN", "CYS", "GLY", "PRO", "ALA", "VAL", "ILE", "LEU",
"MET", "PHE", "TYR", "TRP"],
"all_protonated": ["ARG", "HIE", "HID", "HIP", "LYS", "LYN", "ASP", "ASH", "GLU", "GLH",
"SER", "THR", "ASN", "GLN", "CYS", "CYT", "GLY", "PRO", "ALA", "VAL",
"ILE", "LEU", "MET", "PHE", "TYR", "TRP"],
"positive": ["ARG", "HIE", "HID", "HIP", "LYS"],
"positive_protonated": ["ARG", "HIE", "HID", "HIP", "LYS", "LYN"],
"negative": ["ASP", "GLU"],
"negative_protonated": ["ASP", "ASH", "GLU", "GLH"],
"polar": ["SER", "THR", "ASN", "GLN"],
"special": ["CYS", "CYT", "GLY", "PRO"],
"hydrophobic": ["ALA", "VAL", "ILE", "LEU", "MET", "PHE", "TYR", "TRP"]
}
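# Editor's note (illustrative sketch): the keyword groups above expand to explicit residue
# lists, e.g.
#
#     residues = input_keywords["polar"]            # -> ['SER', 'THR', 'ASN', 'GLN']
#     residues = input_keywords.get("unknown", [])  # unrecognised keywords fall back to an empty list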
| 52.094021 | 156 | 0.325895 |
ab2c4cc62e2c4375c03fa7576732d8a2a67653ad
| 746 |
py
|
Python
|
setup.py
|
RichardPan01/wechat_articles_spider
|
b91385603cd0e09d76e96fc1876f75c8f22c43f2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
RichardPan01/wechat_articles_spider
|
b91385603cd0e09d76e96fc1876f75c8f22c43f2
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
RichardPan01/wechat_articles_spider
|
b91385603cd0e09d76e96fc1876f75c8f22c43f2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="wechatarticles",
version="0.5.7",
author="wnma3mz",
author_email="wnma3mz@gmail.com",
description="wechat articles scrapy",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/wnma3mz/wechat_articles_spider",
packages=setuptools.find_packages(),
install_requires=[
'requests>=2.20.0', 'beautifulsoup4>=4.7.1'
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
),
)
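# Editor's note (not part of the original file): with this setup script, a local install is
# typically done with `pip install .` from the repository root, and distribution archives
# can be built with `python setup.py sdist bdist_wheel`.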
| 28.692308 | 61 | 0.662198 |
8547cc6a3a41ed2ad00b01b2126b131f3048cfce
| 50,522 |
py
|
Python
|
mmseg/ops/tensor_color_jitter.py
|
openseg-group/mmsegmentation
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
[
"Apache-2.0"
] | 2 |
2020-07-10T12:13:56.000Z
|
2020-11-09T07:09:29.000Z
|
mmseg/ops/tensor_color_jitter.py
|
openseg-group/mmsegmentation
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
[
"Apache-2.0"
] | null | null | null |
mmseg/ops/tensor_color_jitter.py
|
openseg-group/mmsegmentation
|
23939f09d2b0bd30fc26eb7f8af974f1f5441210
|
[
"Apache-2.0"
] | 2 |
2020-07-28T09:12:55.000Z
|
2021-01-04T07:49:59.000Z
|
import warnings
from typing import Optional, Dict, Tuple
import torch
from torch import Tensor
from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
from torch.jit.annotations import List, BroadcastingList2
def _is_tensor_a_torch_image(x: Tensor) -> bool:
return x.ndim >= 2
def _get_image_size(img: Tensor) -> List[int]:
"""Returns (w, h) of tensor image"""
if _is_tensor_a_torch_image(img):
return [img.shape[-1], img.shape[-2]]
raise TypeError("Unexpected input type")
def _get_image_num_channels(img: Tensor) -> int:
if img.ndim == 2:
return 1
elif img.ndim > 2:
return img.shape[-3]
raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim))
def _max_value(dtype: torch.dtype) -> float:
# TODO: replace this method with torch.iinfo when it gets torchscript support.
# https://github.com/pytorch/pytorch/issues/41492
a = torch.tensor(2, dtype=dtype)
signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0
bits = 1
max_value = torch.tensor(-signed, dtype=torch.long)
while True:
next_value = a.pow(bits - signed).sub(1)
if next_value > max_value:
max_value = next_value
bits *= 2
else:
return max_value.item()
return max_value.item()
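# Editor's note (illustrative): _max_value returns the largest representable value of the
# given dtype, e.g. _max_value(torch.uint8) == 255 and _max_value(torch.int8) == 127.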
def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
"""PRIVATE METHOD. Convert a tensor image to the given ``dtype`` and scale the values accordingly
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
image (torch.Tensor): Image to be converted
dtype (torch.dtype): Desired data type of the output
Returns:
(torch.Tensor): Converted image
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
if image.dtype == dtype:
return image
# TODO: replace with image.dtype.is_floating_point when torchscript supports it
if torch.empty(0, dtype=image.dtype).is_floating_point():
# TODO: replace with dtype.is_floating_point when torchscript supports it
if torch.tensor(0, dtype=dtype).is_floating_point():
return image.to(dtype)
# float to int
if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
image.dtype == torch.float64 and dtype == torch.int64
):
msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
raise RuntimeError(msg)
# https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
# For data in the range 0-1, (float * 255).to(uint) is only 255
# when float is exactly 1.0.
# `max + 1 - epsilon` provides more evenly distributed mapping of
# ranges of floats to ints.
eps = 1e-3
max_val = _max_value(dtype)
result = image.mul(max_val + 1.0 - eps)
return result.to(dtype)
else:
input_max = _max_value(image.dtype)
output_max = _max_value(dtype)
# int to float
# TODO: replace with dtype.is_floating_point when torchscript supports it
if torch.tensor(0, dtype=dtype).is_floating_point():
image = image.to(dtype)
return image / input_max
# int to int
if input_max > output_max:
# factor should be forced to int for torch jit script
# otherwise factor is a float and image // factor can produce different results
factor = int((input_max + 1) // (output_max + 1))
image = image // factor
return image.to(dtype)
else:
# factor should be forced to int for torch jit script
# otherwise factor is a float and image * factor can produce different results
factor = int((output_max + 1) // (input_max + 1))
image = image.to(dtype)
return image * factor
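# Editor's sketch (hypothetical values, not part of the original module): converting an
# integer image to a floating dtype rescales it into [0, 1], e.g.
#
#     img_u8 = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
#     img_f32 = convert_image_dtype(img_u8, torch.float32)   # values now lie in [0.0, 1.0]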
def vflip(img: Tensor) -> Tensor:
"""PRIVATE METHOD. Vertically flip the given the Image Tensor.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image Tensor to be flipped in the form [..., C, H, W].
Returns:
Tensor: Vertically flipped image Tensor.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return img.flip(-2)
def hflip(img: Tensor) -> Tensor:
"""PRIVATE METHOD. Horizontally flip the given the Image Tensor.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image Tensor to be flipped in the form [..., C, H, W].
Returns:
Tensor: Horizontally flipped image Tensor.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return img.flip(-1)
def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
"""PRIVATE METHOD. Crop the given Image Tensor.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be cropped in the form [..., H, W]. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
Returns:
Tensor: Cropped image.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError("tensor is not a torch image.")
return img[..., top:top + height, left:left + width]
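# Editor's sketch: crop takes (top, left, height, width) in pixel coordinates, so
#
#     patch = crop(img, top=10, left=20, height=32, width=32)   # == img[..., 10:42, 20:52]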
def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
"""PRIVATE METHOD. Convert the given RGB Image Tensor to Grayscale.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which
is L = R * 0.2989 + G * 0.5870 + B * 0.1140
Args:
img (Tensor): Image to be converted to Grayscale in the form [C, H, W].
num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.
Returns:
Tensor: Grayscale version of the image.
if num_output_channels = 1 : returned image is single channel
if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if img.ndim < 3:
raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))
c = img.shape[-3]
if c != 3:
raise TypeError("Input image tensor should 3 channels, but found {}".format(c))
if num_output_channels not in (1, 3):
raise ValueError('num_output_channels should be either 1 or 3')
r, g, b = img.unbind(dim=-3)
# This implementation closely follows the TF one:
# https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype)
l_img = l_img.unsqueeze(dim=-3)
if num_output_channels == 3:
return l_img.expand(img.shape)
return l_img
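# Editor's sketch: with the ITU-R 601-2 weights above, a pure-red float image maps to a
# constant grey of ~0.2989, e.g.
#
#     red = torch.zeros(3, 2, 2); red[0] = 1.0
#     grey = rgb_to_grayscale(red)    # shape (1, 2, 2), every value ~0.2989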
def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
"""PRIVATE METHOD. Adjust brightness of an RGB image.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
any non negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
Tensor: Brightness adjusted image.
"""
if brightness_factor < 0:
raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return _blend(img, torch.zeros_like(img), brightness_factor)
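# Editor's sketch: brightness adjustment is a blend towards a black image, so
#     adjust_brightness(img, 0.0)  # -> an all-zero image
#     adjust_brightness(img, 1.0)  # -> img unchanged (up to dtype rounding)
#     adjust_brightness(img, 2.0)  # -> roughly twice as bright, clamped to the dtype range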
def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
"""PRIVATE METHOD. Adjust contrast of an RGB image.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
Tensor: Contrast adjusted image.
"""
if contrast_factor < 0:
raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True)
return _blend(img, mean, contrast_factor)
def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
"""PRIVATE METHOD. Adjust hue of an image.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (Tensor): Image to be adjusted. Image type is either uint8 or float.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
Tensor: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError('Input img should be Tensor image')
orig_dtype = img.dtype
if img.dtype == torch.uint8:
img = img.to(dtype=torch.float32) / 255.0
img = _rgb2hsv(img)
h, s, v = img.unbind(dim=-3)
h = (h + hue_factor) % 1.0
img = torch.stack((h, s, v), dim=-3)
img_hue_adj = _hsv2rgb(img)
if orig_dtype == torch.uint8:
img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)
return img_hue_adj
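# Illustrative usage sketch for adjust_hue(): shifting the hue by +0.25 and
# then by -0.25 should approximately recover the original float image, up to
# the numerical error of the RGB<->HSV round trip. Values are arbitrary.
def _example_adjust_hue_round_trip():
    img = torch.rand(3, 8, 8)
    shifted = adjust_hue(img, 0.25)
    restored = adjust_hue(shifted, -0.25)
    return (img - restored).abs().max()  # expected to be close to 0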
def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
"""PRIVATE METHOD. Adjust color saturation of an RGB image.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. Can be any
non negative number. 0 gives a black and white image, 1 gives the
original image while 2 enhances the saturation by a factor of 2.
Returns:
Tensor: Saturation adjusted image.
"""
if saturation_factor < 0:
raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
return _blend(img, rgb_to_grayscale(img), saturation_factor)
def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
r"""PRIVATE METHOD. Adjust gamma of an RGB image.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
`I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}`
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (Tensor): Tensor of RGB values to be adjusted.
gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
gamma larger than 1 make the shadows darker,
while gamma smaller than 1 make dark regions lighter.
gain (float): The constant multiplier.
"""
if not isinstance(img, torch.Tensor):
raise TypeError('Input img should be a Tensor.')
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
result = img
dtype = img.dtype
if not torch.is_floating_point(img):
result = convert_image_dtype(result, torch.float32)
result = (gain * result ** gamma).clamp(0, 1)
result = convert_image_dtype(result, dtype)
result = result.to(dtype)
return result
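# Illustrative usage sketch for adjust_gamma() on a float image; values are
# arbitrary. With the power-law formula above, gamma > 1 darkens mid-tones
# (0.5 ** 2 = 0.25) and gamma < 1 brightens them (0.5 ** 0.5 ~= 0.707).
def _example_adjust_gamma():
    img = torch.full((3, 4, 4), 0.5)
    darker = adjust_gamma(img, gamma=2.0)    # constant 0.25
    brighter = adjust_gamma(img, gamma=0.5)  # constant ~0.707
    return darker, brighter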
def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
"""DEPRECATED. Crop the Image Tensor and resize it to desired size.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
.. warning::
This method is deprecated and will be removed in future releases.
Please, use ``F.center_crop`` instead.
Args:
img (Tensor): Image to be cropped.
output_size (sequence or int): (height, width) of the crop box. If int,
it is used for both directions
Returns:
Tensor: Cropped image.
"""
warnings.warn(
"This method is deprecated and will be removed in future releases. "
"Please, use ``F.center_crop`` instead."
)
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
_, image_height, image_width = img.size()
crop_height, crop_width = output_size
# crop_top = int(round((image_height - crop_height) / 2.))
# Result can be different between python func and scripted func
# Temporary workaround:
crop_top = int((image_height - crop_height + 1) * 0.5)
# crop_left = int(round((image_width - crop_width) / 2.))
# Result can be different between python func and scripted func
# Temporary workaround:
crop_left = int((image_width - crop_width + 1) * 0.5)
return crop(img, crop_top, crop_left, crop_height, crop_width)
def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]:
"""DEPRECATED. Crop the given Image Tensor into four corners and the central crop.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
.. warning::
This method is deprecated and will be removed in future releases.
Please, use ``F.five_crop`` instead.
.. Note::
This transform returns a List of Tensors and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
Returns:
List: List (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
warnings.warn(
"This method is deprecated and will be removed in future releases. "
"Please, use ``F.five_crop`` instead."
)
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
_, image_height, image_width = img.size()
crop_height, crop_width = size
if crop_width > image_width or crop_height > image_height:
msg = "Requested crop size {} is bigger than input size {}"
raise ValueError(msg.format(size, (image_height, image_width)))
tl = crop(img, 0, 0, crop_height, crop_width)
tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
center = center_crop(img, (crop_height, crop_width))
return [tl, tr, bl, br, center]
def ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]:
"""DEPRECATED. Crop the given Image Tensor into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
.. warning::
This method is deprecated and will be removed in future releases.
Please, use ``F.ten_crop`` instead.
.. Note::
This transform returns a List of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made.
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
List: List (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and center crop
and same for the flipped image's tensor.
"""
warnings.warn(
"This method is deprecated and will be removed in future releases. "
"Please, use ``F.ten_crop`` instead."
)
if not _is_tensor_a_torch_image(img):
raise TypeError('tensor is not a torch image.')
assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
bound = 1.0 if img1.is_floating_point() else 255.0
return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)
def _rgb2hsv(img):
r, g, b = img.unbind(dim=-3)
# Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
# src/libImaging/Convert.c#L330
maxc = torch.max(img, dim=-3).values
minc = torch.min(img, dim=-3).values
# The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
# from happening in the results, because
# + S channel has division by `maxc`, which is zero only if `maxc = minc`
# + H channel has division by `(maxc - minc)`.
#
# Instead of overwriting NaN afterwards, we just prevent it from occurring so
# we don't need to deal with it in case we save the NaN in a buffer in
# backprop, if it is ever supported, but it doesn't hurt to do so.
eqc = maxc == minc
cr = maxc - minc
# Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine.
ones = torch.ones_like(maxc)
s = cr / torch.where(eqc, ones, maxc)
# Note that `eqc => maxc = minc = r = g = b`. So the following calculation
# of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
# would not matter what values `rc`, `gc`, and `bc` have here, and thus
# replacing denominator with 1 when `eqc` is fine.
cr_divisor = torch.where(eqc, ones, cr)
rc = (maxc - r) / cr_divisor
gc = (maxc - g) / cr_divisor
bc = (maxc - b) / cr_divisor
hr = (maxc == r) * (bc - gc)
hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
h = (hr + hg + hb)
h = torch.fmod((h / 6.0 + 1.0), 1.0)
return torch.stack((h, s, maxc), dim=-3)
def _hsv2rgb(img):
h, s, v = img.unbind(dim=-3)
i = torch.floor(h * 6.0)
f = (h * 6.0) - i
i = i.to(dtype=torch.int32)
p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
i = i % 6
mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)
a1 = torch.stack((v, q, p, p, t, v), dim=-3)
a2 = torch.stack((t, v, v, q, p, p), dim=-3)
a3 = torch.stack((p, p, t, v, v, q), dim=-3)
a4 = torch.stack((a1, a2, a3), dim=-4)
return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4)
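# Illustrative consistency sketch: the private _rgb2hsv() / _hsv2rgb() helpers
# above are approximately inverse to each other on float inputs in [0, 1];
# the input values below are arbitrary.
def _example_hsv_round_trip():
    img = torch.rand(3, 8, 8)
    back = _hsv2rgb(_rgb2hsv(img))
    return (img - back).abs().max()  # expected to be close to 0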
def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
# padding is left, right, top, bottom
# crop if needed
if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0:
crop_left, crop_right, crop_top, crop_bottom = [-min(x, 0) for x in padding]
img = img[..., crop_top:img.shape[-2] - crop_bottom, crop_left:img.shape[-1] - crop_right]
padding = [max(x, 0) for x in padding]
in_sizes = img.size()
x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]
left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0]
right_indices = [-(i + 1) for i in range(padding[1])] # e.g. [-1, -2, -3]
x_indices = torch.tensor(left_indices + x_indices + right_indices)
y_indices = [i for i in range(in_sizes[-2])]
top_indices = [i for i in range(padding[2] - 1, -1, -1)]
bottom_indices = [-(i + 1) for i in range(padding[3])]
y_indices = torch.tensor(top_indices + y_indices + bottom_indices)
ndim = img.ndim
if ndim == 3:
return img[:, y_indices[:, None], x_indices[None, :]]
elif ndim == 4:
return img[:, :, y_indices[:, None], x_indices[None, :]]
else:
raise RuntimeError("Symmetric padding of N-D tensors are not supported yet")
def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
r"""PRIVATE METHOD. Pad the given Tensor Image on all sides with specified padding mode and fill value.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If a tuple or list of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple or list of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively. In torchscript mode padding as single int is not supported, use a tuple or
list of length 1: ``[padding, ]``.
fill (int): Pixel fill value for constant fill. Default is 0.
This value is only used when the padding_mode is constant
padding_mode (str): Type of padding. Should be: constant, edge or reflect. Default is constant.
Mode symmetric is not yet supported for Tensor inputs.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
Tensor: Padded image.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError("tensor is not a torch image.")
if not isinstance(padding, (int, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (int, float)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, tuple):
padding = list(padding)
if isinstance(padding, list) and len(padding) not in [1, 2, 4]:
raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if isinstance(padding, int):
if torch.jit.is_scripting():
# This may be unreachable
raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
pad_left = pad_right = pad_top = pad_bottom = padding
elif len(padding) == 1:
pad_left = pad_right = pad_top = pad_bottom = padding[0]
elif len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
else:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
p = [pad_left, pad_right, pad_top, pad_bottom]
if padding_mode == "edge":
# remap padding_mode str
padding_mode = "replicate"
elif padding_mode == "symmetric":
# route to another implementation
return _pad_symmetric(img, p)
need_squeeze = False
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
out_dtype = img.dtype
need_cast = False
if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64):
# Here we temporary cast input tensor to float
# until pytorch issue is resolved :
# https://github.com/pytorch/pytorch/issues/40763
need_cast = True
img = img.to(torch.float32)
img = torch_pad(img, p, mode=padding_mode, value=float(fill))
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
img = img.to(out_dtype)
return img
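# Illustrative usage sketch for pad() with the supported padding modes.
# Shapes and padding amounts are arbitrary demonstration values; note that in
# torchscript the padding argument must be a list (e.g. [2]) rather than an int.
def _example_pad():
    img = torch.zeros(3, 4, 4)
    constant = pad(img, [1, 2])                                   # 1 px left/right, 2 px top/bottom -> (3, 8, 6)
    reflected = pad(img, [2, 2, 2, 2], padding_mode="reflect")    # -> (3, 8, 8)
    symmetric = pad(img, [2, 2, 2, 2], padding_mode="symmetric")  # -> (3, 8, 8)
    return constant.shape, reflected.shape, symmetric.shape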
def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:
r"""PRIVATE METHOD. Resize the input Tensor to the given size.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be resized.
size (int or tuple or list): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaining
the aspect ratio. i.e, if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
In torchscript mode, size as a single int is not supported, use a tuple or
list of length 1: ``[size, ]``.
interpolation (int, optional): Desired interpolation. Default is bilinear (=2). Other supported values:
nearest(=0) and bicubic(=3).
Returns:
Tensor: Resized image.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError("tensor is not a torch image.")
if not isinstance(size, (int, tuple, list)):
raise TypeError("Got inappropriate size arg")
if not isinstance(interpolation, int):
raise TypeError("Got inappropriate interpolation arg")
_interpolation_modes = {
0: "nearest",
2: "bilinear",
3: "bicubic",
}
if interpolation not in _interpolation_modes:
raise ValueError("This interpolation mode is unsupported with Tensor input")
if isinstance(size, tuple):
size = list(size)
if isinstance(size, list) and len(size) not in [1, 2]:
raise ValueError("Size must be an int or a 1 or 2 element tuple/list, not a "
"{} element tuple/list".format(len(size)))
w, h = _get_image_size(img)
if isinstance(size, int):
size_w, size_h = size, size
elif len(size) < 2:
size_w, size_h = size[0], size[0]
else:
size_w, size_h = size[1], size[0] # Convention (h, w)
if isinstance(size, int) or len(size) < 2:
if w < h:
size_h = int(size_w * h / w)
else:
size_w = int(size_h * w / h)
if (w <= h and w == size_w) or (h <= w and h == size_h):
return img
# make image NCHW
need_squeeze = False
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
mode = _interpolation_modes[interpolation]
out_dtype = img.dtype
need_cast = False
if img.dtype not in (torch.float32, torch.float64):
need_cast = True
img = img.to(torch.float32)
# Define align_corners to avoid warnings
align_corners = False if mode in ["bilinear", "bicubic"] else None
img = interpolate(img, size=[size_h, size_w], mode=mode, align_corners=align_corners)
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
if mode == "bicubic":
img = img.clamp(min=0, max=255)
img = img.to(out_dtype)
return img
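# Illustrative usage sketch for resize(); shapes are demonstration values.
# With a 1-element size the smaller edge is matched and the aspect ratio is
# preserved; with a 2-element (h, w) size the output is exactly that shape.
def _example_resize():
    img = torch.rand(3, 100, 200)     # H=100, W=200
    shorter = resize(img, [50])       # -> (3, 50, 100), aspect ratio kept
    exact = resize(img, [60, 120])    # -> (3, 60, 120)
    return shorter.shape, exact.shape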
def _assert_grid_transform_inputs(
img: Tensor,
matrix: Optional[List[float]],
resample: int,
fillcolor: Optional[int],
_interpolation_modes: Dict[int, str],
coeffs: Optional[List[float]] = None,
):
if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError("Input img should be Tensor Image")
if matrix is not None and not isinstance(matrix, list):
raise TypeError("Argument matrix should be a list")
if matrix is not None and len(matrix) != 6:
raise ValueError("Argument matrix should have 6 float values")
if coeffs is not None and len(coeffs) != 8:
raise ValueError("Argument coeffs should have 8 float values")
if fillcolor is not None:
warnings.warn("Argument fill/fillcolor is not supported for Tensor input. Fill value is zero")
if resample not in _interpolation_modes:
raise ValueError("Resampling mode '{}' is unsupported with Tensor input".format(resample))
def _cast_squeeze_in(img: Tensor, req_dtype: torch.dtype) -> Tuple[Tensor, bool, bool, torch.dtype]:
need_squeeze = False
# make image NCHW
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
out_dtype = img.dtype
need_cast = False
if out_dtype != req_dtype:
need_cast = True
img = img.to(req_dtype)
return img, need_cast, need_squeeze, out_dtype
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype):
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
# it is better to round before cast
img = torch.round(img).to(out_dtype)
return img
def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str) -> Tensor:
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, grid.dtype)
if img.shape[0] > 1:
# Apply same grid to a batch of images
grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])
img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)
img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
return img
def _gen_affine_grid(
theta: Tensor, w: int, h: int, ow: int, oh: int,
) -> Tensor:
# https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
# AffineGridGenerator.cpp#L18
# Difference with AffineGridGenerator is that:
# 1) we normalize grid values after applying theta
# 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate
d = 0.5
base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device)
x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
base_grid[..., 2].fill_(1)
rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device)
output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
return output_grid.view(1, oh, ow, 2)
def affine(
img: Tensor, matrix: List[float], resample: int = 0, fillcolor: Optional[int] = None
) -> Tensor:
"""PRIVATE METHOD. Apply affine transformation on the Tensor image keeping image center invariant.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): image to be rotated.
matrix (list of floats): list of 6 float values representing inverse matrix for affine transformation.
resample (int, optional): An optional resampling filter. Default is nearest (=0). Other supported values:
bilinear(=2).
fillcolor (int, optional): this option is not supported for Tensor input. Fill value for the area outside the
transform in the output image is always 0.
Returns:
Tensor: Transformed image.
"""
_interpolation_modes = {
0: "nearest",
2: "bilinear",
}
_assert_grid_transform_inputs(img, matrix, resample, fillcolor, _interpolation_modes)
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
shape = img.shape
# grid will be generated on the same device as theta and img
grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2])
mode = _interpolation_modes[resample]
return _apply_grid_transform(img, grid, mode)
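# Illustrative usage sketch for affine(). The 6-element matrix is the inverse
# affine map in row-major order, so the identity matrix below should return
# the input image up to interpolation/rounding error. Values are arbitrary.
def _example_affine_identity():
    img = torch.rand(3, 16, 16)
    identity = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    out = affine(img, identity, resample=0)  # nearest-neighbour resampling
    return (out - img).abs().max()           # expected to be ~0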
def _compute_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
# Inspired of PIL implementation:
# https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054
# pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points.
pts = torch.tensor([
[-0.5 * w, -0.5 * h, 1.0],
[-0.5 * w, 0.5 * h, 1.0],
[0.5 * w, 0.5 * h, 1.0],
[0.5 * w, -0.5 * h, 1.0],
])
theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3)
new_pts = pts.view(1, 4, 3).bmm(theta.transpose(1, 2)).view(4, 2)
min_vals, _ = new_pts.min(dim=0)
max_vals, _ = new_pts.max(dim=0)
# Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
tol = 1e-4
cmax = torch.ceil((max_vals / tol).trunc_() * tol)
cmin = torch.floor((min_vals / tol).trunc_() * tol)
size = cmax - cmin
return int(size[0]), int(size[1])
def rotate(
img: Tensor, matrix: List[float], resample: int = 0, expand: bool = False, fill: Optional[int] = None
) -> Tensor:
"""PRIVATE METHOD. Rotate the Tensor image by angle.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): image to be rotated.
matrix (list of floats): list of 6 float values representing inverse matrix for rotation transformation.
Translation part (``matrix[2]`` and ``matrix[5]``) should be in pixel coordinates.
resample (int, optional): An optional resampling filter. Default is nearest (=0). Other supported values:
bilinear(=2).
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
fill (n-tuple or int or float): this option is not supported for Tensor input.
Fill value for the area outside the transform in the output image is always 0.
Returns:
Tensor: Rotated image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
_interpolation_modes = {
0: "nearest",
2: "bilinear",
}
_assert_grid_transform_inputs(img, matrix, resample, fill, _interpolation_modes)
w, h = img.shape[-1], img.shape[-2]
ow, oh = _compute_output_size(matrix, w, h) if expand else (w, h)
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
# grid will be generated on the same device as theta and img
grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh)
mode = _interpolation_modes[resample]
return _apply_grid_transform(img, grid, mode)
def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device):
# https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
# src/libImaging/Geometry.c#L394
#
# x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
# y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
#
theta1 = torch.tensor([[
[coeffs[0], coeffs[1], coeffs[2]],
[coeffs[3], coeffs[4], coeffs[5]]
]], dtype=dtype, device=device)
theta2 = torch.tensor([[
[coeffs[6], coeffs[7], 1.0],
[coeffs[6], coeffs[7], 1.0]
]], dtype=dtype, device=device)
d = 0.5
base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
base_grid[..., 2].fill_(1)
rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))
output_grid = output_grid1 / output_grid2 - 1.0
return output_grid.view(1, oh, ow, 2)
def perspective(
img: Tensor, perspective_coeffs: List[float], interpolation: int = 2, fill: Optional[int] = None
) -> Tensor:
"""PRIVATE METHOD. Perform perspective transform of the given Tensor image.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be transformed.
perspective_coeffs (list of float): perspective transformation coefficients.
interpolation (int): Interpolation type. Default, ``PIL.Image.BILINEAR``.
fill (n-tuple or int or float): this option is not supported for Tensor input. Fill value for the area
outside the transform in the output image is always 0.
Returns:
Tensor: transformed image.
"""
if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError('Input img should be Tensor Image')
_interpolation_modes = {
0: "nearest",
2: "bilinear",
}
_assert_grid_transform_inputs(
img,
matrix=None,
resample=interpolation,
fillcolor=fill,
_interpolation_modes=_interpolation_modes,
coeffs=perspective_coeffs
)
ow, oh = img.shape[-1], img.shape[-2]
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device)
mode = _interpolation_modes[interpolation]
return _apply_grid_transform(img, grid, mode)
def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
ksize_half = (kernel_size - 1) * 0.5
x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
pdf = torch.exp(-0.5 * (x / sigma).pow(2))
kernel1d = pdf / pdf.sum()
return kernel1d
def _get_gaussian_kernel2d(
kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype)
kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype)
kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :])
return kernel2d
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
"""PRIVATE METHOD. Performs Gaussian blurring on the img by given kernel.
.. warning::
Module ``transforms.functional_tensor`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (Tensor): Image to be blurred
kernel_size (sequence of int or int): Kernel size of the Gaussian kernel ``(kx, ky)``.
sigma (sequence of float or float, optional): Standard deviation of the Gaussian kernel ``(sx, sy)``.
Returns:
Tensor: An image that is blurred using gaussian kernel of given parameters
"""
if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, kernel.dtype)
# padding = (left, right, top, bottom)
padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
img = torch_pad(img, padding, mode="reflect")
img = conv2d(img, kernel, groups=img.shape[-3])
img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
return img
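# Illustrative usage sketch for gaussian_blur(). Kernel size and sigma are
# arbitrary demonstration values; with a 5x5 kernel and the reflect padding
# used above, the output keeps the spatial size of the input.
def _example_gaussian_blur():
    img = torch.rand(3, 32, 32)
    blurred = gaussian_blur(img, [5, 5], [1.5, 1.5])
    return blurred.shape  # (3, 32, 32)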
class TensorColorJitter(torch.nn.Module):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
super().__init__()
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
@torch.jit.unused
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
@torch.jit.unused
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness is not None:
brightness_factor = random.uniform(brightness[0], brightness[1])
transforms.append(Lambda(lambda img: adjust_brightness(img, brightness_factor)))
if contrast is not None:
contrast_factor = random.uniform(contrast[0], contrast[1])
transforms.append(Lambda(lambda img: adjust_contrast(img, contrast_factor)))
if saturation is not None:
saturation_factor = random.uniform(saturation[0], saturation[1])
transforms.append(Lambda(lambda img: adjust_saturation(img, saturation_factor)))
if hue is not None:
hue_factor = random.uniform(hue[0], hue[1])
transforms.append(Lambda(lambda img: adjust_hue(img, hue_factor)))
random.shuffle(transforms)
transform = Compose(transforms)
return transform
def forward(self, img):
"""
Args:
img (Tensor): Input image.
Returns:
PIL Image or Tensor: Color jittered image.
"""
fn_idx = torch.randperm(4)
for fn_id in fn_idx:
if fn_id == 0 and self.brightness is not None:
brightness = self.brightness
brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
img = adjust_brightness(img, brightness_factor)
if fn_id == 1 and self.contrast is not None:
contrast = self.contrast
contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
img = adjust_contrast(img, contrast_factor)
if fn_id == 2 and self.saturation is not None:
saturation = self.saturation
saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
img = adjust_saturation(img, saturation_factor)
if fn_id == 3 and self.hue is not None:
hue = self.hue
hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
img = adjust_hue(img, hue_factor)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
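# Illustrative usage sketch for TensorColorJitter on a float RGB tensor.
# The jitter ranges and tensor shape are made up for demonstration, and the
# sketch assumes the module-level imports this class already relies on
# (torch, numbers, random) are present.
def _example_tensor_color_jitter():
    jitter = TensorColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1)
    img = torch.rand(3, 16, 16)
    return jitter(img)  # randomly brightness/contrast/saturation/hue-jittered copy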
| 38.833205 | 119 | 0.643066 |
112b0f7d095701eeb2325ef241d8653a506ec96b | 12,065 | py | Python | tools/custom_check.py | tuhinspatra/tyro-zulip-bot | 29c149af1b01955e50564e31cff03331fa723880 | ["MIT"] | 1 | 2020-05-25T11:52:31.000Z | 2020-05-25T11:52:31.000Z | tools/custom_check.py | armag-pro/tyro-zulip-bot | 29c149af1b01955e50564e31cff03331fa723880 | ["MIT"] | 6 | 2020-03-24T16:39:54.000Z | 2021-04-30T20:46:43.000Z | tools/custom_check.py | tuhinspatra/tyro-zulip-bot | 29c149af1b01955e50564e31cff03331fa723880 | ["MIT"] | 3 | 2019-01-26T21:40:16.000Z | 2019-02-24T20:16:26.000Z |
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import traceback
from server_lib.printer import print_err, colors
from typing import cast, Any, Callable, Dict, List, Optional, Tuple
def build_custom_checkers(by_lang):
# type: (Dict[str, List[str]]) -> Tuple[Callable[[], bool], Callable[[], bool]]
RuleList = List[Dict[str, Any]]
def custom_check_file(fn, identifier, rules, skip_rules=None, max_length=None):
# type: (str, str, RuleList, Optional[Any], Optional[int]) -> bool
failed = False
color = next(colors)
line_tups = []
for i, line in enumerate(open(fn)):
line_newline_stripped = line.strip('\n')
line_fully_stripped = line_newline_stripped.strip()
skip = False
for rule in skip_rules or []:
if re.match(rule, line):
skip = True
if line_fully_stripped.endswith(' # nolint'):
continue
if skip:
continue
tup = (i, line, line_newline_stripped, line_fully_stripped)
line_tups.append(tup)
rules_to_apply = []
fn_dirname = os.path.dirname(fn)
for rule in rules:
exclude_list = rule.get('exclude', set())
if fn in exclude_list or fn_dirname in exclude_list:
continue
if rule.get("include_only"):
found = False
for item in rule.get("include_only", set()):
if item in fn:
found = True
if not found:
continue
rules_to_apply.append(rule)
for rule in rules_to_apply:
exclude_lines = {
line for
(exclude_fn, line) in rule.get('exclude_line', set())
if exclude_fn == fn
}
pattern = rule['pattern']
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if line_fully_stripped in exclude_lines:
exclude_lines.remove(line_fully_stripped)
continue
try:
line_to_check = line_fully_stripped
if rule.get('strip') is not None:
if rule['strip'] == '\n':
line_to_check = line_newline_stripped
else:
raise Exception("Invalid strip rule")
if re.search(pattern, line_to_check):
print_err(identifier, color, '{} at {} line {}:'.format(
rule['description'], fn, i+1))
print_err(identifier, color, line)
failed = True
except Exception:
print("Exception with %s at %s line %s" % (rule['pattern'], fn, i+1))
traceback.print_exc()
if exclude_lines:
print('Please remove exclusions for file %s: %s' % (fn, exclude_lines))
lastLine = None
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if isinstance(line, bytes):
line_length = len(line.decode("utf-8"))
else:
line_length = len(line)
if (max_length is not None and line_length > max_length and
'# type' not in line and 'test' not in fn and 'example' not in fn and
not re.match("\[[ A-Za-z0-9_:,&()-]*\]: http.*", line) and
not re.match("`\{\{ external_api_uri_subdomain \}\}[^`]+`", line) and
"#ignorelongline" not in line and 'migrations' not in fn):
print("Line too long (%s) at %s line %s: %s" % (len(line), fn, i+1, line_newline_stripped))
failed = True
lastLine = line
if lastLine and ('\n' not in lastLine):
print("No newline at the end of file. Fix with `sed -i '$a\\' %s`" % (fn,))
failed = True
return failed
whitespace_rules = [
# This linter should be first since bash_rules depends on it.
{'pattern': '\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '\t',
'strip': '\n',
'description': 'Fix tab-based whitespace'},
] # type: RuleList
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != '\s+$']) + [
# Two spaces trailing a line with other content is okay--it's a markdown line break.
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': '((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^#+[A-Za-z0-9]',
'strip': '\n',
'description': 'Missing space after # in heading'},
] # type: RuleList
python_rules = cast(RuleList, [
{'pattern': '".*"%\([a-z_].*\)?$',
'description': 'Missing space around "%"'},
{'pattern': "'.*'%\([a-z_].*\)?$",
'description': 'Missing space around "%"'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '":\w[^"]*$',
'description': 'Missing whitespace after ":"'},
{'pattern': "':\w[^']*$",
'description': 'Missing whitespace after ":"'},
{'pattern': "^\s+[#]\w",
'strip': '\n',
'description': 'Missing whitespace after "#"'},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).'},
{'pattern': 'self: Any',
'description': 'you can omit Any annotation for self',
'good_lines': ['def foo (self):'],
'bad_lines': ['def foo(self: Any):']},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None'},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation'},
{'pattern': "# type [(]",
'description': 'Missing : after type in type annotation'},
{'pattern': "#type",
'description': 'Missing whitespace after "#" in type annotation'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"'},
{'pattern': "% [(]",
'description': 'Unnecessary whitespace between "%" and "("'},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': ' % [a-zA-Z0-9_.]*\)?$',
'description': 'Used % comprehension without a tuple'},
{'pattern': '.*%s.* % \([a-zA-Z0-9_.]*\)$',
'description': 'Used % comprehension without a tuple'},
# This rule might give false positives in virtualenv setup files which should be excluded,
# and comments which should be rewritten to avoid use of "python2", "python3", etc.
{'pattern': 'python[23]',
'include_only': set(['zulip/', 'zulip_botserver/']),
'description': 'Explicit python invocations should not include a version'},
{'pattern': '__future__',
'include_only': set(['zulip_bots/zulip_bots/bots/']),
'description': 'Bots no longer need __future__ imports.'},
{'pattern': '#!/usr/bin/env python$',
'include_only': set(['zulip_bots/']),
'description': 'Python shebangs must be python3'},
{'pattern': '(^|\s)open\s*\(',
'description': 'open() should not be used in Zulip\'s bots. Use functions'
' provided by the bots framework to access the filesystem.',
'include_only': set(['zulip_bots/zulip_bots/bots/'])},
{'pattern': 'pprint',
'description': 'Used pprint, which is most likely a debugging leftover. For user output, use print().'},
{'pattern': '\(BotTestCase\)',
'bad_lines': ['class TestSomeBot(BotTestCase):'],
'description': 'Bot test cases should directly inherit from BotTestCase *and* DefaultTests.'},
{'pattern': '\(DefaultTests, BotTestCase\)',
'bad_lines': ['class TestSomeBot(DefaultTests, BotTestCase):'],
'good_lines': ['class TestSomeBot(BotTestCase, DefaultTests):'],
'description': 'Bot test cases should inherit from BotTestCase before DefaultTests.'},
]) + whitespace_rules
bash_rules = [
{'pattern': '#!.*sh [-xe]',
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
' to set -x|set -e'},
] + whitespace_rules[0:1] # type: RuleList
prose_style_rules = [
{'pattern': '[^\/\#\-\"]([jJ]avascript)', # exclude usage in hrefs/divs
'description': "javascript should be spelled JavaScript"},
{'pattern': '[^\/\-\.\"\'\_\=\>]([gG]ithub)[^\.\-\_\"\<]', # exclude usage in hrefs/divs
'description': "github should be spelled GitHub"},
{'pattern': '[oO]rganisation', # exclude usage in hrefs/divs
'description': "Organization is spelled with a z"},
{'pattern': '!!! warning',
'description': "!!! warning is invalid; it's spelled '!!! warn'"},
{'pattern': '[^-_]botserver(?!rc)|bot server',
'description': "Use Botserver instead of botserver or Botserver."},
] # type: RuleList
json_rules = [] # type: RuleList # fix newlines at ends of files
# It is okay that json_rules is empty, because the empty list
# ensures we'll still check JSON files for whitespace.
markdown_rules = markdown_whitespace_rules + prose_style_rules + [
{'pattern': '\[(?P<url>[^\]]+)\]\((?P=url)\)',
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'}
]
help_markdown_rules = markdown_rules + [
{'pattern': '[a-z][.][A-Z]',
'description': "Likely missing space after end of sentence"},
{'pattern': '[rR]ealm',
'description': "Realms are referred to as Organizations in user-facing docs."},
]
txt_rules = whitespace_rules
def check_custom_checks_py():
# type: () -> bool
failed = False
for fn in by_lang['py']:
if 'custom_check.py' in fn:
continue
if custom_check_file(fn, 'py', python_rules, max_length=140):
failed = True
return failed
def check_custom_checks_nonpy():
# type: () -> bool
failed = False
for fn in by_lang['sh']:
if custom_check_file(fn, 'sh', bash_rules):
failed = True
for fn in by_lang['json']:
if custom_check_file(fn, 'json', json_rules):
failed = True
markdown_docs_length_exclude = {
"zulip_bots/zulip_bots/bots/converter/doc.md",
"tools/server_lib/README.md",
}
for fn in by_lang['md']:
max_length = None
if fn not in markdown_docs_length_exclude:
max_length = 120
rules = markdown_rules
if fn.startswith("templates/zerver/help"):
rules = help_markdown_rules
if custom_check_file(fn, 'md', rules, max_length=max_length):
failed = True
for fn in by_lang['txt'] + by_lang['text']:
if custom_check_file(fn, 'txt', txt_rules):
failed = True
for fn in by_lang['yaml']:
if custom_check_file(fn, 'yaml', txt_rules):
failed = True
return failed
return (check_custom_checks_py, check_custom_checks_nonpy)
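# Illustrative usage sketch for build_custom_checkers(). The by_lang mapping
# and the file path below are hypothetical; a real caller would assemble
# by_lang from the files it actually lints.
def _example_run_custom_checks():
    by_lang = {
        'py': ['zulip_bots/example_bot.py'],  # hypothetical path
        'sh': [], 'json': [], 'md': [], 'txt': [], 'text': [], 'yaml': [],
    }
    check_py, check_nonpy = build_custom_checkers(by_lang)
    failed = check_py() or check_nonpy()  # True if any rule was violated
    return failed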
| 44.685185 | 113 | 0.5477 |
def56c559708e37e9d67fc12f0b9bb73e731bfeb | 2,414 | py | Python | src/sage/combinat/algebraic_combinatorics.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | ["BSL-1.0"] | null | null | null | src/sage/combinat/algebraic_combinatorics.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | ["BSL-1.0"] | null | null | null | src/sage/combinat/algebraic_combinatorics.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | ["BSL-1.0"] | null | null | null |
r"""
Algebraic combinatorics
=======================
Quickref
--------
.. TODO:: write it!
Thematic tutorials
------------------
.. TODO:: get Sphinx to create those cross links properly
- `Algebraic Combinatorics in Sage <../../../../thematic_tutorials/algebraic_combinatorics.html>`_
- `Lie Methods and Related Combinatorics in Sage <../../../../thematic_tutorials/lie.html>`_
- `Linear Programming (Mixed Integer) <../../../../thematic_tutorials/linear_programming.html>`_
Enumerated sets of combinatorial objects
----------------------------------------
- :ref:`sage.combinat.catalog_partitions`
- :class:`~sage.combinat.gelfand_tsetlin_patterns.GelfandTsetlinPattern`, :class:`~sage.combinat.gelfand_tsetlin_patterns.GelfandTsetlinPatterns`
- :class:`~sage.combinat.knutson_tao_puzzles.KnutsonTaoPuzzleSolver`
Combinatorial Hopf Algebras
---------------------------
- :ref:`sage.combinat.sf`
- :ref:`sage.combinat.ncsf_qsym`
- :ref:`sage.combinat.schubert_polynomial`
- :ref:`sage.combinat.ncsym`
- :ref:`sage.combinat.grossman_larson_algebras`
- :ref:`sage.combinat.fqsym`
- :ref:`sage.combinat.chas.wqsym`
Groups and Algebras
-------------------
- :ref:`Catalog of algebras <sage.algebras.catalog>`
- :ref:`Groups <sage.groups.groups_catalog>`
- :class:`SymmetricGroup`, :class:`CoxeterGroup`, :class:`WeylGroup`
- :class:`~sage.combinat.diagram_algebras.PartitionAlgebra`
- :class:`~sage.algebras.iwahori_hecke_algebra.IwahoriHeckeAlgebra`
- :class:`~sage.combinat.symmetric_group_algebra.SymmetricGroupAlgebra`
- :class:`~sage.algebras.nil_coxeter_algebra.NilCoxeterAlgebra`
- :class:`~sage.algebras.affine_nil_temperley_lieb.AffineNilTemperleyLiebTypeA`
- :ref:`sage.combinat.descent_algebra`
- :ref:`sage.combinat.diagram_algebras`
Combinatorial Representation Theory
-----------------------------------
- :ref:`sage.combinat.root_system`
- :ref:`sage.combinat.crystals`
- :ref:`sage.combinat.rigged_configurations`
- :ref:`sage.combinat.cluster_algebra_quiver`
- :class:`~sage.combinat.kazhdan_lusztig.KazhdanLusztigPolynomial`
- :class:`~sage.combinat.symmetric_group_representations.SymmetricGroupRepresentation`
- :ref:`sage.combinat.yang_baxter_graph`
- :ref:`sage.combinat.hall_polynomial`
Operads and their algebras
--------------------------
- :ref:`sage.combinat.free_dendriform_algebra`
- :ref:`sage.combinat.free_prelie_algebra`
- :ref:`sage.algebras.free_zinbiel_algebra`
"""
| 34.485714 | 145 | 0.718724 |
137d123ef820285f36696eae718fe84dde3fb541 | 179,869 | py | Python | CPAC/pipeline/cpac_pipeline.py | tbweng/C-PAC | 12a1807865273891aa3a566429ac9fe76c12532c | ["BSD-3-Clause"] | null | null | null | CPAC/pipeline/cpac_pipeline.py | tbweng/C-PAC | 12a1807865273891aa3a566429ac9fe76c12532c | ["BSD-3-Clause"] | null | null | null | CPAC/pipeline/cpac_pipeline.py | tbweng/C-PAC | 12a1807865273891aa3a566429ac9fe76c12532c | ["BSD-3-Clause"] | null | null | null |
import os
import time
import six
import re
import csv
import shutil
import pickle
import copy
import json
import pandas as pd
import pkg_resources as p
import networkx as nx
import logging as cb_logging
from time import strftime
import nipype
import nipype.pipeline.engine as pe
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
import nipype.interfaces.afni as afni
from nipype.interfaces.afni import preprocess
import nipype.interfaces.ants as ants
import nipype.interfaces.c3 as c3
from nipype.interfaces.utility import Merge
from nipype.pipeline.engine.utils import format_dot
from nipype import config
from nipype import logging
from indi_aws import aws_utils, fetch_creds
import CPAC
from CPAC.network_centrality.pipeline import (
create_network_centrality_workflow
)
from CPAC.anat_preproc.anat_preproc import create_anat_preproc
from CPAC.EPI_DistCorr.EPI_DistCorr import create_EPI_DistCorr
from CPAC.func_preproc.func_preproc import (
create_func_preproc,
create_wf_edit_func
)
from CPAC.seg_preproc.seg_preproc import create_seg_preproc
from CPAC.warp.pipeline import (
output_to_standard,
z_score_standardize,
fisher_z_score_standardize,
output_smooth,
calc_avg
)
from CPAC.warp.pipeline import ants_apply_warps_asl_mni
from CPAC.registration import (
create_bbregister_asl_to_anat,
create_register_asl_to_anat
)
from CPAC.registration import (
create_fsl_flirt_linear_reg,
create_fsl_fnirt_nonlinear_reg,
create_register_func_to_anat,
create_bbregister_func_to_anat,
create_wf_calculate_ants_warp,
create_wf_apply_ants_warp,
create_wf_c3d_fsl_to_itk,
create_wf_collect_transforms
)
from CPAC.nuisance import create_nuisance_workflow, bandpass_voxels, NuisanceRegressor
from CPAC.aroma import create_aroma
from CPAC.median_angle import create_median_angle_correction
from CPAC.generate_motion_statistics import motion_power_statistics
from CPAC.scrubbing import create_scrubbing_preproc
from CPAC.timeseries import (
create_surface_registration,
get_roi_timeseries,
get_voxel_timeseries,
get_vertices_timeseries,
get_spatial_map_timeseries
)
from CPAC.network_centrality import (
create_resting_state_graphs,
get_cent_zscore
)
from CPAC.warp.pipeline import (
ants_apply_warps_func_mni,
ants_apply_inverse_warps_template_to_func
)
from CPAC.vmhc.vmhc import create_vmhc
from CPAC.reho.reho import create_reho
from CPAC.alff.alff import create_alff
from CPAC.sca.sca import create_sca, create_temporal_reg
from CPAC.connectome.pipeline import create_connectome
from CPAC.utils.symlinks import create_symlinks
from CPAC.utils.datasource import (
create_func_datasource,
create_anat_datasource,
create_roi_mask_dataflow,
create_spatial_map_dataflow,
create_check_for_s3_node
)
from CPAC.utils import Configuration, Strategy, Outputs, function, find_files
from CPAC.qc.pipeline import create_qc_workflow
from CPAC.qc.utils import generate_qc_pages
from CPAC.utils.utils import (
extract_one_d,
get_scan_params,
get_tr,
extract_txt,
create_log,
extract_output_mean,
create_output_mean_csv,
get_zscore,
get_fisher_zscore,
add_afni_prefix
)
logger = logging.getLogger('nipype.workflow')
# TODO ASH move to somewhere else
def pick_wm(seg_prob_list):
seg_prob_list.sort()
return seg_prob_list[-1]
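# Example (hypothetical paths): for FSL FAST outputs such as
# ['.../segment_prob_0.nii.gz', '.../segment_prob_1.nii.gz', '.../segment_prob_2.nii.gz'],
# pick_wm() above sorts the list and returns the last entry, i.e. the
# *_prob_2 white-matter probability map.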
def create_log_node(workflow, logged_wf, output, index, scan_id=None):
try:
log_dir = workflow.config['logging']['log_directory']
if logged_wf:
log_wf = create_log(wf_name='log_%s' % logged_wf.name)
log_wf.inputs.inputspec.workflow = logged_wf.name
log_wf.inputs.inputspec.index = index
log_wf.inputs.inputspec.log_dir = log_dir
workflow.connect(logged_wf, output, log_wf, 'inputspec.inputs')
else:
log_wf = create_log(wf_name='log_done_%s' % scan_id,
scan_id=scan_id)
log_wf.base_dir = log_dir
log_wf.inputs.inputspec.workflow = 'DONE'
log_wf.inputs.inputspec.index = index
log_wf.inputs.inputspec.log_dir = log_dir
log_wf.inputs.inputspec.inputs = log_dir
return log_wf
except Exception as e:
print(e)
def prep_workflow(sub_dict, c, run, pipeline_timing_info=None,
p_name=None, plugin='MultiProc', plugin_args=None):
'''
Function to prepare and, optionally, run the C-PAC workflow
Parameters
----------
sub_dict : dictionary
subject dictionary with anatomical and functional image paths
c : Configuration object
CPAC pipeline configuration dictionary object
run : boolean
flag to indicate whether to run the prepared workflow
pipeline_timing_info : list (optional); default=None
list of pipeline info for reporting timing information
p_name : string (optional); default=None
name of pipeline
plugin : string (optional); default='MultiProc'
nipype plugin to utilize when the workflow is run
plugin_args : dictionary (optional); default=None
plugin-specific arguments for the workflow plugin
Returns
-------
workflow : nipype workflow
the prepared nipype workflow object containing the parameters
specified in the config
'''
# Import packages
from CPAC.utils.utils import check_config_resources, check_system_deps
import pickle
ndmg_out = False
# Ensure that changes to the config will not affect other parts
c = copy.copy(c)
subject_id = sub_dict['subject_id']
if sub_dict['unique_id']:
subject_id += "_" + sub_dict['unique_id']
log_dir = os.path.join(c.logDirectory, 'pipeline_%s' % c.pipelineName, subject_id)
if not os.path.exists(log_dir):
os.makedirs(os.path.join(log_dir))
# TODO ASH Enforce c.run_logging to be boolean
# TODO ASH Schema validation
config.update_config({
'logging': {
'log_directory': log_dir,
'log_to_file': bool(getattr(c, 'run_logging', True))
}
})
logging.update_logging(config)
# Start timing here
pipeline_start_time = time.time()
# at end of workflow, take timestamp again, take time elapsed and check
# tempfile add time to time data structure inside tempfile, and increment
# number of subjects
# Check pipeline config resources
sub_mem_gb, num_cores_per_sub, num_ants_cores = \
check_config_resources(c)
if plugin_args:
plugin_args['memory_gb'] = sub_mem_gb
plugin_args['n_procs'] = num_cores_per_sub
else:
plugin_args = {'memory_gb': sub_mem_gb, 'n_procs': num_cores_per_sub}
# perhaps in future allow user to set threads maximum
# this is for centrality mostly
# import mkl
numThreads = '1'
os.environ['OMP_NUM_THREADS'] = '1' # str(num_cores_per_sub)
os.environ['MKL_NUM_THREADS'] = '1' # str(num_cores_per_sub)
os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS'] = str(num_ants_cores)
# calculate maximum potential use of cores according to current pipeline
# configuration
max_core_usage = int(c.maxCoresPerParticipant) * \
int(c.numParticipantsAtOnce)
information = """
C-PAC version: {cpac_version}
Setting maximum number of cores per participant to {cores}
Setting number of participants at once to {participants}
Setting OMP_NUM_THREADS to {threads}
Setting MKL_NUM_THREADS to {threads}
Setting ANTS/ITK thread usage to {ants_threads}
Maximum potential number of cores that might be used during this run: {max_cores}
"""
logger.info(information.format(
cpac_version=CPAC.__version__,
cores=c.maxCoresPerParticipant,
participants=c.numParticipantsAtOnce,
threads=numThreads,
ants_threads=c.num_ants_threads,
max_cores=max_core_usage
))
# TODO ASH temporary code, remove
# TODO ASH maybe scheme validation/normalization
already_skullstripped = c.already_skullstripped[0]
if already_skullstripped == 2:
already_skullstripped = 0
elif already_skullstripped == 3:
already_skullstripped = 1
subject_info = {}
subject_info['subject_id'] = subject_id
subject_info['start_time'] = pipeline_start_time
# Check system dependencies
check_system_deps(check_ants='ANTS' in c.regOption,
check_ica_aroma='1' in str(c.runICA[0]))
# absolute paths of the dirs
c.workingDirectory = os.path.abspath(c.workingDirectory)
if 's3://' not in c.outputDirectory:
c.outputDirectory = os.path.abspath(c.outputDirectory)
# Workflow setup
workflow_name = 'resting_preproc_' + str(subject_id)
workflow = pe.Workflow(name=workflow_name)
workflow.base_dir = c.workingDirectory
workflow.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': os.path.abspath(c.crashLogDirectory)
}
# Extract credentials path if it exists
try:
creds_path = sub_dict['creds_path']
if creds_path and 'none' not in creds_path.lower():
if os.path.exists(creds_path):
input_creds_path = os.path.abspath(creds_path)
else:
err_msg = 'Credentials path: "%s" for subject "%s" was not ' \
'found. Check this path and try again.' % (
creds_path, subject_id)
raise Exception(err_msg)
else:
input_creds_path = None
except KeyError:
input_creds_path = None
# TODO ASH normalize file paths with schema validator
template_anat_keys = [
("anat", "template_brain_only_for_anat"),
("anat", "template_skull_for_anat"),
("anat", "ref_mask"),
("anat", "template_symmetric_brain_only"),
("anat", "template_symmetric_skull"),
("anat", "dilated_symmetric_brain_mask"),
("anat", "templateSpecificationFile"),
("anat", "lateral_ventricles_mask"),
("anat", "PRIORS_CSF"),
("anat", "PRIORS_GRAY"),
("anat", "PRIORS_WHITE"),
("other", "configFileTwomm"),
]
for key_type, key in template_anat_keys:
node = create_check_for_s3_node(
key,
getattr(c, key), key_type,
input_creds_path, c.workingDirectory
)
setattr(c, key, node)
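# note: after this loop the template attributes on the config (e.g.
# c.template_brain_only_for_anat) are no longer plain file paths but
# datasource nodes, which is why downstream code connects them through
# their 'local_path' output instead of passing the string directly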
if c.reGenerateOutputs is True:
working_dir = os.path.join(c.workingDirectory, workflow_name)
erasable = list(find_files(working_dir, '*sink*')) + \
list(find_files(working_dir, '*link*')) + \
list(find_files(working_dir, '*log*'))
for f in erasable:
if os.path.isfile(f):
os.remove(f)
else:
shutil.rmtree(f)
"""""""""""""""""""""""""""""""""""""""""""""""""""
PREPROCESSING
"""""""""""""""""""""""""""""""""""""""""""""""""""
strat_initial = Strategy()
strat_list = []
num_strat = 0
workflow_bit_id = {}
workflow_counter = 0
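# a Strategy pairs a resource pool (mapping names to (node, output) tuples,
# e.g. {'anatomical_brain': (anat_preproc, 'outputspec.brain')}) with the
# list of node names used so far; whenever the configuration allows more
# than one choice (AFNI vs. BET skullstripping, FSL vs. ANTS registration,
# ...), the strategy is forked so each branch of the pipeline is built
# independently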
anat_flow = create_anat_datasource('anat_gather_%d' % num_strat)
anat_flow.inputs.inputnode.subject = subject_id
anat_flow.inputs.inputnode.anat = sub_dict['anat']
anat_flow.inputs.inputnode.creds_path = input_creds_path
anat_flow.inputs.inputnode.dl_dir = c.workingDirectory
strat_initial.update_resource_pool({
'anatomical': (anat_flow, 'outputspec.anat')
})
if 'brain_mask' in sub_dict.keys():
if sub_dict['brain_mask'] and sub_dict['brain_mask'].lower() != 'none':
brain_flow = create_anat_datasource('brain_gather_%d' % num_strat)
brain_flow.inputs.inputnode.subject = subject_id
brain_flow.inputs.inputnode.anat = sub_dict['brain_mask']
brain_flow.inputs.inputnode.creds_path = input_creds_path
brain_flow.inputs.inputnode.dl_dir = c.workingDirectory
strat_initial.update_resource_pool({
'anatomical_brain_mask': (brain_flow, 'outputspec.anat')
})
strat_list.append(strat_initial)
workflow_bit_id['anat_preproc'] = workflow_counter
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
if 'anatomical_brain_mask' in strat:
anat_preproc = create_anat_preproc(method='mask',
already_skullstripped=already_skullstripped,
wf_name='anat_preproc_mask_%d' % num_strat)
# the raw anatomical goes in here; the brain mask supplied with the data
# is applied to it inside the 'mask' method of anat_preproc
node, out_file = strat['anatomical']
workflow.connect(node, out_file, anat_preproc,
'inputspec.anat')
node, out_file = strat['anatomical_brain_mask']
workflow.connect(node, out_file,
anat_preproc, 'inputspec.brain_mask')
strat.append_name(anat_preproc.name)
strat.set_leaf_properties(anat_preproc, 'outputspec.brain')
strat.update_resource_pool({
'anatomical_brain': (anat_preproc, 'outputspec.brain'),
'anatomical_reorient': (anat_preproc, 'outputspec.reorient'),
})
create_log_node(workflow, anat_preproc,
'outputspec.brain', num_strat)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
if 'anatomical_brain_mask' in strat:
continue
if "AFNI" not in c.skullstrip_option and "BET" not in c.skullstrip_option:
err = '\n\n[!] C-PAC says: Your skull-stripping method options ' \
'setting does not include either \'AFNI\' or \'BET\'.\n\n' \
'Options you provided:\nskullstrip_option: {0}' \
'\n\n'.format(str(c.skullstrip_option))
raise Exception(err)
if "AFNI" in c.skullstrip_option:
anat_preproc = create_anat_preproc(method='afni',
already_skullstripped=already_skullstripped,
wf_name='anat_preproc_afni_%d' % num_strat)
anat_preproc.inputs.AFNI_options.set(
shrink_factor=c.skullstrip_shrink_factor,
var_shrink_fac=c.skullstrip_var_shrink_fac,
shrink_fac_bot_lim=c.skullstrip_shrink_factor_bot_lim,
avoid_vent=c.skullstrip_avoid_vent,
niter=c.skullstrip_n_iterations,
pushout=c.skullstrip_pushout,
touchup=c.skullstrip_touchup,
fill_hole=c.skullstrip_fill_hole,
avoid_eyes=c.skullstrip_avoid_eyes,
use_edge=c.skullstrip_use_edge,
exp_frac=c.skullstrip_exp_frac,
smooth_final=c.skullstrip_smooth_final,
push_to_edge=c.skullstrip_push_to_edge,
use_skull=c.skullstrip_use_skull,
perc_int=c.skullstrip_perc_int,
max_inter_iter=c.skullstrip_max_inter_iter,
blur_fwhm=c.skullstrip_blur_fwhm,
fac=c.skullstrip_fac,
)
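# all of the options above are forwarded by the anat_preproc workflow to
# AFNI's 3dSkullStrip, and they come straight from the pipeline
# configuration, so tuning skull-stripping is a config edit rather than a
# code edit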
node, out_file = strat['anatomical']
workflow.connect(node, out_file,
anat_preproc, 'inputspec.anat')
if "BET" in c.skullstrip_option:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(anat_preproc.name)
strat.set_leaf_properties(anat_preproc, 'outputspec.brain')
strat.update_resource_pool({
'anatomical_brain': (anat_preproc, 'outputspec.brain'),
'anatomical_reorient': (anat_preproc, 'outputspec.reorient'),
})
create_log_node(workflow, anat_preproc,
'outputspec.brain', num_strat)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
if 'anatomical_brain_mask' in strat:
continue
if 'anatomical_brain' in strat:
continue
if "BET" in c.skullstrip_option:
anat_preproc = create_anat_preproc(method='fsl',
already_skullstripped=already_skullstripped,
wf_name='anat_preproc_bet_%d' % num_strat)
anat_preproc.inputs.BET_options.set(
frac=c.bet_frac,
mask_boolean=c.bet_mask_boolean,
mesh_boolean=c.bet_mesh_boolean,
outline=c.bet_outline,
padding=c.bet_padding,
radius=c.bet_radius,
reduce_bias=c.bet_reduce_bias,
remove_eyes=c.bet_remove_eyes,
robust=c.bet_robust,
skull=c.bet_skull,
surfaces=c.bet_surfaces,
threshold=c.bet_threshold,
vertical_gradient=c.bet_vertical_gradient,
)
node, out_file = strat['anatomical']
workflow.connect(node, out_file, anat_preproc, 'inputspec.anat')
strat.append_name(anat_preproc.name)
strat.set_leaf_properties(anat_preproc, 'outputspec.brain')
strat.update_resource_pool({
'anatomical_brain': (anat_preproc, 'outputspec.brain'),
'anatomical_reorient': (anat_preproc, 'outputspec.reorient'),
})
create_log_node(workflow, anat_preproc,
'outputspec.brain', num_strat)
strat_list += new_strat_list
# Set Up FWHM iterable
# T1 -> Template, Non-linear registration (FNIRT or ANTS)
new_strat_list = []
workflow_counter += 1
# either run FSL anatomical-to-MNI registration, or...
workflow_bit_id['anat_mni_register'] = workflow_counter
if 'FSL' in c.regOption:
for num_strat, strat in enumerate(strat_list):
# this is to prevent the user from running FNIRT if they are
# providing already-skullstripped inputs. this is because
# FNIRT requires an input with the skull still on
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: FNIRT (for anatomical ' \
'registration) will not work properly if you ' \
'are providing inputs that have already been ' \
'skull-stripped.\n\nEither switch to using ' \
'ANTS for registration or provide input ' \
'images that have not been already ' \
'skull-stripped.\n\n'
logger.info(err_msg)
raise Exception
flirt_reg_anat_mni = create_fsl_flirt_linear_reg(
'anat_mni_flirt_register_%d' % num_strat
)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
flirt_reg_anat_mni, 'inputspec.input_brain')
# pass the reference files
workflow.connect(
c.template_brain_only_for_anat, 'local_path',
flirt_reg_anat_mni, 'inputspec.reference_brain'
)
if 'ANTS' in c.regOption:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(flirt_reg_anat_mni.name)
strat.set_leaf_properties(flirt_reg_anat_mni,
'outputspec.output_brain')
strat.update_resource_pool({
'anatomical_to_mni_linear_xfm': (flirt_reg_anat_mni, 'outputspec.linear_xfm'),
'mni_to_anatomical_linear_xfm': (flirt_reg_anat_mni, 'outputspec.invlinear_xfm'),
'anatomical_to_standard': (flirt_reg_anat_mni, 'outputspec.output_brain')
})
create_log_node(workflow, flirt_reg_anat_mni, 'outputspec.output_brain',
num_strat)
strat_list += new_strat_list
new_strat_list = []
try:
fsl_linear_reg_only = c.fsl_linear_reg_only
except AttributeError:
fsl_linear_reg_only = [0]
if 'FSL' in c.regOption and 0 in fsl_linear_reg_only:
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
if 'anat_mni_flirt_register' in nodes:
fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg(
'anat_mni_fnirt_register_%d' % num_strat
)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.input_brain')
# pass the reference files
workflow.connect(
c.template_brain_only_for_anat, 'local_path',
fnirt_reg_anat_mni, 'inputspec.reference_brain'
)
node, out_file = strat['anatomical_reorient']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.input_skull')
node, out_file = strat['anatomical_to_mni_linear_xfm']
workflow.connect(node, out_file,
fnirt_reg_anat_mni, 'inputspec.linear_aff')
workflow.connect(
c.template_skull_for_anat, 'local_path',
fnirt_reg_anat_mni, 'inputspec.reference_skull'
)
workflow.connect(
c.ref_mask, 'local_path',
fnirt_reg_anat_mni, 'inputspec.ref_mask'
)
# assign the FSL FNIRT config file specified in pipeline
# config.yml
fnirt_reg_anat_mni.inputs.inputspec.fnirt_config = c.fnirtConfig
if 1 in fsl_linear_reg_only:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(fnirt_reg_anat_mni.name)
strat.set_leaf_properties(fnirt_reg_anat_mni,
'outputspec.output_brain')
strat.update_resource_pool({
'anatomical_to_mni_nonlinear_xfm': (fnirt_reg_anat_mni, 'outputspec.nonlinear_xfm'),
'anatomical_to_standard': (fnirt_reg_anat_mni, 'outputspec.output_brain')
}, override=True)
create_log_node(workflow, fnirt_reg_anat_mni, 'outputspec.output_brain',
num_strat)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
# or run ANTS anatomical-to-MNI registration instead
if 'ANTS' in c.regOption and \
'anat_mni_flirt_register' not in nodes and \
'anat_mni_fnirt_register' not in nodes:
ants_reg_anat_mni = \
create_wf_calculate_ants_warp(
'anat_mni_ants_register_%d' % num_strat,
num_threads=num_ants_cores
)
# calculating the transform with the skull-stripped image is
# reported to be better, but it requires very high quality
# skullstripping; if skullstripping is imprecise, registration
# with the skull on is preferred
# TODO ASH assess with schema validator
if 1 in c.regWithSkull:
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: You selected ' \
'to run anatomical registration with ' \
'the skull, but you also selected to ' \
'use already-skullstripped images as ' \
'your inputs. This can be changed ' \
'in your pipeline configuration ' \
'editor.\n\n'
logger.info(err_msg)
raise Exception
# get the skull-stripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_mni,
'inputspec.anatomical_brain')
# pass the reference file
workflow.connect(
c.template_brain_only_for_anat, 'local_path',
ants_reg_anat_mni, 'inputspec.reference_brain'
)
# get the reorient skull-on anatomical from resource pool
node, out_file = strat['anatomical_reorient']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_mni,
'inputspec.anatomical_skull')
# pass the reference file
workflow.connect(
c.template_skull_for_anat, 'local_path',
ants_reg_anat_mni, 'inputspec.reference_skull'
)
else:
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file, ants_reg_anat_mni,
'inputspec.anatomical_brain')
# pass the reference file
workflow.connect(
c.template_brain_only_for_anat, 'local_path',
ants_reg_anat_mni, 'inputspec.reference_brain'
)
ants_reg_anat_mni.inputs.inputspec.set(
dimension=3,
use_histogram_matching=True,
winsorize_lower_quantile=0.01,
winsorize_upper_quantile=0.99,
metric=['MI', 'MI', 'CC'],
metric_weight=[1, 1, 1],
radius_or_number_of_bins=[32, 32, 4],
sampling_strategy=['Regular', 'Regular', None],
sampling_percentage=[0.25, 0.25, None],
number_of_iterations=[
[1000, 500, 250, 100],
[1000, 500, 250, 100],
[100, 100, 70, 20]
],
convergence_threshold=[1e-8, 1e-8, 1e-9],
convergence_window_size=[10, 10, 15],
transforms=['Rigid', 'Affine', 'SyN'],
transform_parameters=[[0.1], [0.1], [0.1, 3, 0]],
shrink_factors=[
[8, 4, 2, 1],
[8, 4, 2, 1],
[6, 4, 2, 1]
],
smoothing_sigmas=[
[3, 2, 1, 0],
[3, 2, 1, 0],
[3, 2, 1, 0]
]
)
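# the three entries in each list above correspond to the Rigid, Affine and
# SyN stages of the ANTS registration, in that order (e.g. 1000x500x250x100
# iterations for the two linear stages and 100x100x70x20 for SyN)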
strat.append_name(ants_reg_anat_mni.name)
strat.set_leaf_properties(ants_reg_anat_mni,
'outputspec.normalized_output_brain')
strat.update_resource_pool({
'ants_initial_xfm': (ants_reg_anat_mni, 'outputspec.ants_initial_xfm'),
'ants_rigid_xfm': (ants_reg_anat_mni, 'outputspec.ants_rigid_xfm'),
'ants_affine_xfm': (ants_reg_anat_mni, 'outputspec.ants_affine_xfm'),
'anatomical_to_mni_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.warp_field'),
'mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_mni, 'outputspec.inverse_warp_field'),
'anat_to_mni_ants_composite_xfm': (ants_reg_anat_mni, 'outputspec.composite_transform'),
'anatomical_to_standard': (ants_reg_anat_mni, 'outputspec.normalized_output_brain')
})
create_log_node(workflow, ants_reg_anat_mni,
'outputspec.normalized_output_brain', num_strat)
strat_list += new_strat_list
# [SYMMETRIC] T1 -> Symmetric Template, Non-linear registration (FNIRT/ANTS)
new_strat_list = []
workflow_counter += 1
if 1 in c.runVMHC and 1 in getattr(c, 'runFunctional', [1]):
workflow_bit_id['anat_mni_symmetric_register'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
if 'FSL' in c.regOption and \
'anat_mni_ants_register' not in nodes:
# this is to prevent the user from running FNIRT if they are
# providing already-skullstripped inputs. this is because
# FNIRT requires an input with the skull still on
# TODO ASH normalize w schema validation to bool
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: FNIRT (for anatomical ' \
'registration) will not work properly if you ' \
'are providing inputs that have already been ' \
'skull-stripped.\n\nEither switch to using ' \
'ANTS for registration or provide input ' \
'images that have not been already ' \
'skull-stripped.\n\n'
logger.info(err_msg)
raise Exception
flirt_reg_anat_symm_mni = create_fsl_flirt_linear_reg(
'anat_symmetric_mni_flirt_register_%d' % num_strat
)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
flirt_reg_anat_symm_mni,
'inputspec.input_brain')
# pass the reference files
workflow.connect(
c.template_symmetric_brain_only, 'local_path',
flirt_reg_anat_symm_mni, 'inputspec.reference_brain'
)
if 'ANTS' in c.regOption:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(flirt_reg_anat_symm_mni.name)
strat.set_leaf_properties(flirt_reg_anat_symm_mni,
'outputspec.output_brain')
strat.update_resource_pool({
'anatomical_to_symmetric_mni_linear_xfm': (
flirt_reg_anat_symm_mni, 'outputspec.linear_xfm'),
'symmetric_mni_to_anatomical_linear_xfm': (
flirt_reg_anat_symm_mni, 'outputspec.invlinear_xfm'),
'symmetric_anatomical_to_standard': (
flirt_reg_anat_symm_mni, 'outputspec.output_brain')
})
create_log_node(workflow, flirt_reg_anat_symm_mni,
'outputspec.output_brain',
num_strat)
strat_list += new_strat_list
new_strat_list = []
try:
fsl_linear_reg_only = c.fsl_linear_reg_only
except AttributeError:
fsl_linear_reg_only = [0]
if 'FSL' in c.regOption and 0 in fsl_linear_reg_only:
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
if 'anat_mni_flirt_register' in nodes:
fnirt_reg_anat_symm_mni = create_fsl_fnirt_nonlinear_reg(
'anat_symmetric_mni_fnirt_register_%d' % num_strat
)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni,
'inputspec.input_brain')
# pass the reference files
workflow.connect(
c.template_brain_only_for_anat, 'local_path',
fnirt_reg_anat_symm_mni, 'inputspec.reference_brain'
)
node, out_file = strat['anatomical_reorient']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni,
'inputspec.input_skull')
node, out_file = strat['anatomical_to_mni_linear_xfm']
workflow.connect(node, out_file,
fnirt_reg_anat_symm_mni,
'inputspec.linear_aff')
workflow.connect(
c.template_symmetric_skull, 'local_path',
fnirt_reg_anat_symm_mni, 'inputspec.reference_skull'
)
workflow.connect(
c.dilated_symmetric_brain_mask, 'local_path',
fnirt_reg_anat_symm_mni, 'inputspec.ref_mask'
)
strat.append_name(fnirt_reg_anat_symm_mni.name)
strat.set_leaf_properties(fnirt_reg_anat_symm_mni,
'outputspec.output_brain')
strat.update_resource_pool({
'anatomical_to_symmetric_mni_nonlinear_xfm': (
fnirt_reg_anat_symm_mni, 'outputspec.nonlinear_xfm'),
'symmetric_anatomical_to_standard': (
fnirt_reg_anat_symm_mni, 'outputspec.output_brain')
}, override=True)
create_log_node(workflow, fnirt_reg_anat_symm_mni,
'outputspec.output_brain',
num_strat)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
# or run ANTS anatomical-to-MNI registration instead
if 'ANTS' in c.regOption and \
'anat_mni_flirt_register' not in nodes and \
'anat_mni_fnirt_register' not in nodes and \
'anat_symmetric_mni_flirt_register' not in nodes and \
'anat_symmetric_mni_fnirt_register' not in nodes:
ants_reg_anat_symm_mni = \
create_wf_calculate_ants_warp(
'anat_symmetric_mni_ants_register_%d' % num_strat,
num_threads=num_ants_cores
)
# calculating the transform with the skull-stripped image is
# reported to be better, but it requires very high quality
# skullstripping; if skullstripping is imprecise, registration
# with the skull on is preferred
if 1 in c.regWithSkull:
if already_skullstripped == 1:
err_msg = '\n\n[!] CPAC says: You selected ' \
'to run anatomical registration with ' \
'the skull, but you also selected to ' \
'use already-skullstripped images as ' \
'your inputs. This can be changed ' \
'in your pipeline configuration ' \
'editor.\n\n'
logger.info(err_msg)
raise Exception
# get the skullstripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_symm_mni,
'inputspec.anatomical_brain')
# pass the reference file
workflow.connect(c.template_symmetric_brain_only, 'local_path',
ants_reg_anat_symm_mni, 'inputspec.reference_brain')
# get the reorient skull-on anatomical from resource
# pool
node, out_file = strat['anatomical_reorient']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
ants_reg_anat_symm_mni,
'inputspec.anatomical_skull')
# pass the reference file
workflow.connect(c.template_symmetric_skull, 'local_path',
ants_reg_anat_symm_mni, 'inputspec.reference_skull')
else:
# get the skullstripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
ants_reg_anat_symm_mni,
'inputspec.anatomical_brain')
# pass the reference file
workflow.connect(c.template_symmetric_brain_only, 'local_path',
ants_reg_anat_symm_mni, 'inputspec.reference_brain')
ants_reg_anat_symm_mni.inputs.inputspec.set(
dimension=3,
use_histogram_matching=True,
winsorize_lower_quantile=0.01,
winsorize_upper_quantile=0.99,
metric=['MI', 'MI', 'CC'],
metric_weight=[1, 1, 1],
radius_or_number_of_bins=[32, 32, 4],
sampling_strategy=['Regular', 'Regular', None],
sampling_percentage=[0.25, 0.25, None],
number_of_iterations=[[1000, 500, 250, 100],
[1000, 500, 250, 100],
[100, 100, 70, 20]],
convergence_threshold=[1e-8, 1e-8, 1e-9],
convergence_window_size=[10, 10, 15],
transforms=['Rigid', 'Affine', 'SyN'],
transform_parameters=[[0.1], [0.1], [0.1, 3, 0]],
shrink_factors=[[8, 4, 2, 1],
[8, 4, 2, 1],
[6, 4, 2, 1]],
smoothing_sigmas=[[3, 2, 1, 0],
[3, 2, 1, 0],
[3, 2, 1, 0]]
)
strat.append_name(ants_reg_anat_symm_mni.name)
strat.set_leaf_properties(ants_reg_anat_symm_mni,
'outputspec.normalized_output_brain')
strat.update_resource_pool({
'ants_symmetric_initial_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_initial_xfm'),
'ants_symmetric_rigid_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_rigid_xfm'),
'ants_symmetric_affine_xfm': (ants_reg_anat_symm_mni, 'outputspec.ants_affine_xfm'),
'anatomical_to_symmetric_mni_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.warp_field'),
'symmetric_mni_to_anatomical_nonlinear_xfm': (ants_reg_anat_symm_mni, 'outputspec.inverse_warp_field'),
'anat_to_symmetric_mni_ants_composite_xfm': (ants_reg_anat_symm_mni, 'outputspec.composite_transform'),
'symmetric_anatomical_to_standard': (ants_reg_anat_symm_mni, 'outputspec.normalized_output_brain')
})
create_log_node(workflow, ants_reg_anat_symm_mni,
'outputspec.normalized_output_brain',
num_strat)
strat_list += new_strat_list
# Inserting Segmentation Preprocessing Workflow
new_strat_list = []
workflow_counter += 1
if 1 in c.runSegmentationPreprocessing:
workflow_bit_id['seg_preproc'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
seg_preproc = None
# TODO ASH based on config, instead of nodes?
if 'anat_mni_fnirt_register' in nodes or 'anat_mni_flirt_register' in nodes:
seg_preproc = create_seg_preproc(c, use_ants=False,
wf_name='seg_preproc_%d' % num_strat)
elif 'anat_mni_ants_register' in nodes:
seg_preproc = create_seg_preproc(c, use_ants=True,
wf_name='seg_preproc_%d' % num_strat)
# TODO ASH review
if seg_preproc is None:
continue
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
seg_preproc, 'inputspec.brain')
if 'anat_mni_fnirt_register' in nodes or 'anat_mni_flirt_register' in nodes:
node, out_file = strat['mni_to_anatomical_linear_xfm']
workflow.connect(node, out_file,
seg_preproc,
'inputspec.standard2highres_mat')
elif 'anat_mni_ants_register' in nodes:
node, out_file = strat['ants_initial_xfm']
workflow.connect(node, out_file,
seg_preproc,
'inputspec.standard2highres_init')
node, out_file = strat['ants_rigid_xfm']
workflow.connect(node, out_file,
seg_preproc,
'inputspec.standard2highres_rig')
node, out_file = strat['ants_affine_xfm']
workflow.connect(node, out_file,
seg_preproc,
'inputspec.standard2highres_mat')
workflow.connect(c.PRIORS_CSF, 'local_path',
seg_preproc, 'inputspec.PRIOR_CSF')
workflow.connect(c.PRIORS_GRAY, 'local_path',
seg_preproc, 'inputspec.PRIOR_GRAY')
workflow.connect(c.PRIORS_WHITE, 'local_path',
seg_preproc, 'inputspec.PRIOR_WHITE')
# TODO ASH review with forking function
if 0 in c.runSegmentationPreprocessing:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(seg_preproc.name)
strat.update_resource_pool({
'anatomical_gm_mask': (seg_preproc, 'outputspec.gm_mask'),
'anatomical_csf_mask': (seg_preproc, 'outputspec.csf_mask'),
'anatomical_wm_mask': (seg_preproc, 'outputspec.wm_mask'),
'seg_probability_maps': (seg_preproc, 'outputspec.probability_maps'),
'seg_mixeltype': (seg_preproc, 'outputspec.mixeltype'),
'seg_partial_volume_map': (seg_preproc, 'outputspec.partial_volume_map'),
'seg_partial_volume_files': (seg_preproc, 'outputspec.partial_volume_files'),
})
create_log_node(workflow,
seg_preproc, 'outputspec.partial_volume_map',
num_strat)
# print(str(strat.get_resource_pool()))
strat_list += new_strat_list
# Inserting Functional Data workflow
if 'func' in sub_dict and \
1 in getattr(c, 'runFunctional', [1]):
# the pipeline config needs an explicit [0] to disable the functional workflow
for num_strat, strat in enumerate(strat_list):
asl_paths_dict = {}
func_paths_dict = {}
for func_key, func_dict in sub_dict['func'].iteritems():
# select bold scans only
if 'scantype' not in func_dict or func_dict['scantype'] != 'asl':
func_paths_dict[func_key] = func_dict
print('func paths: ', func_paths_dict)
# select asl scans
if 'scantype' in func_dict and func_dict['scantype'] == 'asl':
asl_paths_dict[func_key] = func_dict
print('asl paths: ', asl_paths_dict)
# if any ASL files exist, create the ASL workflow
if any(asl_paths_dict):
# Start ASL workflow builder
from CPAC.asl_preproc.asl_preproc import create_asl_preproc
print("Begin ASL workflow builder...")
asl_preproc = create_asl_preproc(c, strat,
wf_name='asl_preproc_%d' % num_strat)
# datasource for asl
asl_wf = create_func_datasource(asl_paths_dict,
'asl_gather_%d' % num_strat)
asl_wf.inputs.inputnode.set(
subject=subject_id,
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
asl_wf.get_node('inputnode').iterables = \
("scan", asl_paths_dict.keys())
workflow.connect(asl_wf, 'outputspec.rest', asl_preproc,
'inputspec.asl_file')
print(str(strat.get_resource_pool()))
# get the reorient skull-on anatomical from resource
# pool
node, out_file = strat['anatomical_reorient']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
asl_preproc,
'inputspec.anatomical_skull')
# get the skullstripped anatomical from resource pool
node, out_file = strat['anatomical_brain']
# pass the anatomical to the workflow
workflow.connect(node, out_file,
asl_preproc,
'inputspec.anatomical_brain')
# Input segmentation pve for white matter
# segmentation
node, out_file = strat['seg_partial_volume_files']
workflow.connect(node, (out_file, pick_wm),
asl_preproc,
'inputspec.seg_wm_pve')
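# connecting with the (out_file, pick_wm) tuple tells Nipype to run the
# pick_wm helper on the list of partial-volume files first, so only the
# white-matter PVE map reaches the ASL workflow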
# 7/8/19 try: move reg to cpac_pipeline.py
# 7/10/19: use the asl2anat.mat from oxford_asl
"""
# Func -> T1 Registration (Initial Linear reg)
dist_corr = False
asl_to_anat = create_register_asl_to_anat(dist_corr,
'asl_to_anat_FLIRT'
'_%d' % num_strat)
# Input registration parameters
asl_to_anat.inputs.inputspec.interp = 'trilinear'
# Input functional image (mean functional)
workflow.connect(asl_preproc, 'outputspec.meanasl',
asl_to_anat, 'inputspec.asl')
# Input skull-stripped anatomical (anat.nii.gz)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
asl_to_anat, 'inputspec.anat')
strat.update_resource_pool({
'mean_asl_in_anat': (asl_to_anat, 'outputspec.anat_asl_nobbreg'),
'asl_to_anat_linear_xfm': (asl_to_anat, 'outputspec.asl_to_anat_linear_xfm_nobbreg')
})
# Func -> T1 Registration (BBREG)
asl_to_anat_bbreg = create_bbregister_asl_to_anat(
dist_corr,
'asl_to_anat_bbreg_%d' % num_strat
)
# Input registration parameters
# TODO: $FSLDIR
asl_to_anat_bbreg.inputs.inputspec.bbr_schedule = \
'/usr/share/fsl/6.0/etc/flirtsch/bbr.sch'
# Input mean asl image
workflow.connect(asl_preproc, 'outputspec.meanasl',
asl_to_anat_bbreg, 'inputspec.asl')
# Input anatomical whole-head image (reoriented)
node, out_file = strat['anatomical_reorient']
workflow.connect(node, out_file,
asl_to_anat_bbreg,
'inputspec.anat_skull')
# Input initial linear reg
workflow.connect(asl_to_anat, 'outputspec.asl_to_anat_linear_xfm_nobbreg',
asl_to_anat_bbreg,
'inputspec.linear_reg_matrix')
# Input segmentation probability maps for white matter
# segmentation
node, out_file = strat['seg_probability_maps']
workflow.connect(node, (out_file, pick_wm),
asl_to_anat_bbreg,
'inputspec.anat_wm_segmentation')
strat.update_resource_pool({
'mean_asl_in_anat': (asl_to_anat_bbreg, 'outputspec.anat_asl'),
'asl_to_anat_linear_xfm': (asl_to_anat_bbreg, 'outputspec.asl_to_anat_linear_xfm')
}, override=True)
"""
# T1 -> MNI (done in main pipeline)
##################
### asl -> MNI ###
##################
warp_diffdata_wf = ants_apply_warps_asl_mni(
workflow, strat, num_strat, num_ants_cores,
asl_preproc, 'outputspec.diffdata',
asl_preproc, 'outputspec.meanasl',
c.template_brain_only_for_func,
"diffdata_to_standard",
"Linear", 3
)
create_log_node(workflow, warp_diffdata_wf,
'outputspec.output_image', num_strat)
warp_perfusion_wf = ants_apply_warps_asl_mni(
workflow, strat, num_strat, num_ants_cores,
asl_preproc, 'outputspec.perfusion_image',
asl_preproc, 'outputspec.meanasl',
c.template_brain_only_for_func,
"perfusion_image_to_standard",
"Linear", 3
)
create_log_node(workflow, warp_perfusion_wf,
'outputspec.output_image', num_strat)
# Start BOLD workflow builder
func_wf = create_func_datasource(func_paths_dict,
'func_gather_%d' % num_strat)
func_wf.inputs.inputnode.set(
subject=subject_id,
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
func_wf.get_node('inputnode').iterables = \
("scan", func_paths_dict.keys())
# Add in nodes to get parameters from configuration file
# a node which checks if scan_parameters are present for each scan
scan_params = \
pe.Node(function.Function(input_names=['data_config_scan_params',
'subject_id',
'scan',
'pipeconfig_tr',
'pipeconfig_tpattern',
'pipeconfig_start_indx',
'pipeconfig_stop_indx'],
output_names=['tr',
'tpattern',
'ref_slice',
'start_indx',
'stop_indx'],
function=get_scan_params,
as_module=True),
name='scan_params_%d' % num_strat)
if "Selected Functional Volume" in c.func_reg_input:
get_func_volume = pe.Node(interface=afni.Calc(),
name='get_func_volume_%d' % num_strat)
get_func_volume.inputs.set(
expr='a',
single_idx=c.func_reg_input_volume,
outputtype='NIFTI_GZ'
)
workflow.connect(func_wf, 'outputspec.rest',
get_func_volume, 'in_file_a')
# wire in the scan parameter workflow
workflow.connect(func_wf, 'outputspec.scan_params',
scan_params, 'data_config_scan_params')
workflow.connect(func_wf, 'outputspec.subject',
scan_params, 'subject_id')
workflow.connect(func_wf, 'outputspec.scan',
scan_params, 'scan')
# connect in constants
scan_params.inputs.set(
pipeconfig_tr=c.TR,
pipeconfig_tpattern=c.slice_timing_pattern,
pipeconfig_start_indx=c.startIdx,
pipeconfig_stop_indx=c.stopIdx
)
# node to convert TR between seconds and milliseconds
convert_tr = pe.Node(function.Function(input_names=['tr'],
output_names=['tr'],
function=get_tr,
as_module=True),
name='convert_tr_%d' % num_strat)
strat.update_resource_pool({
'raw_functional': (func_wf, 'outputspec.rest'),
'scan_id': (func_wf, 'outputspec.scan')
})
strat.set_leaf_properties(func_wf, 'outputspec.rest')
if 1 in c.runEPI_DistCorr:
try:
strat.update_resource_pool({
"fmap_phase_diff": (func_wf, 'outputspec.phase_diff'),
"fmap_magnitude": (func_wf, 'outputspec.magnitude')
})
except:
err = "\n\n[!] You have selected to run field map " \
"distortion correction, but at least one of your " \
"scans listed in your data configuration file is " \
"missing either a field map phase difference file " \
"or a field map magnitude file, or both.\n\n"
raise Exception(err)
if "Selected Functional Volume" in c.func_reg_input:
strat.update_resource_pool({
'selected_func_volume': (get_func_volume, 'out_file')
})
# Truncate scan length based on configuration information
for num_strat, strat in enumerate(strat_list):
trunc_wf = create_wf_edit_func(
wf_name="edit_func_%d" % (num_strat)
)
# find the output data on the leaf node
node, out_file = strat.get_leaf_properties()
# connect the functional data from the leaf node into the wf
workflow.connect(node, out_file,
trunc_wf, 'inputspec.func')
# connect the other input parameters
workflow.connect(scan_params, 'start_indx',
trunc_wf, 'inputspec.start_idx')
workflow.connect(scan_params, 'stop_indx',
trunc_wf, 'inputspec.stop_idx')
# replace the leaf node with the output from the recently added
# workflow
strat.set_leaf_properties(trunc_wf, 'outputspec.edited_func')
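# this "leaf node" pattern repeats throughout the functional stream: each
# preprocessing step takes the current leaf as its input and then becomes
# the leaf itself, so later steps always receive the most processed
# version of the data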
# EPI Field-Map based Distortion Correction
new_strat_list = []
workflow_counter += 1
if 1 in c.runEPI_DistCorr:
workflow_bit_id['epi_distcorr'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
if 'BET' in c.fmap_distcorr_skullstrip:
epi_distcorr = create_EPI_DistCorr(
use_BET=True,
wf_name='epi_distcorr_%d' % (num_strat)
)
epi_distcorr.inputs.bet_frac_input.bet_frac = c.fmap_distcorr_frac
epi_distcorr.get_node('bet_frac_input').iterables = \
('bet_frac', c.fmap_distcorr_frac)
else:
epi_distcorr = create_EPI_DistCorr(
use_BET=False,
wf_name='epi_distcorr_%d' % (num_strat)
)
epi_distcorr.inputs.afni_threshold_input.afni_threshold = \
c.fmap_distcorr_threshold
epi_distcorr.inputs.deltaTE_input.deltaTE = c.fmap_distcorr_deltaTE
epi_distcorr.inputs.dwellT_input.dwellT = c.fmap_distcorr_dwell_time
epi_distcorr.inputs.dwell_asym_ratio_input.dwell_asym_ratio = c.fmap_distcorr_dwell_asym_ratio
epi_distcorr.get_node('deltaTE_input').iterables = (
'deltaTE', c.fmap_distcorr_deltaTE
)
epi_distcorr.get_node('dwellT_input').iterables = (
'dwellT', c.fmap_distcorr_dwell_time
)
epi_distcorr.get_node('dwell_asym_ratio_input').iterables = (
'dwell_asym_ratio', c.fmap_distcorr_dwell_asym_ratio
)
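# because these inputs are declared as iterables, Nipype expands the
# distortion-correction sub-workflow once per listed value, so giving
# several deltaTE / dwell-time values in the config fans out into
# parallel runs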
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file, epi_distcorr,
'inputspec.func_file')
node, out_file = strat['anatomical_reorient']
workflow.connect(node, out_file, epi_distcorr,
'inputspec.anat_file')
node, out_file = strat['fmap_phase_diff']
workflow.connect(node, out_file, epi_distcorr,
'inputspec.fmap_pha')
node, out_file = strat['fmap_magnitude']
workflow.connect(node, out_file, epi_distcorr,
'inputspec.fmap_mag')
# TODO ASH review forking
if 0 in c.runEPI_DistCorr:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(epi_distcorr.name)
strat.update_resource_pool({
'despiked_fieldmap': (epi_distcorr, 'outputspec.fmap_despiked'),
'fieldmap_mask': (epi_distcorr, 'outputspec.fieldmapmask'),
'prepared_fieldmap_map': (epi_distcorr, 'outputspec.fieldmap')
})
strat_list += new_strat_list
# Slice Timing Correction Workflow
new_strat_list = []
if 1 in c.slice_timing_correction:
for num_strat, strat in enumerate(strat_list):
# create TShift AFNI node
func_slice_timing_correction = pe.Node(
interface=preprocess.TShift(),
name='func_slice_timing_correction_%d' % (num_strat))
func_slice_timing_correction.inputs.outputtype = 'NIFTI_GZ'
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
func_slice_timing_correction, 'in_file')
# TODO ASH normalize TR w schema validation
# we might prefer to use the TR stored in the NIFTI header
# if not, use the value in the scan_params node
if c.TR:
if isinstance(c.TR, str):
if "None" in c.TR or "none" in c.TR:
pass
else:
workflow.connect(scan_params, 'tr',
func_slice_timing_correction, 'tr')
else:
workflow.connect(scan_params, 'tr',
func_slice_timing_correction, 'tr')
if not "Use NIFTI Header" in c.slice_timing_pattern:
# add the @ prefix to the tpattern file going into
# AFNI 3dTshift - needed this so the tpattern file
# output from get_scan_params would be tied downstream
# via a connection (to avoid poofing)
add_prefix = pe.Node(util.Function(input_names=['tpattern'],
output_names=[
'afni_prefix'],
function=add_afni_prefix),
name='func_slice_timing_correction_add_afni_prefix_%d' % num_strat)
workflow.connect(scan_params, 'tpattern',
add_prefix, 'tpattern')
workflow.connect(add_prefix, 'afni_prefix',
func_slice_timing_correction,
'tpattern')
# add the name of the node to the strat name
strat.append_name(func_slice_timing_correction.name)
# set the leaf node
strat.set_leaf_properties(func_slice_timing_correction, 'out_file')
# add the outputs to the resource pool
strat.update_resource_pool({
'slice_time_corrected': (func_slice_timing_correction, 'out_file')
})
# add new strats (if forked)
strat_list += new_strat_list
# Functional Image Preprocessing Workflow
new_strat_list = []
workflow_counter += 1
workflow_bit_id['func_preproc'] = workflow_counter
if '3dAutoMask' in c.functionalMasking:
for num_strat, strat in enumerate(strat_list):
func_preproc = create_func_preproc(use_bet=False,
wf_name='func_preproc_automask_%d' % num_strat)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file, func_preproc,
'inputspec.func')
# TODO ASH review forking
if 'BET' in c.functionalMasking:
strat = strat.clone()
new_strat_list.append(strat)
strat.append_name(func_preproc.name)
strat.set_leaf_properties(func_preproc, 'outputspec.preprocessed')
# add stuff to resource pool if we need it
strat.update_resource_pool({
'mean_functional': (func_preproc, 'outputspec.example_func'),
'functional_preprocessed_mask': (func_preproc, 'outputspec.preprocessed_mask'),
'movement_parameters': (func_preproc, 'outputspec.movement_parameters'),
'max_displacement': (func_preproc, 'outputspec.max_displacement'),
'functional_preprocessed': (func_preproc, 'outputspec.preprocessed'),
'functional_brain_mask': (func_preproc, 'outputspec.mask'),
'motion_correct': (func_preproc, 'outputspec.motion_correct'),
'coordinate_transformation': (func_preproc, 'outputspec.oned_matrix_save')
})
print('mean_functional: ', strat['mean_functional'])
create_log_node(workflow, func_preproc,
'outputspec.preprocessed', num_strat)
strat_list += new_strat_list
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
if 'BET' in c.functionalMasking and 'func_preproc_automask' not in nodes:
func_preproc = create_func_preproc(use_bet=True,
wf_name='func_preproc_bet_%d' % num_strat)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file, func_preproc,
'inputspec.func')
strat.append_name(func_preproc.name)
strat.set_leaf_properties(func_preproc, 'outputspec.preprocessed')
# TODO redundant with above resource pool additions?
strat.update_resource_pool({
'mean_functional': (func_preproc, 'outputspec.example_func'),
'functional_preprocessed_mask': (func_preproc, 'outputspec.preprocessed_mask'),
'movement_parameters': (func_preproc, 'outputspec.movement_parameters'),
'max_displacement': (func_preproc, 'outputspec.max_displacement'),
'functional_preprocessed': (func_preproc, 'outputspec.preprocessed'),
'functional_brain_mask': (func_preproc, 'outputspec.mask'),
'motion_correct': (func_preproc, 'outputspec.motion_correct'),
'coordinate_transformation': (func_preproc, 'outputspec.oned_matrix_save'),
})
create_log_node(workflow, func_preproc, 'outputspec.preprocessed',
num_strat)
strat_list += new_strat_list
# Func -> T1 Registration (Initial Linear reg)
# Depending on configuration, either passes output matrix to
# Func -> Template ApplyWarp, or feeds into linear reg of BBReg operation
# (if BBReg is enabled)
new_strat_list = []
workflow_counter += 1
if 1 in c.runRegisterFuncToAnat:
workflow_bit_id['func_to_anat'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
# if field map-based distortion correction is on, but BBR is off,
# send in the distortion correction files here
# TODO: is this robust to the possibility of forking both
# TODO: distortion correction and BBR at the same time?
# TODO: (note if you are forking with BBR on/off, at this point
# TODO: there is still only one strat, so you would have to fork
# TODO: here instead to have a func->anat with fieldmap and
# TODO: without, and send the without-fieldmap to the BBR fork)
dist_corr = False
if 'epi_distcorr' in nodes and 1 not in c.runBBReg:
dist_corr = True
# TODO: for now, disabling dist corr when BBR is disabled
err = "\n\n[!] Field map distortion correction is enabled, " \
"but Boundary-Based Registration is off- BBR is " \
"required for distortion correction.\n\n"
raise Exception(err)
func_to_anat = create_register_func_to_anat(dist_corr,
'func_to_anat_FLIRT'
'_%d' % num_strat)
# Input registration parameters
func_to_anat.inputs.inputspec.interp = 'trilinear'
# TODO ASH normalize strings with enums?
if 'Mean Functional' in c.func_reg_input:
# Input functional image (mean functional)
node, out_file = strat['mean_functional']
workflow.connect(node, out_file,
func_to_anat, 'inputspec.func')
elif 'Selected Functional Volume' in c.func_reg_input:
# Input functional image (specific volume)
node, out_file = strat['selected_func_volume']
workflow.connect(node, out_file,
func_to_anat, 'inputspec.func')
# Input skull-stripped anatomical (anat.nii.gz)
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
func_to_anat, 'inputspec.anat')
if dist_corr:
# apply field map distortion correction outputs to
# the func->anat registration
func_to_anat.inputs.echospacing_input.set(
echospacing=c.fmap_distcorr_dwell_time[0]
)
func_to_anat.inputs.pedir_input.set(
pedir=c.fmap_distcorr_pedir
)
node, out_file = strat["despiked_fieldmap"]
workflow.connect(node, out_file,
func_to_anat, 'inputspec.fieldmap')
node, out_file = strat["fieldmap_mask"]
workflow.connect(node, out_file,
func_to_anat, 'inputspec.fieldmapmask')
# TODO ASH review forking
if 0 in c.runRegisterFuncToAnat:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(func_to_anat.name)
# strat.set_leaf_properties(func_mni_warp, 'out_file')
strat.update_resource_pool({
'mean_functional_in_anat': (func_to_anat, 'outputspec.anat_func_nobbreg'),
'functional_to_anat_linear_xfm': (func_to_anat, 'outputspec.func_to_anat_linear_xfm_nobbreg')
})
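# the '_nobbreg' outputs are the plain FLIRT results; if boundary-based
# registration is enabled below, the BBReg block replaces
# 'functional_to_anat_linear_xfm' in the resource pool (override=True)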
# create_log_node(workflow, func_to_anat, 'outputspec.mni_func', num_strat)
strat_list += new_strat_list
# Func -> T1 Registration (BBREG)
# Outputs 'functional_to_anat_linear_xfm', a matrix file of the
# functional-to-anatomical registration warp to be applied LATER in
# func_mni_warp, which accepts it as input 'premat'
new_strat_list = []
workflow_counter += 1
if 1 in c.runRegisterFuncToAnat and 1 in c.runBBReg:
workflow_bit_id['func_to_anat_bbreg'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
# this is needed here in case tissue segmentation is toggled on/off
# while BBReg is enabled - it ensures BBReg runs for the strategy that
# has segmentation and is skipped (avoiding a crash) for the strategy
# without it
if 'seg_preproc' in nodes:
dist_corr = False
if 'epi_distcorr' in nodes:
dist_corr = True
func_to_anat_bbreg = create_bbregister_func_to_anat(
dist_corr,
'func_to_anat_bbreg_%d' % num_strat
)
# Input registration parameters
func_to_anat_bbreg.inputs.inputspec.bbr_schedule = \
c.boundaryBasedRegistrationSchedule
# TODO ASH normalize strings with enums?
if 'Mean Functional' in c.func_reg_input:
# Input functional image (mean functional)
node, out_file = strat['mean_functional']
workflow.connect(node, out_file,
func_to_anat_bbreg, 'inputspec.func')
elif 'Selected Functional Volume' in c.func_reg_input:
# Input functional image (specific volume)
node, out_file = strat['selected_func_volume']
workflow.connect(node, out_file,
func_to_anat_bbreg, 'inputspec.func')
# Input anatomical whole-head image (reoriented)
node, out_file = strat['anatomical_reorient']
workflow.connect(node, out_file,
func_to_anat_bbreg,
'inputspec.anat_skull')
node, out_file = strat['functional_to_anat_linear_xfm']
workflow.connect(node, out_file,
func_to_anat_bbreg,
'inputspec.linear_reg_matrix')
# Input segmentation probability maps for white matter
# segmentation
node, out_file = strat['seg_probability_maps']
workflow.connect(node, (out_file, pick_wm),
func_to_anat_bbreg,
'inputspec.anat_wm_segmentation')
if dist_corr:
# apply field map distortion correction outputs to
# the func->anat registration
func_to_anat_bbreg.inputs.echospacing_input.echospacing = c.fmap_distcorr_dwell_time[0]
func_to_anat_bbreg.inputs.pedir_input.pedir = c.fmap_distcorr_pedir
node, out_file = strat["despiked_fieldmap"]
workflow.connect(node, out_file,
func_to_anat_bbreg,
'inputspec.fieldmap')
node, out_file = strat["fieldmap_mask"]
workflow.connect(node, out_file,
func_to_anat_bbreg,
'inputspec.fieldmapmask')
# TODO ASH review forking
if 0 in c.runBBReg:
strat = strat.fork()
new_strat_list.append(strat)
strat.append_name(func_to_anat_bbreg.name)
strat.update_resource_pool({
'mean_functional_in_anat': (func_to_anat_bbreg, 'outputspec.anat_func'),
'functional_to_anat_linear_xfm': (func_to_anat_bbreg, 'outputspec.func_to_anat_linear_xfm')
}, override=True)
# create_log_node(workflow, func_to_anat, 'outputspec.mni_func', num_strat)
else:
# TODO ASH review
# anatomical segmentation is not being run in this particular
# strategy/fork - we don't want this to stop workflow building
# unless there is only one strategy
if len(strat_list) > 1:
pass
else:
err = "\n\n[!] Boundary-based registration (BBR) for " \
"functional-to-anatomical registration is " \
"enabled, but anatomical segmentation is not. " \
"BBR requires the outputs of segmentation. " \
"Please modify your pipeline configuration and " \
"run again.\n\n"
raise Exception(err)
strat_list += new_strat_list
# Inserting Generate Motion Statistics Workflow
workflow_counter += 1
workflow_bit_id['gen_motion_stats'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
gen_motion_stats = motion_power_statistics(
'gen_motion_stats_%d' % num_strat
)
# Special case where the workflow is not getting outputs from
# resource pool but is connected to functional datasource
workflow.connect(func_wf, 'outputspec.subject',
gen_motion_stats, 'inputspec.subject_id')
workflow.connect(func_wf, 'outputspec.scan',
gen_motion_stats, 'inputspec.scan_id')
node, out_file = strat['motion_correct']
workflow.connect(node, out_file,
gen_motion_stats, 'inputspec.motion_correct')
node, out_file = strat['movement_parameters']
workflow.connect(node, out_file,
gen_motion_stats,
'inputspec.movement_parameters')
node, out_file = strat['max_displacement']
workflow.connect(node, out_file,
gen_motion_stats, 'inputspec.max_displacement')
node, out_file = strat['functional_brain_mask']
workflow.connect(node, out_file,
gen_motion_stats, 'inputspec.mask')
node, out_file = strat['coordinate_transformation']
workflow.connect(node, out_file,
gen_motion_stats, 'inputspec.transformations')
strat.append_name(gen_motion_stats.name)
strat.update_resource_pool({
'frame_wise_displacement_power': (gen_motion_stats, 'outputspec.FDP_1D'),
'frame_wise_displacement_jenkinson': (gen_motion_stats, 'outputspec.FDJ_1D'),
'dvars': (gen_motion_stats, 'outputspec.DVARS_1D'),
'power_params': (gen_motion_stats, 'outputspec.power_params'),
'motion_params': (gen_motion_stats, 'outputspec.motion_params')
})
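# two framewise-displacement variants are kept: the Power version (sum of
# the absolute framewise differences of the motion parameters) and the
# Jenkinson version (RMS deviation of the transform difference); both are
# handed to nuisance regression below, which may use them for censoring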
new_strat_list = []
workflow_bit_id['aroma_preproc'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
if 1 in c.runICA:
nodes = strat.get_nodes_names()
if 'none' in str(c.TR).lower():
TR = None
else:
TR = float(c.TR)
# FNIRT ONLY! ANTS further below!
if 'FSL' in c.regOption and \
'anat_symmetric_mni_ants_register' not in nodes and \
'anat_mni_ants_register' not in nodes:
aroma_preproc = create_aroma(tr=TR,
wf_name='create_aroma_%d' % num_strat)
aroma_preproc.inputs.params.denoise_type = c.aroma_denoise_type
#aroma_preproc.inputs.params.dim = c.aroma_dim
aroma_preproc.inputs.inputspec.out_dir = os.path.join(
c.workingDirectory, workflow_name,
'create_aroma_%d' % num_strat
)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file, aroma_preproc,
'inputspec.denoise_file')
node, out_file = strat['functional_to_anat_linear_xfm']
workflow.connect(node, out_file, aroma_preproc,
'inputspec.mat_file')
node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
workflow.connect(node, out_file, aroma_preproc,
'inputspec.fnirt_warp_file')
if c.aroma_denoise_type == 'nonaggr':
strat.set_leaf_properties(aroma_preproc,
'outputspec.nonaggr_denoised_file')
strat.update_resource_pool({
'ica_aroma_denoised_functional': (
aroma_preproc, 'outputspec.nonaggr_denoised_file')
}
)
elif c.aroma_denoise_type == 'aggr':
strat.set_leaf_properties(aroma_preproc,
'outputspec.aggr_denoised_file')
strat.update_resource_pool({
'ica_aroma_denoised_functional': (
aroma_preproc, 'outputspec.aggr_denoised_file')
}
)
strat.append_name(aroma_preproc.name)
elif 'ANTS' in c.regOption and \
'anat_symmetric_mni_flirt_register' not in nodes and \
'anat_symmetric_mni_fnirt_register' not in nodes and \
'anat_mni_flirt_register' not in nodes and \
'anat_mni_fnirt_register' not in nodes:
# we don't have the FNIRT warp file, so we need to calculate
# ICA-AROMA de-noising in template space
# 4D FUNCTIONAL apply warp
node, out_file = strat.get_leaf_properties()
mean_func_node, mean_func_out_file = strat["mean_functional"]
# Insert it on the resource pool, so no need to connect externally
ants_apply_warps_func_mni(
workflow, strat, num_strat, num_ants_cores,
node, out_file,
mean_func_node, mean_func_out_file,
c.template_brain_only_for_func,
"ica_aroma_functional_to_standard",
"Linear", 3
)
aroma_preproc = create_aroma(tr=TR,
wf_name='create_aroma_%d'
% num_strat)
aroma_preproc.inputs.params.denoise_type = c.aroma_denoise_type
#aroma_preproc.inputs.params.dim = c.aroma_dim
aroma_preproc.inputs.inputspec.out_dir = os.path.join(
c.workingDirectory, workflow_name,
'create_aroma_%d' % num_strat)
node, out_file = strat['ica_aroma_functional_to_standard']
workflow.connect(node, out_file, aroma_preproc,
'inputspec.denoise_file')
# warp back
if c.aroma_denoise_type == 'nonaggr':
node, out_file = (
aroma_preproc, 'outputspec.nonaggr_denoised_file'
)
elif c.aroma_denoise_type == 'aggr':
node, out_file = (
aroma_preproc, 'outputspec.aggr_denoised_file'
)
ants_apply_inverse_warps_template_to_func(
workflow, strat, num_strat, num_ants_cores, node,
out_file, mean_func_node, mean_func_out_file,
"ica_aroma_denoised_functional", "Linear", 3
)
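# the denoised series is then warped back from template space into native
# functional space with the inverse ANTS transforms, so this branch ends
# up in the same space as the FNIRT branch above before becoming the new
# leaf node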
node, out_file = strat["ica_aroma_denoised_functional"]
strat.set_leaf_properties(node, out_file)
if c.aroma_denoise_type == 'nonaggr':
create_log_node(workflow, aroma_preproc,
'outputspec.nonaggr_denoised_file',
num_strat)
elif c.aroma_denoise_type == 'aggr':
create_log_node(workflow, aroma_preproc,
'outputspec.aggr_denoised_file',
num_strat)
strat.append_name(aroma_preproc.name)
strat_list += new_strat_list
# Inserting Nuisance Workflow
new_strat_list = []
workflow_counter += 1
if 1 in c.runNuisance:
workflow_bit_id['nuisance'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
# for each strategy, create a new one without nuisance
if 0 in c.runNuisance:
new_strat_list.append(strat.fork())
nodes = strat.get_nodes_names()
has_segmentation = 'seg_preproc' in nodes
use_ants = 'anat_mni_fnirt_register' not in nodes and 'anat_mni_flirt_register' not in nodes
for regressors_selector_i, regressors_selector in enumerate(c.Regressors):
new_strat = strat.fork()
# to guarantee immutability
regressors_selector = NuisanceRegressor(
copy.deepcopy(regressors_selector),
copy.deepcopy(c.Regressors)
)
# remove tissue regressors when there is no segmentation
# on the strategy
if not has_segmentation:
for reg in ['aCompCor',
'WhiteMatter',
'GreyMatter',
'CerebrospinalFluid']:
if reg in regressors_selector:
del regressors_selector[reg]
nuisance_regression_workflow = create_nuisance_workflow(
regressors_selector,
use_ants=use_ants,
name='nuisance_{0}_{1}'.format(regressors_selector_i, num_strat)
)
node, out_file = new_strat['anatomical_brain']
workflow.connect(
node, out_file,
nuisance_regression_workflow, 'inputspec.anatomical_file_path'
)
if has_segmentation:
workflow.connect(
c.lateral_ventricles_mask, 'local_path',
nuisance_regression_workflow, 'inputspec.lat_ventricles_mask_file_path'
)
node, out_file = new_strat['anatomical_gm_mask']
workflow.connect(
node, out_file,
nuisance_regression_workflow, 'inputspec.gm_mask_file_path'
)
node, out_file = new_strat['anatomical_wm_mask']
workflow.connect(
node, out_file,
nuisance_regression_workflow, 'inputspec.wm_mask_file_path'
)
node, out_file = new_strat['anatomical_csf_mask']
workflow.connect(
node, out_file,
nuisance_regression_workflow, 'inputspec.csf_mask_file_path'
)
node, out_file = new_strat['movement_parameters']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.motion_parameters_file_path'
)
node, out_file = new_strat['functional_to_anat_linear_xfm']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.func_to_anat_linear_xfm_file_path'
)
node, out_file = new_strat.get_leaf_properties()
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.functional_file_path'
)
node, out_file = new_strat['frame_wise_displacement_jenkinson']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.fd_j_file_path'
)
node, out_file = new_strat['frame_wise_displacement_power']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.fd_p_file_path'
)
node, out_file = new_strat['dvars']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.dvars_file_path'
)
node, out_file = new_strat['functional_brain_mask']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.functional_brain_mask_file_path'
)
nuisance_regression_workflow.get_node('inputspec').iterables = ([
('selector', [regressors_selector]),
])
if use_ants:
# pass the ants_affine_xfm to the input for the
# INVERSE transform, but ants_affine_xfm gets inverted
# within the workflow
node, out_file = new_strat['ants_initial_xfm']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.anat_to_mni_initial_xfm_file_path'
)
node, out_file = new_strat['ants_rigid_xfm']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.anat_to_mni_rigid_xfm_file_path'
)
node, out_file = new_strat['ants_affine_xfm']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.anat_to_mni_affine_xfm_file_path'
)
else:
node, out_file = new_strat['mni_to_anatomical_linear_xfm']
workflow.connect(
node, out_file,
nuisance_regression_workflow,
'inputspec.mni_to_anat_linear_xfm_file_path'
)
new_strat.append_name(nuisance_regression_workflow.name)
new_strat.set_leaf_properties(
nuisance_regression_workflow,
'outputspec.residual_file_path'
)
new_strat.update_resource_pool({
'nuisance_regression_selector': regressors_selector,
'functional_nuisance_residuals': (
nuisance_regression_workflow,
'outputspec.residual_file_path'),
'functional_nuisance_regressors': (
nuisance_regression_workflow,
'outputspec.regressors_file_path'
),
})
new_strat_list.append(new_strat)
# Be aware that this line is supposed to override the current strat_list: it is not a typo/mistake!
# Each regressor forks the strategy, instead of reusing it, to keep the code simple
strat_list = new_strat_list
# Inserting Median Angle Correction Workflow
new_strat_list = []
workflow_counter += 1
# TODO ASH normalize w schema val
if 1 in c.runMedianAngleCorrection:
workflow_bit_id['median_angle_corr'] = workflow_counter
for num_strat, strat in enumerate(strat_list):
# for each strategy, create a new one without median angle
if 0 in c.runMedianAngleCorrection:
new_strat_list.append(strat.fork())
median_angle_corr = create_median_angle_correction(
'median_angle_corr_%d' % num_strat
)
median_angle_corr.get_node('median_angle_correct').iterables = \
('target_angle_deg', c.targetAngleDeg)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
median_angle_corr, 'inputspec.subject')
strat.append_name(median_angle_corr.name)
strat.set_leaf_properties(median_angle_corr, 'outputspec.subject')
strat.update_resource_pool({
'functional_median_angle_corrected': (median_angle_corr, 'outputspec.subject')
})
create_log_node(workflow,
median_angle_corr, 'outputspec.subject',
num_strat)
strat_list += new_strat_list
for num_strat, strat in enumerate(strat_list):
# Keep non-bandpassed version of the output for ALFF
strat.update_resource_pool({
'functional_freq_unfiltered': strat.get_leaf_properties()
})
# Inserting Bandpassing Workflow
for num_strat, strat in enumerate(strat_list):
if 'nuisance_regression_selector' not in strat:
continue
if not strat['nuisance_regression_selector'].get('Bandpass'):
continue
bandpass_selector = strat['nuisance_regression_selector']['Bandpass']
frequency_filter = pe.Node(
function.Function(input_names=['realigned_file',
'bandpass_freqs',
'sample_period'],
output_names=['bandpassed_file'],
function=bandpass_voxels,
as_module=True),
name='frequency_filter_%d' % num_strat
)
frequency_filter.inputs.bandpass_freqs = [
bandpass_selector.get('bottom_frequency'),
bandpass_selector.get('top_frequency')
]
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
frequency_filter, 'realigned_file')
strat.append_name(frequency_filter.name)
strat.set_leaf_properties(frequency_filter, 'bandpassed_file')
strat.update_resource_pool({
'functional_freq_filtered': (frequency_filter, 'bandpassed_file')
})
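# bandpass_voxels filters the time series voxel-wise with the bottom/top
# frequencies taken from the nuisance selector; the unfiltered series
# stored above as 'functional_freq_unfiltered' is kept because ALFF needs
# the full frequency content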
# Func -> Template, uses antsApplyTransforms (ANTS) or ApplyWarp (FSL) to
# apply the warp; also includes mean functional warp
new_strat_list = []
if 1 in c.runRegisterFuncToMNI:
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
# Run FSL ApplyWarp
if 'anat_mni_flirt_register' in nodes or 'anat_mni_fnirt_register' in nodes:
func_mni_warp = pe.Node(interface=fsl.ApplyWarp(),
name='func_mni_fsl_warp_%d' % num_strat)
func_mni_warp.inputs.ref_file = c.template_brain_only_for_func
functional_brain_mask_to_standard = pe.Node(
interface=fsl.ApplyWarp(),
name='func_mni_fsl_warp_mask_%d' % num_strat
)
functional_brain_mask_to_standard.inputs.interp = 'nn'
functional_brain_mask_to_standard.inputs.ref_file = c.template_skull_for_func
mean_functional_warp = pe.Node(
interface=fsl.ApplyWarp(),
name='mean_func_fsl_warp_%d' % num_strat
)
mean_functional_warp.inputs.ref_file = c.template_brain_only_for_func
motion_correct_warp = pe.Node(
interface=fsl.ApplyWarp(),
name="motion_correct_fsl_warp_%d" % num_strat
)
motion_correct_warp.inputs.ref_file = c.template_brain_only_for_func
if 'anat_mni_fnirt_register' in nodes:
node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
workflow.connect(node, out_file,
func_mni_warp, 'field_file')
workflow.connect(node, out_file,
functional_brain_mask_to_standard, 'field_file')
workflow.connect(node, out_file,
mean_functional_warp, 'field_file')
workflow.connect(node, out_file,
motion_correct_warp, 'field_file')
node, out_file = strat['functional_to_anat_linear_xfm']
workflow.connect(node, out_file,
func_mni_warp, 'premat')
workflow.connect(node, out_file,
functional_brain_mask_to_standard, 'premat')
workflow.connect(node, out_file,
mean_functional_warp, 'premat')
workflow.connect(node, out_file,
motion_correct_warp, 'premat')
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
func_mni_warp, 'in_file')
node, out_file = strat['functional_brain_mask']
workflow.connect(node, out_file,
functional_brain_mask_to_standard, 'in_file')
node, out_file = strat['mean_functional']
workflow.connect(node, out_file,
mean_functional_warp, 'in_file')
node, out_file = strat['motion_correct']
workflow.connect(node, out_file,
motion_correct_warp, 'in_file')
elif 'anat_mni_flirt_register' in nodes:
func_anat_warp = pe.Node(interface=fsl.ApplyWarp(),
name='func_anat_fsl_warp_%d' % num_strat)
functional_brain_mask_to_anat = pe.Node(
interface=fsl.ApplyWarp(),
name='func_anat_fsl_warp_mask_%d' % num_strat
)
functional_brain_mask_to_anat.inputs.interp = 'nn'
mean_functional_to_anat = pe.Node(
interface=fsl.ApplyWarp(),
name='mean_func_to_anat_fsl_warp_%d' % num_strat
)
motion_correct_to_anat_warp = pe.Node(
interface=fsl.ApplyWarp(),
name="motion_correct_to_anat_fsl_warp_%d" % num_strat
)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
func_anat_warp, 'in_file')
node, out_file = strat['functional_brain_mask']
workflow.connect(node, out_file,
functional_brain_mask_to_anat, 'in_file')
node, out_file = strat['mean_functional']
workflow.connect(node, out_file,
mean_functional_to_anat, 'in_file')
node, out_file = strat['motion_correct']
workflow.connect(node, out_file,
motion_correct_to_anat_warp, 'in_file')
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
func_anat_warp, 'ref_file')
workflow.connect(node, out_file,
functional_brain_mask_to_anat, 'ref_file')
workflow.connect(node, out_file,
mean_functional_to_anat, 'ref_file')
workflow.connect(node, out_file,
motion_correct_to_anat_warp, 'ref_file')
node, out_file = strat['functional_to_anat_linear_xfm']
workflow.connect(node, out_file,
func_anat_warp, 'premat')
workflow.connect(node, out_file,
functional_brain_mask_to_anat, 'premat')
workflow.connect(node, out_file,
mean_functional_to_anat, 'premat')
workflow.connect(node, out_file,
motion_correct_to_anat_warp, 'premat')
node, out_file = strat.get_leaf_properties()
workflow.connect(func_anat_warp, 'out_file',
func_mni_warp, 'in_file')
workflow.connect(functional_brain_mask_to_anat, 'out_file',
functional_brain_mask_to_standard, 'in_file')
workflow.connect(mean_functional_to_anat, 'out_file',
mean_functional_warp, 'in_file')
workflow.connect(motion_correct_to_anat_warp, 'out_file',
motion_correct_warp, 'in_file')
node, out_file = strat['anatomical_to_mni_linear_xfm']
workflow.connect(node, out_file,
func_mni_warp, 'premat')
workflow.connect(node, out_file,
functional_brain_mask_to_standard, 'premat')
workflow.connect(node, out_file,
mean_functional_warp, 'premat')
workflow.connect(node, out_file,
motion_correct_warp, 'premat')
strat.update_resource_pool({
'functional_to_standard': (func_mni_warp, 'out_file'),
'functional_brain_mask_to_standard': (functional_brain_mask_to_standard, 'out_file'),
'mean_functional_to_standard': (mean_functional_warp, 'out_file'),
'motion_correct_to_standard': (motion_correct_warp, 'out_file')
})
strat.append_name(func_mni_warp.name)
create_log_node(workflow,
func_mni_warp, 'out_file',
num_strat)
strat_list += new_strat_list
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
if 'ANTS' in c.regOption and \
'anat_mni_flirt_register' not in nodes and \
'anat_mni_fnirt_register' not in nodes:
# ANTS warp application
# 4D FUNCTIONAL apply warp
node, out_file = strat.get_leaf_properties()
node2, out_file2 = \
strat["mean_functional"]
warp_func_wf = ants_apply_warps_func_mni(
workflow, strat, num_strat, num_ants_cores,
node, out_file,
node2, out_file2,
c.template_brain_only_for_func,
"functional_to_standard",
"Linear", 3
)
create_log_node(workflow, warp_func_wf,
'outputspec.output_image', num_strat)
# 4D FUNCTIONAL MOTION-CORRECTED apply warp
node, out_file = \
strat['motion_correct']
node2, out_file2 = \
strat["mean_functional"]
warp_motion_wf = ants_apply_warps_func_mni(
workflow, strat, num_strat, num_ants_cores,
node, out_file,
node2, out_file2,
c.template_brain_only_for_func,
"motion_correct_to_standard",
"Linear", 3
)
create_log_node(workflow, warp_motion_wf,
'outputspec.output_image', num_strat)
# FUNCTIONAL BRAIN MASK (binary, no timeseries) apply warp
node, out_file = \
strat["functional_brain_mask"]
warp_mask_wf = ants_apply_warps_func_mni(
workflow, strat, num_strat, num_ants_cores,
node, out_file,
node, out_file,
c.template_brain_only_for_func,
"functional_brain_mask_to_standard",
"NearestNeighbor", 0
)
create_log_node(workflow, warp_mask_wf,
'outputspec.output_image', num_strat)
# FUNCTIONAL MEAN (no timeseries) apply warp
node, out_file = \
strat["mean_functional"]
warp_mean_wf = ants_apply_warps_func_mni(
workflow, strat, num_strat, num_ants_cores,
node, out_file,
node, out_file,
c.template_brain_only_for_func,
"mean_functional_to_standard",
"Linear", 0
)
create_log_node(workflow, warp_mean_wf,
'outputspec.output_image', num_strat)
strat_list += new_strat_list
# Derivatives
# Inserting ALFF/fALFF workflow
# NOTE: this is calculated using the functional time series from
# before frequency filtering and beyond
new_strat_list = []
if 1 in c.runALFF:
for num_strat, strat in enumerate(strat_list):
alff = create_alff('alff_falff_%d' % num_strat)
alff.inputs.hp_input.hp = c.highPassFreqALFF
alff.inputs.lp_input.lp = c.lowPassFreqALFF
alff.get_node('hp_input').iterables = ('hp',
c.highPassFreqALFF)
alff.get_node('lp_input').iterables = ('lp',
c.lowPassFreqALFF)
node, out_file = strat['functional_freq_unfiltered']
workflow.connect(node, out_file,
alff, 'inputspec.rest_res')
node, out_file = strat['functional_brain_mask']
workflow.connect(node, out_file,
alff, 'inputspec.rest_mask')
strat.append_name(alff.name)
strat.update_resource_pool({
'alff': (alff, 'outputspec.alff_img'),
'falff': (alff, 'outputspec.falff_img')
})
create_log_node(workflow,
alff, 'outputspec.falff_img', num_strat)
strat_list += new_strat_list
# Inserting VMHC Workflow
new_strat_list = []
if 1 in c.runVMHC:
for num_strat, strat in enumerate(strat_list):
nodes = strat.get_nodes_names()
if 'func_mni_fsl_warp' in nodes:
if 'anat_mni_fnirt_register' not in nodes and 'anat_mni_flirt_register' in nodes:
vmhc = create_vmhc(False, True, 'vmhc_%d' % num_strat)
elif 'anat_mni_fnirt_register' in nodes:
vmhc = create_vmhc(False, False, 'vmhc_%d' % num_strat)
else:
vmhc = create_vmhc(True, False, 'vmhc_%d' % num_strat,
int(num_ants_cores))
vmhc.inputs.inputspec.standard_for_func = c.template_skull_for_func
vmhc.inputs.fwhm_input.fwhm = c.fwhm
vmhc.get_node('fwhm_input').iterables = ('fwhm', c.fwhm)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
vmhc, 'inputspec.rest_res')
node, out_file = strat['functional_to_anat_linear_xfm']
workflow.connect(node, out_file,
vmhc, 'inputspec.example_func2highres_mat')
node, out_file = strat['functional_brain_mask']
workflow.connect(node, out_file,
vmhc, 'inputspec.rest_mask')
node, out_file = strat['mean_functional']
workflow.connect(node, out_file,
vmhc, 'inputspec.mean_functional')
node, out_file = strat['anatomical_brain']
workflow.connect(node, out_file,
vmhc, 'inputspec.brain')
# TODO ASH normalize w schema val
if 'ANTS' in c.regOption and \
'anat_mni_flirt_register' not in nodes and \
'anat_mni_fnirt_register' not in nodes and \
'anat_symmetric_mni_flirt_register' not in nodes and \
'anat_symmetric_mni_fnirt_register' not in nodes:
node, out_file = strat['ants_symmetric_initial_xfm']
workflow.connect(node, out_file,
vmhc, 'inputspec.ants_symm_initial_xfm')
node, out_file = strat['ants_symmetric_rigid_xfm']
workflow.connect(node, out_file,
vmhc, 'inputspec.ants_symm_rigid_xfm')
node, out_file = strat['ants_symmetric_affine_xfm']
workflow.connect(node, out_file,
vmhc, 'inputspec.ants_symm_affine_xfm')
node, out_file = strat['anatomical_to_symmetric_mni_nonlinear_xfm']
workflow.connect(node, out_file,
vmhc, 'inputspec.ants_symm_warp_field')
else:
if 'anat_mni_fnirt_register' in nodes:
node, out_file = strat['anatomical_to_symmetric_mni_nonlinear_xfm']
workflow.connect(node, out_file,
vmhc, 'inputspec.fnirt_nonlinear_warp')
elif 'anat_mni_flirt_register' in nodes:
node, out_file = strat[
'anatomical_to_symmetric_mni_linear_xfm']
workflow.connect(node, out_file,
vmhc,
'inputspec.flirt_linear_aff')
strat.update_resource_pool({
'vmhc_raw_score': (vmhc, 'outputspec.VMHC_FWHM_img'),
'vmhc_fisher_zstd': (vmhc, 'outputspec.VMHC_Z_FWHM_img'),
'vmhc_fisher_zstd_zstat_map': (vmhc, 'outputspec.VMHC_Z_stat_FWHM_img')
})
strat.append_name(vmhc.name)
create_log_node(
workflow, vmhc, 'outputspec.VMHC_FWHM_img', num_strat)
strat_list += new_strat_list
# Inserting REHO Workflow
if 1 in c.runReHo:
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
preproc = create_reho()
cluster_size = c.clusterSize
# TODO ASH schema validator
# Check the cluster size is supported
if cluster_size not in [7, 19, 27]:
err_msg = 'Cluster size specified: %d, is not supported. ' \
'Change to 7, 19, or 27 and try again' % cluster_size
raise Exception(err_msg)
else:
preproc.inputs.inputspec.cluster_size = cluster_size
reho = preproc.clone('reho_%d' % num_strat)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
reho, 'inputspec.rest_res_filt')
node, out_file = strat['functional_brain_mask']
workflow.connect(node, out_file,
reho, 'inputspec.rest_mask')
create_log_node(workflow, reho, 'outputspec.raw_reho_map',
num_strat)
strat_list += new_strat_list
ts_analysis_dict = {}
sca_analysis_dict = {}
# TODO ASH normalize w schema val
if c.tsa_roi_paths:
tsa_roi_dict = c.tsa_roi_paths[0]
# Timeseries and SCA config selections processing
# flip the dictionary
for roi_path in tsa_roi_dict.keys():
ts_analysis_to_run = map(
lambda x: x.strip(),
tsa_roi_dict[roi_path].split(",")
)
if any(
corr in ts_analysis_to_run for corr in [
"PearsonCorr", "PartialCorr"
]
) and "Avg" not in ts_analysis_to_run:
ts_analysis_to_run += ["Avg"]
for analysis_type in ts_analysis_to_run:
if analysis_type not in ts_analysis_dict.keys():
ts_analysis_dict[analysis_type] = []
ts_analysis_dict[analysis_type].append(roi_path)
# c.tsa_roi_paths and c.sca_roi_paths come in a format as such:
# a list containing a dictionary
# [
# {
# '/path/to/rois1.nii.gz': 'Avg, MultReg',
# '/path/to/rois2.nii.gz': 'Avg, MultReg',
# '/path/to/rois3.nii.gz': 'Avg, MultReg',
# '/path/to/rois4.nii.gz': 'DualReg'
# }
# ]
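# Illustrative flip (paths and selections assumed): an entry such as
# {'/path/to/rois1.nii.gz': 'Avg, Voxel'} ends up as
# ts_analysis_dict == {'Avg': ['/path/to/rois1.nii.gz'],
#                      'Voxel': ['/path/to/rois1.nii.gz']}
# and, because PearsonCorr/PartialCorr need averaged ROI time series, selecting
# either of them implicitly adds the same path under 'Avg' as well.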
# TODO ASH normalize w schema val
if 1 in c.runROITimeseries:
# TODO ASH normalize w schema val
if not c.tsa_roi_paths:
err = "\n\n[!] CPAC says: Time Series Extraction is " \
"set to run, but no ROI NIFTI file paths were provided!" \
"\n\n"
raise Exception(err)
# TODO ASH normalize w schema val
if 1 in c.runSCA:
# TODO ASH normalize w schema val
if c.sca_roi_paths:
sca_roi_dict = c.sca_roi_paths[0]
else:
err = "\n\n[!] CPAC says: Seed-based Correlation Analysis is " \
"set to run, but no ROI NIFTI file paths were provided!" \
"\n\n"
raise Exception(err)
# flip the dictionary
for roi_path in sca_roi_dict.keys():
for analysis_type in sca_roi_dict[roi_path].split(","):
analysis_type = analysis_type.replace(" ", "")
if analysis_type not in sca_analysis_dict.keys():
sca_analysis_dict[analysis_type] = []
sca_analysis_dict[analysis_type].append(roi_path)
# Section: Spatial Regression Based Time Series
new_strat_list = []
if "SpatialReg" in ts_analysis_dict.keys() or \
"DualReg" in sca_analysis_dict.keys():
for num_strat, strat in enumerate(strat_list):
if "SpatialReg" in ts_analysis_dict.keys():
resample_spatial_map_to_native_space = pe.Node(
interface=fsl.FLIRT(),
name='resample_spatial_map_to_native_space_%d' % num_strat
)
resample_spatial_map_to_native_space.inputs.set(
interp='nearestneighbour',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
spatial_map_dataflow = create_spatial_map_dataflow(
ts_analysis_dict["SpatialReg"],
'spatial_map_dataflow_%d' % num_strat
)
spatial_map_dataflow.inputs.inputspec.set(
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
spatial_map_timeseries = get_spatial_map_timeseries(
'spatial_map_timeseries_%d' % num_strat
)
spatial_map_timeseries.inputs.inputspec.demean = True # c.spatialDemean
node, out_file = strat['functional_to_standard']
node2, out_file2 = strat['functional_brain_mask_to_standard']
# resample the input functional file and functional mask
# to spatial map
workflow.connect(node, out_file,
resample_spatial_map_to_native_space,
'reference')
workflow.connect(spatial_map_dataflow,
'select_spatial_map.out_file',
resample_spatial_map_to_native_space,
'in_file')
# connect it to the spatial_map_timeseries
workflow.connect(resample_spatial_map_to_native_space,
'out_file',
spatial_map_timeseries,
'inputspec.spatial_map')
workflow.connect(node2, out_file2,
spatial_map_timeseries,
'inputspec.subject_mask')
workflow.connect(node, out_file,
spatial_map_timeseries,
'inputspec.subject_rest')
strat.append_name(spatial_map_timeseries.name)
strat.update_resource_pool({
'spatial_map_timeseries': (spatial_map_timeseries, 'outputspec.subject_timeseries')
})
create_log_node(workflow, spatial_map_timeseries,
'outputspec.subject_timeseries', num_strat)
if "DualReg" in sca_analysis_dict.keys():
resample_spatial_map_to_native_space_for_dr = pe.Node(
interface=fsl.FLIRT(),
name='resample_spatial_map_to_native_space_for_DR_%d' % num_strat
)
resample_spatial_map_to_native_space_for_dr.inputs.set(
interp='nearestneighbour',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
spatial_map_dataflow_for_dr = create_spatial_map_dataflow(
sca_analysis_dict["DualReg"],
'spatial_map_dataflow_for_DR_%d' % num_strat
)
spatial_map_dataflow_for_dr.inputs.inputspec.set(
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
spatial_map_timeseries_for_dr = get_spatial_map_timeseries(
'spatial_map_timeseries_for_DR_%d' % num_strat
)
spatial_map_timeseries_for_dr.inputs.inputspec.demean = True # c.spatialDemean
node, out_file = strat['functional_to_standard']
node2, out_file2 = strat['functional_brain_mask_to_standard']
# resample the input functional file and functional mask
# to spatial map
workflow.connect(node, out_file,
resample_spatial_map_to_native_space_for_dr,
'reference')
workflow.connect(spatial_map_dataflow_for_dr,
'select_spatial_map.out_file',
resample_spatial_map_to_native_space_for_dr,
'in_file')
# connect it to the spatial_map_timeseries
workflow.connect(
resample_spatial_map_to_native_space_for_dr,
'out_file',
spatial_map_timeseries_for_dr,
'inputspec.spatial_map'
)
workflow.connect(node, out_file,
spatial_map_timeseries_for_dr,
'inputspec.subject_rest')
strat.append_name(spatial_map_timeseries_for_dr.name)
strat.update_resource_pool({
'spatial_map_timeseries_for_DR': (
spatial_map_timeseries_for_dr,
'outputspec.subject_timeseries')
})
create_log_node(workflow, spatial_map_timeseries_for_dr,
'outputspec.subject_timeseries',
num_strat)
strat_list += new_strat_list
if 1 in c.runROITimeseries and ("Avg" in ts_analysis_dict.keys() or \
"Avg" in sca_analysis_dict.keys() or \
"MultReg" in sca_analysis_dict.keys()):
# ROI Based Time Series
new_strat_list = []
for num_strat, strat in enumerate(strat_list):
if "Avg" in ts_analysis_dict.keys():
resample_functional_to_roi = pe.Node(interface=fsl.FLIRT(),
name='resample_functional_to_roi_%d' % num_strat)
resample_functional_to_roi.inputs.set(
interp='trilinear',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
roi_dataflow = create_roi_mask_dataflow(
ts_analysis_dict["Avg"],
'roi_dataflow_%d' % num_strat
)
roi_dataflow.inputs.inputspec.set(
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
roi_timeseries = get_roi_timeseries(
'roi_timeseries_%d' % num_strat
)
roi_timeseries.inputs.inputspec.output_type = c.roiTSOutputs
node, out_file = strat['functional_to_standard']
# resample the input functional file to roi
workflow.connect(node, out_file,
resample_functional_to_roi, 'in_file')
workflow.connect(roi_dataflow, 'outputspec.out_file',
resample_functional_to_roi, 'reference')
# connect it to the roi_timeseries
workflow.connect(roi_dataflow, 'outputspec.out_file',
roi_timeseries, 'input_roi.roi')
workflow.connect(resample_functional_to_roi, 'out_file',
roi_timeseries, 'inputspec.rest')
strat.append_name(roi_timeseries.name)
strat.update_resource_pool({
'roi_timeseries': (roi_timeseries, 'outputspec.roi_outputs'),
'functional_to_roi': (resample_functional_to_roi, 'out_file')
})
create_log_node(workflow, roi_timeseries, 'outputspec.roi_outputs',
num_strat)
if "Avg" in sca_analysis_dict.keys():
# same workflow, except to run TSE and send it to the resource
# pool so that it will not get sent to SCA
resample_functional_to_roi_for_sca = pe.Node(
interface=fsl.FLIRT(),
name='resample_functional_to_roi_for_sca_%d' % num_strat
)
resample_functional_to_roi_for_sca.inputs.set(
interp='trilinear',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
roi_dataflow_for_sca = create_roi_mask_dataflow(
sca_analysis_dict["Avg"],
'roi_dataflow_for_sca_%d' % num_strat
)
roi_dataflow_for_sca.inputs.inputspec.set(
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
roi_timeseries_for_sca = get_roi_timeseries(
'roi_timeseries_for_sca_%d' % num_strat
)
node, out_file = strat['functional_to_standard']
# resample the input functional file to roi
workflow.connect(node, out_file,
resample_functional_to_roi_for_sca,
'in_file')
workflow.connect(roi_dataflow_for_sca,
'outputspec.out_file',
resample_functional_to_roi_for_sca,
'reference')
# connect it to the roi_timeseries
workflow.connect(roi_dataflow_for_sca,
'outputspec.out_file',
roi_timeseries_for_sca, 'input_roi.roi')
workflow.connect(resample_functional_to_roi_for_sca,
'out_file',
roi_timeseries_for_sca, 'inputspec.rest')
strat.append_name(roi_timeseries_for_sca.name)
strat.update_resource_pool({
'roi_timeseries_for_SCA': (roi_timeseries_for_sca, 'outputspec.roi_outputs'),
'functional_to_roi_for_SCA': (resample_functional_to_roi_for_sca, 'out_file')
})
create_log_node(workflow, roi_timeseries_for_sca,
'outputspec.roi_outputs', num_strat)
if "MultReg" in sca_analysis_dict.keys():
# same workflow, except to run TSE and send it to the resource
# pool so that it will not get sent to SCA
resample_functional_to_roi_for_multreg = pe.Node(
interface=fsl.FLIRT(),
name='resample_functional_to_roi_for_mult_reg_%d' % num_strat
)
resample_functional_to_roi_for_multreg.inputs.set(
interp='trilinear',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
roi_dataflow_for_multreg = create_roi_mask_dataflow(
sca_analysis_dict["MultReg"],
'roi_dataflow_for_mult_reg_%d' % num_strat
)
roi_dataflow_for_multreg.inputs.inputspec.set(
creds_path=input_creds_path,
dl_dir=c.workingDirectory
)
roi_timeseries_for_multreg = get_roi_timeseries(
'roi_timeseries_for_mult_reg_%d' % num_strat
)
node, out_file = strat['functional_to_standard']
# resample the input functional file to roi
workflow.connect(node, out_file,
resample_functional_to_roi_for_multreg,
'in_file')
workflow.connect(roi_dataflow_for_multreg,
'outputspec.out_file',
resample_functional_to_roi_for_multreg,
'reference')
# connect it to the roi_timeseries
workflow.connect(roi_dataflow_for_multreg,
'outputspec.out_file',
roi_timeseries_for_multreg,
'input_roi.roi')
workflow.connect(resample_functional_to_roi_for_multreg,
'out_file',
roi_timeseries_for_multreg,
'inputspec.rest')
strat.append_name(roi_timeseries_for_multreg.name)
strat.update_resource_pool({
'roi_timeseries_for_SCA_multreg': (roi_timeseries_for_multreg, 'outputspec.roi_outputs')
})
create_log_node(workflow, roi_timeseries_for_multreg,
'outputspec.roi_outputs', num_strat)
strat_list += new_strat_list
# Connectome
if "PearsonCorr" in ts_analysis_dict.keys() or \
"PartialCorr" in ts_analysis_dict.keys():
for num_strat, strat in enumerate(strat_list):
if "PearsonCorr" in ts_analysis_dict.keys():
connectome_wf = create_connectome('connectome_PearsonCorr_%d' % num_strat)
connectome_wf.inputs.inputspec.method = "PearsonCorr"
node, out_file = strat['roi_timeseries']
workflow.connect(node,
out_file,
connectome_wf,
'inputspec.time_series')
strat.update_resource_pool({
'connectome_PearsonCorr': (connectome_wf, 'outputspec.connectome')
})
if "PartialCorr" in ts_analysis_dict.keys():
connectome_wf = create_connectome('connectome_PartialCorr_%d' % num_strat)
connectome_wf.inputs.inputspec.method = "PartialCorr"
node, out_file = strat['roi_timeseries']
workflow.connect(node,
out_file,
connectome_wf,
'inputspec.time_series')
strat.update_resource_pool({
'connectome_PartialCorr': (connectome_wf, 'outputspec.connectome')
})
# Voxel Based Time Series
new_strat_list = []
if "Voxel" in ts_analysis_dict.keys():
for num_strat, strat in enumerate(strat_list):
resample_functional_to_mask = pe.Node(interface=fsl.FLIRT(),
name='resample_functional_to_mask_%d' % num_strat)
resample_functional_to_mask.inputs.set(
interp='trilinear',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
mask_dataflow = create_roi_mask_dataflow(ts_analysis_dict["Voxel"],
'mask_dataflow_%d' % num_strat)
voxel_timeseries = get_voxel_timeseries(
'voxel_timeseries_%d' % num_strat)
voxel_timeseries.inputs.inputspec.output_type = c.roiTSOutputs
node, out_file = strat['functional_to_standard']
# resample the input functional file to mask
workflow.connect(node, out_file,
resample_functional_to_mask, 'in_file')
workflow.connect(mask_dataflow, 'outputspec.out_file',
resample_functional_to_mask, 'reference')
# connect it to the voxel_timeseries
workflow.connect(mask_dataflow, 'outputspec.out_file',
voxel_timeseries, 'input_mask.mask')
workflow.connect(resample_functional_to_mask, 'out_file',
voxel_timeseries, 'inputspec.rest')
strat.append_name(voxel_timeseries.name)
strat.update_resource_pool({
'voxel_timeseries': (voxel_timeseries, 'outputspec.mask_outputs')
})
create_log_node(workflow, voxel_timeseries,
'outputspec.mask_outputs', num_strat)
strat_list += new_strat_list
# Inserting SCA workflow for ROI INPUT
new_strat_list = []
if "Avg" in sca_analysis_dict.keys():
for num_strat, strat in enumerate(strat_list):
sca_roi = create_sca('sca_roi_%d' % num_strat)
node, out_file = strat.get_leaf_properties()
workflow.connect(node, out_file,
sca_roi, 'inputspec.functional_file')
node, out_file = strat['roi_timeseries_for_SCA']
workflow.connect(node, (out_file, extract_one_d),
sca_roi, 'inputspec.timeseries_one_d')
strat.update_resource_pool({
'sca_roi_files': (sca_roi, 'outputspec.correlation_files')
})
create_log_node(workflow,
sca_roi, 'outputspec.correlation_stack',
num_strat)
strat.append_name(sca_roi.name)
strat_list += new_strat_list
# (Dual Regression) Temporal Regression for Dual Regression
new_strat_list = []
if "DualReg" in sca_analysis_dict.keys():
for num_strat, strat in enumerate(strat_list):
dr_temp_reg = create_temporal_reg(
'temporal_dual_regression_%d' % num_strat
)
dr_temp_reg.inputs.inputspec.normalize = c.mrsNorm
dr_temp_reg.inputs.inputspec.demean = True
node, out_file = strat['spatial_map_timeseries_for_DR']
node2, out_file2 = strat.get_leaf_properties()
node3, out_file3 = strat['functional_brain_mask']
workflow.connect(node2, out_file2,
dr_temp_reg, 'inputspec.subject_rest')
workflow.connect(node, out_file,
dr_temp_reg, 'inputspec.subject_timeseries')
workflow.connect(node3, out_file3,
dr_temp_reg, 'inputspec.subject_mask')
strat.update_resource_pool({
'dr_tempreg_maps_files': (dr_temp_reg, 'outputspec.temp_reg_map_files'),
'dr_tempreg_maps_zstat_files': (dr_temp_reg, 'outputspec.temp_reg_map_z_files')
})
strat.append_name(dr_temp_reg.name)
create_log_node(workflow, dr_temp_reg,
'outputspec.temp_reg_map', num_strat)
strat_list += new_strat_list
# (Multiple Regression) Temporal Regression for SCA
new_strat_list = []
if "MultReg" in sca_analysis_dict.keys():
for num_strat, strat in enumerate(strat_list):
sc_temp_reg = create_temporal_reg(
'temporal_regression_sca_%d' % num_strat,
which='RT'
)
sc_temp_reg.inputs.inputspec.normalize = c.mrsNorm
sc_temp_reg.inputs.inputspec.demean = True
node, out_file = strat['functional_to_standard']
node2, out_file2 = strat['roi_timeseries_for_SCA_multreg']
node3, out_file3 = strat['functional_brain_mask_to_standard']
workflow.connect(node, out_file,
sc_temp_reg, 'inputspec.subject_rest')
workflow.connect(node2, (out_file2, extract_one_d),
sc_temp_reg, 'inputspec.subject_timeseries')
workflow.connect(node3, out_file3,
sc_temp_reg, 'inputspec.subject_mask')
strat.update_resource_pool({
'sca_tempreg_maps_files': (sc_temp_reg, 'outputspec.temp_reg_map_files'),
'sca_tempreg_maps_zstat_files': (sc_temp_reg, 'outputspec.temp_reg_map_z_files')
})
create_log_node(workflow, sc_temp_reg,
'outputspec.temp_reg_map', num_strat)
strat.append_name(sc_temp_reg.name)
strat_list += new_strat_list
# Section: Network centrality
# TODO ASH handle as boolean on schema validator / normalizer
if 1 in c.runNetworkCentrality:
# TODO ASH move to schema validator
# validate the mask file path
# if not c.templateSpecificationFile.endswith(".nii") and \
# not c.templateSpecificationFile.endswith(".nii.gz"):
# err = "\n\n[!] CPAC says: The Network Centrality mask " \
# "specification file must be a NIFTI file (ending in .nii " \
# "or .nii.gz).\nFile path you provided: %s\n\n" \
# % c.templateSpecificationFile
# raise Exception(err)
strat_list = create_network_centrality_workflow(
workflow, c, strat_list, {
"creds_path": input_creds_path,
"dl_dir": c.workingDirectory
}
)
'''
Loop through the resource pool and connect the nodes for:
- applying warps to standard
- z-score standardization
- smoothing
- calculating output averages
'''
for num_strat, strat in enumerate(strat_list):
if 1 in c.runRegisterFuncToMNI:
rp = strat.get_resource_pool()
for key in sorted(rp.keys()):
# connect nodes to apply warps to template
if key in Outputs.native_nonsmooth:
# smoothing happens at the end, so only the non-smooth
# named output labels for the native-space outputs
strat = output_to_standard(
workflow, key, strat, num_strat, c)
elif key in Outputs.native_nonsmooth_mult:
strat = output_to_standard(workflow, key, strat, num_strat, c,
map_node=True)
if "Before" in c.smoothing_order:
# run smoothing before Z-scoring
if 1 in c.run_smoothing:
rp = strat.get_resource_pool()
for key in sorted(rp.keys()):
# connect nodes for smoothing
if "centrality" in key:
# centrality needs its own mask
strat = output_smooth(workflow, key,
c.templateSpecificationFile, c.fwhm,
strat, num_strat, map_node=True)
elif key in Outputs.native_nonsmooth:
# native space
strat = output_smooth(workflow, key, "functional_brain_mask", c.fwhm,
strat, num_strat)
elif key in Outputs.native_nonsmooth_mult:
# native space with multiple files (map nodes)
strat = output_smooth(workflow, key, "functional_brain_mask", c.fwhm,
strat, num_strat, map_node=True)
elif key in Outputs.template_nonsmooth:
# template space
strat = output_smooth(workflow, key,
"functional_brain_mask_to_standard", c.fwhm,
strat, num_strat)
elif key in Outputs.template_nonsmooth_mult:
# template space with multiple files (map nodes)
strat = output_smooth(workflow, key,
"functional_brain_mask_to_standard", c.fwhm,
strat, num_strat, map_node=True)
if 1 in c.runZScoring:
rp = strat.get_resource_pool()
for key in sorted(rp.keys()):
# connect nodes for z-score standardization
if "sca_roi_files_to_standard" in key:
# correlation files need the r-to-z
strat = fisher_z_score_standardize(workflow, key,
"roi_timeseries_for_SCA",
strat, num_strat,
map_node=True)
elif "centrality" in key:
# specific mask
strat = z_score_standardize(workflow, key,
c.templateSpecificationFile,
strat, num_strat,
map_node=True)
elif key in Outputs.template_raw:
# raw score, in template space
strat = z_score_standardize(workflow, key,
"functional_brain_mask_to_standard",
strat, num_strat)
elif key in Outputs.template_raw_mult:
# same as above but multiple files so mapnode required
strat = z_score_standardize(workflow, key,
"functional_brain_mask_to_standard",
strat, num_strat,
map_node=True)
elif "After" in c.smoothing_order:
# run smoothing after Z-scoring
if 1 in c.runZScoring:
rp = strat.get_resource_pool()
for key in sorted(rp.keys()):
# connect nodes for z-score standardization
if "sca_roi_files_to_standard" in key:
# correlation files need the r-to-z
strat = fisher_z_score_standardize(workflow, key,
"roi_timeseries_for_SCA",
strat, num_strat,
map_node=True)
elif "centrality" in key:
# specific mask
strat = z_score_standardize(workflow, key,
c.templateSpecificationFile,
strat, num_strat,
map_node=True)
elif key in Outputs.template_raw:
# raw score, in template space
strat = z_score_standardize(workflow, key,
"functional_brain_mask_to_standard",
strat, num_strat)
elif key in Outputs.template_raw_mult:
# same as above but multiple files so mapnode required
strat = z_score_standardize(workflow, key,
"functional_brain_mask_to_standard",
strat, num_strat,
map_node=True)
if 1 in c.run_smoothing:
rp = strat.get_resource_pool()
for key in sorted(rp.keys()):
# connect nodes for smoothing
if "centrality" in key:
# centrality needs its own mask
strat = output_smooth(workflow, key,
c.templateSpecificationFile, c.fwhm,
strat, num_strat, map_node=True)
elif key in Outputs.native_nonsmooth:
# native space
strat = output_smooth(workflow, key, "functional_brain_mask", c.fwhm,
strat, num_strat)
elif key in Outputs.native_nonsmooth_mult:
# native space with multiple files (map nodes)
strat = output_smooth(workflow, key, "functional_brain_mask", c.fwhm,
strat, num_strat, map_node=True)
elif key in Outputs.template_nonsmooth:
# template space
strat = output_smooth(workflow, key,
"functional_brain_mask_to_standard", c.fwhm,
strat, num_strat)
elif key in Outputs.template_nonsmooth_mult:
# template space with multiple files (map nodes)
strat = output_smooth(workflow, key,
"functional_brain_mask_to_standard", c.fwhm,
strat, num_strat, map_node=True)
rp = strat.get_resource_pool()
for key in sorted(rp.keys()):
# connect nodes to calculate averages
if key in Outputs.average:
# the outputs we need the averages for
strat = calc_avg(workflow, key, strat, num_strat)
elif key in Outputs.average_mult:
# those outputs, but the ones with multiple files (map nodes)
strat = calc_avg(workflow, key, strat,
num_strat, map_node=True)
# Quality Control
qc_montage_id_a = {}
qc_montage_id_s = {}
qc_plot_id = {}
qc_hist_id = {}
if 1 in c.generateQualityControlImages:
qc_montage_id_a, qc_montage_id_s, qc_hist_id, qc_plot_id = \
create_qc_workflow(workflow, c, strat_list, Outputs.qc)
logger.info('\n\n' + 'Pipeline building completed.' + '\n\n')
# Run the pipeline only if the user signifies.
# otherwise, only construct the pipeline (above)
if run == 1:
try:
workflow.write_graph(graph2use='hierarchical', format='png')
except:
pass
# this section creates names for the different branched strategies.
# it identifies where the pipeline has forked and then appends the
# name of the forked nodes to the branch name in the output directory
# fork_points is a list of lists, each list containing node names of
# nodes run in that strat/fork that are unique to that strat/fork
fork_points = Strategy.get_forking_points(strat_list)
fork_names = []
# here 'fork_point' is an individual strat with its unique nodes
for fork_point in fork_points:
fork_name = []
for fork in fork_point:
fork_label = ''
if 'ants' in fork:
fork_label = 'ants'
if 'fnirt' in fork:
fork_label = 'fnirt'
elif 'flirt_register' in fork:
fork_label = 'linear-only'
if 'automask' in fork:
fork_label = 'func-3dautomask'
if 'bet' in fork:
fork_label = 'func-bet'
if 'epi_distcorr' in fork:
fork_label = 'dist-corr'
if 'bbreg' in fork:
fork_label = 'bbreg'
if 'nuisance' in fork:
fork_label = 'nuisance'
if 'frequency_filter' in fork:
fork_label = 'freq-filter'
if 'median' in fork:
fork_label = 'median'
if 'motion_stats' in fork:
fork_label = 'motion'
if 'slice' in fork:
fork_label = 'slice'
if 'anat_preproc_afni' in fork:
fork_label = 'anat-afni'
if 'anat_preproc_bet' in fork:
fork_label = 'anat-bet'
fork_name += [fork_label]
fork_names.append('_'.join(fork_name))
# match each strat_list with fork point list
fork_points_labels = dict(zip(strat_list, fork_names))
# DataSink
pipeline_ids = []
scan_ids = ['scan_anat']
scan_ids += ['scan_' + str(scan_id)
for scan_id in sub_dict['func']]
for num_strat, strat in enumerate(strat_list):
if p_name is None or p_name == 'None':
pipeline_id = c.pipelineName
else:
pipeline_id = p_name
if fork_points_labels[strat]:
pipeline_id += '_' + fork_points_labels[strat]
pipeline_ids.append(pipeline_id)
# TODO enforce value with schema validation
# Extract credentials path for output if it exists
try:
# Get path to creds file
creds_path = ''
if c.awsOutputBucketCredentials:
creds_path = str(c.awsOutputBucketCredentials)
creds_path = os.path.abspath(creds_path)
if c.outputDirectory.lower().startswith('s3://'):
# Test for s3 write access
s3_write_access = \
aws_utils.test_bucket_access(creds_path,
c.outputDirectory)
if not s3_write_access:
raise Exception('Not able to write to bucket!')
except Exception as e:
if c.outputDirectory.lower().startswith('s3://'):
err_msg = 'There was an error processing credentials or ' \
'accessing the S3 bucket. Check and try again.\n' \
'Error: %s' % e
raise Exception(err_msg)
# TODO enforce value with schema validation
try:
encrypt_data = bool(c.s3Encryption[0])
except:
encrypt_data = False
ndmg_out = False
try:
# let's encapsulate this inside a Try..Except block so if
# someone doesn't have ndmg_outputs in their pipe config,
# it will default to the regular datasink
# TODO: update this when we change to the optionals
# TODO: only pipe config
if 1 in c.ndmg_mode:
ndmg_out = True
except:
pass
if ndmg_out:
# create the graphs
from CPAC.utils.ndmg_utils import ndmg_roi_timeseries, \
ndmg_create_graphs
atlases = []
if 'Avg' in ts_analysis_dict.keys():
atlases = ts_analysis_dict['Avg']
roi_dataflow_for_ndmg = create_roi_mask_dataflow(atlases,
'roi_dataflow_for_ndmg_%d' % num_strat
)
resample_functional_to_roi = pe.Node(interface=fsl.FLIRT(),
name='resample_functional_to_roi_ndmg_%d' % num_strat)
resample_functional_to_roi.inputs.set(
interp='trilinear',
apply_xfm=True,
in_matrix_file=c.identityMatrix
)
workflow.connect(roi_dataflow_for_ndmg, 'outputspec.out_file',
resample_functional_to_roi, 'reference')
ndmg_ts_imports = ['import os',
'import nibabel as nb',
'import numpy as np']
ndmg_ts = pe.Node(util.Function(input_names=['func_file',
'label_file'],
output_names=['roi_ts',
'rois',
'roits_file'],
function=ndmg_roi_timeseries,
imports=ndmg_ts_imports),
name='ndmg_ts_%d' % num_strat)
node, out_file = strat['functional_to_standard']
workflow.connect(node, out_file, resample_functional_to_roi,
'in_file')
workflow.connect(resample_functional_to_roi, 'out_file',
ndmg_ts, 'func_file')
workflow.connect(roi_dataflow_for_ndmg, 'outputspec.out_file',
ndmg_ts, 'label_file')
ndmg_graph_imports = ['import os',
'from CPAC.utils.ndmg_utils import graph']
ndmg_graph = pe.MapNode(util.Function(input_names=['ts',
'labels'],
output_names=[
'out_file'],
function=ndmg_create_graphs,
imports=ndmg_graph_imports),
name='ndmg_graphs_%d' % num_strat,
iterfield=['labels'])
workflow.connect(ndmg_ts, 'roi_ts', ndmg_graph, 'ts')
workflow.connect(roi_dataflow_for_ndmg, 'outputspec.out_file',
ndmg_graph, 'labels')
strat.update_resource_pool({
'ndmg_ts': (ndmg_ts, 'roits_file'),
'ndmg_graph': (ndmg_graph, 'out_file')
})
rp = strat.get_resource_pool()
if c.write_debugging_outputs:
import pickle
workdir = os.path.join(c.workingDirectory, workflow_name)
rp_pkl = os.path.join(workdir, 'resource_pool.pkl')
with open(rp_pkl, 'wt') as f:
pickle.dump(rp, f)
output_sink_nodes = []
for resource_i, resource in enumerate(sorted(rp.keys())):
if not resource.startswith('qc___') and resource not in Outputs.any:
continue
if resource not in Outputs.override_optional and not ndmg_out:
if 1 not in c.write_func_outputs:
if resource in Outputs.extra_functional:
continue
if 1 not in c.write_debugging_outputs:
if resource in Outputs.debugging:
continue
if 0 not in c.runRegisterFuncToMNI:
if resource in Outputs.native_nonsmooth or \
resource in Outputs.native_nonsmooth_mult or \
resource in Outputs.native_smooth:
continue
if 0 not in c.runZScoring:
# write out only the z-scored outputs
if resource in Outputs.template_raw or \
resource in Outputs.template_raw_mult:
continue
if 0 not in c.run_smoothing:
# write out only the smoothed outputs
if resource in Outputs.native_nonsmooth or \
resource in Outputs.template_nonsmooth or \
resource in Outputs.native_nonsmooth_mult or \
resource in Outputs.template_nonsmooth_mult:
continue
if ndmg_out:
ds = pe.Node(nio.DataSink(),
name='sinker_{}_{}'.format(num_strat,
resource_i))
ds.inputs.base_directory = c.outputDirectory
ds.inputs.creds_path = creds_path
ds.inputs.encrypt_bucket_keys = encrypt_data
ds.inputs.parameterization = True
ds.inputs.regexp_substitutions = [
(r'_rename_(.)*/', ''),
(r'_scan_', 'scan-'),
(r'/_mask_', '/roi-'),
(r'file_s3(.)*/', ''),
(r'ndmg_atlases', ''),
(r'func_atlases', ''),
(r'label', ''),
(r'res-.+\/', ''),
(r'_mask_', 'roi-'),
(r'mask_sub-', 'sub-'),
(r'/_selector_', '_nuis-'),
(r'_selector_pc', ''),
(r'.linear', ''),
(r'.wm', ''),
(r'.global', ''),
(r'.motion', ''),
(r'.quadratic', ''),
(r'.gm', ''),
(r'.compcor', ''),
(r'.csf', ''),
(r'_sub-', '/sub-'),
(r'(\.\.)', '')
]
container = 'pipeline_{0}'.format(pipeline_id)
sub_ses_id = subject_id.split('_')
if 'sub-' not in sub_ses_id[0]:
sub_tag = 'sub-{0}'.format(sub_ses_id[0])
else:
sub_tag = sub_ses_id[0]
ses_tag = 'ses-1'
if len(sub_ses_id) > 1:
if 'ses-' not in sub_ses_id[1]:
ses_tag = 'ses-{0}'.format(sub_ses_id[1])
else:
ses_tag = sub_ses_id[1]
id_tag = '_'.join([sub_tag, ses_tag])
anat_template_tag = 'standard'
func_template_tag = 'standard'
try:
if 'FSL' in c.regOption and 'ANTS' not in c.regOption:
if 'MNI152' in c.fnirtConfig:
anat_template_tag = 'MNI152'
func_template_tag = 'MNI152'
except:
pass
anat_res_tag = c.resolution_for_anat
anat_res_tag = anat_res_tag.replace('mm', '')
func_res_tag = c.resolution_for_func_preproc
func_res_tag = func_res_tag.replace('mm', '')
ndmg_key_dct = {'anatomical_brain':
('anat', 'preproc',
'{0}_T1w_preproc_brain'.format(id_tag)),
'anatomical_to_standard':
('anat', 'registered',
'{0}_T1w_space-{1}_res-{2}x{2}x{2}_registered'.format(id_tag, anat_template_tag, anat_res_tag)),
'functional_preprocessed':
('func', 'preproc',
'{0}_bold_preproc'.format(id_tag)),
'functional_nuisance_residuals':
('func', 'clean',
'{0}_bold_space-{1}_res-{2}x{2}x{2}_clean'.format(id_tag, func_template_tag, func_res_tag)),
'functional_to_standard':
('func', 'registered',
'{0}_bold_space-{1}_res-{2}x{2}x{2}_registered'.format(
id_tag, func_template_tag,
func_res_tag)),
'functional_mask_to_standard':
('func', 'registered',
'{0}_bold_space-{1}_res-{2}x{2}x{2}_registered_mask'.format(
id_tag, func_template_tag,
func_res_tag)),
'ndmg_ts':
('func', 'roi-timeseries',
'{0}_bold_res-{1}x{1}x{1}_variant-mean_timeseries'.format(
id_tag, func_res_tag)),
'ndmg_graph':
('func', 'roi-connectomes',
'{0}_bold_res-{1}x{1}x{1}_measure-correlation'.format(
id_tag, func_res_tag))
}
if resource not in ndmg_key_dct.keys():
continue
ds.inputs.container = '{0}/{1}'.format(container,
ndmg_key_dct[resource][0])
node, out_file = rp[resource]
# rename the file
if 'roi_' in resource or 'ndmg_graph' in resource:
rename_file = pe.MapNode(
interface=util.Rename(),
name='rename__{}_{}'.format(num_strat, resource_i),
iterfield=['in_file'])
else:
rename_file = pe.Node(
interface=util.Rename(),
name='rename_{}_{}'.format(num_strat, resource_i)
)
rename_file.inputs.keep_ext = True
rename_file.inputs.format_string = ndmg_key_dct[resource][2]
workflow.connect(node, out_file,
rename_file, 'in_file')
workflow.connect(rename_file, 'out_file',
ds, ndmg_key_dct[resource][1])
else:
# regular datasink
ds = pe.Node(
nio.DataSink(),
name='sinker_{}_{}'.format(num_strat, resource_i)
)
ds.inputs.base_directory = c.outputDirectory
ds.inputs.creds_path = creds_path
ds.inputs.encrypt_bucket_keys = encrypt_data
ds.inputs.container = os.path.join(
'pipeline_%s' % pipeline_id, subject_id
)
ds.inputs.regexp_substitutions = [
(r"/_sca_roi(.)*[/]", '/'),
(r"/_smooth_centrality_(\d)+[/]", '/'),
(r"/_z_score(\d)+[/]", "/"),
(r"/_dr_tempreg_maps_zstat_files_smooth_(\d)+[/]", "/"),
(r"/_sca_tempreg_maps_zstat_files_smooth_(\d)+[/]", "/"),
(r"/qc___", '/qc/')
]
node, out_file = rp[resource]
workflow.connect(node, out_file, ds, resource)
output_sink_nodes += [(ds, 'out_file')]
if 1 in c.runSymbolicLinks and not ndmg_out and \
not c.outputDirectory.lower().startswith('s3://'):
merge_link_node = pe.Node(
interface=Merge(len(output_sink_nodes)),
name='create_symlinks_paths_{}'.format(num_strat)
)
merge_link_node.inputs.ravel_inputs = True
link_node = pe.Node(
interface=function.Function(
input_names=[
'output_dir',
'symlink_dir',
'pipeline_id',
'subject_id',
'paths',
],
output_names=[],
function=create_symlinks,
as_module=True
), name='create_symlinks_{}'.format(num_strat)
)
link_node.inputs.output_dir = c.outputDirectory
link_node.inputs.subject_id = subject_id
link_node.inputs.pipeline_id = 'pipeline_%s' % pipeline_id
for i, (node, node_input) in enumerate(output_sink_nodes):
workflow.connect(node, node_input, merge_link_node, 'in{}'.format(i))
workflow.connect(merge_link_node, 'out', link_node, 'paths')
try:
G = nx.DiGraph()
strat_name = strat.get_name()
G.add_edges_from([
(strat_name[s], strat_name[s + 1])
for s in range(len(strat_name) - 1)
])
dotfilename = os.path.join(log_dir, 'strategy.dot')
nx.drawing.nx_pydot.write_dot(G, dotfilename)
format_dot(dotfilename, 'png')
except:
logger.warn('Cannot create the strategy and pipeline '
'graph; dot and/or pygraphviz is not installed')
forks = "\n\nStrategy forks:\n" + \
"\n".join(["- " + pipe for pipe in sorted(set(pipeline_ids))]) + \
"\n\n"
logger.info(forks)
pipeline_start_datetime = strftime("%Y-%m-%d %H:%M:%S")
subject_info['resource_pool'] = []
for strat_no, strat in enumerate(strat_list):
strat_label = 'strat_%d' % strat_no
subject_info[strat_label] = strat.get_name()
subject_info['resource_pool'].append(strat.get_resource_pool())
subject_info['status'] = 'Running'
# TODO:set memory and num_threads of critical nodes if running
# MultiProcPlugin
# Create callback logger
cb_log_filename = os.path.join(log_dir,
'callback.log')
try:
if not os.path.exists(os.path.dirname(cb_log_filename)):
os.makedirs(os.path.dirname(cb_log_filename))
except IOError:
pass
# Add handler to callback log file
cb_logger = cb_logging.getLogger('callback')
cb_logger.setLevel(cb_logging.DEBUG)
handler = cb_logging.FileHandler(cb_log_filename)
cb_logger.addHandler(handler)
# Log initial information from all the nodes
for node_name in workflow.list_node_names():
node = workflow.get_node(node_name)
cb_logger.debug(json.dumps({
"id": str(node),
"hash": node.inputs.get_hashval()[1],
}))
# Add status callback function that writes in callback log
if nipype.__version__ not in ('1.1.2',):
err_msg = "This version of Nipype may not be compatible with " \
"CPAC v%s, please install Nipype version 1.1.2\n" \
% (CPAC.__version__)
logger.error(err_msg)
else:
from CPAC.utils.monitoring import log_nodes_cb
plugin_args['status_callback'] = log_nodes_cb
# Actually run the pipeline now, for the current subject
workflow.run(plugin=plugin, plugin_args=plugin_args)
# Dump subject info pickle file to subject log dir
subject_info['status'] = 'Completed'
subject_info_file = os.path.join(
log_dir, 'subject_info_%s.pkl' % subject_id
)
with open(subject_info_file, 'wb') as info:
pickle.dump(subject_info, info)
for i, _ in enumerate(pipeline_ids):
for scan in scan_ids:
create_log_node(workflow, None, None, i, scan).run()
if 1 in c.generateQualityControlImages and not ndmg_out:
for pip_id in pipeline_ids:
pipeline_base = os.path.join(c.outputDirectory,
'pipeline_%s' % pip_id)
qc_output_folder = os.path.join(pipeline_base, subject_id,
'qc_html')
sub_output_dir = os.path.join(c.outputDirectory,
'pipeline_{0}'.format(pip_id),
subject_id)
generate_qc_pages(qc_output_folder,
sub_output_dir,
qc_montage_id_a,
qc_montage_id_s,
qc_plot_id,
qc_hist_id)
# have this check in case the user runs cpac_runner from terminal and
# the timing parameter list is not supplied as usual by the GUI
if pipeline_timing_info is not None:
# pipeline_timing_info list:
# [0] - unique pipeline ID
# [1] - pipeline start time stamp (first click of 'run' from GUI)
# [2] - number of subjects in subject list
unique_pipeline_id = pipeline_timing_info[0]
pipeline_start_stamp = pipeline_timing_info[1]
num_subjects = pipeline_timing_info[2]
# elapsed time data list:
# [0] - elapsed time in minutes
elapsed_time_data = []
elapsed_time_data.append(
int(((time.time() - pipeline_start_time) / 60)))
# elapsedTimeBin list:
# [0] - cumulative elapsed time (minutes) across all subjects
# [1] - number of times the elapsed time has been appended
# (effectively a measure of how many subjects have run)
# TODO
# write more doc for all this
# warning in .csv that some runs may be partial
# code to delete .tmp file
timing_temp_file_path = os.path.join(c.logDirectory,
'%s_pipeline_timing.tmp' % unique_pipeline_id)
if not os.path.isfile(timing_temp_file_path):
elapsedTimeBin = []
elapsedTimeBin.append(0)
elapsedTimeBin.append(0)
with open(timing_temp_file_path, 'wb') as handle:
pickle.dump(elapsedTimeBin, handle)
with open(timing_temp_file_path, 'rb') as handle:
elapsedTimeBin = pickle.loads(handle.read())
elapsedTimeBin[0] = elapsedTimeBin[0] + elapsed_time_data[0]
elapsedTimeBin[1] = elapsedTimeBin[1] + 1
with open(timing_temp_file_path, 'wb') as handle:
pickle.dump(elapsedTimeBin, handle)
# this happens once the last subject has finished running!
if elapsedTimeBin[1] == num_subjects:
pipelineTimeDict = {}
pipelineTimeDict['Pipeline'] = c.pipelineName
pipelineTimeDict['Cores_Per_Subject'] = c.maxCoresPerParticipant
pipelineTimeDict['Simultaneous_Subjects'] = c.numParticipantsAtOnce
pipelineTimeDict['Number_of_Subjects'] = num_subjects
pipelineTimeDict['Start_Time'] = pipeline_start_stamp
pipelineTimeDict['End_Time'] = strftime("%Y-%m-%d_%H:%M:%S")
pipelineTimeDict['Elapsed_Time_(minutes)'] = elapsedTimeBin[0]
pipelineTimeDict['Status'] = 'Complete'
gpaTimeFields = ['Pipeline', 'Cores_Per_Subject',
'Simultaneous_Subjects',
'Number_of_Subjects', 'Start_Time',
'End_Time', 'Elapsed_Time_(minutes)',
'Status']
timeHeader = dict((n, n) for n in gpaTimeFields)
with open(os.path.join(
c.logDirectory,
'cpac_individual_timing_%s.csv' % c.pipelineName
), 'a') as timeCSV, open(os.path.join(
c.logDirectory,
'cpac_individual_timing_%s.csv' % c.pipelineName
), 'rb') as readTimeCSV:
timeWriter = csv.DictWriter(timeCSV, fieldnames=gpaTimeFields)
timeReader = csv.DictReader(readTimeCSV)
headerExists = False
for line in timeReader:
if 'Start_Time' in line:
headerExists = True
if not headerExists:
timeWriter.writerow(timeHeader)
timeWriter.writerow(pipelineTimeDict)
# remove the temp timing file now that it is no longer needed
os.remove(timing_temp_file_path)
# Upload logs to s3 if s3_str in output directory
if c.outputDirectory.lower().startswith('s3://'):
try:
# Store logs in s3 output directory/logs/...
s3_log_dir = c.outputDirectory + '/logs/' + \
os.path.basename(log_dir)
bucket_name = c.outputDirectory.split('/')[2]
bucket = fetch_creds.return_bucket(creds_path, bucket_name)
# Collect local log files
local_log_files = []
for root, _, files in os.walk(log_dir):
local_log_files.extend([os.path.join(root, fil)
for fil in files])
# Form destination keys
s3_log_files = [loc.replace(log_dir, s3_log_dir)
for loc in local_log_files]
# Upload logs
aws_utils.s3_upload(bucket,
(local_log_files, s3_log_files),
encrypt=encrypt_data)
# Delete local log files
for log_f in local_log_files:
os.remove(log_f)
except Exception as exc:
err_msg = 'Unable to upload CPAC log files in: %s.\nError: %s'
logger.error(err_msg, log_dir, exc)
# Remove working directory when done
if c.removeWorkingDir:
try:
subject_wd = os.path.join(c.workingDirectory, workflow_name)
if os.path.exists(subject_wd):
logger.info("Removing working dir: %s" % subject_wd)
shutil.rmtree(subject_wd)
except:
logger.warn('Could not remove working directory for subject workflow %s',
workflow_name)
execution_info = """
End of subject workflow {workflow}
CPAC run complete:
Pipeline configuration: {pipeline}
Subject workflow: {workflow}
Elapsed run time (minutes): {elapsed}
Timing information saved in {log_dir}/cpac_individual_timing_{pipeline}.csv
System time of start: {run_start}
System time of completion: {run_finish}
"""
logger.info(execution_info.format(
workflow=workflow_name,
pipeline=c.pipelineName,
log_dir=c.logDirectory,
elapsed=(time.time() - pipeline_start_time) / 60,
run_start=pipeline_start_datetime,
run_finish=strftime("%Y-%m-%d %H:%M:%S")
))
return workflow
| 43.185834 | 140 | 0.508654 |
f59b462e83ed414b199b14ae9b304e7700dd0fa8
| 4,061 |
py
|
Python
|
pmath/poset.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 4 |
2016-12-17T20:06:10.000Z
|
2021-11-19T04:45:29.000Z
|
pmath/poset.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 11 |
2021-01-06T05:35:11.000Z
|
2022-03-11T23:28:31.000Z
|
pmath/poset.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 3 |
2015-06-12T10:44:16.000Z
|
2021-07-26T18:39:47.000Z
|
__author__ = 'thor'
from numpy import *
import pandas as pd
def set_containment_matrix(family_of_sets, family_of_sets_2=None):
"""
Computes the containment incidence matrix of two families of sets A and B, where A and B are specified by
incidence matrices where rows index sets and columns index elements (so they must have the same number of cols).
The function returns a boolean matrix M of dimensions nrows(A) x nrows(B) where M(i,j)==True if and only if
ith set of A contains (or is equal to) jth set of B.
If the second family of sets is not given, the function computes the containment matrix of the first input on
itself.
See Also:
family_of_sets_to_bitmap and bitmap_to_family_of_sets to transform family of sets specification
(useful to transform) input and output
Example:
>> import itertools
>> t = array([x for x in itertools.product(*([[0, 1]] * 3))]).astype(int32)
>> t
= array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]], dtype=int32)
>> bitmap_to_family_of_sets(set_containment_matrix(t), range(len(t)))
= [array([0]),
array([0, 1]),
array([0, 2]),
array([0, 1, 2, 3]),
array([0, 4]),
array([0, 1, 4, 5]),
array([0, 2, 4, 6]),
array([0, 1, 2, 3, 4, 5, 6, 7])]
"""
if family_of_sets_2 is None:
family_of_sets_2 = family_of_sets
x = matrix((~family_of_sets.astype(bool)).astype(int))
xx = matrix((family_of_sets_2.astype(bool)).astype(int)).T
return squeeze(asarray(~(((x * xx)).astype(bool))))
def family_of_sets_to_bitmap(family_of_sets, output='df'):
"""
Takes set_list, a family of sets, and returns a dataframe of bitmaps (if output=='df')
or a bitmap matrix and array of element names,
whose columns index set elements and rows index sets.
See Also:
bitmap_to_family_of_sets(bitmap, set_labels) (reverse operation)
"""
df = pd.DataFrame([{element: 1 for element in s} for s in family_of_sets], index=family_of_sets).fillna(0)
if output == 'df':
return df
else:
return df.as_matrix(), array(df.columns)
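# Minimal usage sketch (inputs assumed): family_of_sets_to_bitmap(['ab', 'b'])
# yields a DataFrame indexed by the two sets with element columns 'a' and 'b'
# (column order may vary) holding membership flags [[1, 1], [0, 1]].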
def bitmap_to_family_of_sets(bitmap, set_labels=None):
"""
Takes a bitmap specification of a family of sets and returns an list of arrays specification.
Input:
bitmap: a dataframe whose rows index sets and columns index set labels
bitmap, set_labels: here bitmap is a (sets x set_element_idx) matrix and set_labels are the set elements
See Also:
family_of_sets_to_bitmap(family_of_sets) (reverse operation)
"""
if isinstance(bitmap, pd.DataFrame):
if set_labels is None:
set_labels = array(bitmap.columns)
bitmap = bitmap.as_matrix()
else:
if set_labels is None:
set_labels = arange(shape(bitmap)[1])
assert shape(bitmap)[1] == len(set_labels), "number of set labels must equal the number of elements (num of cols)"
return [asarray(set_labels)[asarray(lidx, dtype=bool)] for lidx in bitmap]
class SetFamily(object):
def __init__(self, set_family, element_labels=None):
if isinstance(set_family, pd.DataFrame):
element_labels = array(set_family.columns)
self.set_family = set_family.as_matrix() == 1
elif isinstance(set_family, ndarray) and len(shape(set_family)) == 2:
self.set_family = set_family
else:
self.set_family, element_labels = family_of_sets_to_bitmap(set_family, output='matrix')
self.n_sets = shape(self.set_family)[0]
self.n_set_elements = shape(self.set_family)[1]
self.set_cardinalities = sum(self.set_family, 1).reshape((self.n_sets, 1))
if element_labels is None:
self.element_labels = arange(self.n_set_elements)
else:
self.element_labels = element_labels
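# Minimal usage sketch (input assumed): SetFamily(array([[1, 0, 1], [1, 1, 1]]))
# gives n_sets == 2, n_set_elements == 3, set_cardinalities == [[2], [3]] and
# default element_labels of arange(3).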
| 39.813725 | 118 | 0.623246 |
e037b59c60bd785ae4466056b36a229119a55063
| 11,824 |
py
|
Python
|
ssvep_utils.py
|
DayBright-David/Leave-one-sub-out-CNN_SSVEP
|
7e24126b3b7db2b9331e5051be9fc34d3868f9ad
|
[
"MIT"
] | null | null | null |
ssvep_utils.py
|
DayBright-David/Leave-one-sub-out-CNN_SSVEP
|
7e24126b3b7db2b9331e5051be9fc34d3868f9ad
|
[
"MIT"
] | null | null | null |
ssvep_utils.py
|
DayBright-David/Leave-one-sub-out-CNN_SSVEP
|
7e24126b3b7db2b9331e5051be9fc34d3868f9ad
|
[
"MIT"
] | null | null | null |
"""
Utilities for CNN based SSVEP Classification
"""
import math
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from scipy.signal import butter, filtfilt
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout, Conv2D, BatchNormalization
from keras.utils.np_utils import to_categorical
from keras import initializers, regularizers
def butter_bandpass_filter(data, lowcut, highcut, sample_rate, order):
'''
Returns bandpass filtered data between the frequency ranges specified in the input.
Args:
data (numpy.ndarray): array of samples.
lowcut (float): lower cutoff frequency (Hz).
highcut (float): upper cutoff frequency (Hz).
sample_rate (float): sampling rate (Hz).
order (int): order of the bandpass filter.
Returns:
(numpy.ndarray): bandpass filtered data.
'''
nyq = 0.5 * sample_rate
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
y = filtfilt(b, a, data)
return y
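# Illustrative call (parameter values assumed, not prescribed):
# filtered = butter_bandpass_filter(raw_channel, lowcut=6.0, highcut=80.0,
#                                   sample_rate=256, order=4)
# keeps roughly the 6-80 Hz band of a single-channel recording sampled at 256 Hz.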
def get_filtered_eeg(eeg, lowcut, highcut, order, sample_rate):
'''
Returns bandpass filtered eeg for all channels and trials.
Args:
eeg (numpy.ndarray): raw eeg data of shape (num_classes, num_channels, num_samples, num_trials).
lowcut (float): lower cutoff frequency (Hz).
highcut (float): upper cutoff frequency (Hz).
order (int): order of the bandpass filter.
sample_rate (float): sampling rate (Hz).
Returns:
(numpy.ndarray): bandpass filtered eeg of shape (num_classes, num_channels, num_samples, num_trials).
'''
num_classes = eeg.shape[0]
num_chan = eeg.shape[1]
total_trial_len = eeg.shape[2]
num_trials = eeg.shape[3]
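# The hard-coded offsets below appear to assume the benchmark recording layout:
# the first 38 samples plus ~0.135 s of visual latency are discarded and a 4 s
# stimulation window is kept, so trial_len works out to 4*sample_rate - 1 samples.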
trial_len = int(38+0.135*sample_rate+4*sample_rate-1) - int(38+0.135*sample_rate)
filtered_data = np.zeros((eeg.shape[0], eeg.shape[1], trial_len, eeg.shape[3]))
for target in range(0, num_classes):
for channel in range(0, num_chan):
for trial in range(0, num_trials):
signal_to_filter = np.squeeze(eeg[target, channel, int(38+0.135*sample_rate):
int(38+0.135*sample_rate+4*sample_rate-1),
trial])
filtered_data[target, channel, :, trial] = butter_bandpass_filter(signal_to_filter, lowcut,
highcut, sample_rate, order)
return filtered_data
def buffer(data, duration, data_overlap):
'''
Returns segmented data based on the provided input window duration and overlap.
Args:
data (numpy.ndarray): array of samples.
duration (int): window length (number of samples).
data_overlap (int): number of samples of overlap.
Returns:
(numpy.ndarray): segmented data of shape (number_of_segments, duration).
'''
number_segments = int(math.ceil((len(data) - data_overlap)/(duration - data_overlap)))
temp_buf = [data[i:i+duration] for i in range(0, len(data), (duration - int(data_overlap)))]
temp_buf[number_segments-1] = np.pad(temp_buf[number_segments-1],
(0, duration-temp_buf[number_segments-1].shape[0]),
'constant')
segmented_data = np.vstack(temp_buf[0:number_segments])
return segmented_data
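# Worked example (lengths assumed): len(data)=1000, duration=300, data_overlap=100
# -> number_segments = ceil((1000-100)/200) = 5; windows start at 0, 200, ..., 800
# and the last window (200 samples) is zero-padded to the full 300 samples.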
def get_segmented_epochs(data, window_len, shift_len, sample_rate):
'''
Returns epoched eeg data based on the window duration and step size.
Args:
data (numpy.ndarray): array of samples.
window_len (int): window length (seconds).
shift_len (int): step size (seconds).
sample_rate (float): sampling rate (Hz).
Returns:
(numpy.ndarray): epoched eeg data of shape
(num_classes, num_channels, num_trials, number_of_segments, duration).
'''
num_classes = data.shape[0]
num_chan = data.shape[1]
num_trials = data.shape[3]
duration = int(window_len*sample_rate)
data_overlap = (window_len - shift_len)*sample_rate
number_of_segments = int(math.ceil((data.shape[2] - data_overlap)/
(duration - data_overlap)))
segmented_data = np.zeros((data.shape[0], data.shape[1],
data.shape[3], number_of_segments, duration))
for target in range(0, num_classes):
for channel in range(0, num_chan):
for trial in range(0, num_trials):
segmented_data[target, channel, trial, :, :] = buffer(data[target, channel, :, trial],
duration, data_overlap)
return segmented_data
def magnitude_spectrum_features(segmented_data, FFT_PARAMS):
'''
Returns magnitude spectrum features. Fast Fourier Transform computed based on
the FFT parameters provided as input.
Args:
segmented_data (numpy.ndarray): epoched eeg data of shape
(num_classes, num_channels, num_trials, number_of_segments, num_samples).
FFT_PARAMS (dict): dictionary of parameters used for feature extraction.
FFT_PARAMS['resolution'] (float): frequency resolution per bin (Hz).
FFT_PARAMS['start_frequency'] (float): start frequency component to pick from (Hz).
            FFT_PARAMS['end_frequency'] (float): end frequency component to pick up to (Hz).
FFT_PARAMS['sampling_rate'] (float): sampling rate (Hz).
Returns:
        (numpy.ndarray): magnitude spectrum features of the input EEG, with shape
(n_fc, num_channels, num_classes, num_trials, number_of_segments).
'''
num_classes = segmented_data.shape[0]
num_chan = segmented_data.shape[1]
num_trials = segmented_data.shape[2]
number_of_segments = segmented_data.shape[3]
fft_len = segmented_data[0, 0, 0, 0, :].shape[0]
NFFT = round(FFT_PARAMS['sampling_rate']/FFT_PARAMS['resolution'])
fft_index_start = int(round(FFT_PARAMS['start_frequency']/FFT_PARAMS['resolution']))
fft_index_end = int(round(FFT_PARAMS['end_frequency']/FFT_PARAMS['resolution']))+1
features_data = np.zeros(((fft_index_end - fft_index_start),
segmented_data.shape[1], segmented_data.shape[0],
segmented_data.shape[2], segmented_data.shape[3]))
for target in range(0, num_classes):
for channel in range(0, num_chan):
for trial in range(0, num_trials):
for segment in range(0, number_of_segments):
temp_FFT = np.fft.fft(segmented_data[target, channel, trial, segment, :], NFFT)/fft_len
magnitude_spectrum = 2*np.abs(temp_FFT)
features_data[:, channel, target, trial, segment] = magnitude_spectrum[fft_index_start:fft_index_end,]
return features_data
def complex_spectrum_features(segmented_data, FFT_PARAMS):
'''
Returns complex spectrum features. Fast Fourier Transform computed based on
the FFT parameters provided as input. The real and imaginary parts of the input
signal are concatenated into a single feature vector.
Args:
segmented_data (numpy.ndarray): epoched eeg data of shape
(num_classes, num_channels, num_trials, number_of_segments, num_samples).
FFT_PARAMS (dict): dictionary of parameters used for feature extraction.
FFT_PARAMS['resolution'] (float): frequency resolution per bin (Hz).
FFT_PARAMS['start_frequency'] (float): start frequency component to pick from (Hz).
            FFT_PARAMS['end_frequency'] (float): end frequency component to pick up to (Hz).
FFT_PARAMS['sampling_rate'] (float): sampling rate (Hz).
Returns:
        (numpy.ndarray): complex spectrum features of the input EEG, with shape
        (2*n_fc, num_channels, num_classes, num_trials, number_of_segments).
'''
num_classes = segmented_data.shape[0]
num_chan = segmented_data.shape[1]
num_trials = segmented_data.shape[2]
number_of_segments = segmented_data.shape[3]
fft_len = segmented_data[0, 0, 0, 0, :].shape[0]
NFFT = round(FFT_PARAMS['sampling_rate']/FFT_PARAMS['resolution'])
fft_index_start = int(round(FFT_PARAMS['start_frequency']/FFT_PARAMS['resolution']))
fft_index_end = int(round(FFT_PARAMS['end_frequency']/FFT_PARAMS['resolution']))+1
features_data = np.zeros((2*(fft_index_end - fft_index_start),
segmented_data.shape[1], segmented_data.shape[0],
segmented_data.shape[2], segmented_data.shape[3]))
for target in range(0, num_classes):
for channel in range(0, num_chan):
for trial in range(0, num_trials):
for segment in range(0, number_of_segments):
temp_FFT = np.fft.fft(segmented_data[target, channel, trial, segment, :], NFFT)/fft_len
real_part = np.real(temp_FFT)
imag_part = np.imag(temp_FFT)
features_data[:, channel, target, trial, segment] = np.concatenate((
real_part[fft_index_start:fft_index_end,],
imag_part[fft_index_start:fft_index_end,]), axis=0)
return features_data
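# Illustrative end-to-end sketch added for clarity; it is not part of the
# original module. The array shape, 256 Hz sampling rate, window settings and
# FFT parameters below are assumptions picked only to make the call chain
# concrete (2 classes x 3 channels x 1500 samples x 2 trials of random data).
def _example_feature_extraction_pipeline():
    '''Minimal sketch: raw eeg -> bandpass filter -> epochs -> FFT features.'''
    sample_rate = 256
    eeg = np.random.randn(2, 3, 1500, 2)
    fft_params = {'resolution': 0.25, 'start_frequency': 3.5,
                  'end_frequency': 35.0, 'sampling_rate': sample_rate}
    filtered = get_filtered_eeg(eeg, lowcut=6, highcut=80, order=4,
                                sample_rate=sample_rate)
    epochs = get_segmented_epochs(filtered, window_len=1, shift_len=1,
                                  sample_rate=sample_rate)
    return magnitude_spectrum_features(epochs, fft_params)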
def CNN_model(input_shape, CNN_PARAMS):
'''
    Returns the Convolutional Neural Network model for SSVEP classification.
Args:
input_shape (numpy.ndarray): shape of input training data
e.g. [num_training_examples, num_channels, n_fc] or [num_training_examples, num_channels, 2*n_fc].
CNN_PARAMS (dict): dictionary of parameters used for feature extraction.
CNN_PARAMS['batch_size'] (int): training mini batch size.
CNN_PARAMS['epochs'] (int): total number of training epochs/iterations.
CNN_PARAMS['droprate'] (float): dropout ratio.
CNN_PARAMS['learning_rate'] (float): model learning rate.
CNN_PARAMS['lr_decay'] (float): learning rate decay ratio.
CNN_PARAMS['l2_lambda'] (float): l2 regularization parameter.
CNN_PARAMS['momentum'] (float): momentum term for stochastic gradient descent optimization.
CNN_PARAMS['kernel_f'] (int): 1D kernel to operate on conv_1 layer for the SSVEP CNN.
CNN_PARAMS['n_ch'] (int): number of eeg channels
CNN_PARAMS['num_classes'] (int): number of SSVEP targets/classes
Returns:
(keras.Sequential): CNN model.
'''
model = Sequential()
model.add(Conv2D(2*CNN_PARAMS['n_ch'], kernel_size=(CNN_PARAMS['n_ch'], 1),
input_shape=(input_shape[0], input_shape[1], input_shape[2]),
padding="valid", kernel_regularizer=regularizers.l2(CNN_PARAMS['l2_lambda']),
kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(CNN_PARAMS['droprate']))
model.add(Conv2D(2*CNN_PARAMS['n_ch'], kernel_size=(1, CNN_PARAMS['kernel_f']),
kernel_regularizer=regularizers.l2(CNN_PARAMS['l2_lambda']), padding="valid",
kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(CNN_PARAMS['droprate']))
model.add(Flatten())
model.add(Dense(CNN_PARAMS['num_classes'], activation='softmax',
kernel_regularizer=regularizers.l2(CNN_PARAMS['l2_lambda']),
kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)))
return model
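# Illustrative sketch added for clarity; it is not part of the original module.
# The CNN_PARAMS values and the (n_ch, n_fc, 1) input shape are assumptions:
# they show how the model might be built and compiled, not a recommended setup.
def _example_cnn_model_usage():
    '''Minimal sketch: build and compile the SSVEP CNN with SGD.'''
    from keras import optimizers
    cnn_params = {'batch_size': 64, 'epochs': 50, 'droprate': 0.25,
                  'learning_rate': 0.001, 'lr_decay': 0.0, 'l2_lambda': 1e-4,
                  'momentum': 0.9, 'kernel_f': 10, 'n_ch': 3, 'num_classes': 2}
    input_shape = (cnn_params['n_ch'], 127, 1)  # channels x frequency bins x 1
    model = CNN_model(input_shape, cnn_params)
    sgd = optimizers.SGD(lr=cnn_params['learning_rate'],
                         decay=cnn_params['lr_decay'],
                         momentum=cnn_params['momentum'], nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd,
                  metrics=['accuracy'])
    return model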
| 45.302682 | 122 | 0.646059 |
9ab2fe0309c7a4013b14876c816c249e51d5359a
| 3,355 |
py
|
Python
|
tests/builtin_support.py
|
pterjan/fastnumbers
|
c2511e7c51dd131e05d6c434dc0bd59d8e6b8919
|
[
"MIT"
] | null | null | null |
tests/builtin_support.py
|
pterjan/fastnumbers
|
c2511e7c51dd131e05d6c434dc0bd59d8e6b8919
|
[
"MIT"
] | null | null | null |
tests/builtin_support.py
|
pterjan/fastnumbers
|
c2511e7c51dd131e05d6c434dc0bd59d8e6b8919
|
[
"MIT"
] | null | null | null |
"""Supporting definitions for the Python regression tests."""
import platform
import unittest
from typing import Any, Callable, Dict, Optional, Tuple
__all__ = [
"run_with_locale",
"cpython_only",
]
# =======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr: str, *locales: str) -> Callable[..., Any]:
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
def inner(*args: Any, **kwds: Any) -> Any:
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except: # noqa
# cannot retrieve original locale, so do nothing
locale = orig_locale = None # type: ignore
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except: # noqa
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
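# Illustrative usage sketch added for clarity; it is not part of the original
# module. The locale names below are assumptions and platform dependent: the
# decorator simply tries each candidate in turn and restores the original
# locale after the wrapped function returns.
@run_with_locale('LC_NUMERIC', 'de_DE.UTF-8', 'de_DE', 'German')
def _example_localized_formatting() -> str:
    """Minimal sketch: format a number under a temporarily switched locale."""
    return '{:n}'.format(1234567)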
# =======================================================================
# unittest integration.
def _id(obj: Any) -> Any:
return obj
def cpython_only(test: Any) -> Any:
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg: Optional[str] = None, **guards: bool) -> Callable[[Any], Any]:
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
msg = msg.format(" or ".join(sorted(guardnames.keys())))
return unittest.skip(msg)
def _parse_guards(guards: Dict[str, bool]) -> Tuple[Dict[str, bool], bool]:
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({"cpython": True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards: bool) -> bool:
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
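# Illustrative sketch added for clarity; it is not part of the original module.
# The test case below is hypothetical and only shows how the guards compose:
# cpython_only skips the test everywhere except CPython, while
# check_impl_detail answers the same question as a plain boolean.
class _ExampleImplDetailTest(unittest.TestCase):
    @cpython_only
    def test_runs_only_on_cpython(self) -> None:
        self.assertTrue(check_impl_detail(cpython=True))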
| 32.892157 | 84 | 0.575857 |
04e1c105308f9ac78a38e7e7b8462d434a75e6d1
| 144,335 |
py
|
Python
|
core/controllers/acl_decorators_test.py
|
kaylahardie/oppia
|
e93ed02dfc7f654ef4fb62268c1a9b9d9ded30ec
|
[
"Apache-2.0"
] | null | null | null |
core/controllers/acl_decorators_test.py
|
kaylahardie/oppia
|
e93ed02dfc7f654ef4fb62268c1a9b9d9ded30ec
|
[
"Apache-2.0"
] | 1 |
2020-03-02T21:05:42.000Z
|
2020-03-03T07:09:51.000Z
|
core/controllers/acl_decorators_test.py
|
kaylahardie/oppia
|
e93ed02dfc7f654ef4fb62268c1a9b9d9ded30ec
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.domain.acl_decorators."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import suggestion_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import webapp2
import webtest
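# Note on structure (added for clarity): every test class below follows the
# same pattern. A nested MockHandler applies the decorator under test to a
# trivial GET (or PUT) method, setUp wires that handler into a throwaway
# webapp2/webtest application, and each test swaps it in with
# self.swap(self, 'testapp', self.mock_testapp) so the decorator's behaviour
# can be asserted through ordinary HTTP status codes and JSON responses.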
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class PlayExplorationDecoratorTests(test_utils.GenericTestBase):
"""Tests for play exploration decorator."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_exploration
def get(self, exploration_id):
return self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(PlayExplorationDecoratorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_play_exploration/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_can_not_access_exploration_with_disabled_exploration_ids(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_exploration/%s'
% (feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404)
def test_guest_can_access_published_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_play_exploration/%s' % self.published_exp_id)
self.assertEqual(response['exploration_id'], self.published_exp_id)
def test_guest_cannot_access_private_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_exploration/%s' % self.private_exp_id,
expected_status_int=404)
def test_admin_can_access_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_play_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
def test_owner_can_access_private_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_play_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
def test_logged_in_user_cannot_access_not_owned_exploration(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_exploration/%s' % self.private_exp_id,
expected_status_int=404)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class PlayCollectionDecoratorTests(test_utils.GenericTestBase):
"""Tests for play collection decorator."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
published_col_id = 'col_id_1'
private_col_id = 'col_id_2'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_collection
def get(self, collection_id):
return self.render_json({'collection_id': collection_id})
def setUp(self):
super(PlayCollectionDecoratorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_play_collection/<collection_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
self.save_new_valid_collection(
self.published_col_id, self.owner_id,
exploration_id=self.published_col_id)
self.save_new_valid_collection(
self.private_col_id, self.owner_id,
exploration_id=self.private_col_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
rights_manager.publish_collection(self.owner, self.published_col_id)
def test_guest_can_access_published_collection(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_play_collection/%s' % self.published_col_id)
self.assertEqual(response['collection_id'], self.published_col_id)
def test_guest_cannot_access_private_collection(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_collection/%s' % self.private_col_id,
expected_status_int=404)
def test_admin_can_access_private_collection(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_play_collection/%s' % self.private_col_id)
self.assertEqual(response['collection_id'], self.private_col_id)
self.logout()
def test_owner_can_access_private_collection(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_play_collection/%s' % self.private_col_id)
self.assertEqual(response['collection_id'], self.private_col_id)
self.logout()
def test_logged_in_user_cannot_access_not_owned_private_collection(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_collection/%s' % self.private_col_id,
expected_status_int=404)
self.logout()
def test_cannot_access_collection_with_invalid_collection_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_collection/invalid_collection_id',
expected_status_int=404)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditCollectionDecoratorTests(test_utils.GenericTestBase):
"""Tests for can_edit_collection decorator."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
published_col_id = 'col_id_1'
private_col_id = 'col_id_2'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_collection
def get(self, collection_id):
return self.render_json({'collection_id': collection_id})
def setUp(self):
super(EditCollectionDecoratorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.set_moderators([self.MODERATOR_USERNAME])
self.set_collection_editors([self.OWNER_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_edit_collection/<collection_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
self.save_new_valid_collection(
self.published_col_id, self.owner_id,
exploration_id=self.published_col_id)
self.save_new_valid_collection(
self.private_col_id, self.owner_id,
exploration_id=self.private_col_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
rights_manager.publish_collection(self.owner, self.published_col_id)
def test_can_not_edit_collection_with_invalid_collection_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_collection/invalid_col_id', expected_status_int=404)
self.logout()
def test_guest_cannot_edit_collection_via_json_handler(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_collection/%s' % self.published_col_id,
expected_status_int=401)
def test_guest_is_redirected_when_using_html_handler(self):
with self.swap(
self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE',
feconf.HANDLER_TYPE_HTML):
response = self.mock_testapp.get(
'/mock_edit_collection/%s' % self.published_col_id,
expect_errors=True)
self.assertEqual(response.status_int, 302)
def test_normal_user_cannot_edit_collection(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_collection/%s' % self.private_col_id,
expected_status_int=401)
self.logout()
def test_owner_can_edit_owned_collection(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_collection/%s' % self.private_col_id)
self.assertEqual(response['collection_id'], self.private_col_id)
self.logout()
def test_moderator_cannot_edit_private_collection(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_collection/%s' % self.private_col_id,
expected_status_int=401)
self.logout()
def test_moderator_can_edit_public_collection(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_collection/%s' % self.published_col_id)
self.assertEqual(response['collection_id'], self.published_col_id)
self.logout()
def test_admin_can_edit_any_private_collection(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_collection/%s' % self.private_col_id)
self.assertEqual(response['collection_id'], self.private_col_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CreateExplorationDecoratorTests(test_utils.GenericTestBase):
"""Tests for can_create_exploration decorator."""
username = 'banneduser'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_create_exploration
def get(self):
self.render_json({'success': True})
def setUp(self):
super(CreateExplorationDecoratorTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.user_email, self.username)
self.set_banned_users([self.username])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/create', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_banned_user_cannot_create_exploration(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/create', expected_status_int=401)
self.logout()
def test_normal_user_can_create_exploration(self):
self.login(self.EDITOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/create')
self.assertEqual(response['success'], True)
self.logout()
def test_guest_cannot_create_exploration_via_json_handler(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/create', expected_status_int=401)
def test_guest_is_redirected_when_using_html_handler(self):
with self.swap(
self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE',
feconf.HANDLER_TYPE_HTML):
response = self.mock_testapp.get('/mock/create', expect_errors=True)
self.assertEqual(response.status_int, 302)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CreateCollectionDecoratorTests(test_utils.GenericTestBase):
"""Tests for can_create_collection decorator."""
username = 'collectioneditor'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_create_collection
def get(self):
self.render_json({'success': True})
def setUp(self):
super(CreateCollectionDecoratorTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.user_email, self.username)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_collection_editors([self.username])
self.set_admins([self.ADMIN_USERNAME])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/create', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_guest_cannot_create_collection_via_json_handler(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/create', expected_status_int=401)
def test_guest_is_redirected_when_using_html_handler(self):
with self.swap(
self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE',
feconf.HANDLER_TYPE_HTML):
response = self.mock_testapp.get('/mock/create', expect_errors=True)
self.assertEqual(response.status_int, 302)
def test_normal_user_cannot_create_collection(self):
self.login(self.EDITOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/create', expected_status_int=401)
self.logout()
def test_collection_editor_can_create_collection(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/create')
self.assertEqual(response['success'], True)
self.logout()
def test_admins_can_create_collection(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/create')
self.assertEqual(response['success'], True)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class AccessCreatorDashboardTests(test_utils.GenericTestBase):
"""Tests for can_access_creator_dashboard decorator."""
username = 'banneduser'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_creator_dashboard
def get(self):
self.render_json({'success': True})
def setUp(self):
super(AccessCreatorDashboardTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.user_email, self.username)
self.set_banned_users([self.username])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/access', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_banned_user_cannot_access_editor_dashboard(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/access', expected_status_int=401)
self.logout()
def test_normal_user_can_access_editor_dashboard(self):
self.login(self.EDITOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/access')
self.assertEqual(response['success'], True)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CommentOnFeedbackThreadTests(test_utils.GenericTestBase):
"""Tests for can_comment_on_feedback_thread decorator."""
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_comment_on_feedback_thread
def get(self, thread_id):
self.render_json({'thread_id': thread_id})
def setUp(self):
super(CommentOnFeedbackThreadTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.viewer_email, self.viewer_username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_comment_on_feedback_thread/<thread_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_can_not_comment_on_feedback_threads_with_disabled_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% feconf.DISABLED_EXPLORATION_IDS[0],
expected_status_int=404)
self.logout()
def test_viewer_cannot_comment_on_feedback_for_private_exploration(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% self.private_exp_id, expected_status_int=401)
self.assertEqual(
response['error'], 'You do not have credentials to comment on '
'exploration feedback.')
self.logout()
def test_can_not_comment_on_feedback_threads_with_invalid_thread_id(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_comment_on_feedback_thread/invalid_thread_id',
expected_status_int=400)
self.assertEqual(response['error'], 'Thread ID must contain a .')
self.logout()
def test_guest_cannot_comment_on_feedback_threads_via_json_handler(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.private_exp_id), expected_status_int=401)
self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.published_exp_id), expected_status_int=401)
def test_guest_is_redirected_when_using_html_handler(self):
with self.swap(
self.MockHandler, 'GET_HANDLER_ERROR_RETURN_TYPE',
feconf.HANDLER_TYPE_HTML):
response = self.mock_testapp.get(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.private_exp_id), expect_errors=True)
self.assertEqual(response.status_int, 302)
response = self.mock_testapp.get(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.published_exp_id), expect_errors=True)
self.assertEqual(response.status_int, 302)
def test_owner_can_comment_on_feedback_for_private_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.private_exp_id))
self.logout()
    def test_moderator_can_comment_on_feedback_for_public_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.published_exp_id))
self.logout()
    def test_admin_can_comment_on_feedback_for_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_comment_on_feedback_thread/exploration.%s.thread1'
% (self.private_exp_id))
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CreateFeedbackThreadTests(test_utils.GenericTestBase):
"""Tests for can_create_feedback_thread decorator."""
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_create_feedback_thread
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(CreateFeedbackThreadTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.viewer_email, self.viewer_username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_create_feedback_thread/<exploration_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_can_not_create_feedback_threads_with_disabled_exp_id(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_create_feedback_thread/%s'
% (feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404)
def test_viewer_cannot_create_feedback_for_private_exploration(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_create_feedback_thread/%s' % self.private_exp_id,
expected_status_int=401)
self.assertEqual(
response['error'], 'You do not have credentials to create '
'exploration feedback.')
self.logout()
def test_guest_can_create_feedback_threads_for_public_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_create_feedback_thread/%s' % self.published_exp_id)
    def test_owner_can_create_feedback_for_private_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_create_feedback_thread/%s' % self.private_exp_id)
self.logout()
    def test_moderator_can_create_feedback_for_public_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_create_feedback_thread/%s' % self.published_exp_id)
self.logout()
    def test_admin_can_create_feedback_for_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_create_feedback_thread/%s' % self.private_exp_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ViewFeedbackThreadTests(test_utils.GenericTestBase):
"""Tests for can_view_feedback_thread decorator."""
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_view_feedback_thread
def get(self, thread_id):
self.render_json({'thread_id': thread_id})
def setUp(self):
super(ViewFeedbackThreadTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.viewer_email, self.viewer_username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_view_feedback_thread/<thread_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_can_not_view_feedback_threads_with_disabled_exp_id(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_view_feedback_thread/exploration.%s.thread1'
% feconf.DISABLED_EXPLORATION_IDS[0],
expected_status_int=404)
def test_viewer_cannot_view_feedback_for_private_exploration(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_view_feedback_thread/exploration.%s.thread1'
% self.private_exp_id, expected_status_int=401)
self.assertEqual(
response['error'], 'You do not have credentials to view '
'exploration feedback.')
self.logout()
def test_guest_can_view_feedback_threads_for_public_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_view_feedback_thread/exploration.%s.thread1'
% (self.published_exp_id))
    def test_owner_can_view_feedback_for_private_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_view_feedback_thread/exploration.%s.thread1'
% (self.private_exp_id))
self.logout()
    def test_moderator_can_view_feedback_for_public_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_view_feedback_thread/exploration.%s.thread1'
% (self.published_exp_id))
self.logout()
    def test_admin_can_view_feedback_for_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_view_feedback_thread/exploration.%s.thread1'
% (self.private_exp_id))
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ManageEmailDashboardTests(test_utils.GenericTestBase):
"""Tests for can_manage_email_dashboard decorator."""
query_id = 'query_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_manage_email_dashboard
def get(self):
return self.render_json({'success': 1})
@acl_decorators.can_manage_email_dashboard
def put(self, query_id):
return self.render_json({'query_id': query_id})
def setUp(self):
super(ManageEmailDashboardTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.set_moderators([self.MODERATOR_USERNAME])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[
webapp2.Route('/mock/', self.MockHandler),
webapp2.Route('/mock/<query_id>', self.MockHandler)
],
debug=feconf.DEBUG,
))
def test_moderator_cannot_access_email_dashboard(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/', expected_status_int=401)
self.logout()
def test_admin_can_access_email_dashboard(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/')
self.assertEqual(response['success'], 1)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.mock_testapp.put('/mock/%s' % self.query_id)
self.assertEqual(response.status_int, 200)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class RateExplorationTests(test_utils.GenericTestBase):
"""Tests for can_rate_exploration decorator."""
username = 'user'
user_email = 'user@example.com'
exp_id = 'exp_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_rate_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(RateExplorationTests, self).setUp()
self.signup(self.user_email, self.username)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_guest_cannot_give_rating(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.exp_id, expected_status_int=401)
def test_normal_user_can_give_rating(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.exp_id)
self.assertEqual(response['exploration_id'], self.exp_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class AccessModeratorPageTests(test_utils.GenericTestBase):
username = 'user'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_moderator_page
def get(self):
return self.render_json({'success': 1})
def setUp(self):
super(AccessModeratorPageTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.set_admins([self.ADMIN_USERNAME])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_normal_user_cannot_access_moderator_page(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/', expected_status_int=401)
self.logout()
def test_admin_can_access_moderator_page(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/')
self.assertEqual(response['success'], 1)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class FlagExplorationTests(test_utils.GenericTestBase):
"""Tests for can_flag_exploration decorator."""
username = 'user'
user_email = 'user@example.com'
exp_id = 'exp_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_flag_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(FlagExplorationTests, self).setUp()
self.signup(self.user_email, self.username)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_guest_cannot_flag_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.exp_id, expected_status_int=401)
def test_normal_user_can_flag_exploration(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.exp_id)
self.assertEqual(response['exploration_id'], self.exp_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class SubscriptionToUsersTests(test_utils.GenericTestBase):
"""Tests for can_subscribe_to_users decorator."""
username = 'user'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_subscribe_to_users
def get(self):
self.render_json({'success': True})
def setUp(self):
super(SubscriptionToUsersTests, self).setUp()
self.signup(self.user_email, self.username)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_guest_cannot_subscribe_to_users(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/', expected_status_int=401)
def test_normal_user_can_subscribe_to_users(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/')
self.assertEqual(response['success'], True)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class SendModeratorEmailsTests(test_utils.GenericTestBase):
username = 'user'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_send_moderator_emails
def get(self):
return self.render_json({'success': 1})
def setUp(self):
super(SendModeratorEmailsTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.set_admins([self.ADMIN_USERNAME])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_normal_user_cannot_send_moderator_emails(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/', expected_status_int=401)
self.logout()
def test_admin_can_send_moderator_emails(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/')
self.assertEqual(response['success'], 1)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class VoiceoverExplorationTests(test_utils.GenericTestBase):
"""Tests for can_voiceover_exploration decorator."""
role = rights_manager.ROLE_VOICE_ARTIST
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banneduser@example.com'
published_exp_id_1 = 'exp_1'
published_exp_id_2 = 'exp_2'
private_exp_id_1 = 'exp_3'
private_exp_id_2 = 'exp_4'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_voiceover_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(VoiceoverExplorationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.signup(self.banned_user_email, self.banned_username)
self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.voice_artist_id = self.get_user_id_from_email(
self.VOICE_ARTIST_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.set_banned_users([self.banned_username])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id_1, self.owner_id)
self.save_new_valid_exploration(
self.published_exp_id_2, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id_1, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id_2, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id_1)
rights_manager.publish_exploration(self.owner, self.published_exp_id_2)
rights_manager.assign_role_for_exploration(
self.owner, self.published_exp_id_1, self.voice_artist_id,
self.role)
rights_manager.assign_role_for_exploration(
self.owner, self.private_exp_id_1, self.voice_artist_id, self.role)
def test_banned_user_cannot_voiceover_exploration(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_1, expected_status_int=401)
self.logout()
def test_owner_can_voiceover_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id_1)
self.assertEqual(response['exploration_id'], self.private_exp_id_1)
self.logout()
def test_moderator_can_voiceover_public_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.published_exp_id_1)
self.assertEqual(response['exploration_id'], self.published_exp_id_1)
self.logout()
def test_moderator_cannot_voiceover_private_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_1, expected_status_int=401)
self.logout()
def test_admin_can_voiceover_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id_1)
self.assertEqual(response['exploration_id'], self.private_exp_id_1)
self.logout()
def test_voice_artist_can_only_voiceover_assigned_public_exploration(self):
self.login(self.VOICE_ARTIST_EMAIL)
# Checking voice artist can voiceover assigned public exploration.
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.published_exp_id_1)
self.assertEqual(response['exploration_id'], self.published_exp_id_1)
        # Checking voice artist cannot voiceover a public exploration to which
        # they are not assigned.
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.published_exp_id_2, expected_status_int=401)
self.logout()
def test_voice_artist_can_only_voiceover_assigned_private_exploration(self):
self.login(self.VOICE_ARTIST_EMAIL)
# Checking voice artist can voiceover assigned private exploration.
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id_1)
self.assertEqual(response['exploration_id'], self.private_exp_id_1)
        # Checking voice artist cannot voiceover a private exploration to which
        # they are not assigned.
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_2, expected_status_int=401)
self.logout()
def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_public_exploration(self): # pylint: disable=line-too-long
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.published_exp_id_1, expected_status_int=401)
self.logout()
def test_user_without_voice_artist_role_of_exploration_cannot_voiceover_private_exploration(self): # pylint: disable=line-too-long
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_1, expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditExplorationTests(test_utils.GenericTestBase):
"""Tests for can_edit_exploration decorator."""
username = 'banneduser'
user_email = 'user@example.com'
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(EditExplorationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.set_banned_users([self.username])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_edit_exploration/<exploration_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_can_not_edit_exploration_with_invalid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_exploration/invalid_exp_id',
expected_status_int=404)
self.logout()
def test_banned_user_cannot_edit_exploration(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_exploration/%s' % self.private_exp_id,
expected_status_int=401)
self.logout()
def test_owner_can_edit_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
def test_moderator_can_edit_public_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_exploration/%s' % self.published_exp_id)
self.assertEqual(response['exploration_id'], self.published_exp_id)
self.logout()
def test_moderator_cannot_edit_private_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_exploration/%s' % self.private_exp_id,
expected_status_int=401)
self.logout()
def test_admin_can_edit_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ManageOwnAccountTests(test_utils.GenericTestBase):
"""Tests for decorator can_manage_own_account."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
username = 'user'
user_email = 'user@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_manage_own_account
def get(self):
return self.render_json({'success': 1})
def setUp(self):
super(ManageOwnAccountTests, self).setUp()
self.signup(self.banned_user_email, self.banned_user)
self.signup(self.user_email, self.username)
self.set_banned_users([self.banned_user])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_banned_user_cannot_update_preferences(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/', expected_status_int=401)
self.logout()
def test_normal_user_can_manage_preferences(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/')
self.assertEqual(response['success'], 1)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class UploadExplorationTests(test_utils.GenericTestBase):
"""Tests for can_upload_exploration decorator."""
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_upload_exploration
def get(self):
return self.render_json({})
def setUp(self):
super(UploadExplorationTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_upload_exploration/', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_super_admin_can_upload_explorations(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_upload_exploration/')
self.logout()
def test_normal_user_cannot_upload_explorations(self):
self.login(self.EDITOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_upload_exploration/', expected_status_int=401)
self.assertEqual(
response['error'],
'You do not have credentials to upload exploration.')
self.logout()
def test_guest_cannot_upload_explorations(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_upload_exploration/', expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class DeleteExplorationTests(test_utils.GenericTestBase):
"""Tests for can_delete_exploration decorator."""
private_exp_id = 'exp_0'
published_exp_id = 'exp_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_delete_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(DeleteExplorationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.set_moderators([self.MODERATOR_USERNAME])
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_delete_exploration/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_guest_can_not_delete_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_delete_exploration/%s' % self.private_exp_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_owner_can_delete_owned_private_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_delete_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
def test_moderator_can_delete_published_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_delete_exploration/%s' % self.published_exp_id)
self.assertEqual(response['exploration_id'], self.published_exp_id)
self.logout()
def test_owner_cannot_delete_published_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_delete_exploration/%s' % self.published_exp_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'User %s does not have permissions to delete exploration %s'
% (self.owner_id, self.published_exp_id))
self.logout()
def test_moderator_cannot_delete_private_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_delete_exploration/%s' % self.private_exp_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'User %s does not have permissions to delete exploration %s'
% (self.moderator_id, self.private_exp_id))
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class SuggestChangesToExplorationTests(test_utils.GenericTestBase):
"""Tests for can_suggest_changes_to_exploration decorator."""
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banned@example.com'
exploration_id = 'exp_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_suggest_changes_to_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(SuggestChangesToExplorationTests, self).setUp()
self.signup(self.user_email, self.username)
self.signup(self.banned_user_email, self.banned_username)
self.set_banned_users([self.banned_username])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_banned_user_cannot_suggest_changes(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.exploration_id, expected_status_int=401)
self.logout()
def test_normal_user_can_suggest_changes(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.exploration_id)
self.assertEqual(response['exploration_id'], self.exploration_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class SuggestChangesDecoratorsTests(test_utils.GenericTestBase):
"""Tests for can_suggest_changes decorator."""
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banned@example.com'
exploration_id = 'exp_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_suggest_changes
def get(self):
self.render_json({})
def setUp(self):
super(SuggestChangesDecoratorsTests, self).setUp()
self.signup(self.user_email, self.username)
self.signup(self.banned_user_email, self.banned_username)
self.set_banned_users([self.banned_username])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_banned_user_cannot_suggest_changes(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock', expected_status_int=401)
self.logout()
def test_normal_user_can_suggest_changes(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock')
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ResubmitSuggestionDecoratorsTests(test_utils.GenericTestBase):
"""Tests for can_resubmit_suggestion decorator."""
owner_username = 'owner'
owner_email = 'owner@example.com'
author_username = 'author'
author_email = 'author@example.com'
username = 'user'
user_email = 'user@example.com'
TARGET_TYPE = 'exploration'
SUGGESTION_TYPE = 'edit_exploration_state_content'
exploration_id = 'exp_id'
target_version_id = 1
change_dict = {
'cmd': 'edit_state_property',
'property_name': 'content',
'state_name': 'Introduction',
'new_value': ''
}
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_resubmit_suggestion
def get(self, suggestion_id):
self.render_json({'suggestion_id': suggestion_id})
def setUp(self):
super(ResubmitSuggestionDecoratorsTests, self).setUp()
self.signup(self.author_email, self.author_username)
self.signup(self.user_email, self.username)
self.signup(self.owner_email, self.owner_username)
self.author_id = self.get_user_id_from_email(self.author_email)
self.owner_id = self.get_user_id_from_email(self.owner_email)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<suggestion_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_default_exploration(self.exploration_id, self.owner_id)
suggestion_services.create_suggestion(
self.SUGGESTION_TYPE, self.TARGET_TYPE,
self.exploration_id, self.target_version_id,
self.author_id,
self.change_dict, '')
suggestion = suggestion_services.query_suggestions(
[('author_id', self.author_id),
('target_id', self.exploration_id)])[0]
self.suggestion_id = suggestion.suggestion_id
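    # create_suggestion() above is not used for its return value; setUp
    # instead recovers the generated id by querying on (author_id,
    # target_id) filter pairs and reading suggestion_id off the single
    # matching suggestion. The tests below then hit /mock/<suggestion_id>
    # as the author (allowed through) and as an unrelated user (401).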
def test_author_can_resubmit_suggestion(self):
self.login(self.author_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.suggestion_id)
self.assertEqual(response['suggestion_id'], self.suggestion_id)
self.logout()
def test_non_author_cannot_resubmit_suggestion(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.suggestion_id, expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class DecoratorForAcceptingSuggestionTests(test_utils.GenericTestBase):
"""Tests for get_decorator_for_accepting_suggestion decorator."""
AUTHOR_USERNAME = 'author'
AUTHOR_EMAIL = 'author@example.com'
VIEWER_USERNAME = 'user'
VIEWER_EMAIL = 'user@example.com'
TARGET_TYPE = 'exploration'
SUGGESTION_TYPE = 'edit_exploration_state_content'
EXPLORATION_ID = 'exp_id'
TARGET_VERSION_ID = 1
CHANGE_DICT = {
'cmd': 'edit_state_property',
'property_name': 'content',
'state_name': 'Introduction',
'new_value': ''
}
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.get_decorator_for_accepting_suggestion(
acl_decorators.can_edit_exploration)
def get(self, target_id, suggestion_id):
self.render_json({
'target_id': target_id,
'suggestion_id': suggestion_id
})
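    # Unlike the other decorators in this file,
    # get_decorator_for_accepting_suggestion is a decorator factory: it is
    # called with an edit-permission decorator (can_edit_exploration here)
    # and returns the decorator that actually wraps get(). Accepting a
    # suggestion on a target therefore needs the same rights as editing the
    # target itself, which is what the owner/viewer/guest tests below pin
    # down. Roughly (hypothetical shape, not the real implementation):
    #
    #     def get_decorator_for_accepting_suggestion(edit_decorator):
    #         def decorator(handler):
    #             # require login, then defer to the edit check on target_id
    #             return edit_decorator(<login-checking wrapper of handler>)
    #         return decorator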
def setUp(self):
super(DecoratorForAcceptingSuggestionTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_accept_suggestion/<target_id>/<suggestion_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_default_exploration(self.EXPLORATION_ID, self.owner_id)
rights_manager.publish_exploration(self.owner, self.EXPLORATION_ID)
suggestion_services.create_suggestion(
self.SUGGESTION_TYPE, self.TARGET_TYPE,
self.EXPLORATION_ID, self.TARGET_VERSION_ID,
self.author_id,
self.CHANGE_DICT, '')
suggestion = suggestion_services.query_suggestions(
[('author_id', self.author_id),
('target_id', self.EXPLORATION_ID)])[0]
self.suggestion_id = suggestion.suggestion_id
def test_guest_cannot_accept_suggestion(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_accept_suggestion/%s/%s'
% (self.EXPLORATION_ID, self.suggestion_id),
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_owner_can_accept_suggestion(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_accept_suggestion/%s/%s'
% (self.EXPLORATION_ID, self.suggestion_id))
self.assertEqual(response['suggestion_id'], self.suggestion_id)
self.assertEqual(response['target_id'], self.EXPLORATION_ID)
self.logout()
def test_viewer_cannot_accept_suggestion(self):
self.login(self.VIEWER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_accept_suggestion/%s/%s'
% (self.EXPLORATION_ID, self.suggestion_id),
expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class PublishExplorationTests(test_utils.GenericTestBase):
"""Tests for can_publish_exploration decorator."""
private_exp_id = 'exp_0'
public_exp_id = 'exp_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_publish_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(PublishExplorationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_publish_exploration/<exploration_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.public_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.public_exp_id)
def test_cannot_publish_exploration_with_invalid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_publish_exploration/invalid_exp_id',
expected_status_int=404)
self.logout()
def test_owner_can_publish_owned_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_publish_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
def test_already_published_exploration_cannot_be_published(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_publish_exploration/%s' % self.public_exp_id,
expected_status_int=401)
self.logout()
def test_moderator_cannot_publish_private_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_publish_exploration/%s' % self.private_exp_id,
expected_status_int=401)
self.logout()
def test_admin_can_publish_any_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_publish_exploration/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ModifyExplorationRolesTests(test_utils.GenericTestBase):
"""Tests for can_modify_exploration_roles decorator."""
private_exp_id = 'exp_0'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_modify_exploration_roles
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(ModifyExplorationRolesTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
def test_owner_can_modify_exploration_roles(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
def test_moderator_cannot_modify_roles_of_unowned_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id, expected_status_int=401)
self.logout()
def test_admin_can_modify_roles_of_any_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id)
self.assertEqual(response['exploration_id'], self.private_exp_id)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CollectionPublishStatusTests(test_utils.GenericTestBase):
"""Tests can_publish_collection and can_unpublish_collection decorators."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
published_col_id = 'col_id_1'
private_col_id = 'col_id_2'
class MockPublishHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_publish_collection
def get(self, collection_id):
return self.render_json({'collection_id': collection_id})
class MockUnpublishHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_unpublish_collection
def get(self, collection_id):
return self.render_json({'collection_id': collection_id})
def setUp(self):
super(CollectionPublishStatusTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.set_moderators([self.MODERATOR_USERNAME])
self.set_collection_editors([self.OWNER_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[
webapp2.Route(
'/mock_publish_collection/<collection_id>',
self.MockPublishHandler),
webapp2.Route(
'/mock_unpublish_collection/<collection_id>',
self.MockUnpublishHandler)
],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
self.save_new_valid_collection(
self.published_col_id, self.owner_id,
exploration_id=self.published_col_id)
self.save_new_valid_collection(
self.private_col_id, self.owner_id,
exploration_id=self.private_col_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
rights_manager.publish_collection(self.owner, self.published_col_id)
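    # Publishing and unpublishing are deliberately asymmetric, and the tests
    # below pin that down: the collection editor/owner can publish their own
    # private collection but cannot unpublish a public one, moderators can
    # unpublish, and admins can publish any collection, although publishing
    # an already-public collection is still rejected with a 401.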
def test_cannot_publish_collection_with_invalid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_publish_collection/invalid_col_id',
expected_status_int=404)
self.logout()
def test_cannot_unpublish_collection_with_invalid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_unpublish_collection/invalid_col_id',
expected_status_int=404)
self.logout()
def test_owner_can_publish_collection(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_publish_collection/%s' % self.private_col_id)
self.assertEqual(response['collection_id'], self.private_col_id)
self.logout()
def test_owner_cannot_unpublish_public_collection(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_unpublish_collection/%s' % self.published_col_id,
expected_status_int=401)
self.logout()
def test_moderator_can_unpublish_public_collection(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_unpublish_collection/%s' % self.published_col_id)
self.assertEqual(response['collection_id'], self.published_col_id)
self.logout()
def test_admin_can_publish_any_collection(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_publish_collection/%s' % self.private_col_id)
self.assertEqual(response['collection_id'], self.private_col_id)
self.logout()
def test_admin_cannot_publish_already_published_collection(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_publish_collection/%s' % self.published_col_id,
expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class AccessLearnerDashboardDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_access_learner_dashboard."""
user = 'user'
user_email = 'user@example.com'
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_learner_dashboard
def get(self):
return self.render_json({})
def setUp(self):
super(AccessLearnerDashboardDecoratorTests, self).setUp()
self.signup(self.user_email, self.user)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/', self.MockHandler)],
debug=feconf.DEBUG,
))
    def test_banned_user_cannot_access_learner_dashboard(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/', expected_status_int=401)
self.logout()
    def test_normal_user_can_access_learner_dashboard(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock/')
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditTopicDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_edit_topic."""
manager_username = 'topicmanager'
manager_email = 'topicmanager@example.com'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
topic_id = 'topic_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_topic
def get(self, topic_id):
self.render_json({'topic_id': topic_id})
def setUp(self):
super(EditTopicDecoratorTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.manager_email, self.manager_username)
self.signup(self.viewer_email, self.viewer_username)
self.set_admins([self.ADMIN_USERNAME])
self.set_topic_managers([self.manager_username])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.manager_id = self.get_user_id_from_email(self.manager_email)
self.viewer_id = self.get_user_id_from_email(self.viewer_email)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.manager = user_services.UserActionsInfo(self.manager_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_edit_topic/<topic_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
self.topic_id, self.viewer_id, name='Name',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
topic_services.create_new_topic_rights(self.topic_id, self.admin_id)
topic_services.assign_role(
self.admin, self.manager, topic_domain.ROLE_MANAGER, self.topic_id)
def test_can_not_edit_topic_with_invalid_topic_id(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_topic/invalid_topic_id', expected_status_int=404)
self.logout()
def test_admin_can_edit_topic(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_topic/%s' % self.topic_id)
self.assertEqual(response['topic_id'], self.topic_id)
self.logout()
def test_topic_manager_can_edit_topic(self):
self.login(self.manager_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_topic/%s' % self.topic_id)
self.assertEqual(response['topic_id'], self.topic_id)
self.logout()
def test_normal_user_cannot_edit_topic(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_topic/%s' % self.topic_id, expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditStoryDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_edit_story."""
manager_username = 'topicmanager'
manager_email = 'topicmanager@example.com'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_story
def get(self, story_id):
self.render_json({'story_id': story_id})
def setUp(self):
super(EditStoryDecoratorTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_edit_story/<story_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.story_id = story_services.get_new_story_id()
self.topic_id = topic_services.get_new_topic_id()
self.save_new_story(self.story_id, self.admin_id, self.topic_id)
self.save_new_topic(
self.topic_id, self.admin_id, name='Name',
description='Description', canonical_story_ids=[self.story_id],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
topic_services.create_new_topic_rights(self.topic_id, self.admin_id)
def test_can_not_edit_story_with_invalid_story_id(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_story/story_id_new', expected_status_int=404)
self.logout()
def test_can_not_edit_story_with_invalid_topic_id(self):
self.login(self.ADMIN_EMAIL)
story_id = story_services.get_new_story_id()
topic_id = topic_services.get_new_topic_id()
self.save_new_story(story_id, self.admin_id, topic_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_story/%s' % story_id, expected_status_int=404)
self.logout()
def test_admin_can_edit_story(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_story/%s' % self.story_id)
self.assertEqual(response['story_id'], self.story_id)
self.logout()
def test_topic_manager_can_edit_story(self):
self.signup(self.manager_email, self.manager_username)
self.set_topic_managers([self.manager_username])
manager_id = self.get_user_id_from_email(self.manager_email)
manager = user_services.UserActionsInfo(manager_id)
topic_services.assign_role(
self.admin, manager, topic_domain.ROLE_MANAGER, self.topic_id)
self.login(self.manager_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_story/%s' % self.story_id)
self.assertEqual(response['story_id'], self.story_id)
self.logout()
def test_normal_user_cannot_edit_story(self):
self.signup(self.viewer_email, self.viewer_username)
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_story/%s' % self.story_id, expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class AddStoryToTopicTests(test_utils.GenericTestBase):
"""Tests for decorator can_add_new_story_to_topic."""
manager_username = 'topicmanager'
manager_email = 'topicmanager@example.com'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
topic_id = 'topic_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_add_new_story_to_topic
def get(self, topic_id):
self.render_json({'topic_id': topic_id})
def setUp(self):
super(AddStoryToTopicTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.manager_email, self.manager_username)
self.signup(self.viewer_email, self.viewer_username)
self.set_admins([self.ADMIN_USERNAME])
self.set_topic_managers([self.manager_username])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.manager_id = self.get_user_id_from_email(self.manager_email)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.manager = user_services.UserActionsInfo(self.manager_id)
self.viewer_id = self.get_user_id_from_email(self.viewer_email)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_add_story_to_topic/<topic_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
self.topic_id, self.viewer_id, name='Name',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
topic_services.create_new_topic_rights(self.topic_id, self.admin_id)
topic_services.assign_role(
self.admin, self.manager, topic_domain.ROLE_MANAGER, self.topic_id)
def test_can_not_add_story_to_topic_with_invalid_topic_id(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_add_story_to_topic/invalid_topic_id',
expected_status_int=404)
self.logout()
def test_admin_can_add_story_to_topic(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_add_story_to_topic/%s' % self.topic_id)
self.assertEqual(response['topic_id'], self.topic_id)
self.logout()
def test_topic_manager_cannot_add_story_to_topic_with_invalid_topic_id(
self):
self.login(self.manager_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_add_story_to_topic/incorrect_id',
expected_status_int=404)
self.logout()
def test_topic_manager_can_add_story_to_topic(self):
self.login(self.manager_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_add_story_to_topic/%s' % self.topic_id)
self.assertEqual(response['topic_id'], self.topic_id)
self.logout()
def test_normal_user_cannot_add_story_to_topic(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_add_story_to_topic/%s' % self.topic_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You do not have credentials to add a story to this topic.')
self.logout()
def test_guest_cannot_add_story_to_topic(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_add_story_to_topic/%s' % self.topic_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class StoryViewerTests(test_utils.GenericTestBase):
"""Tests for decorator can_access_story_viewer_page."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockDataHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_story_viewer_page
def get(self, story_url_fragment):
self.render_json({'story_url_fragment': story_url_fragment})
class MockPageHandler(base.BaseHandler):
@acl_decorators.can_access_story_viewer_page
def get(self, _):
self.render_template('story-viewer-page.mainpage.html')
def setUp(self):
super(StoryViewerTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
story_data_url = (
'/mock_story_data/<classroom_url_fragment>/'
'<topic_url_fragment>/<story_url_fragment>')
story_page_url = (
'/mock_story_page/<classroom_url_fragment>/'
'<topic_url_fragment>/story/<story_url_fragment>')
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[
webapp2.Route(story_data_url, self.MockDataHandler),
webapp2.Route(story_page_url, self.MockPageHandler)
],
debug=feconf.DEBUG,
))
self.topic_id = topic_services.get_new_topic_id()
self.story_id = story_services.get_new_story_id()
self.story_url_fragment = 'story-frag'
self.save_new_story(
self.story_id, self.admin_id, self.topic_id,
url_fragment=self.story_url_fragment)
subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
1, 'Subtopic Title 1')
subtopic_1.skill_ids = ['skill_id_1']
subtopic_1.url_fragment = 'sub-one-frag'
self.save_new_topic(
self.topic_id, self.admin_id, name='Name',
description='Description', canonical_story_ids=[self.story_id],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[subtopic_1], next_subtopic_id=2)
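    # The viewer routes carry three URL fragments
    # (<classroom>/<topic>/story/<story> on the page route). The decorator
    # responds 404 while either the topic or the story is unpublished and
    # serves the page once both are published; when a fragment in the
    # request does not match the stored (lowercase) values it issues a 302
    # redirect, either back to the topic's story list or to the corrected
    # canonical URL, as the redirect tests below spell out.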
def test_cannot_access_non_existent_story(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_story_data/staging/topic/non-existent-frag',
expected_status_int=404)
def test_cannot_access_story_when_topic_is_not_published(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_story_data/staging/topic/%s'
% self.story_url_fragment,
expected_status_int=404)
def test_cannot_access_story_when_story_is_not_published(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_story_data/staging/topic/%s'
% self.story_url_fragment,
expected_status_int=404)
def test_can_access_story_when_story_and_topic_are_published(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_story_data/staging/topic/%s'
% self.story_url_fragment,
expected_status_int=200)
def test_can_access_story_when_all_url_fragments_are_valid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_html_response(
'/mock_story_page/staging/topic/story/%s'
% self.story_url_fragment,
expected_status_int=200)
def test_redirect_to_story_page_if_story_url_fragment_is_invalid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_story_page/staging/topic/story/000',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic/story',
response.headers['location'])
def test_redirect_to_correct_url_if_abbreviated_topic_is_invalid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_story_page/staging/invalid-topic/story/%s'
% self.story_url_fragment,
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic/story/%s'
% self.story_url_fragment,
response.headers['location'])
def test_redirect_with_correct_classroom_name_in_url(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_story_page/math/topic/story/%s'
% self.story_url_fragment,
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic/story/%s'
% self.story_url_fragment,
response.headers['location'])
def test_redirect_lowercase_story_url_fragment(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
topic_services.publish_story(
self.topic_id, self.story_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_story_page/staging/topic/story/Story-frag',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic/story/story-frag',
response.headers['location'])
class SubtopicViewerTests(test_utils.GenericTestBase):
"""Tests for decorator can_access_subtopic_viewer_page."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockDataHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_subtopic_viewer_page
def get(self, unused_topic_url_fragment, subtopic_url_fragment):
self.render_json({'subtopic_url_fragment': subtopic_url_fragment})
class MockPageHandler(base.BaseHandler):
@acl_decorators.can_access_subtopic_viewer_page
def get(self, unused_topic_url_fragment, unused_subtopic_url_fragment):
self.render_template('subtopic-viewer-page.mainpage.html')
def setUp(self):
super(SubtopicViewerTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
subtopic_data_url = (
'/mock_subtopic_data/<classroom_url_fragment>/'
'<topic_url_fragment>/<subtopic_url_fragment>')
subtopic_page_url = (
'/mock_subtopic_page/<classroom_url_fragment>/'
'<topic_url_fragment>/revision/<subtopic_url_fragment>')
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[
webapp2.Route(subtopic_data_url, self.MockDataHandler),
webapp2.Route(subtopic_page_url, self.MockPageHandler)
],
debug=feconf.DEBUG,
))
self.topic_id = topic_services.get_new_topic_id()
subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
1, 'Subtopic Title 1')
subtopic_1.skill_ids = ['skill_id_1']
subtopic_1.url_fragment = 'sub-one-frag'
subtopic_2 = topic_domain.Subtopic.create_default_subtopic(
2, 'Subtopic Title 2')
subtopic_2.skill_ids = ['skill_id_2']
subtopic_2.url_fragment = 'sub-two-frag'
self.subtopic_page_1 = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
1, self.topic_id))
subtopic_page_services.save_subtopic_page(
self.admin_id, self.subtopic_page_1, 'Added subtopic',
[topic_domain.TopicChange({
'cmd': topic_domain.CMD_ADD_SUBTOPIC,
'subtopic_id': 1,
'title': 'Sample'
})]
)
self.save_new_topic(
self.topic_id, self.admin_id, name='topic name',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[subtopic_1, subtopic_2], next_subtopic_id=3,
url_fragment='topic-frag')
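    # Subtopic viewer URLs have the shape
    # <classroom>/<topic_url_fragment>/revision/<subtopic_url_fragment>.
    # Only subtopic 1 gets a SubtopicPage here (created via a
    # CMD_ADD_SUBTOPIC TopicChange); subtopic 2 exists only on the topic,
    # which is enough for the fragment-matching and redirect tests below.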
def test_cannot_access_non_existent_subtopic(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_subtopic_data/staging/topic-frag/non-existent-frag',
expected_status_int=404)
def test_cannot_access_subtopic_when_topic_is_not_published(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_subtopic_data/staging/topic-frag/sub-one-frag',
expected_status_int=404)
def test_can_access_subtopic_when_topic_is_published(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_subtopic_data/staging/topic-frag/sub-one-frag',
expected_status_int=200)
def test_can_access_subtopic_when_all_url_fragments_are_valid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_html_response(
'/mock_subtopic_page/staging/topic-frag/revision/sub-one-frag',
expected_status_int=200)
def test_fall_back_to_revision_page_if_subtopic_url_frag_is_invalid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_subtopic_page/staging/topic-frag/revision/000',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic-frag/revision',
response.headers['location'])
def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_subtopic_page/math/invalid-topic/revision/sub-one-frag',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/math',
response.headers['location'])
def test_redirect_with_correct_classroom_name_in_url(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_subtopic_page/math/topic-frag/revision/sub-one-frag',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic-frag/revision'
'/sub-one-frag',
response.headers['location'])
def test_redirect_with_lowercase_subtopic_url_fragment(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_subtopic_page/staging/topic-frag/revision/Sub-One-Frag',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic-frag/revision'
'/sub-one-frag',
response.headers['location'])
class TopicViewerTests(test_utils.GenericTestBase):
"""Tests for decorator can_access_topic_viewer_page."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockDataHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_topic_viewer_page
def get(self, topic_name):
self.render_json({'topic_name': topic_name})
class MockPageHandler(base.BaseHandler):
@acl_decorators.can_access_topic_viewer_page
def get(self, unused_topic_name):
self.render_template('topic-viewer-page.mainpage.html')
def setUp(self):
super(TopicViewerTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
topic_data_url = (
'/mock_topic_data/<classroom_url_fragment>/<topic_url_fragment>')
topic_page_url = (
'/mock_topic_page/<classroom_url_fragment>/<topic_url_fragment>')
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[
webapp2.Route(topic_data_url, self.MockDataHandler),
webapp2.Route(topic_page_url, self.MockPageHandler)
],
debug=feconf.DEBUG,
))
self.topic_id = topic_services.get_new_topic_id()
subtopic_1 = topic_domain.Subtopic.create_default_subtopic(
1, 'Subtopic Title 1')
subtopic_1.skill_ids = ['skill_id_1']
subtopic_1.url_fragment = 'sub-one-frag'
self.save_new_topic(
self.topic_id, self.admin_id, name='Name',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[subtopic_1], next_subtopic_id=2)
def test_cannot_access_non_existent_topic(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_topic_data/staging/invalid-topic',
expected_status_int=404)
def test_cannot_access_unpublished_topic(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_topic_data/staging/topic',
expected_status_int=404)
def test_can_access_published_topic(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_topic_data/staging/topic',
expected_status_int=200)
def test_can_access_topic_when_all_url_fragments_are_valid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_html_response(
'/mock_topic_page/staging/topic',
expected_status_int=200)
def test_redirect_to_classroom_if_abbreviated_topic_is_invalid(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_topic_page/math/invalid-topic',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/math',
response.headers['location'])
def test_redirect_with_correct_classroom_name_in_url(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_topic_page/math/topic',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic',
response.headers['location'])
def test_redirect_with_lowercase_topic_url_fragment(self):
topic_services.publish_topic(self.topic_id, self.admin_id)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_html_response(
'/mock_topic_page/staging/TOPIC',
expected_status_int=302)
self.assertEqual(
'http://localhost/learn/staging/topic',
response.headers['location'])
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CreateSkillTests(test_utils.GenericTestBase):
"""Tests for decorator can_create_skill."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_create_skill
def get(self):
self.render_json({})
def setUp(self):
super(CreateSkillTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_create_skill', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_admin_can_create_skill(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_create_skill')
self.logout()
def test_banned_user_cannot_create_skill(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_create_skill', expected_status_int=401)
self.assertEqual(
response['error'],
'You do not have credentials to create a skill.')
self.logout()
    def test_guest_cannot_create_skill(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_create_skill', expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ManageQuestionSkillStatusTests(test_utils.GenericTestBase):
"""Tests for decorator can_manage_question_skill_status."""
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
skill_id = '1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_manage_question_skill_status
def get(self, skill_id):
self.render_json({'skill_id': skill_id})
def setUp(self):
super(ManageQuestionSkillStatusTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.viewer_email, self.viewer_username)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_manage_question_skill_status/<skill_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.question_id = question_services.get_new_question_id()
self.question = self.save_new_question(
self.question_id, self.admin_id,
self._create_valid_question_data('ABC'), [self.skill_id])
question_services.create_new_question_skill_link(
self.admin_id, self.question_id, self.skill_id, 0.5)
def test_admin_can_manage_question_skill_status(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_manage_question_skill_status/%s' % self.skill_id)
self.assertEqual(response['skill_id'], self.skill_id)
self.logout()
def test_viewer_cannot_manage_question_skill_status(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_manage_question_skill_status/%s' % self.skill_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You do not have credentials to publish a question.')
self.logout()
def test_guest_cannot_manage_question_skill_status(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_manage_question_skill_status/%s' % self.skill_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class CreateTopicTests(test_utils.GenericTestBase):
"""Tests for decorator can_create_topic."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_create_topic
def get(self):
self.render_json({})
def setUp(self):
super(CreateTopicTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_create_topic', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_admin_can_create_topic(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_create_topic')
self.logout()
def test_banned_user_cannot_create_topic(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_create_topic', expected_status_int=401)
self.assertIn(
'does not have enough rights to create a topic.',
response['error'])
self.logout()
def test_guest_cannot_create_topic(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_create_topic', expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ManageRightsForTopicTests(test_utils.GenericTestBase):
"""Tests for decorator can_manage_rights_for_topic."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
topic_id = 'topic_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_manage_rights_for_topic
def get(self, topic_id):
self.render_json({'topic_id': topic_id})
def setUp(self):
super(ManageRightsForTopicTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_manage_rights_for_topic/<topic_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
topic_services.create_new_topic_rights(self.topic_id, self.admin_id)
def test_admin_can_manage_rights(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_manage_rights_for_topic/%s' % self.topic_id)
self.logout()
def test_banned_user_cannot_manage_rights(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_manage_rights_for_topic/%s' % self.topic_id,
expected_status_int=401)
self.assertIn(
'does not have enough rights to assign roles for the topic.',
response['error'])
self.logout()
def test_guest_cannot_manage_rights(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_manage_rights_for_topic/%s' % self.topic_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class ChangeTopicPublicationStatusTests(test_utils.GenericTestBase):
"""Tests for decorator can_change_topic_publication_status."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_change_topic_publication_status
def get(self, topic_id):
self.render_json({
                'topic_id': topic_id
})
def setUp(self):
super(ChangeTopicPublicationStatusTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.banned_user_email, self.banned_user)
self.set_banned_users([self.banned_user])
self.topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
self.topic_id, self.admin_id, name='Name1',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_change_publication_status/<topic_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
def test_admin_can_change_topic_publication_status(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_change_publication_status/%s' % self.topic_id)
self.logout()
def test_can_not_change_topic_publication_status_with_invalid_topic_id(
self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_change_publication_status/invalid_topic_id',
expected_status_int=404)
self.logout()
def test_banned_user_cannot_change_topic_publication_status(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_change_publication_status/%s' % self.topic_id,
expected_status_int=401)
self.assertIn(
'does not have enough rights to publish or unpublish the '
'topic.', response['error'])
self.logout()
def test_guest_cannot_change_topic_publication_status(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_change_publication_status/%s' % self.topic_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class PerformCronTaskTests(test_utils.GenericTestBase):
"""Tests for decorator can_perform_cron_tasks."""
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_perform_cron_tasks
def get(self):
self.render_json({})
def setUp(self):
super(PerformCronTaskTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.viewer_email, self.viewer_username)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_perform_cron_task', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_super_admin_can_perform_cron_tasks(self):
self.login(self.ADMIN_EMAIL, is_super_admin=True)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_perform_cron_task')
self.logout()
def test_normal_user_cannot_perform_cron_tasks(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_perform_cron_task', expected_status_int=401)
self.assertEqual(
response['error'],
'You do not have the credentials to access this page.')
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditSkillDecoratorTests(test_utils.GenericTestBase):
"""Tests permissions for accessing the skill editor."""
second_admin_username = 'adm2'
second_admin_email = 'adm2@example.com'
manager_username = 'topicmanager'
manager_email = 'topicmanager@example.com'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
skill_id = '1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_skill
def get(self, skill_id):
self.render_json({'skill_id': skill_id})
def setUp(self):
super(EditSkillDecoratorTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.second_admin_email, self.second_admin_username)
self.signup(self.manager_email, self.manager_username)
self.signup(self.viewer_email, self.viewer_username)
self.set_admins([self.ADMIN_USERNAME, self.second_admin_username])
self.set_topic_managers([self.manager_username])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.second_admin_id = self.get_user_id_from_email(
self.second_admin_email)
self.manager_id = self.get_user_id_from_email(self.manager_email)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.manager = user_services.UserActionsInfo(self.manager_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock_edit_skill/<skill_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_cannot_edit_skill_with_invalid_skill_id(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_custom_response(
'/mock_edit_skill/', 'text/plain', expected_status_int=404)
self.logout()
def test_admin_can_edit_skill(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_skill/%s' % self.skill_id)
self.assertEqual(response['skill_id'], self.skill_id)
self.logout()
def test_admin_can_edit_other_public_skill(self):
self.login(self.second_admin_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_skill/%s' % self.skill_id)
self.assertEqual(response['skill_id'], self.skill_id)
self.logout()
def test_topic_manager_can_edit_public_skill(self):
self.login(self.manager_email)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_skill/%s' % self.skill_id)
self.assertEqual(response['skill_id'], self.skill_id)
self.logout()
def test_normal_user_can_not_edit_public_skill(self):
self.login(self.viewer_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_skill/%s' % self.skill_id, expected_status_int=401)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditQuestionDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_edit_question."""
question_id = 'question_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_question
def get(self, question_id):
self.render_json({'question_id': question_id})
def setUp(self):
super(EditQuestionDecoratorTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup('a@example.com', 'A')
self.signup('b@example.com', 'B')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.user_id_a = self.get_user_id_from_email('a@example.com')
self.user_id_b = self.get_user_id_from_email('b@example.com')
self.set_admins([self.ADMIN_USERNAME])
self.set_topic_managers([user_services.get_username(self.user_id_a)])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.manager_id = self.get_user_id_from_email('a@example.com')
self.question_id = 'question_id'
self.save_new_question(
self.question_id, self.owner_id,
self._create_valid_question_data('ABC'), ['skill_1'])
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_edit_question/<question_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
def test_guest_cannot_edit_question(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_question/%s' % self.question_id,
expected_status_int=401)
self.assertEqual(
response['error'],
'You must be logged in to access this resource.')
def test_cannot_edit_question_with_invalid_question_id(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_question/invalid_question_id',
expected_status_int=404)
self.logout()
def test_admin_can_edit_question(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_question/%s' % self.question_id)
self.assertEqual(response['question_id'], self.question_id)
self.logout()
def test_topic_manager_can_edit_question(self):
self.login('a@example.com')
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_question/%s' % self.question_id)
self.assertEqual(response['question_id'], self.question_id)
self.logout()
def test_any_user_cannot_edit_question(self):
self.login('b@example.com')
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_question/%s' % self.question_id,
expected_status_int=401)
self.logout()
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class PlayQuestionDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_play_question."""
question_id = 'question_id'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_question
def get(self, question_id):
self.render_json({'question_id': question_id})
def setUp(self):
super(PlayQuestionDecoratorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_play_question/<question_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_question(
self.question_id, self.owner_id,
self._create_valid_question_data('ABC'), ['skill_1'])
def test_can_play_question_with_valid_question_id(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_play_question/%s' % (
self.question_id))
self.assertEqual(response['question_id'], self.question_id)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class PlayEntityDecoratorTests(test_utils.GenericTestBase):
"""Test the decorator can_play_entity."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_entity
def get(self, entity_type, entity_id):
self.render_json(
{'entity_type': entity_type, 'entity_id': entity_id})
def setUp(self):
super(PlayEntityDecoratorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_play_entity/<entity_type>/<entity_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.question_id = question_services.get_new_question_id()
self.save_new_question(
self.question_id, self.owner_id,
self._create_valid_question_data('ABC'), ['skill_1'])
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_cannot_play_exploration_on_disabled_exploration_ids(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_play_entity/%s/%s' % (
feconf.ENTITY_TYPE_EXPLORATION,
feconf.DISABLED_EXPLORATION_IDS[0]), expected_status_int=404)
def test_guest_can_play_exploration_on_published_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_play_entity/%s/%s' % (
feconf.ENTITY_TYPE_EXPLORATION, self.published_exp_id))
self.assertEqual(
response['entity_type'], feconf.ENTITY_TYPE_EXPLORATION)
self.assertEqual(
response['entity_id'], self.published_exp_id)
def test_guest_cannot_play_exploration_on_private_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_play_entity/%s/%s' % (
feconf.ENTITY_TYPE_EXPLORATION,
self.private_exp_id), expected_status_int=404)
def test_cannot_play_exploration_with_none_exploration_rights(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_play_entity/%s/%s'
% (feconf.ENTITY_TYPE_EXPLORATION, 'fake_exp_id'),
expected_status_int=404)
def test_can_play_question_for_valid_question_id(self):
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_play_entity/%s/%s' % (
feconf.ENTITY_TYPE_QUESTION, self.question_id))
self.assertEqual(
response['entity_type'], feconf.ENTITY_TYPE_QUESTION)
self.assertEqual(response['entity_id'], self.question_id)
self.assertEqual(response['entity_type'], 'question')
def test_cannot_play_question_invalid_question_id(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_play_entity/%s/%s' % (
feconf.ENTITY_TYPE_QUESTION, 'question_id'),
expected_status_int=404)
def test_cannot_play_entity_for_invalid_entity(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_play_entity/%s/%s' % (
'fake_entity_type', 'fake_entity_id'), expected_status_int=404)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class EditEntityDecoratorTests(test_utils.GenericTestBase):
username = 'banneduser'
user_email = 'user@example.com'
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_entity
def get(self, entity_type, entity_id):
return self.render_json(
{'entity_type': entity_type, 'entity_id': entity_id})
def setUp(self):
super(EditEntityDecoratorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.set_banned_users([self.username])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route(
'/mock_edit_entity/<entity_type>/<entity_id>',
self.MockHandler)],
debug=feconf.DEBUG,
))
self.question_id = question_services.get_new_question_id()
self.save_new_question(
self.question_id, self.owner_id,
self._create_valid_question_data('ABC'), ['skill_1'])
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_can_edit_exploration_with_valid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json(
'/mock_edit_entity/exploration/%s' % (
self.published_exp_id))
self.assertEqual(
response['entity_type'], feconf.ENTITY_TYPE_EXPLORATION)
self.assertEqual(
response['entity_id'], self.published_exp_id)
self.logout()
def test_cannot_edit_exploration_with_invalid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_entity/exploration/invalid_exp_id',
expected_status_int=404)
self.logout()
def test_banned_user_cannot_edit_exploration(self):
self.login(self.user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_entity/%s/%s' % (
feconf.ENTITY_TYPE_EXPLORATION, self.private_exp_id),
expected_status_int=401)
self.logout()
def test_can_edit_question_with_valid_question_id(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_entity/%s/%s' % (
feconf.ENTITY_TYPE_QUESTION, self.question_id))
self.assertEqual(response['entity_id'], self.question_id)
self.assertEqual(response['entity_type'], 'question')
self.logout()
def test_can_edit_topic(self):
self.login(self.ADMIN_EMAIL)
topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
topic_id, self.admin_id, name='Name',
description='Description', canonical_story_ids=[],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_entity/%s/%s' % (
feconf.ENTITY_TYPE_TOPIC, topic_id))
self.assertEqual(response['entity_id'], topic_id)
self.assertEqual(response['entity_type'], 'topic')
self.logout()
def test_cannot_edit_topic_with_invalid_topic_id(self):
self.login(self.ADMIN_EMAIL)
topic_id = 'incorrect_id'
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock_edit_entity/%s/%s' % (
feconf.ENTITY_TYPE_TOPIC, topic_id),
expected_status_int=404)
self.logout()
def test_can_edit_skill(self):
self.login(self.ADMIN_EMAIL)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.admin_id, description='Description')
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_entity/%s/%s' % (
feconf.ENTITY_TYPE_SKILL, skill_id))
self.assertEqual(response['entity_id'], skill_id)
self.assertEqual(response['entity_type'], 'skill')
self.logout()
def test_can_edit_story(self):
self.login(self.ADMIN_EMAIL)
story_id = story_services.get_new_story_id()
topic_id = topic_services.get_new_topic_id()
self.save_new_story(story_id, self.admin_id, topic_id)
self.save_new_topic(
topic_id, self.admin_id, name='Name',
description='Description', canonical_story_ids=[story_id],
additional_story_ids=[], uncategorized_skill_ids=[],
subtopics=[], next_subtopic_id=1)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock_edit_entity/%s/%s' % (
feconf.ENTITY_TYPE_STORY, story_id))
self.assertEqual(response['entity_id'], story_id)
self.assertEqual(response['entity_type'], 'story')
self.logout()
def test_cannot_edit_entity_invalid_entity(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json('/mock_edit_entity/%s/%s' % (
'invalid_entity_type', 'q_id'), expected_status_int=404)
# TODO(#10110): Add tests to verify the learner role has correct permissions.
class SaveExplorationTests(test_utils.GenericTestBase):
"""Tests for can_save_exploration decorator."""
role = rights_manager.ROLE_VOICE_ARTIST
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banneduser@example.com'
published_exp_id_1 = 'exp_1'
published_exp_id_2 = 'exp_2'
private_exp_id_1 = 'exp_3'
private_exp_id_2 = 'exp_4'
class MockHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_save_exploration
def get(self, exploration_id):
self.render_json({'exploration_id': exploration_id})
def setUp(self):
super(SaveExplorationTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.user_email, self.username)
self.signup(self.banned_user_email, self.banned_username)
self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.voice_artist_id = self.get_user_id_from_email(
self.VOICE_ARTIST_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.set_admins([self.ADMIN_USERNAME])
self.set_banned_users([self.banned_username])
self.owner = user_services.UserActionsInfo(self.owner_id)
self.mock_testapp = webtest.TestApp(webapp2.WSGIApplication(
[webapp2.Route('/mock/<exploration_id>', self.MockHandler)],
debug=feconf.DEBUG,
))
self.save_new_valid_exploration(
self.published_exp_id_1, self.owner_id)
self.save_new_valid_exploration(
self.published_exp_id_2, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id_1, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id_2, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id_1)
rights_manager.publish_exploration(self.owner, self.published_exp_id_2)
rights_manager.assign_role_for_exploration(
self.owner, self.published_exp_id_1, self.voice_artist_id,
self.role)
rights_manager.assign_role_for_exploration(
self.owner, self.private_exp_id_1, self.voice_artist_id, self.role)
    def test_unauthenticated_user_cannot_save_exploration(self):
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_1, expected_status_int=401)
def test_can_not_save_exploration_with_invalid_exp_id(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/invalid_exp_id', expected_status_int=404)
self.logout()
def test_banned_user_cannot_save_exploration(self):
self.login(self.banned_user_email)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_1, expected_status_int=401)
self.logout()
def test_owner_can_save_exploration(self):
self.login(self.OWNER_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id_1)
self.assertEqual(response['exploration_id'], self.private_exp_id_1)
self.logout()
def test_moderator_can_save_public_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.published_exp_id_1)
self.assertEqual(response['exploration_id'], self.published_exp_id_1)
self.logout()
def test_moderator_cannot_save_private_exploration(self):
self.login(self.MODERATOR_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.private_exp_id_1, expected_status_int=401)
self.logout()
def test_admin_can_save_private_exploration(self):
self.login(self.ADMIN_EMAIL)
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.private_exp_id_1)
self.assertEqual(response['exploration_id'], self.private_exp_id_1)
self.logout()
def test_voice_artist_can_only_save_assigned_exploration(self):
self.login(self.VOICE_ARTIST_EMAIL)
# Checking voice artist can only save assigned public exploration.
with self.swap(self, 'testapp', self.mock_testapp):
response = self.get_json('/mock/%s' % self.published_exp_id_1)
self.assertEqual(response['exploration_id'], self.published_exp_id_1)
        # Checking voice artist cannot save a public exploration to which
        # they are not assigned.
with self.swap(self, 'testapp', self.mock_testapp):
self.get_json(
'/mock/%s' % self.published_exp_id_2, expected_status_int=401)
self.logout()
| 42.06791 | 134 | 0.672567 |
6d2a059fff98e53f620ecbcc00e7b28b91ae46cb
| 3,076 |
py
|
Python
|
3_kmeans_linting.py
|
wbkdef/hettinger_mypy_linting_presentation
|
aa4729648bcb0291dc0e6f0a17bf663356746876
|
[
"MIT"
] | null | null | null |
3_kmeans_linting.py
|
wbkdef/hettinger_mypy_linting_presentation
|
aa4729648bcb0291dc0e6f0a17bf663356746876
|
[
"MIT"
] | null | null | null |
3_kmeans_linting.py
|
wbkdef/hettinger_mypy_linting_presentation
|
aa4729648bcb0291dc0e6f0a17bf663356746876
|
[
"MIT"
] | null | null | null |
from typing import Tuple, Iterable, Sequence, List, Dict, DefaultDict
from random import sample
from math import fsum, sqrt
from collections import defaultdict
def partial(func, *args):
"Rewrite functools.partial() in a way that doesn't confuse mypy"
def inner(*moreargs):
return func(*args, *moreargs)
return inner
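# A minimal usage sketch of the partial() rewrite above (illustrative only,
# not part of the original file): inner() simply forwards extra positional
# arguments, so for these call sites it behaves like functools.partial.
#   add = lambda a, b: a + b
#   add_two = partial(add, 2)
#   assert add_two(3) == 5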
def mean(data: Iterable[float]) -> float:
data = list(data)
return fsum(data) / len(data)
def transpose(matrix: Iterable[Iterable]) -> Iterable[tuple]:
'Swap rows with columns for a 2-D array'
return zip(*matrix)
Point = Tuple[float, ...]
Centroid = Point
# pylint: disable=redefined-outer-name, redefined-builtin
def dist(p: Point, q: Point, sqrt=sqrt, fsum=fsum, zip=zip) -> float:
'Multi-dimensional euclidean distance'
return sqrt(fsum((x1 - x2) ** 2.0 for x1, x2 in zip(p, q)))
def assign_data(centroids: Sequence[Centroid], data: Iterable[Point]) -> Dict[Centroid, Sequence[Point]]:
'Assign data the closest centroid'
d : DefaultDict[Centroid, List[Point]] = defaultdict(list)
for point in data:
centroid: Point = min(centroids, key=partial(dist, point)) # type: ignore
d[centroid].append(point)
return dict(d)
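# Illustrative sketch (assumed values, not from the original file): with
# centroids (0.0,) and (10.0,), each point is grouped under its nearest centroid.
#   assign_data([(0.0,), (10.0,)], [(1.0,), (9.0,)])
#   -> {(0.0,): [(1.0,)], (10.0,): [(9.0,)]}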
def compute_centroids(groups: Iterable[Sequence[Point]]) -> List[Centroid]:
'Compute the centroid of each group'
return [tuple(map(mean, transpose(group))) for group in groups]
def k_means(data: Iterable[Point], k:int=2, iterations:int=10) -> List[Point]:
'Return k-centroids for the data'
data = list(data)
centroids = sample(data, k)
for i in range(iterations):
labeled = assign_data(centroids, data)
centroids = compute_centroids(labeled.values())
return centroids
def quality(labeled: Dict[Centroid, Sequence[Point]]) -> float:
'Mean value of squared distances from data to its assigned centroid'
return mean(dist(c, p) ** 2 for c, pts in labeled.items() for p in pts)
def main():
from pprint import pprint
print('Simple example with six 3-D points clustered into two groups')
points = [
(10, 41, 23),
(22, 30, 29),
(11, 42, 5),
(20, 32, 4),
(12, 40, 12),
(21, 36, 23),
]
centroids = k_means(points, k=2)
pprint(assign_data(centroids, points))
print('\nExample with a richer dataset.')
print('See: https://www.datascience.com/blog/introduction-to-k-means-clustering-algorithm-learn-data-science-tutorials')
data = [
(10, 30),
(12, 50),
(14, 70),
(9, 150),
(20, 175),
(8, 200),
(14, 240),
(50, 35),
(40, 50),
(45, 60),
(55, 45),
(60, 130),
(60, 220),
(70, 150),
(60, 190),
(90, 160),
]
print('k quality')
print('- -------')
for k in range(1, 8):
centroids = k_means(data, k, iterations=20)
d = assign_data(centroids, data)
print(f'{k} {quality(d) :8,.1f}')
if __name__ == '__main__':
main() # type: ignore
| 29.295238 | 124 | 0.608583 |
7139fd713cd686ff55ece5575cc43baf40b84b80
| 262 |
py
|
Python
|
chillchamber/apps/youtube.py
|
vesche/chillchamber
|
b6adc04958203b833a08b09f8c467b80f7d62a45
|
[
"MIT"
] | 3 |
2020-11-19T15:56:28.000Z
|
2020-12-19T10:47:25.000Z
|
chillchamber/apps/youtube.py
|
vesche/chillchamber
|
b6adc04958203b833a08b09f8c467b80f7d62a45
|
[
"MIT"
] | 1 |
2020-11-19T15:56:25.000Z
|
2020-11-28T03:02:53.000Z
|
chillchamber/apps/youtube.py
|
vesche/chillchamber
|
b6adc04958203b833a08b09f8c467b80f7d62a45
|
[
"MIT"
] | null | null | null |
"""
chillchamber.apps.youtube
"""
from chillchamber.common import App, run_command
class YouTube(App):
def __init__(self):
super().__init__('YouTube')
def run(self):
run_command('/usr/bin/firefox -new-window https://www.youtube.com')
| 18.714286 | 75 | 0.671756 |
da08812e198e05e399f8373ac1ef3fe7389de318
| 2,629 |
py
|
Python
|
ninestargram_server/images/migrations/0001_initial.py
|
apJammanbo/ninestargram-server
|
6aaf5e49a4189da6df9af8afec921d97455d6841
|
[
"MIT"
] | null | null | null |
ninestargram_server/images/migrations/0001_initial.py
|
apJammanbo/ninestargram-server
|
6aaf5e49a4189da6df9af8afec921d97455d6841
|
[
"MIT"
] | 11 |
2020-09-05T20:04:21.000Z
|
2022-03-03T22:38:32.000Z
|
ninestargram_server/images/migrations/0001_initial.py
|
apJammanbo/ninestargram-server
|
6aaf5e49a4189da6df9af8afec921d97455d6841
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.8 on 2018-08-19 06:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('message', models.TextField()),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('file', models.ImageField(upload_to='')),
('location', models.CharField(max_length=140)),
('caption', models.TextField()),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='images.Image')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='comment',
name='image',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='images.Image'),
),
migrations.AddField(
model_name='comment',
name='reator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 38.661765 | 121 | 0.569038 |
789afce1a10546de99c09df5f9cbc0b9bdd9a244
| 1,133 |
py
|
Python
|
TA-linode/bin/ta_linode/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py
|
jriddle-linode/splunk-addon-linode
|
5954acd12ef88ab991365ef51072db68aed46aa1
|
[
"Apache-2.0"
] | 11 |
2020-01-23T11:32:26.000Z
|
2021-09-23T09:24:02.000Z
|
TA-linode/bin/ta_linode/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py
|
jriddle-linode/splunk-addon-linode
|
5954acd12ef88ab991365ef51072db68aed46aa1
|
[
"Apache-2.0"
] | 26 |
2019-07-15T02:38:22.000Z
|
2021-12-01T04:14:17.000Z
|
TA-linode/bin/ta_linode/aob_py3/cloudconnectlib/splunktacollectorlib/splunk_ta_import_declare.py
|
jriddle-linode/splunk-addon-linode
|
5954acd12ef88ab991365ef51072db68aed46aa1
|
[
"Apache-2.0"
] | 6 |
2019-07-14T17:44:06.000Z
|
2020-11-17T17:33:23.000Z
|
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module is used to filter and reload sys.path.
"""
import os
import sys
import re
ta_name = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
ta_lib_name = re.sub(r"[^\w]+", "_", ta_name.lower())
assert ta_name or ta_name == "package", "TA name is None or package"
pattern = re.compile(r"[\\/]etc[\\/]apps[\\/][^\\/]+[\\/]bin[\\/]?$")
new_paths = [path for path in sys.path if not pattern.search(path) or ta_name in path]
new_paths.insert(0, os.path.sep.join([os.path.dirname(__file__), ta_lib_name]))
sys.path = new_paths
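# Illustrative sketch of the filtering above (hypothetical paths): an entry
# such as "/opt/splunk/etc/apps/some_other_ta/bin" matches the pattern and is
# dropped, a path containing this add-on's name is kept, and the add-on's own
# bundled library directory is prepended so its packaged modules are found first.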
| 34.333333 | 87 | 0.721977 |
6dcf94eecb7d892d8857dd6ff9c36c2e4fa0e304
| 66,068 |
py
|
Python
|
nltk/tree.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
nltk/tree.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
nltk/tree.py
|
PhanatosZou/nltk
|
750e488569b6f80c72ae6ca74eff90eae55e6c4e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Text Trees
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com>
# Peter Ljunglöf <peter.ljunglof@gu.se>
# Nathan Bodenstab <bodenstab@cslu.ogi.edu> (tree transforms)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Class for representing hierarchical language structures, such as
syntax trees and morphological trees.
"""
import re
import sys
from abc import ABCMeta, abstractmethod
from nltk.grammar import Production, Nonterminal
from nltk.probability import ProbabilisticMixIn
from nltk.util import slice_bounds
from nltk.internals import raise_unorderable_types
# TODO: add LabelledTree (can be used for dependency trees)
######################################################################
## Trees
######################################################################
class Tree(list):
"""
A Tree represents a hierarchical grouping of leaves and subtrees.
For example, each constituent in a syntax tree is represented by a single Tree.
A tree's children are encoded as a list of leaves and subtrees,
where a leaf is a basic (non-tree) value; and a subtree is a
nested Tree.
>>> from nltk.tree import Tree
>>> print(Tree(1, [2, Tree(3, [4]), 5]))
(1 2 (3 4) 5)
>>> vp = Tree('VP', [Tree('V', ['saw']),
... Tree('NP', ['him'])])
>>> s = Tree('S', [Tree('NP', ['I']), vp])
>>> print(s)
(S (NP I) (VP (V saw) (NP him)))
>>> print(s[1])
(VP (V saw) (NP him))
>>> print(s[1,1])
(NP him)
>>> t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
>>> s == t
True
>>> t[1][1].set_label('X')
>>> t[1][1].label()
'X'
>>> print(t)
(S (NP I) (VP (V saw) (X him)))
>>> t[0], t[1,1] = t[1,1], t[0]
>>> print(t)
(S (X him) (VP (V saw) (NP I)))
The length of a tree is the number of children it has.
>>> len(t)
2
The set_label() and label() methods allow individual constituents
to be labeled. For example, syntax trees use this label to specify
phrase tags, such as "NP" and "VP".
Several Tree methods use "tree positions" to specify
children or descendants of a tree. Tree positions are defined as
follows:
- The tree position *i* specifies a Tree's *i*\ th child.
- The tree position ``()`` specifies the Tree itself.
- If *p* is the tree position of descendant *d*, then
*p+i* specifies the *i*\ th child of *d*.
I.e., every tree position is either a single index *i*,
specifying ``tree[i]``; or a sequence *i1, i2, ..., iN*,
specifying ``tree[i1][i2]...[iN]``.
Construct a new tree. This constructor can be called in one
of two ways:
- ``Tree(label, children)`` constructs a new tree with the
specified label and list of children.
- ``Tree.fromstring(s)`` constructs a new tree by parsing the string ``s``.
"""
def __init__(self, node, children=None):
if children is None:
raise TypeError(
"%s: Expected a node value and child list " % type(self).__name__
)
elif isinstance(children, str):
raise TypeError(
"%s() argument 2 should be a list, not a "
"string" % type(self).__name__
)
else:
list.__init__(self, children)
self._label = node
# ////////////////////////////////////////////////////////////
# Comparison operators
# ////////////////////////////////////////////////////////////
def __eq__(self, other):
return self.__class__ is other.__class__ and (self._label, list(self)) == (
other._label,
list(other),
)
def __lt__(self, other):
if not isinstance(other, Tree):
# raise_unorderable_types("<", self, other)
# Sometimes children can be pure strings,
# so we need to be able to compare with non-trees:
return self.__class__.__name__ < other.__class__.__name__
elif self.__class__ is other.__class__:
return (self._label, list(self)) < (other._label, list(other))
else:
return self.__class__.__name__ < other.__class__.__name__
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ne__ = lambda self, other: not self == other
__gt__ = lambda self, other: not (self < other or self == other)
__le__ = lambda self, other: self < other or self == other
__ge__ = lambda self, other: not self < other
# ////////////////////////////////////////////////////////////
# Disabled list operations
# ////////////////////////////////////////////////////////////
def __mul__(self, v):
raise TypeError("Tree does not support multiplication")
def __rmul__(self, v):
raise TypeError("Tree does not support multiplication")
def __add__(self, v):
raise TypeError("Tree does not support addition")
def __radd__(self, v):
raise TypeError("Tree does not support addition")
# ////////////////////////////////////////////////////////////
# Indexing (with support for tree positions)
# ////////////////////////////////////////////////////////////
def __getitem__(self, index):
if isinstance(index, (int, slice)):
return list.__getitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
return self
elif len(index) == 1:
return self[index[0]]
else:
return self[index[0]][index[1:]]
else:
raise TypeError(
"%s indices must be integers, not %s"
% (type(self).__name__, type(index).__name__)
)
def __setitem__(self, index, value):
if isinstance(index, (int, slice)):
return list.__setitem__(self, index, value)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError("The tree position () may not be " "assigned to.")
elif len(index) == 1:
self[index[0]] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError(
"%s indices must be integers, not %s"
% (type(self).__name__, type(index).__name__)
)
def __delitem__(self, index):
if isinstance(index, (int, slice)):
return list.__delitem__(self, index)
elif isinstance(index, (list, tuple)):
if len(index) == 0:
raise IndexError("The tree position () may not be deleted.")
elif len(index) == 1:
del self[index[0]]
else:
del self[index[0]][index[1:]]
else:
raise TypeError(
"%s indices must be integers, not %s"
% (type(self).__name__, type(index).__name__)
)
# ////////////////////////////////////////////////////////////
# Basic tree operations
# ////////////////////////////////////////////////////////////
def _get_node(self):
"""Outdated method to access the node value; use the label() method instead."""
raise NotImplementedError("Use label() to access a node label.")
def _set_node(self, value):
"""Outdated method to set the node value; use the set_label() method instead."""
raise NotImplementedError("Use set_label() method to set a node label.")
node = property(_get_node, _set_node)
def label(self):
"""
Return the node label of the tree.
>>> t = Tree.fromstring('(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))')
>>> t.label()
'S'
:return: the node label (typically a string)
:rtype: any
"""
return self._label
def set_label(self, label):
"""
Set the node label of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.set_label("T")
>>> print(t)
(T (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))
:param label: the node label (typically a string)
:type label: any
"""
self._label = label
def leaves(self):
"""
Return the leaves of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.leaves()
['the', 'dog', 'chased', 'the', 'cat']
:return: a list containing this tree's leaves.
The order reflects the order of the
leaves in the tree's hierarchical structure.
:rtype: list
"""
leaves = []
for child in self:
if isinstance(child, Tree):
leaves.extend(child.leaves())
else:
leaves.append(child)
return leaves
def flatten(self):
"""
Return a flat version of the tree, with all non-root non-terminals removed.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> print(t.flatten())
(S the dog chased the cat)
:return: a tree consisting of this tree's root connected directly to
its leaves, omitting all intervening non-terminal nodes.
:rtype: Tree
"""
return Tree(self.label(), self.leaves())
def height(self):
"""
Return the height of the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.height()
5
>>> print(t[0,0])
(D the)
>>> t[0,0].height()
2
:return: The height of this tree. The height of a tree
containing no children is 1; the height of a tree
containing only leaves is 2; and the height of any other
tree is one plus the maximum of its children's
heights.
:rtype: int
"""
max_child_height = 0
for child in self:
if isinstance(child, Tree):
max_child_height = max(max_child_height, child.height())
else:
max_child_height = max(max_child_height, 1)
return 1 + max_child_height
def treepositions(self, order="preorder"):
"""
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.treepositions() # doctest: +ELLIPSIS
[(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), ...]
>>> for pos in t.treepositions('leaves'):
... t[pos] = t[pos][::-1].upper()
>>> print(t)
(S (NP (D EHT) (N GOD)) (VP (V DESAHC) (NP (D EHT) (N TAC))))
:param order: One of: ``preorder``, ``postorder``, ``bothorder``,
``leaves``.
"""
positions = []
if order in ("preorder", "bothorder"):
positions.append(())
for i, child in enumerate(self):
if isinstance(child, Tree):
childpos = child.treepositions(order)
positions.extend((i,) + p for p in childpos)
else:
positions.append((i,))
if order in ("postorder", "bothorder"):
positions.append(())
return positions
def subtrees(self, filter=None):
"""
Generate all the subtrees of this tree, optionally restricted
to trees matching the filter function.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> for s in t.subtrees(lambda t: t.height() == 2):
... print(s)
(D the)
(N dog)
(V chased)
(D the)
(N cat)
:type filter: function
:param filter: the function to filter all local trees
"""
if not filter or filter(self):
yield self
for child in self:
if isinstance(child, Tree):
for subtree in child.subtrees(filter):
yield subtree
def productions(self):
"""
Generate the productions that correspond to the non-terminal nodes of the tree.
For each subtree of the form (P: C1 C2 ... Cn) this produces a production of the
form P -> C1 C2 ... Cn.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.productions()
[S -> NP VP, NP -> D N, D -> 'the', N -> 'dog', VP -> V NP, V -> 'chased',
NP -> D N, D -> 'the', N -> 'cat']
:rtype: list(Production)
"""
if not isinstance(self._label, str):
raise TypeError(
"Productions can only be generated from trees having node labels that are strings"
)
prods = [Production(Nonterminal(self._label), _child_names(self))]
for child in self:
if isinstance(child, Tree):
prods += child.productions()
return prods
def pos(self):
"""
Return a sequence of pos-tagged words extracted from the tree.
>>> t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
>>> t.pos()
[('the', 'D'), ('dog', 'N'), ('chased', 'V'), ('the', 'D'), ('cat', 'N')]
:return: a list of tuples containing leaves and pre-terminals (part-of-speech tags).
The order reflects the order of the leaves in the tree's hierarchical structure.
:rtype: list(tuple)
"""
pos = []
for child in self:
if isinstance(child, Tree):
pos.extend(child.pos())
else:
pos.append((child, self._label))
return pos
def leaf_treeposition(self, index):
"""
:return: The tree position of the ``index``-th leaf in this
tree. I.e., if ``tp=self.leaf_treeposition(i)``, then
``self[tp]==self.leaves()[i]``.
:raise IndexError: If this tree contains fewer than ``index+1``
leaves, or if ``index<0``.
"""
if index < 0:
raise IndexError("index must be non-negative")
stack = [(self, ())]
while stack:
value, treepos = stack.pop()
if not isinstance(value, Tree):
if index == 0:
return treepos
else:
index -= 1
else:
for i in range(len(value) - 1, -1, -1):
stack.append((value[i], treepos + (i,)))
raise IndexError("index must be less than or equal to len(self)")
def treeposition_spanning_leaves(self, start, end):
"""
:return: The tree position of the lowest descendant of this
tree that dominates ``self.leaves()[start:end]``.
:raise ValueError: if ``end <= start``
"""
if end <= start:
raise ValueError("end must be greater than start")
# Find the tree positions of the start & end leaves, and
# take the longest common subsequence.
start_treepos = self.leaf_treeposition(start)
end_treepos = self.leaf_treeposition(end - 1)
# Find the first index where they mismatch:
for i in range(len(start_treepos)):
if i == len(end_treepos) or start_treepos[i] != end_treepos[i]:
return start_treepos[:i]
return start_treepos
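    # Illustrative sketch (assumed values, not an original doctest):
    #   t = Tree.fromstring("(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
    #   t.leaf_treeposition(1)                # -> (0, 1, 0), the leaf 'dog'
    #   t.treeposition_spanning_leaves(0, 2)  # -> (0,), the NP spanning 'the dog'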
# ////////////////////////////////////////////////////////////
# Transforms
# ////////////////////////////////////////////////////////////
def chomsky_normal_form(
self,
factor="right",
horzMarkov=None,
vertMarkov=0,
childChar="|",
parentChar="^",
):
"""
This method can modify a tree in three ways:
1. Convert a tree into its Chomsky Normal Form (CNF)
equivalent -- Every subtree has either two non-terminals
or one terminal as its children. This process requires
             the creation of more "artificial" non-terminal nodes.
          2. Markov (horizontal) smoothing of children in new artificial
             nodes
          3. Vertical (parent) annotation of nodes
:param factor: Right or left factoring method (default = "right")
:type factor: str = [left|right]
:param horzMarkov: Markov order for sibling smoothing in artificial nodes (None (default) = include all siblings)
:type horzMarkov: int | None
:param vertMarkov: Markov order for parent smoothing (0 (default) = no vertical annotation)
:type vertMarkov: int | None
:param childChar: A string used in construction of the artificial nodes, separating the head of the
original subtree from the child nodes that have yet to be expanded (default = "|")
:type childChar: str
:param parentChar: A string used to separate the node representation from its vertical annotation
:type parentChar: str
"""
from nltk.treetransforms import chomsky_normal_form
chomsky_normal_form(self, factor, horzMarkov, vertMarkov, childChar, parentChar)
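    # Illustrative sketch (assumed output shape; the exact artificial labels
    # depend on the factoring options passed above):
    #   t = Tree.fromstring("(S (A a) (B b) (C c))")
    #   t.chomsky_normal_form()
    #   print(t)  # e.g. (S (A a) (S|<B-C> (B b) (C c))) with right factoring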
def un_chomsky_normal_form(
self, expandUnary=True, childChar="|", parentChar="^", unaryChar="+"
):
"""
This method modifies the tree in three ways:
1. Transforms a tree in Chomsky Normal Form back to its
original structure (branching greater than two)
2. Removes any parent annotation (if it exists)
3. (optional) expands unary subtrees (if previously
collapsed with collapseUnary(...) )
:param expandUnary: Flag to expand unary or not (default = True)
:type expandUnary: bool
:param childChar: A string separating the head node from its children in an artificial node (default = "|")
:type childChar: str
        :param parentChar: A string separating the node label from its parent annotation (default = "^")
:type parentChar: str
:param unaryChar: A string joining two non-terminals in a unary production (default = "+")
:type unaryChar: str
"""
from nltk.treetransforms import un_chomsky_normal_form
un_chomsky_normal_form(self, expandUnary, childChar, parentChar, unaryChar)
def collapse_unary(self, collapsePOS=False, collapseRoot=False, joinChar="+"):
"""
        Collapse subtrees with a single child (i.e. unary productions)
into a new non-terminal (Tree node) joined by 'joinChar'.
This is useful when working with algorithms that do not allow
unary productions, and completely removing the unary productions
would require loss of useful information. The Tree is modified
directly (since it is passed by reference) and no value is returned.
        :param collapsePOS: 'False' (default) will not collapse the parent of leaf nodes (i.e.
Part-of-Speech tags) since they are always unary productions
:type collapsePOS: bool
:param collapseRoot: 'False' (default) will not modify the root production
if it is unary. For the Penn WSJ treebank corpus, this corresponds
to the TOP -> productions.
:type collapseRoot: bool
:param joinChar: A string used to connect collapsed node values (default = "+")
:type joinChar: str
"""
from nltk.treetransforms import collapse_unary
collapse_unary(self, collapsePOS, collapseRoot, joinChar)
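    # Illustrative sketch (assumed labels): collapsing (S (NP (D the))) with
    # collapsePOS=True and the default joinChar yields (S (NP+D the)); with the
    # defaults the pre-terminal D is left untouched.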
# ////////////////////////////////////////////////////////////
# Convert, copy
# ////////////////////////////////////////////////////////////
@classmethod
def convert(cls, tree):
"""
Convert a tree between different subtypes of Tree. ``cls`` determines
which class will be used to encode the new tree.
:type tree: Tree
:param tree: The tree that should be converted.
:return: The new Tree.
"""
if isinstance(tree, Tree):
children = [cls.convert(child) for child in tree]
return cls(tree._label, children)
else:
return tree
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.copy(deep=True)
    def copy(self, deep=False):
        """
        Return a copy of this tree: shallow by default, or a full deep
        copy of the whole structure if ``deep`` is True.
        """
if not deep:
return type(self)(self._label, self)
else:
return type(self).convert(self)
def _frozen_class(self):
return ImmutableTree
    def freeze(self, leaf_freezer=None):
        """
        Return an immutable (and therefore hashable) copy of this tree.
        If ``leaf_freezer`` is given, it is first applied to every leaf.
        """
frozen_class = self._frozen_class()
if leaf_freezer is None:
newcopy = frozen_class.convert(self)
else:
newcopy = self.copy(deep=True)
for pos in newcopy.treepositions("leaves"):
newcopy[pos] = leaf_freezer(newcopy[pos])
newcopy = frozen_class.convert(newcopy)
hash(newcopy) # Make sure the leaves are hashable.
return newcopy
# ////////////////////////////////////////////////////////////
# Parsing
# ////////////////////////////////////////////////////////////
@classmethod
def fromstring(
cls,
s,
brackets="()",
read_node=None,
read_leaf=None,
node_pattern=None,
leaf_pattern=None,
remove_empty_top_bracketing=False,
):
"""
Read a bracketed tree string and return the resulting tree.
Trees are represented as nested brackettings, such as::
(S (NP (NNP John)) (VP (V runs)))
:type s: str
:param s: The string to read
:type brackets: str (length=2)
:param brackets: The bracket characters used to mark the
beginning and end of trees and subtrees.
:type read_node: function
:type read_leaf: function
:param read_node, read_leaf: If specified, these functions
are applied to the substrings of ``s`` corresponding to
nodes and leaves (respectively) to obtain the values for
those nodes and leaves. They should have the following
signature:
read_node(str) -> value
For example, these functions could be used to process nodes
and leaves whose values should be some type other than
string (such as ``FeatStruct``).
Note that by default, node strings and leaf strings are
delimited by whitespace and brackets; to override this
default, use the ``node_pattern`` and ``leaf_pattern``
arguments.
:type node_pattern: str
:type leaf_pattern: str
:param node_pattern, leaf_pattern: Regular expression patterns
used to find node and leaf substrings in ``s``. By
default, both nodes patterns are defined to match any
sequence of non-whitespace non-bracket characters.
:type remove_empty_top_bracketing: bool
:param remove_empty_top_bracketing: If the resulting tree has
an empty node label, and is length one, then return its
single child instead. This is useful for treebank trees,
which sometimes contain an extra level of bracketing.
:return: A tree corresponding to the string representation ``s``.
If this class method is called using a subclass of Tree,
then it will return a tree of that type.
:rtype: Tree
"""
if not isinstance(brackets, str) or len(brackets) != 2:
raise TypeError("brackets must be a length-2 string")
        if re.search(r"\s", brackets):
raise TypeError("whitespace brackets not allowed")
# Construct a regexp that will tokenize the string.
open_b, close_b = brackets
open_pattern, close_pattern = (re.escape(open_b), re.escape(close_b))
        if node_pattern is None:
            node_pattern = r"[^\s%s%s]+" % (open_pattern, close_pattern)
        if leaf_pattern is None:
            leaf_pattern = r"[^\s%s%s]+" % (open_pattern, close_pattern)
token_re = re.compile(
"%s\s*(%s)?|%s|(%s)"
% (open_pattern, node_pattern, close_pattern, leaf_pattern)
)
# Walk through each token, updating a stack of trees.
stack = [(None, [])] # list of (node, children) tuples
for match in token_re.finditer(s):
token = match.group()
# Beginning of a tree/subtree
if token[0] == open_b:
if len(stack) == 1 and len(stack[0][1]) > 0:
cls._parse_error(s, match, "end-of-string")
label = token[1:].lstrip()
if read_node is not None:
label = read_node(label)
stack.append((label, []))
# End of a tree/subtree
elif token == close_b:
if len(stack) == 1:
if len(stack[0][1]) == 0:
cls._parse_error(s, match, open_b)
else:
cls._parse_error(s, match, "end-of-string")
label, children = stack.pop()
stack[-1][1].append(cls(label, children))
# Leaf node
else:
if len(stack) == 1:
cls._parse_error(s, match, open_b)
if read_leaf is not None:
token = read_leaf(token)
stack[-1][1].append(token)
# check that we got exactly one complete tree.
if len(stack) > 1:
cls._parse_error(s, "end-of-string", close_b)
elif len(stack[0][1]) == 0:
cls._parse_error(s, "end-of-string", open_b)
else:
assert stack[0][0] is None
assert len(stack[0][1]) == 1
tree = stack[0][1][0]
# If the tree has an extra level with node='', then get rid of
# it. E.g.: "((S (NP ...) (VP ...)))"
if remove_empty_top_bracketing and tree._label == "" and len(tree) == 1:
tree = tree[0]
# return the tree.
return tree
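    # Illustrative sketch of the reader hooks (assumed values, not an original
    # doctest): read_node/read_leaf post-process node and leaf substrings.
    #   Tree.fromstring("(s (np I) (vp (v saw) (np him)))",
    #                   read_node=str.upper, read_leaf=str.upper)
    #   -> Tree('S', [Tree('NP', ['I']), Tree('VP', [Tree('V', ['SAW']), Tree('NP', ['HIM'])])])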
@classmethod
def _parse_error(cls, s, match, expecting):
"""
Display a friendly error message when parsing a tree string fails.
:param s: The string we're parsing.
:param match: regexp match of the problem token.
:param expecting: what we expected to see instead.
"""
# Construct a basic error message
if match == "end-of-string":
pos, token = len(s), "end-of-string"
else:
pos, token = match.start(), match.group()
msg = "%s.read(): expected %r but got %r\n%sat index %d." % (
cls.__name__,
expecting,
token,
" " * 12,
pos,
)
        # Add a display showing the error token itself:
s = s.replace("\n", " ").replace("\t", " ")
offset = pos
if len(s) > pos + 10:
s = s[: pos + 10] + "..."
if pos > 10:
s = "..." + s[pos - 10 :]
offset = 13
msg += '\n%s"%s"\n%s^' % (" " * 16, s, " " * (17 + offset))
raise ValueError(msg)
# ////////////////////////////////////////////////////////////
# Visualization & String Representation
# ////////////////////////////////////////////////////////////
def draw(self):
"""
Open a new window containing a graphical diagram of this tree.
"""
from nltk.draw.tree import draw_trees
draw_trees(self)
def pretty_print(self, sentence=None, highlight=(), stream=None, **kwargs):
"""
Pretty-print this tree as ASCII or Unicode art.
For explanation of the arguments, see the documentation for
`nltk.treeprettyprinter.TreePrettyPrinter`.
"""
from nltk.treeprettyprinter import TreePrettyPrinter
print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs), file=stream)
def __repr__(self):
childstr = ", ".join(repr(c) for c in self)
return "%s(%s, [%s])" % (
type(self).__name__,
repr(self._label),
childstr,
)
def _repr_png_(self):
"""
Draws and outputs in PNG for ipython.
PNG is used instead of PDF, since it can be displayed in the qt console and
has wider browser support.
"""
import os
import base64
import subprocess
import tempfile
from nltk.draw.tree import tree_to_treesegment
from nltk.draw.util import CanvasFrame
from nltk.internals import find_binary
_canvas_frame = CanvasFrame()
widget = tree_to_treesegment(_canvas_frame.canvas(), self)
_canvas_frame.add_widget(widget)
x, y, w, h = widget.bbox()
# print_to_file uses scrollregion to set the width and height of the pdf.
_canvas_frame.canvas()["scrollregion"] = (0, 0, w, h)
with tempfile.NamedTemporaryFile() as file:
in_path = "{0:}.ps".format(file.name)
out_path = "{0:}.png".format(file.name)
_canvas_frame.print_to_file(in_path)
_canvas_frame.destroy_widget(widget)
try:
subprocess.call(
[
find_binary(
"gs",
binary_names=["gswin32c.exe", "gswin64c.exe"],
env_vars=["PATH"],
verbose=False,
)
]
+ "-q -dEPSCrop -sDEVICE=png16m -r90 -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -dSAFER -dBATCH -dNOPAUSE -sOutputFile={0:} {1:}".format(
out_path, in_path
).split()
)
except LookupError:
pre_error_message = str(
"The Ghostscript executable isn't found.\n"
"See http://web.mit.edu/ghostscript/www/Install.htm\n"
"If you're using a Mac, you can try installing\n"
"https://docs.brew.sh/Installation then `brew install ghostscript`"
)
print(pre_error_message, file=sys.stderr)
raise LookupError
with open(out_path, "rb") as sr:
res = sr.read()
os.remove(in_path)
os.remove(out_path)
return base64.b64encode(res).decode()
def __str__(self):
return self.pformat()
def pprint(self, **kwargs):
"""
Print a string representation of this Tree to 'stream'
"""
if "stream" in kwargs:
stream = kwargs["stream"]
del kwargs["stream"]
else:
stream = None
print(self.pformat(**kwargs), file=stream)
def pformat(self, margin=70, indent=0, nodesep="", parens="()", quotes=False):
"""
:return: A pretty-printed string representation of this tree.
:rtype: str
:param margin: The right margin at which to do line-wrapping.
:type margin: int
:param indent: The indentation level at which printing
begins. This number is used to decide how far to indent
subsequent lines.
:type indent: int
:param nodesep: A string that is used to separate the node
            from the children.  E.g., passing ``nodesep=':'`` gives
trees like ``(S: (NP: I) (VP: (V: saw) (NP: it)))``.
"""
# Try writing it on one line.
s = self._pformat_flat(nodesep, parens, quotes)
if len(s) + indent < margin:
return s
# If it doesn't fit on one line, then write it on multi-lines.
if isinstance(self._label, str):
s = "%s%s%s" % (parens[0], self._label, nodesep)
else:
s = "%s%s%s" % (parens[0], repr(self._label), nodesep)
for child in self:
if isinstance(child, Tree):
s += (
"\n"
+ " " * (indent + 2)
+ child.pformat(margin, indent + 2, nodesep, parens, quotes)
)
elif isinstance(child, tuple):
s += "\n" + " " * (indent + 2) + "/".join(child)
elif isinstance(child, str) and not quotes:
s += "\n" + " " * (indent + 2) + "%s" % child
else:
s += "\n" + " " * (indent + 2) + repr(child)
return s + parens[1]
def pformat_latex_qtree(self):
r"""
Returns a representation of the tree compatible with the
LaTeX qtree package. This consists of the string ``\Tree``
followed by the tree represented in bracketed notation.
For example, the following result was generated from a parse tree of
the sentence ``The announcement astounded us``::
\Tree [.I'' [.N'' [.D The ] [.N' [.N announcement ] ] ]
[.I' [.V'' [.V' [.V astounded ] [.N'' [.N' [.N us ] ] ] ] ] ] ]
See http://www.ling.upenn.edu/advice/latex.html for the LaTeX
style file for the qtree package.
:return: A latex qtree representation of this tree.
:rtype: str
"""
        reserved_chars = re.compile(r"([#\$%&~_\{\}])")
pformat = self.pformat(indent=6, nodesep="", parens=("[.", " ]"))
return r"\Tree " + re.sub(reserved_chars, r"\\\1", pformat)
def _pformat_flat(self, nodesep, parens, quotes):
childstrs = []
for child in self:
if isinstance(child, Tree):
childstrs.append(child._pformat_flat(nodesep, parens, quotes))
elif isinstance(child, tuple):
childstrs.append("/".join(child))
elif isinstance(child, str) and not quotes:
childstrs.append("%s" % child)
else:
childstrs.append(repr(child))
if isinstance(self._label, str):
return "%s%s%s %s%s" % (
parens[0],
self._label,
nodesep,
" ".join(childstrs),
parens[1],
)
else:
return "%s%s%s %s%s" % (
parens[0],
repr(self._label),
nodesep,
" ".join(childstrs),
parens[1],
)
class ImmutableTree(Tree):
def __init__(self, node, children=None):
super(ImmutableTree, self).__init__(node, children)
# Precompute our hash value. This ensures that we're really
# immutable. It also means we only have to calculate it once.
try:
self._hash = hash((self._label, tuple(self)))
except (TypeError, ValueError):
raise ValueError(
"%s: node value and children " "must be immutable" % type(self).__name__
)
def __setitem__(self, index, value):
raise ValueError("%s may not be modified" % type(self).__name__)
def __setslice__(self, i, j, value):
raise ValueError("%s may not be modified" % type(self).__name__)
def __delitem__(self, index):
raise ValueError("%s may not be modified" % type(self).__name__)
def __delslice__(self, i, j):
raise ValueError("%s may not be modified" % type(self).__name__)
def __iadd__(self, other):
raise ValueError("%s may not be modified" % type(self).__name__)
def __imul__(self, other):
raise ValueError("%s may not be modified" % type(self).__name__)
def append(self, v):
raise ValueError("%s may not be modified" % type(self).__name__)
def extend(self, v):
raise ValueError("%s may not be modified" % type(self).__name__)
def pop(self, v=None):
raise ValueError("%s may not be modified" % type(self).__name__)
def remove(self, v):
raise ValueError("%s may not be modified" % type(self).__name__)
def reverse(self):
raise ValueError("%s may not be modified" % type(self).__name__)
def sort(self):
raise ValueError("%s may not be modified" % type(self).__name__)
def __hash__(self):
return self._hash
def set_label(self, value):
"""
Set the node label. This will only succeed the first time the
node label is set, which should occur in ImmutableTree.__init__().
"""
if hasattr(self, "_label"):
raise ValueError("%s may not be modified" % type(self).__name__)
self._label = value
######################################################################
## Parented trees
######################################################################
class AbstractParentedTree(Tree, metaclass=ABCMeta):
"""
An abstract base class for a ``Tree`` that automatically maintains
pointers to parent nodes. These parent pointers are updated
whenever any change is made to a tree's structure. Two subclasses
are currently defined:
- ``ParentedTree`` is used for tree structures where each subtree
has at most one parent. This class should be used in cases
        where there is no "sharing" of subtrees.
- ``MultiParentedTree`` is used for tree structures where a
subtree may have zero or more parents. This class should be
used in cases where subtrees may be shared.
Subclassing
===========
The ``AbstractParentedTree`` class redefines all operations that
modify a tree's structure to call two methods, which are used by
subclasses to update parent information:
- ``_setparent()`` is called whenever a new child is added.
- ``_delparent()`` is called whenever a child is removed.
"""
def __init__(self, node, children=None):
super(AbstractParentedTree, self).__init__(node, children)
# If children is None, the tree is read from node, and
# all parents will be set during parsing.
if children is not None:
# Otherwise we have to set the parent of the children.
# Iterate over self, and *not* children, because children
# might be an iterator.
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i, dry_run=True)
for i, child in enumerate(self):
if isinstance(child, Tree):
self._setparent(child, i)
# ////////////////////////////////////////////////////////////
# Parent management
# ////////////////////////////////////////////////////////////
@abstractmethod
def _setparent(self, child, index, dry_run=False):
"""
Update the parent pointer of ``child`` to point to ``self``. This
method is only called if the type of ``child`` is ``Tree``;
i.e., it is not called when adding a leaf to a tree. This method
is always called before the child is actually added to the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
        :raise TypeError: If ``child`` is a tree with an inappropriate
type. Typically, if ``child`` is a tree, then its type needs
to match the type of ``self``. This prevents mixing of
different tree types (single-parented, multi-parented, and
non-parented).
        :param dry_run: If true, then don't actually set the child's
parent pointer; just check for any error conditions, and
raise an exception if one is found.
"""
@abstractmethod
def _delparent(self, child, index):
"""
Update the parent pointer of ``child`` to not point to self. This
method is only called if the type of ``child`` is ``Tree``; i.e., it
is not called when removing a leaf from a tree. This method
is always called before the child is actually removed from the
child list of ``self``.
:type child: Tree
:type index: int
:param index: The index of ``child`` in ``self``.
"""
# ////////////////////////////////////////////////////////////
# Methods that add/remove children
# ////////////////////////////////////////////////////////////
# Every method that adds or removes a child must make
# appropriate calls to _setparent() and _delparent().
def __delitem__(self, index):
# del ptree[start:stop]
if isinstance(index, slice):
start, stop, step = slice_bounds(self, index, allow_step=True)
# Clear all the children pointers.
for i in range(start, stop, step):
if isinstance(self[i], Tree):
self._delparent(self[i], i)
# Delete the children from our child list.
super(AbstractParentedTree, self).__delitem__(index)
# del ptree[i]
elif isinstance(index, int):
if index < 0:
index += len(self)
if index < 0:
raise IndexError("index out of range")
# Clear the child's parent pointer.
if isinstance(self[index], Tree):
self._delparent(self[index], index)
# Remove the child from our child list.
super(AbstractParentedTree, self).__delitem__(index)
elif isinstance(index, (list, tuple)):
# del ptree[()]
if len(index) == 0:
raise IndexError("The tree position () may not be deleted.")
# del ptree[(i,)]
elif len(index) == 1:
del self[index[0]]
# del ptree[i1, i2, i3]
else:
del self[index[0]][index[1:]]
else:
raise TypeError(
"%s indices must be integers, not %s"
% (type(self).__name__, type(index).__name__)
)
def __setitem__(self, index, value):
# ptree[start:stop] = value
if isinstance(index, slice):
start, stop, step = slice_bounds(self, index, allow_step=True)
# make a copy of value, in case it's an iterator
if not isinstance(value, (list, tuple)):
value = list(value)
# Check for any error conditions, so we can avoid ending
# up in an inconsistent state if an error does occur.
for i, child in enumerate(value):
if isinstance(child, Tree):
self._setparent(child, start + i * step, dry_run=True)
            # clear the parent pointers of all children we're removing
for i in range(start, stop, step):
if isinstance(self[i], Tree):
self._delparent(self[i], i)
            # set the parent pointers of the new children. We do this
            # after clearing *all* old parent pointers, in case we're e.g.
# reversing the elements in a tree.
for i, child in enumerate(value):
if isinstance(child, Tree):
self._setparent(child, start + i * step)
# finally, update the content of the child list itself.
super(AbstractParentedTree, self).__setitem__(index, value)
# ptree[i] = value
elif isinstance(index, int):
if index < 0:
index += len(self)
if index < 0:
raise IndexError("index out of range")
# if the value is not changing, do nothing.
if value is self[index]:
return
# Set the new child's parent pointer.
if isinstance(value, Tree):
self._setparent(value, index)
# Remove the old child's parent pointer
if isinstance(self[index], Tree):
self._delparent(self[index], index)
# Update our child list.
super(AbstractParentedTree, self).__setitem__(index, value)
elif isinstance(index, (list, tuple)):
# ptree[()] = value
if len(index) == 0:
raise IndexError("The tree position () may not be assigned to.")
# ptree[(i,)] = value
elif len(index) == 1:
self[index[0]] = value
# ptree[i1, i2, i3] = value
else:
self[index[0]][index[1:]] = value
else:
raise TypeError(
"%s indices must be integers, not %s"
% (type(self).__name__, type(index).__name__)
)
def append(self, child):
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def extend(self, children):
for child in children:
if isinstance(child, Tree):
self._setparent(child, len(self))
super(AbstractParentedTree, self).append(child)
def insert(self, index, child):
# Handle negative indexes. Note that if index < -len(self),
# we do *not* raise an IndexError, unlike __getitem__. This
# is done for consistency with list.__getitem__ and list.index.
if index < 0:
index += len(self)
if index < 0:
index = 0
# Set the child's parent, and update our child list.
if isinstance(child, Tree):
self._setparent(child, index)
super(AbstractParentedTree, self).insert(index, child)
def pop(self, index=-1):
if index < 0:
index += len(self)
if index < 0:
raise IndexError("index out of range")
if isinstance(self[index], Tree):
self._delparent(self[index], index)
return super(AbstractParentedTree, self).pop(index)
# n.b.: like `list`, this is done by equality, not identity!
# To remove a specific child, use del ptree[i].
def remove(self, child):
index = self.index(child)
if isinstance(self[index], Tree):
self._delparent(self[index], index)
super(AbstractParentedTree, self).remove(child)
# We need to implement __getslice__ and friends, even though
# they're deprecated, because otherwise list.__getslice__ will get
# called (since we're subclassing from list). Just delegate to
    # __getitem__ etc., but use max(0, start) and max(0, stop) because
    # negative indices are already handled *before*
# __getslice__ is called; and we don't want to double-count them.
if hasattr(list, "__getslice__"):
def __getslice__(self, start, stop):
return self.__getitem__(slice(max(0, start), max(0, stop)))
def __delslice__(self, start, stop):
return self.__delitem__(slice(max(0, start), max(0, stop)))
def __setslice__(self, start, stop, value):
return self.__setitem__(slice(max(0, start), max(0, stop)), value)
class ParentedTree(AbstractParentedTree):
"""
A ``Tree`` that automatically maintains parent pointers for
single-parented trees. The following are methods for querying
the structure of a parented tree: ``parent``, ``parent_index``,
``left_sibling``, ``right_sibling``, ``root``, ``treeposition``.
Each ``ParentedTree`` may have at most one parent. In
particular, subtrees may not be shared. Any attempt to reuse a
single ``ParentedTree`` as a child of more than one parent (or
as multiple children of the same parent) will cause a
``ValueError`` exception to be raised.
``ParentedTrees`` should never be used in the same tree as ``Trees``
or ``MultiParentedTrees``. Mixing tree implementations may result
in incorrect parent pointers and in ``TypeError`` exceptions.
"""
def __init__(self, node, children=None):
self._parent = None
"""The parent of this Tree, or None if it has no parent."""
super(ParentedTree, self).__init__(node, children)
if children is None:
# If children is None, the tree is read from node.
# After parsing, the parent of the immediate children
# will point to an intermediate tree, not self.
# We fix this by brute force:
for i, child in enumerate(self):
if isinstance(child, Tree):
child._parent = None
self._setparent(child, i)
def _frozen_class(self):
return ImmutableParentedTree
# /////////////////////////////////////////////////////////////////
# Methods
# /////////////////////////////////////////////////////////////////
def parent(self):
"""The parent of this tree, or None if it has no parent."""
return self._parent
def parent_index(self):
"""
The index of this tree in its parent. I.e.,
``ptree.parent()[ptree.parent_index()] is ptree``. Note that
``ptree.parent_index()`` is not necessarily equal to
        ``ptree.parent().index(ptree)``, since the ``index()`` method
returns the first child that is equal to its argument.
"""
if self._parent is None:
return None
for i, child in enumerate(self._parent):
if child is self:
return i
assert False, "expected to find self in self._parent!"
def left_sibling(self):
"""The left sibling of this tree, or None if it has none."""
parent_index = self.parent_index()
if self._parent and parent_index > 0:
return self._parent[parent_index - 1]
return None # no left sibling
def right_sibling(self):
"""The right sibling of this tree, or None if it has none."""
parent_index = self.parent_index()
if self._parent and parent_index < (len(self._parent) - 1):
return self._parent[parent_index + 1]
return None # no right sibling
def root(self):
"""
The root of this tree. I.e., the unique ancestor of this tree
whose parent is None. If ``ptree.parent()`` is None, then
``ptree`` is its own root.
"""
root = self
while root.parent() is not None:
root = root.parent()
return root
def treeposition(self):
"""
The tree position of this tree, relative to the root of the
tree. I.e., ``ptree.root[ptree.treeposition] is ptree``.
"""
if self.parent() is None:
return ()
else:
return self.parent().treeposition() + (self.parent_index(),)
# /////////////////////////////////////////////////////////////////
# Parent Management
# /////////////////////////////////////////////////////////////////
def _delparent(self, child, index):
# Sanity checks
assert isinstance(child, ParentedTree)
assert self[index] is child
assert child._parent is self
# Delete child's parent pointer.
child._parent = None
def _setparent(self, child, index, dry_run=False):
# If the child's type is incorrect, then complain.
if not isinstance(child, ParentedTree):
raise TypeError(
"Can not insert a non-ParentedTree " + "into a ParentedTree"
)
# If child already has a parent, then complain.
if child._parent is not None:
raise ValueError("Can not insert a subtree that already " "has a parent.")
# Set child's parent pointer & index.
if not dry_run:
child._parent = self
class MultiParentedTree(AbstractParentedTree):
"""
A ``Tree`` that automatically maintains parent pointers for
multi-parented trees. The following are methods for querying the
structure of a multi-parented tree: ``parents()``, ``parent_indices()``,
    ``left_siblings()``, ``right_siblings()``, ``roots()``, ``treepositions()``.
Each ``MultiParentedTree`` may have zero or more parents. In
particular, subtrees may be shared. If a single
``MultiParentedTree`` is used as multiple children of the same
parent, then that parent will appear multiple times in its
``parents()`` method.
``MultiParentedTrees`` should never be used in the same tree as
``Trees`` or ``ParentedTrees``. Mixing tree implementations may
result in incorrect parent pointers and in ``TypeError`` exceptions.
"""
def __init__(self, node, children=None):
self._parents = []
"""A list of this tree's parents. This list should not
contain duplicates, even if a parent contains this tree
multiple times."""
super(MultiParentedTree, self).__init__(node, children)
if children is None:
# If children is None, the tree is read from node.
# After parsing, the parent(s) of the immediate children
# will point to an intermediate tree, not self.
# We fix this by brute force:
for i, child in enumerate(self):
if isinstance(child, Tree):
child._parents = []
self._setparent(child, i)
def _frozen_class(self):
return ImmutableMultiParentedTree
# /////////////////////////////////////////////////////////////////
# Methods
# /////////////////////////////////////////////////////////////////
def parents(self):
"""
The set of parents of this tree. If this tree has no parents,
then ``parents`` is the empty set. To check if a tree is used
as multiple children of the same parent, use the
``parent_indices()`` method.
:type: list(MultiParentedTree)
"""
return list(self._parents)
def left_siblings(self):
"""
A list of all left siblings of this tree, in any of its parent
trees. A tree may be its own left sibling if it is used as
multiple contiguous children of the same parent. A tree may
appear multiple times in this list if it is the left sibling
of this tree with respect to multiple parents.
:type: list(MultiParentedTree)
"""
return [
parent[index - 1]
for (parent, index) in self._get_parent_indices()
if index > 0
]
def right_siblings(self):
"""
A list of all right siblings of this tree, in any of its parent
trees. A tree may be its own right sibling if it is used as
multiple contiguous children of the same parent. A tree may
appear multiple times in this list if it is the right sibling
of this tree with respect to multiple parents.
:type: list(MultiParentedTree)
"""
return [
parent[index + 1]
for (parent, index) in self._get_parent_indices()
if index < (len(parent) - 1)
]
def _get_parent_indices(self):
return [
(parent, index)
for parent in self._parents
for index, child in enumerate(parent)
if child is self
]
def roots(self):
"""
The set of all roots of this tree. This set is formed by
tracing all possible parent paths until trees with no parents
are found.
:type: list(MultiParentedTree)
"""
return list(self._get_roots_helper({}).values())
def _get_roots_helper(self, result):
if self._parents:
for parent in self._parents:
parent._get_roots_helper(result)
else:
result[id(self)] = self
return result
def parent_indices(self, parent):
"""
Return a list of the indices where this tree occurs as a child
        of ``parent``. If this tree does not occur as a child of
``parent``, then the empty list is returned. The following is
always true::
for parent_index in ptree.parent_indices(parent):
parent[parent_index] is ptree
"""
if parent not in self._parents:
return []
else:
return [index for (index, child) in enumerate(parent) if child is self]
def treepositions(self, root):
"""
Return a list of all tree positions that can be used to reach
this multi-parented tree starting from ``root``. I.e., the
following is always true::
for treepos in ptree.treepositions(root):
root[treepos] is ptree
"""
if self is root:
return [()]
else:
return [
treepos + (index,)
for parent in self._parents
for treepos in parent.treepositions(root)
for (index, child) in enumerate(parent)
if child is self
]
# /////////////////////////////////////////////////////////////////
# Parent Management
# /////////////////////////////////////////////////////////////////
def _delparent(self, child, index):
# Sanity checks
assert isinstance(child, MultiParentedTree)
assert self[index] is child
assert len([p for p in child._parents if p is self]) == 1
# If the only copy of child in self is at index, then delete
# self from child's parent list.
for i, c in enumerate(self):
if c is child and i != index:
break
else:
child._parents.remove(self)
def _setparent(self, child, index, dry_run=False):
# If the child's type is incorrect, then complain.
if not isinstance(child, MultiParentedTree):
raise TypeError(
"Can not insert a non-MultiParentedTree " + "into a MultiParentedTree"
)
# Add self as a parent pointer if it's not already listed.
if not dry_run:
for parent in child._parents:
if parent is self:
break
else:
child._parents.append(self)
class ImmutableParentedTree(ImmutableTree, ParentedTree):
pass
class ImmutableMultiParentedTree(ImmutableTree, MultiParentedTree):
pass
######################################################################
## Probabilistic trees
######################################################################
class ProbabilisticTree(Tree, ProbabilisticMixIn):
def __init__(self, node, children=None, **prob_kwargs):
Tree.__init__(self, node, children)
ProbabilisticMixIn.__init__(self, **prob_kwargs)
# We have to patch up these methods to make them work right:
def _frozen_class(self):
return ImmutableProbabilisticTree
def __repr__(self):
return "%s (p=%r)" % (Tree.__repr__(self), self.prob())
def __str__(self):
return "%s (p=%.6g)" % (self.pformat(margin=60), self.prob())
def copy(self, deep=False):
if not deep:
return type(self)(self._label, self, prob=self.prob())
else:
return type(self).convert(self)
@classmethod
def convert(cls, val):
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
if isinstance(val, ProbabilisticMixIn):
return cls(val._label, children, prob=val.prob())
else:
return cls(val._label, children, prob=1.0)
else:
return val
def __eq__(self, other):
return self.__class__ is other.__class__ and (
self._label,
list(self),
self.prob(),
) == (other._label, list(other), other.prob())
def __lt__(self, other):
if not isinstance(other, Tree):
raise_unorderable_types("<", self, other)
if self.__class__ is other.__class__:
return (self._label, list(self), self.prob()) < (
other._label,
list(other),
other.prob(),
)
else:
return self.__class__.__name__ < other.__class__.__name__
class ImmutableProbabilisticTree(ImmutableTree, ProbabilisticMixIn):
def __init__(self, node, children=None, **prob_kwargs):
ImmutableTree.__init__(self, node, children)
ProbabilisticMixIn.__init__(self, **prob_kwargs)
self._hash = hash((self._label, tuple(self), self.prob()))
# We have to patch up these methods to make them work right:
def _frozen_class(self):
return ImmutableProbabilisticTree
def __repr__(self):
return "%s [%s]" % (Tree.__repr__(self), self.prob())
def __str__(self):
return "%s [%s]" % (self.pformat(margin=60), self.prob())
def copy(self, deep=False):
if not deep:
return type(self)(self._label, self, prob=self.prob())
else:
return type(self).convert(self)
@classmethod
def convert(cls, val):
if isinstance(val, Tree):
children = [cls.convert(child) for child in val]
if isinstance(val, ProbabilisticMixIn):
return cls(val._label, children, prob=val.prob())
else:
return cls(val._label, children, prob=1.0)
else:
return val
def _child_names(tree):
names = []
for child in tree:
if isinstance(child, Tree):
names.append(Nonterminal(child._label))
else:
names.append(child)
return names
######################################################################
## Parsing
######################################################################
def bracket_parse(s):
"""
Use Tree.read(s, remove_empty_top_bracketing=True) instead.
"""
raise NameError("Use Tree.read(s, remove_empty_top_bracketing=True) instead.")
def sinica_parse(s):
"""
    Parse a Sinica Treebank string and return a tree. Trees are represented as nested bracketings,
as shown in the following example (X represents a Chinese character):
S(goal:NP(Head:Nep:XX)|theme:NP(Head:Nhaa:X)|quantity:Dab:X|Head:VL2:X)#0(PERIODCATEGORY)
:return: A tree corresponding to the string representation.
:rtype: Tree
:param s: The string to be converted
:type s: str
"""
tokens = re.split(r"([()| ])", s)
for i in range(len(tokens)):
if tokens[i] == "(":
tokens[i - 1], tokens[i] = (
tokens[i],
tokens[i - 1],
) # pull nonterminal inside parens
elif ":" in tokens[i]:
fields = tokens[i].split(":")
if len(fields) == 2: # non-terminal
tokens[i] = fields[1]
else:
tokens[i] = "(" + fields[-2] + " " + fields[-1] + ")"
elif tokens[i] == "|":
tokens[i] = ""
treebank_string = " ".join(tokens)
return Tree.fromstring(treebank_string, remove_empty_top_bracketing=True)
# s = re.sub(r'^#[^\s]*\s', '', s) # remove leading identifier
# s = re.sub(r'\w+:', '', s) # remove role tags
# return s
######################################################################
## Demonstration
######################################################################
def demo():
"""
    A demonstration showing how Trees can be used. This demonstration
    creates a Tree from a bracketed string, modifies and transforms it,
    and shows the results of calling several of its methods.
"""
from nltk import Tree, ProbabilisticTree
# Demonstrate tree parsing.
s = "(S (NP (DT the) (NN cat)) (VP (VBD ate) (NP (DT a) (NN cookie))))"
t = Tree.fromstring(s)
print("Convert bracketed string into tree:")
print(t)
print(t.__repr__())
print("Display tree properties:")
print(t.label()) # tree's constituent type
print(t[0]) # tree's first child
print(t[1]) # tree's second child
print(t.height())
print(t.leaves())
print(t[1])
print(t[1, 1])
print(t[1, 1, 0])
# Demonstrate tree modification.
the_cat = t[0]
the_cat.insert(1, Tree.fromstring("(JJ big)"))
print("Tree modification:")
print(t)
t[1, 1, 1] = Tree.fromstring("(NN cake)")
print(t)
print()
# Tree transforms
print("Collapse unary:")
t.collapse_unary()
print(t)
print("Chomsky normal form:")
t.chomsky_normal_form()
print(t)
print()
# Demonstrate probabilistic trees.
pt = ProbabilisticTree("x", ["y", "z"], prob=0.5)
print("Probabilistic Tree:")
print(pt)
print()
# Demonstrate parsing of treebank output format.
t = Tree.fromstring(t.pformat())
print("Convert tree to bracketed string and back again:")
print(t)
print()
# Demonstrate LaTeX output
print("LaTeX output:")
print(t.pformat_latex_qtree())
print()
# Demonstrate Productions
print("Production output:")
print(t.productions())
print()
# Demonstrate tree nodes containing objects other than strings
t.set_label(("test", 3))
print(t)
__all__ = [
"ImmutableProbabilisticTree",
"ImmutableTree",
"ProbabilisticMixIn",
"ProbabilisticTree",
"Tree",
"bracket_parse",
"sinica_parse",
"ParentedTree",
"MultiParentedTree",
"ImmutableParentedTree",
"ImmutableMultiParentedTree",
]
| 36.909497 | 154 | 0.545741 |
fbd29548145504691706a2864fe3cc63a2cbbe04
| 6,428 |
py
|
Python
|
tests/test_utils.py
|
dkrm0/python-shaarli-client
|
3b5b2431b40a52bbf2b97f5a542e9ae9743c4723
|
[
"MIT"
] | 30 |
2017-01-17T14:41:42.000Z
|
2022-03-18T12:27:52.000Z
|
tests/test_utils.py
|
dkrm0/python-shaarli-client
|
3b5b2431b40a52bbf2b97f5a542e9ae9743c4723
|
[
"MIT"
] | 40 |
2017-01-15T00:00:24.000Z
|
2022-03-29T04:54:33.000Z
|
tests/test_utils.py
|
dkrm0/python-shaarli-client
|
3b5b2431b40a52bbf2b97f5a542e9ae9743c4723
|
[
"MIT"
] | 9 |
2017-01-14T23:33:39.000Z
|
2022-03-26T23:41:28.000Z
|
"""Tests for Shaarli client utilities"""
# pylint: disable=invalid-name
import json
from argparse import ArgumentParser
from unittest import mock
import pytest
from requests import Response
from shaarli_client.utils import format_response, generate_endpoint_parser
@mock.patch('argparse.ArgumentParser.add_argument')
def test_generate_endpoint_parser_noparam(addargument):
"""Generate a parser from endpoint metadata - no params"""
name = 'put-stuff'
metadata = {
'path': 'stuff',
'method': 'PUT',
'help': "Changes stuff",
'params': {},
}
parser = ArgumentParser()
subparsers = parser.add_subparsers()
generate_endpoint_parser(subparsers, name, metadata)
addargument.assert_has_calls([
# first helper for the main parser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
# second helper for the 'put-stuff' subparser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY)
])
@mock.patch('argparse.ArgumentParser.add_argument')
def test_generate_endpoint_parser_single_param(addargument):
"""Generate a parser from endpoint metadata - single param"""
name = 'get-stuff'
metadata = {
'path': 'stuff',
'method': 'GET',
'help': "Gets stuff",
'params': {
'param1': {
'help': "First param",
},
},
}
parser = ArgumentParser()
subparsers = parser.add_subparsers()
generate_endpoint_parser(subparsers, name, metadata)
addargument.assert_has_calls([
# first helper for the main parser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
        # second helper for the 'get-stuff' subparser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
# param1
mock.call('--param1', help="First param")
])
@mock.patch('argparse.ArgumentParser.add_argument')
def test_generate_endpoint_parser_multi_param(addargument):
"""Generate a parser from endpoint metadata - multiple params"""
name = 'get-stuff'
metadata = {
'path': 'stuff',
'method': 'GET',
'help': "Gets stuff",
'params': {
'param1': {
'help': "First param",
'type': int,
},
'param2': {
'choices': ['a', 'b', 'c'],
'help': "Second param",
'nargs': '+',
},
},
}
parser = ArgumentParser()
subparsers = parser.add_subparsers()
generate_endpoint_parser(subparsers, name, metadata)
addargument.assert_has_calls([
# first helper for the main parser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
        # second helper for the 'get-stuff' subparser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
# param1
mock.call('--param1', help="First param", type=int),
# param2
mock.call('--param2', choices=['a', 'b', 'c'],
help="Second param", nargs='+')
])
@mock.patch('argparse.ArgumentParser.add_argument')
def test_generate_endpoint_parser_resource(addargument):
"""Generate a parser from endpoint metadata - API resource"""
name = 'get-stuff'
metadata = {
'path': 'stuff',
'method': 'GET',
'help': "Gets stuff",
'resource': {
'help': "API resource",
'type': int,
},
'params': {},
}
parser = ArgumentParser()
subparsers = parser.add_subparsers()
generate_endpoint_parser(subparsers, name, metadata)
addargument.assert_has_calls([
# first helper for the main parser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
        # second helper for the 'get-stuff' subparser
mock.call('-h', '--help', action='help',
default=mock.ANY, help=mock.ANY),
# resource
mock.call('resource', help="API resource", type=int)
])
def test_format_response_unsupported_format():
"""Attempt to use an unsupported formatting flag"""
response = Response()
response.__setstate__({'_content': b'{"field":"value"}'})
with pytest.raises(ValueError) as err:
format_response('xml', response)
assert "not a supported format" in str(err.value)
@pytest.mark.parametrize('output_format', ['json', 'pprint', 'text'])
def test_format_response_empty_body(output_format):
"""Format a Requests Response object with no body"""
response = Response()
assert format_response(output_format, response) == ''
def test_format_response_text():
"""Format a Requests Response object to plain text"""
response = Response()
response.__setstate__({
'_content': b'{"global_counter":3251,'
b'"private_counter":1,'
b'"settings":{"title":"Yay!","header_link":"?",'
b'"timezone":"UTC",'
b'"enabled_plugins":["qrcode","token"],'
b'"default_private_links":false}}',
})
assert isinstance(response.text, str)
assert response.text == '{"global_counter":3251,' \
'"private_counter":1,' \
'"settings":{"title":"Yay!","header_link":"?",' \
'"timezone":"UTC",' \
'"enabled_plugins":["qrcode","token"],' \
'"default_private_links":false}}'
def test_format_response_json():
"""Format a Requests Response object to JSON"""
response = Response()
response.__setstate__({
'_content': b'{"global_counter":3251,'
b'"private_counter":1,'
b'"settings":{"title":"Yay!","header_link":"?",'
b'"timezone":"UTC",'
b'"enabled_plugins":["qrcode","token"],'
b'"default_private_links":false}}',
})
assert isinstance(response.json(), dict)
# Ensure valid JSON is returned after formatting
assert json.loads(format_response('json', response))
assert json.loads(format_response('pprint', response))
| 31.509804 | 77 | 0.570317 |
4bc6e05dad4312f6fe21d3ca1352d6605b796ff6
| 451 |
py
|
Python
|
leetcode/subsets.py
|
huynonstop/solving-everything
|
21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39
|
[
"MIT"
] | null | null | null |
leetcode/subsets.py
|
huynonstop/solving-everything
|
21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39
|
[
"MIT"
] | null | null | null |
leetcode/subsets.py
|
huynonstop/solving-everything
|
21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39
|
[
"MIT"
] | null | null | null |
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
return subsets(nums)
def subsets(nums):
n = len(nums)
rs = []
temp = []
def backtrack(start):
rs.append(temp[:])
for i in range(start, n):
if i > start and nums[i] == nums[i - 1]:
continue
temp.append(nums[i])
backtrack(i + 1)
temp.pop()
backtrack(0)
return rs
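# Illustrative trace (not part of the original solution): subsets([1, 2, 3])
# returns [[], [1], [1, 2], [1, 2, 3], [1, 3], [2], [2, 3], [3]].
# The ``nums[i] == nums[i - 1]`` guard only matters for sorted input containing
# duplicates (the Subsets II variant); it is a no-op for distinct elements.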
| 21.47619 | 58 | 0.490022 |
ee230cb4b7eaebcc8c2080a9cc91e88a22d8120f
| 1,201 |
py
|
Python
|
src/plugins/PlatPyGame/code/ui.py
|
StryQ1/Project-Acturus-X
|
e1d928eb751befcfd2ad9f1cf50a32749f97b23c
|
[
"Apache-2.0"
] | 3 |
2022-03-21T07:40:24.000Z
|
2022-03-21T11:16:43.000Z
|
src/plugins/PlatPyGame/code/ui.py
|
StryQ1/Project-Acturus-X
|
e1d928eb751befcfd2ad9f1cf50a32749f97b23c
|
[
"Apache-2.0"
] | null | null | null |
src/plugins/PlatPyGame/code/ui.py
|
StryQ1/Project-Acturus-X
|
e1d928eb751befcfd2ad9f1cf50a32749f97b23c
|
[
"Apache-2.0"
] | null | null | null |
import pygame
class UI:
def __init__(self,surface):
# setup
self.display_surface = surface
# health
self.health_bar = pygame.image.load('../graphics/ui/health_bar.png').convert_alpha()
self.health_bar_topleft = (54,39)
self.bar_max_width = 152
self.bar_height = 4
# coins
self.coin = pygame.image.load('../graphics/ui/coin.png').convert_alpha()
self.coin_rect = self.coin.get_rect(topleft = (50,61))
self.font = pygame.font.Font('../graphics/ui/ARCADEPI.ttf',30)
def show_health(self,current,full):
self.display_surface.blit(self.health_bar,(20,10))
current_health_ratio = current / full
current_bar_width = self.bar_max_width * current_health_ratio
health_bar_rect = pygame.Rect(self.health_bar_topleft,(current_bar_width,self.bar_height))
pygame.draw.rect(self.display_surface,'#dc4949',health_bar_rect)
def show_coins(self,amount):
self.display_surface.blit(self.coin,self.coin_rect)
coin_amount_surf = self.font.render(str(amount),False,'#33323d')
coin_amount_rect = coin_amount_surf.get_rect(midleft = (self.coin_rect.right + 4,self.coin_rect.centery))
self.display_surface.blit(coin_amount_surf,coin_amount_rect)
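# Illustrative wiring (hypothetical names; assumes a pygame display surface and
# player stats exist in the game loop):
#   ui = UI(screen)
#   ui.show_health(player.current_health, player.max_health)
#   ui.show_coins(player.coins)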
| 38.741935 | 108 | 0.742714 |
8a40a761e95492843f3b96d1eb85305ac5c82e99
| 740 |
py
|
Python
|
hitomi/commands/accounts.py
|
cleanunicorn/hitomi
|
3bc403fb445da0c8d4702aaf133a59b34d7ffb89
|
[
"Apache-2.0"
] | 3 |
2020-08-26T17:02:57.000Z
|
2022-02-09T03:56:50.000Z
|
hitomi/commands/accounts.py
|
cleanunicorn/hitomi
|
3bc403fb445da0c8d4702aaf133a59b34d7ffb89
|
[
"Apache-2.0"
] | 4 |
2020-01-16T19:33:50.000Z
|
2021-05-08T10:02:27.000Z
|
hitomi/commands/accounts.py
|
cleanunicorn/hitomi
|
3bc403fb445da0c8d4702aaf133a59b34d7ffb89
|
[
"Apache-2.0"
] | 2 |
2020-01-16T19:22:02.000Z
|
2021-09-06T03:12:30.000Z
|
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from hitomi.network.web3 import Web3
class Accounts(list):
def __init__(self, web3) -> None:
self.web3 = web3
self.refresh()
def refresh(self):
self._cache = dict({"accounts": self.web3.eth.accounts})
def _accounts(self) -> list:
return self._cache["accounts"]
def __iter__(self) -> Iterator:
return iter(self._accounts())
def __getitem__(self, key: int) -> Any:
accounts = self._accounts()
return accounts[key]
def __delitem__(self, key: int) -> None:
accounts = self._accounts()
del accounts[key]
def __len__(self) -> int:
return len(self._accounts())
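# Illustrative usage (assumes an already-connected Web3 instance):
#   accounts = Accounts(web3)   # snapshots web3.eth.accounts into the cache
#   first = accounts[0]         # list-style access over the cached accounts
#   accounts.refresh()          # re-query the node after accounts change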
| 24.666667 | 68 | 0.62027 |
c50bb6042bd26e0e4037c48f09db6adb50df3bc7
| 1,606 |
py
|
Python
|
pkgbuild/archlinux/python2/which.py
|
GameMaker2k/Neo-Hockey-Test
|
5737bfedf0d83f69964e85ac1dbf7e6a93c13f44
|
[
"BSD-3-Clause"
] | 1 |
2020-04-04T10:25:42.000Z
|
2020-04-04T10:25:42.000Z
|
pkgbuild/archlinux/python2/which.py
|
GameMaker2k/Neo-Hockey-Test
|
5737bfedf0d83f69964e85ac1dbf7e6a93c13f44
|
[
"BSD-3-Clause"
] | null | null | null |
pkgbuild/archlinux/python2/which.py
|
GameMaker2k/Neo-Hockey-Test
|
5737bfedf0d83f69964e85ac1dbf7e6a93c13f44
|
[
"BSD-3-Clause"
] | 3 |
2021-09-07T08:44:33.000Z
|
2021-12-07T23:49:39.000Z
|
#!/usr/bin/env python2
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the Revised BSD License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Revised BSD License for more details.
Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k
Copyright 2011-2016 Kazuki Przyborowski - https://github.com/KazukiPrzyborowski
$FileInfo: which.py - Last Update: 2/15/2016 Ver. 0.0.5 RC 3 - Author: cooldude2k $
'''
from __future__ import absolute_import, division, print_function, unicode_literals;
import os, sys, argparse;
__version_info__ = (0, 0, 5, "rc3");
if(__version_info__[3]!=None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])+"+"+str(__version_info__[3]);
if(__version_info__[3]==None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2]);
proname = "which";
prover = __version__;
profullname = proname+" "+prover;
def which_exec(execfile):
for path in os.environ["PATH"].split(":"):
if os.path.exists(path + "/" + execfile):
return path + "/" + execfile;
parser = argparse.ArgumentParser(conflict_handler = "resolve", add_help = True);
parser.add_argument("-v", "--version", action = "version", version = profullname);
parser.add_argument("filename", help = "enter a file name/path");
getargs = parser.parse_args();
print(which_exec(getargs.filename));
| 39.170732 | 127 | 0.721669 |
d3286bf2dc2b2ff70dfcc347f6c6f675b2eff616
| 17,652 |
py
|
Python
|
test/DeepVolume_test.py
|
ZerojumpLine/DeepVolume
|
177c3a8283234e40573b9f9b68280f0d0dedff87
|
[
"MIT"
] | 8 |
2019-11-09T00:56:11.000Z
|
2021-11-08T06:40:22.000Z
|
test/DeepVolume_test.py
|
ZerojumpLine/DeepVolume
|
177c3a8283234e40573b9f9b68280f0d0dedff87
|
[
"MIT"
] | null | null | null |
test/DeepVolume_test.py
|
ZerojumpLine/DeepVolume
|
177c3a8283234e40573b9f9b68280f0d0dedff87
|
[
"MIT"
] | 2 |
2019-11-09T00:56:13.000Z
|
2019-12-16T10:41:58.000Z
|
import argparse
import os
import scipy.io as sio
import numpy as np
import nibabel as nib
import time
import tensorflow as tf
from BrainStructureAwareNetwork_arch import BrainStructureAwareNetwork
from SpatialConnectionAwareNetwork_arch import SpatialConnectionAwareNetwork
from getimgtest import getimgtest
import math
batch_size = 1
# to recover the original intensity distribution
Intensity_max = 255
Intensity_mean = 0.1616
Intensity_std = 0.2197
parser = argparse.ArgumentParser(description='Tensorflow DeepVolume Test')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--datafile', default='../datafile/', type=str, help='path to datafile folder')
parser.add_argument('--savepath', default='../output/', type=str, help='path to output folder')
parser.add_argument('--modelpath', default='../models/', type=str, help='path to model save folder')
parser.add_argument('-s', '--stage', type=int, default=1, help='load the network one by one...')
args = parser.parse_args()
def test_BrainStructureNetwork():
filespath = args.datafile
axialThickpath = filespath + 'axialThick-test.txt'
sagittalThickpath = filespath + 'sagittalThick-test.txt'
savepath = args.savepath + 'test'
modelpath = args.modelpath + 'BrainStructureAwareNetwork/Model100.ckpt'
val_axialfile = open(axialThickpath)
val_axialread = val_axialfile.read().splitlines()
ntest = len(val_axialread)
testsavepath = savepath + str(1)
if (os.path.isdir(testsavepath) == False) :
for k in range(0, ntest):
os.mkdir(savepath + str(k + 1))
print("Folder created")
with tf.name_scope('input'):
LR = tf.placeholder(tf.float32, shape=[batch_size, 200, 200, 200, 2])
keep_prob = tf.placeholder(tf.float32, name='dropout_ratio')
probs, logits = BrainStructureAwareNetwork(LR, keep_prob)
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(args.gpu)
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(sess, modelpath)
print("Model loaded")
def feed_dict(xstart, ystart, zstart, LRimg):
xs = np.zeros((1, 200, 200, 200, 2))
xs[:, :, :, :, :] = LRimg[:,xstart:xstart + 200, ystart:ystart + 200, zstart:zstart + 200, :]
return {LR: xs, keep_prob: 1}
    # reconstruct each test case patch by patch
for kv in range(0, ntest):
time_start = time.time()
        print('Loading test case ' + str(kv + 1) + ' for stage 1')
LRimg = getimgtest(axialThickpath, sagittalThickpath, kv)
x_range = LRimg.shape[1]
y_range = LRimg.shape[2]
z_range = LRimg.shape[3]
if z_range < 200:
LRimgpad = np.zeros((1, x_range, y_range, 200, 2))
LRimgpad[:,:,:,0:z_range,:] = LRimg
LRimg = LRimgpad
        # The receptive field of the 3D U-net is 68
        # We retrieve only the central 40^3 voxels of each 200^3 patch to reconstruct
if z_range < 200:
hp = np.zeros((x_range, y_range, 200, 1))
else:
hp = np.zeros((x_range, y_range, z_range, 1))
x_sample = np.floor((x_range -160) / 40) + 1
x_sample = x_sample.astype(np.int16)
y_sample = np.floor((y_range -160) / 40) + 1
y_sample = y_sample.astype(np.int16)
z_sample = np.maximum(np.floor((z_range -160) / 40) + 1, 1)
z_sample = z_sample.astype(np.int16)
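        # Tiling note: patches are sampled on a 40-voxel stride and stitched
        # from their centres; at the volume borders the edge patch instead
        # contributes its outer 120-voxel margin (handled case by case below).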
for jx in range(0, x_sample):
for jy in range(0, y_sample):
for jz in range(0, z_sample):
# deal with the boundaries
if jx < x_sample - 1: # not the last
xstart = jx * 40
else:
xstart = x_range - 200
if jy < y_sample - 1: # not the last
ystart = jy * 40
else:
ystart = y_range - 200
if jz < z_sample - 1: # not the last
zstart = jz * 40
else:
zstart = LRimg.shape[3] - 200
ht = sess.run(probs, feed_dict=feed_dict(xstart, ystart, zstart, LRimg))
# setting the middle content
hp[xstart + 80:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:120, 80:120, 80:120, :]
                    # handle the boundaries: patches at the volume edges keep
                    # their full 120-voxel margin on the boundary side
if jx == 0:
hp[xstart:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 0:120, 80:120, 80:120, :]
if jx == x_sample - 1:
hp[xstart + 80:xstart + 200, ystart + 80:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:200, 80:120, 80:120, :]
if jy == 0:
hp[xstart + 80:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:120, 0:120, 80:120, :]
if jy == y_sample - 1:
hp[xstart + 80:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 120, :] = ht[0, 80:120, 80:200, 80:120, :]
if jz == 0:
hp[xstart + 80:xstart + 120, ystart + 80:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:120, 80:120, 0:120, :]
if jz == z_sample - 1:
hp[xstart + 80:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:120, 80:120, 80:200, :]
                    # then the 4 corners (xy)
if jx == 0 and jy == 0:
hp[xstart:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 0:120, 0:120, 80:120, :]
if jx == 0 and jy == y_sample - 1:
hp[xstart:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 120, :] = ht[0, 0:120, 80:200, 80:120, :]
if jx == x_sample - 1 and jy == 0:
hp[xstart + 80:xstart + 200, ystart:ystart + 120, zstart + 80:zstart + 120, :] = ht[0, 80:200, 0:120, 80:120, :]
if jx == x_sample - 1 and jy == y_sample - 1:
hp[xstart + 80:xstart + 200, ystart + 80:ystart + 200, zstart + 80:zstart + 120, :] = ht[0, 80:200, 80:200, 80:120, :]
                    # then the 4 corners (xz)
if jx == 0 and jz == 0:
hp[xstart:xstart + 120, ystart+80:ystart + 120, zstart:zstart + 120, :] = ht[0, 0:120, 80:120, 0:120, :]
if jx == 0 and jz == z_sample - 1:
hp[xstart:xstart + 120, ystart + 80:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 0:120, 80:120, 80:200, :]
if jx == x_sample - 1 and jz == 0:
hp[xstart + 80:xstart + 200, ystart+80:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:200, 80:120, 0:120, :]
if jx == x_sample - 1 and jz == z_sample - 1:
hp[xstart + 80:xstart + 200, ystart + 80:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:200, 80:120, 80:200, :]
                    # then the 4 corners (yz)
if jy == 0 and jz == 0:
hp[xstart+80:xstart + 120, ystart:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:120, 0:120, 0:120, :]
if jy == 0 and jz == z_sample - 1:
hp[xstart+80:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:120, 0:120, 80:200, :]
if jy == y_sample - 1 and jz == 0:
hp[xstart + 80:xstart + 120, ystart+80:ystart + 200, zstart:zstart + 120, :] = ht[0, 80:120, 80:200, 0:120, :]
if jy == y_sample - 1 and jz == z_sample - 1:
hp[xstart + 80:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 200, :] = ht[0, 80:120, 80:200, 80:200, :]
# the last 8 small corners..
if jx == 0 and jy == 0 and jz == 0:
hp[xstart:xstart + 120, ystart:ystart + 120, zstart:zstart + 120, :] = ht[0, 0:120, 0:120, 0:120, :]
if jx == 0 and jy == 0 and jz == z_sample - 1:
hp[xstart:xstart + 120, ystart:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 0:120, 0:120, 80:200, :]
if jx == 0 and jy == y_sample - 1 and jz == 0:
hp[xstart:xstart + 120, ystart + 80:ystart + 200, zstart:zstart + 120, :] = ht[0, 0:120, 80:200, 0:120, :]
if jx == 0 and jy == y_sample - 1 and jz == z_sample - 1:
hp[xstart:xstart + 120, ystart + 80:ystart + 200, zstart + 80:zstart + 200, :] = ht[0, 0:120, 80:200, 80:200, :]
if jx == x_sample - 1 and jy == 0 and jz == 0:
hp[xstart + 80:xstart + 200, ystart:ystart + 120, zstart:zstart + 120, :] = ht[0, 80:200, 0:120, 0:120, :]
if jx == x_sample - 1 and jy == 0 and jz == z_sample - 1:
hp[xstart + 80:xstart + 200, ystart:ystart + 120, zstart + 80:zstart + 200, :] = ht[0, 80:200, 0:120, 80:200, :]
if jx == x_sample - 1 and jy == y_sample - 1 and jz == 0:
hp[xstart + 80:xstart + 200, ystart + 80:ystart + 200, zstart:zstart + 120, :] = ht[0, 80:200, 80:200, 0:120, :]
if jx == x_sample - 1 and jy == y_sample - 1 and jz == z_sample - 1:
hp[xstart + 80:xstart + 200, ystart + 80:ystart + 200, zstart + 80:zstart + 200, :] = ht[0, 80:200, 80:200, 80:200, :]
print('processing Brain Structure Aware Model.. ' + str(jx/x_sample*100) + '%')
if z_range < 200:
hp = hp[:,:,0:z_range]
print('processing Brain Structure Aware Model.. ' + '100%')
time_end = time.time()
print('Time cost of test at case ' + str(kv + 1) + ' for stage 1 has been ' + str(time_end - time_start) + ' s')
savename = '%s%s%s' % (savepath, str(kv + 1), '/Reconstruction_BrainStructureAwareModel.mat')
sio.savemat(savename, {'Reconstruction': hp})
print('################ case ' + str(kv + 1) + ' has been done for Brain Structure Aware Model ################')
sess.close()
def test_SpatialConnectionAwareNetwork():
seq_length = 2
network_template = tf.make_template('network', SpatialConnectionAwareNetwork)
filespath = args.datafile
axialThickpath = filespath + 'axialThick-test.txt'
sagittalThickspath = filespath + 'sagittalThicks-test.txt'
savepath = args.savepath + 'test'
modelpath = args.modelpath + 'SpatialConnectionAwareNetwork/Model40.ckpt'
val_axialfile = open(axialThickpath)
val_axialread = val_axialfile.read().splitlines()
val_sagittalfile = open(sagittalThickspath)
val_sagittalread = val_sagittalfile.read().splitlines()
ntest = len(val_axialread)
axialThinpath = filespath + 'axialThin-test.txt'
val_GTfile = open(axialThinpath)
val_GTread = val_GTfile.read().splitlines()
with tf.name_scope('input'):
LR = tf.placeholder(tf.float32, shape=[batch_size, seq_length, 360, 432, 3], name='Lowresolute_image')
# conv network
hidden = None
x_1, hidden = network_template(LR[:, 0, :, :, :], hidden)
x_2, hidden = network_template(LR[:, 1, :, :, :], hidden)
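    # The second slice reuses the hidden state produced by the first, so spatial
    # continuity between adjacent sagittal slices is carried through the network.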
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(args.gpu)
sess = tf.Session(config=config)
saver = tf.train.Saver()
saver.restore(sess, modelpath)
print("Model loaded")
for kv in range(0, ntest):
time_start = time.time()
        print('Loading test case ' + str(kv + 1) + ' for stage 2')
matinput0 = val_axialread[kv]
load_data_input0 = sio.loadmat(matinput0)
datainput0 = load_data_input0['T1_image']
Input_full0cut = datainput0[int(datainput0.shape[0] / 2 - 180):int(datainput0.shape[0] / 2 + 180), int(datainput0.shape[1] / 2 - 216):int(datainput0.shape[1] / 2 + 216), :]
datainput0 = np.transpose(Input_full0cut, [2, 0, 1])
matinput1 = savepath + str(kv + 1) + '/Reconstruction_BrainStructureAwareModel.mat'
load_data_input1 = sio.loadmat(matinput1)
datainput1 = load_data_input1['Reconstruction']
datainput1m = datainput1[:, :, :, 0]
Input_fullrcut = datainput1m[int(datainput1m.shape[0] / 2 - 180):int(datainput1m.shape[0] / 2 + 180), int(datainput1m.shape[1] / 2 - 216):int(datainput1m.shape[1] / 2 + 216), :]
datainput1 = np.transpose(Input_fullrcut, [2, 0, 1])
test_sagittalname = val_sagittalread[kv]
load_data_input2 = sio.loadmat(test_sagittalname)
datainput2m = load_data_input2['T2s2_image']
Input_full2cut = datainput2m[int(datainput1m.shape[0] / 2 - 180):int(datainput1m.shape[0] / 2 + 180), int(datainput1m.shape[1] / 2 - 216):int(datainput1m.shape[1] / 2 + 216), :]
if Input_full2cut.shape[2] < 2 * datainput1m.shape[2]:
Input_full2cut = np.dstack((Input_full2cut, Input_full2cut[:, :, Input_full2cut.shape[2] - 1]))
datainputsag = np.transpose(Input_full2cut, [2, 0, 1])
totalnum = datainput1.shape[0]
def feed_dict(j):
pointer = j
xs = np.zeros((batch_size, seq_length, 360, 432, 3))
xs[:, 0, :, :, 0] = datainput1[pointer, 0:360, 0:432]
xs[:, 0, :, :, 1] = datainput0[pointer, 0:360, 0:432]
xs[:, 0, :, :, 2] = datainputsag[2 * pointer, 0:360, 0:432]
xs[:, 1, :, :, 0] = datainput1[pointer, 0:360, 0:432]
xs[:, 1, :, :, 1] = datainput0[pointer, 0:360, 0:432]
xs[:, 1, :, :, 2] = datainputsag[2 * pointer + 1, 0:360, 0:432]
return {LR: xs}
hp = datainput1m
for j in range(0, np.int16(totalnum)):
ht = sess.run(x_2, feed_dict=feed_dict(j))
hp[int(datainput1m.shape[0] / 2 - 180):int(datainput1m.shape[0] / 2 + 180), int(datainput1m.shape[1] / 2 - 216):int(datainput1m.shape[1] / 2 + 216), j] = ht[0, :, :, 0]
time_end = time.time()
print('Time cost of test at case ' + str(kv + 1) + ' for stage 2 has been ' + str(time_end - time_start) + ' s')
savename = '%s%s%s' % (savepath, str(kv + 1), '//Reconstruction_DeepVolume.mat')
sio.savemat(savename, {'Reconstruction': hp})
# load the brain mask, which was generated based on the axial thin MRI
c1map = val_GTread[kv][0:-4] + 'c1.nii'
c1load = nib.load(c1map)
c1im = c1load.get_fdata()
c2map = val_GTread[kv][0:-4] + 'c2.nii'
c2load = nib.load(c2map)
c2im = c2load.get_fdata()
c3map = val_GTread[kv][0:-4] + 'c3.nii'
c3load = nib.load(c3map)
c3im = c3load.get_fdata()
cim = c1im + c2im + c3im
RecIntensity = np.abs((hp * Intensity_std + Intensity_mean) * Intensity_max)
imgToSave = np.int16(RecIntensity * cim)
npDtype = np.dtype(np.int16)
proxy_origin = nib.load(c1map)
affine_origin = proxy_origin.affine
proxy_origin.uncache()
newImg = nib.Nifti1Image(imgToSave, affine_origin)
newImg.set_data_dtype(npDtype)
nib.save(newImg, savepath + str(kv + 1) + '//pred.nii.gz')
print('################ case ' + str(kv + 1) + ' has been done for Spatial Connection Aware Model ################')
sess.close()
def evaluation():
filespath = args.datafile
savepath = args.savepath + 'test'
axialThinpath = filespath + 'axialThin-test.txt'
val_GTfile = open(axialThinpath)
val_GTread = val_GTfile.read().splitlines()
ntest = len(val_GTread)
PSNRall = []
print('################################ Doing evaluation ################################')
for kv in range(0, ntest):
predmap = savepath + str(kv + 1) + '//pred.nii.gz'
predload = nib.load(predmap)
predim = np.uint8(predload.get_fdata())
matGT = val_GTread[kv]
load_data_input0 = sio.loadmat(matGT)
dataGT = load_data_input0['T3_image']
GTIntensity = (dataGT * Intensity_std + Intensity_mean) * Intensity_max
c1map = val_GTread[kv][0:-4] + 'c1.nii'
c1load = nib.load(c1map)
c1im = c1load.get_fdata()
c2map = val_GTread[kv][0:-4] + 'c2.nii'
c2load = nib.load(c2map)
c2im = c2load.get_fdata()
c3map = val_GTread[kv][0:-4] + 'c3.nii'
c3load = nib.load(c3map)
c3im = c3load.get_fdata()
cim = c1im + c2im + c3im
GTim = np.uint8(GTIntensity * cim)
Resultpsnr = psnr(predim, GTim)
PSNRall.append(Resultpsnr)
print('PSNR of case ' + str(kv+1) + ' is ' + str(Resultpsnr))
print('average PSNR is ' + str(np.mean(PSNRall)))
def psnr(img1, img2):
mse = np.mean((np.double(img1) - np.double(img2)) ** 2)
if mse == 0:
return 100
PIXEL_MAX = 255.0
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
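# Worked example (arithmetic only): for 8-bit images with an MSE of 1.0,
# PSNR = 20 * log10(255 / 1) ≈ 48.13 dB; identical images return 100 by convention.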
if __name__ == '__main__':
if args.stage == 1:
test_BrainStructureNetwork()
else:
test_SpatialConnectionAwareNetwork()
evaluation()
| 48.098093 | 186 | 0.545491 |
5f124ed4699aaa1ce508705663e09e437138f35c
| 142 |
py
|
Python
|
tests/general/test_model_wrapper_ensemble.py
|
davidwilby/multimodal_keras_wrapper
|
8151a52d9728d669f0b517515c869beb0211c7db
|
[
"MIT"
] | 31 |
2017-02-22T09:38:15.000Z
|
2021-04-19T10:13:34.000Z
|
tests/general/test_model_wrapper_ensemble.py
|
davidwilby/multimodal_keras_wrapper
|
8151a52d9728d669f0b517515c869beb0211c7db
|
[
"MIT"
] | 5 |
2017-12-05T07:08:44.000Z
|
2020-04-15T17:49:00.000Z
|
tests/general/test_model_wrapper_ensemble.py
|
davidwilby/multimodal_keras_wrapper
|
8151a52d9728d669f0b517515c869beb0211c7db
|
[
"MIT"
] | 15 |
2017-02-22T09:38:14.000Z
|
2021-04-19T10:13:37.000Z
|
import pytest
from six import iteritems
def test_model_wrapper_ensemble():
pass
if __name__ == '__main__':
pytest.main([__file__])
| 14.2 | 34 | 0.732394 |
3385dca8bd18bdbe8f4e9d72f562993d06e56efb
| 16,563 |
py
|
Python
|
steamcommunity/steamcommunity.py
|
the-krak3n/Fixator10-Cogs
|
38af6499634a4f4ea200fa9173ccfc9a6bbd14d4
|
[
"MIT"
] | null | null | null |
steamcommunity/steamcommunity.py
|
the-krak3n/Fixator10-Cogs
|
38af6499634a4f4ea200fa9173ccfc9a6bbd14d4
|
[
"MIT"
] | null | null | null |
steamcommunity/steamcommunity.py
|
the-krak3n/Fixator10-Cogs
|
38af6499634a4f4ea200fa9173ccfc9a6bbd14d4
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from contextlib import suppress
from datetime import datetime
from functools import partial
from io import BytesIO
from os import path
from socket import gethostbyname_ex
from time import time
from warnings import filterwarnings
import aiohttp
import discord
import valve.source.a2s
from redbot.core import checks, commands
from redbot.core.data_manager import bundled_data_path
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils import chat_formatting as chat
from valve.steam.api import interface
with suppress(Exception):
from matplotlib import pyplot, units as munits, dates as mdates, use as mpluse
import numpy as np
from .steamuser import SteamUser
try:
from redbot import json # support of Draper's branch
except ImportError:
import json
USERAGENT = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/88.0.4324.104 "
"Safari/537.36"
)
LOAD_INDICATORS = ["\N{GREEN HEART}", "\N{YELLOW HEART}", "\N{BROKEN HEART}"]
def bool_emojify(bool_var: bool) -> str:
return "✅" if bool_var else "❌"
def check_api(ctx):
"""Is API ready?"""
return "ISteamUser" in list(ctx.cog.steam._interfaces.keys())
async def validate_ip(s):
"""Is IP address valid"""
a = s.split(".")
if len(a) != 4:
return False
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
return True
async def find_service(services: dict, service: str):
"""Find service from steamstat.us' service list"""
Service = namedtuple("Service", ["id", "load", "text", "text_with_indicator"])
for s in services:
if s[0] == service:
return Service(s[0], s[1], s[2], f"{LOAD_INDICATORS[s[1]]} {s[2]}")
return Service("", "", "", "")
_ = Translator("SteamCommunity", __file__)
filterwarnings("ignore", category=FutureWarning, module=r"valve.")
@cog_i18n(_)
class SteamCommunity(commands.Cog):
"""SteamCommunity commands"""
__version__ = "2.1.15"
# noinspection PyMissingConstructor
def __init__(self, bot):
self.bot = bot
self.steam = None
self.session = aiohttp.ClientSession(json_serialize=json.dumps)
self.status_data = {"last_update": 0.0, "data": {}}
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
async def red_delete_data_for_user(self, **kwargs):
return
async def initialize(self):
"""Should be called straight after cog instantiation."""
apikeys = await self.bot.get_shared_api_tokens("steam")
self.steam = await self.asyncify(interface.API, key=apikeys.get("web"))
async def asyncify(self, func, *args, **kwargs):
"""Run func in executor"""
return await self.bot.loop.run_in_executor(None, partial(func, *args, **kwargs))
@commands.group(aliases=["sc"])
async def steamcommunity(self, ctx):
"""SteamCommunity commands"""
pass
@steamcommunity.command()
@checks.is_owner()
async def apikey(self, ctx):
"""Set API key for Steam Web API"""
message = _(
"To get Steam Web API key:\n"
"1. Login to your Steam account\n"
"2. Visit [Register Steam Web API Key](https://steamcommunity.com/dev/apikey) page\n"
"3. Enter any domain name (e.g. `localhost`)\n"
'4. You will now see "Key" field\n'
"5. Use `{}set api steam web <your_apikey>`\n"
"Note: These tokens are sensitive and should only be used in a private channel\n"
"or in DM with the bot."
).format(ctx.clean_prefix)
await ctx.maybe_send_embed(message)
@steamcommunity.command(name="profile", aliases=["p"])
@commands.bot_has_permissions(embed_links=True)
@commands.cooldown(1, 15, commands.BucketType.user)
@commands.check(check_api)
async def steamprofile(self, ctx, profile: SteamUser):
"""Get steam user's steamcommunity profile"""
em = discord.Embed(
title=profile.personaname,
description=profile.personastate(),
url=profile.profileurl,
timestamp=datetime.utcfromtimestamp(profile.lastlogoff)
if profile.lastlogoff
else discord.Embed.Empty,
color=profile.personastatecolor,
)
if profile.gameid:
em.description = _("In game: [{}](http://store.steampowered.com/app/{})").format(
profile.gameextrainfo or "Unknown", profile.gameid
)
if profile.gameserver:
em.description += _(" on server {}").format(profile.gameserver)
if profile.shared_by:
em.description += _("\nFamily Shared by [{}]({})").format(
profile.shared_by.personaname, profile.shared_by.profileurl
)
if profile.realname:
em.add_field(name=_("Real name"), value=profile.realname, inline=False)
em.add_field(name=_("Level"), value=profile.level or "0")
if profile.country:
em.add_field(name=_("Country"), value=":flag_{}:".format(profile.country.lower()))
em.add_field(name=_("Visibility"), value=profile.visibility)
if profile.createdat:
em.add_field(
name=_("Created at"),
value=datetime.utcfromtimestamp(profile.createdat).strftime(
_("%d.%m.%Y %H:%M:%S")
),
)
em.add_field(name="SteamID", value="{}\n{}".format(profile.steamid, profile.sid3))
em.add_field(name="SteamID64", value=profile.steamid64)
if any([profile.VACbanned, profile.gamebans]):
bansdescription = _("Days since last ban: {}").format(profile.sincelastban)
elif any([profile.communitybanned, profile.economyban]):
bansdescription = _("Has one or more bans:")
else:
bansdescription = _("No bans on record")
em.add_field(name=_("🛡 Bans"), value=bansdescription, inline=False)
em.add_field(name=_("Community ban"), value=bool_emojify(profile.communitybanned))
em.add_field(
name=_("Economy ban"),
value=profile.economyban.capitalize() if profile.economyban else "❌",
)
em.add_field(
name=_("VAC bans"),
value=_("{} VAC bans").format(profile.VACbans) if profile.VACbans else "❌",
)
em.add_field(
name=_("Game bans"),
value=_("{} game bans").format(profile.gamebans) if profile.gamebans else "❌",
)
em.set_thumbnail(url=profile.avatar184)
footer = [_("Powered by Steam")]
if profile.lastlogoff:
footer.append(_("Last seen on"))
em.set_footer(
text=" • ".join(footer),
icon_url="https://steamstore-a.akamaihd.net/public/shared/images/responsive/share_steam_logo.png",
)
await ctx.send(embed=em)
@steamcommunity.command(name="status")
@commands.cooldown(1, 45, commands.BucketType.guild)
@commands.bot_has_permissions(embed_links=True)
@commands.max_concurrency(1, commands.BucketType.user)
async def steamstatus(self, ctx):
"""Get status of steam services"""
async with ctx.typing():
if time() - self.status_data["last_update"] >= 45:
try:
async with self.session.get(
"https://vortigaunt.steamstat.us/not_an_api.json",
headers={"referer": "https://steamstat.us/", "User-Agent": USERAGENT},
raise_for_status=True,
) as gravity:
data = await gravity.json(loads=json.loads)
self.status_data["data"] = data
self.status_data["last_update"] = time()
except aiohttp.ClientResponseError as e:
await ctx.send(
chat.error(
_("Unable to get data from steamstat.us: {} ({})").format(
e.status, e.message
)
)
)
return
except aiohttp.ClientError as e:
await ctx.send(
chat.error(_("Unable to get data from steamstat.us: {}").format(e))
)
return
else:
data = self.status_data["data"]
services = data.get("services", {})
graph = data.get("graph")
em = discord.Embed(
title=_("Steam Status"),
url="https://steamstat.us",
color=await ctx.embed_color(),
timestamp=datetime.utcfromtimestamp(data.get("time", 0)),
)
em.description = _(
"**Online**: {}\n"
"**In-game**: {}\n"
"**Store**: {}\n"
"**Community**: {}\n"
"**Web API**: {}\n"
"**Steam Connection Managers**: {}\n"
"**SteamDB.info database**: {}"
).format(
(await find_service(services, "online")).text_with_indicator,
(await find_service(services, "ingame")).text_with_indicator,
(await find_service(services, "store")).text_with_indicator,
(await find_service(services, "community")).text_with_indicator,
(await find_service(services, "webapi")).text_with_indicator,
(await find_service(services, "cms")).text_with_indicator,
(await find_service(services, "database")).text_with_indicator,
)
em.add_field(
name=_("Games"),
value=_(
"**TF2 Game Coordinator**: {}\n"
"**Dota 2 Game Coordinator**: {}\n"
"**Underlords Game Coordinator**: {}\n"
"**Artifact Game Coordinator**: {}\n"
"**CS:GO Game Coordinator**: {}\n"
"**CS:GO Sessions Logon**: {}\n"
"**CS:GO Player Inventories**: {}\n"
"**CS:GO Matchmaking Scheduler**: {}\n"
).format(
(await find_service(services, "tf2")).text_with_indicator,
(await find_service(services, "dota2")).text_with_indicator,
(await find_service(services, "underlords")).text_with_indicator,
(await find_service(services, "artifact")).text_with_indicator,
(await find_service(services, "csgo")).text_with_indicator,
(await find_service(services, "csgo_sessions")).text_with_indicator,
(await find_service(services, "csgo_community")).text_with_indicator,
(await find_service(services, "csgo_mm_scheduler")).text_with_indicator,
),
)
graph_file = None
if all(lib in globals().keys() for lib in ["pyplot", "np"]):
graph_file = await self.asyncify(self.gen_steam_cm_graph, graph)
graph_file = discord.File(graph_file, filename="CMgraph.png")
em.set_image(url="attachment://CMgraph.png")
# TODO: Regions?
await ctx.send(embed=em, file=graph_file)
if graph_file:
graph_file.close()
@commands.command(aliases=["gameserver"])
async def getserver(self, ctx, serverip: str):
"""Get info about a gameserver"""
if ":" not in serverip:
serverip += ":27015"
serverc = serverip.split(":")
if not serverc[0][0].isdigit():
try:
ip = gethostbyname_ex(serverc[0])[2][0]
except Exception as e:
await ctx.send(_("The specified domain is not valid: {}").format(e))
return
servercheck = ip
serverc = [str(ip), int(serverc[1])]
else:
servercheck = serverc[0]
serverc = [str(serverc[0]), int(serverc[1])]
serverc = tuple(serverc)
if not await validate_ip(str(servercheck)):
await ctx.send_help()
return
async with ctx.typing():
try:
server = await self.asyncify(valve.source.a2s.ServerQuerier, serverc)
info = server.info()
server.close()
except valve.source.a2s.NoResponseError:
await ctx.send(
chat.error(
_("Could not fetch Server or the Server is not on the Steam masterlist")
)
)
return
except Exception as e:
await ctx.send(chat.error(_("An Error has been occurred: {}").format(e)))
return
_map = info.values["map"]
if _map.lower().startswith("workshop"):
link = "https://steamcommunity.com/sharedfiles/filedetails/?id={}".format(
_map.split("/")[1]
)
_map = "{} [(Workshop map)]({})".format(_map.split("/")[2], link)
game = info.values["folder"]
gameid = info.values["app_id"]
gamemode = info.values["game"]
servername = info.values["server_name"].strip()
servertype = str(info.values["server_type"])
playernumber = str(info.values["player_count"] - info.values["bot_count"])
botnumber = int(info.values["bot_count"])
maxplayers = str(info.values["max_players"])
os = str(info.values["platform"])
version = info.values["version"]
em = discord.Embed(colour=await ctx.embed_color())
em.add_field(
name=_("Game"),
value=f"[{game}](http://store.steampowered.com/app/{gameid})",
)
em.add_field(name=_("Gamemode"), value=gamemode)
em.add_field(name=_("Server name"), value=servername, inline=False)
em.add_field(name=_("Map"), value=_map, inline=False)
em.add_field(name="IP", value=serverc[0])
em.add_field(name=_("Operating System"), value=os)
em.add_field(name=_("Server type"), value=servertype)
em.add_field(name=_("Version"), value=version)
em.add_field(name="VAC", value=bool_emojify(bool(info.values["vac_enabled"])))
em.add_field(
name=_("Password"),
value=bool_emojify(bool(info.values["password_protected"])),
)
if botnumber:
em.add_field(
name=_("Players"),
value=_("{}/{}\nBots: {}").format(playernumber, maxplayers, botnumber),
)
else:
em.add_field(name=_("Players"), value="{}/{}\n".format(playernumber, maxplayers))
await ctx.send(embed=em)
@commands.Cog.listener()
async def on_red_api_tokens_update(self, service_name, api_tokens):
if service_name == "steam":
self.steam = await self.asyncify(interface.API, key=api_tokens.get("web"))
def gen_steam_cm_graph(self, graphdata: dict):
"""Make an graph for connection managers"""
mpluse("Agg")
formats = [
"%y", # ticks are mostly years
"%b", # ticks are mostly months
"%d", # ticks are mostly days
"%H:%M", # hrs
"%H:%M", # min
"%S.%f", # secs
]
zero_formats = [""] + formats[:-1]
zero_formats[3] = "%d-%b"
offset_formats = [
"",
"%Y",
"%b %Y",
"%d %b %Y",
"%d %b %Y",
"%d %b %Y %H:%M",
]
munits.registry[datetime] = mdates.ConciseDateConverter(
formats=formats, zero_formats=zero_formats, offset_formats=offset_formats
)
cur = graphdata["start"]
x = []
for _ in range(len(graphdata["data"])):
cur += graphdata["step"]
x.append(cur)
x = [datetime.utcfromtimestamp(_x / 1000) for _x in x]
y = graphdata["data"]
graphfile = BytesIO()
with pyplot.style.context(path.join(bundled_data_path(self), "discord.mplstyle")):
fig, ax = pyplot.subplots()
ax.plot(x, y)
ax.set_ylim(bottom=0)
ax.grid()
ax.set(xlabel="Date", ylabel="%", title="Steam Connection Managers")
ax.set_yticks(np.arange(0, 100, 5))
fig.savefig(graphfile)
pyplot.close(fig)
graphfile.seek(0)
return graphfile
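    # --- Hedged usage sketch (added; not part of the original cog) ---
    # gen_steam_cm_graph expects a dict shaped like the steamstat.us "graph" payload;
    # the literal values and the `cog` instance name below are illustrative only.
    #
    # sample_graph = {"start": 1609459200000, "step": 1800000, "data": [97.5, 98.1, 96.4]}
    # png_buffer = cog.gen_steam_cm_graph(sample_graph)  # BytesIO holding the rendered PNG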
| 38.880282 | 110 | 0.565055 |
572ce924c7d63448692e627384605a4f78ae9c5c
| 1,326 |
py
|
Python
|
whatsapp_web_driver/custom_errors.py
|
AadamLok/wwd
|
d26dbd1bb65b990fe00d95ea9d8e58d38564fd07
|
[
"Apache-2.0"
] | 1 |
2021-06-27T09:31:33.000Z
|
2021-06-27T09:31:33.000Z
|
whatsapp_web_driver/custom_errors.py
|
MurtazaCyclewala/whatsapp_web_driver
|
d26dbd1bb65b990fe00d95ea9d8e58d38564fd07
|
[
"Apache-2.0"
] | null | null | null |
whatsapp_web_driver/custom_errors.py
|
MurtazaCyclewala/whatsapp_web_driver
|
d26dbd1bb65b990fe00d95ea9d8e58d38564fd07
|
[
"Apache-2.0"
] | null | null | null |
class ChromeDriverNotWorking(Exception):
"""
    Exception raised when the driver passed in doesn't work and Selenium throws an error.
"""
def __init__(self, place, message="Something is wrong with chrome driver."):
self.message = message
self.place = place
super().__init__(self.message)
def __str__(self):
        return f'Chrome driver passed in the {self.place} is not working.\nTry checking the path of the chrome driver, or check if your installed Chrome version matches the driver version.'
class WhatsappNotLoggedIn(Exception):
"""
Exception raised when web whatsapp is not logged in.
"""
def __init__(self, message="Web Whatsapp not logged in."):
self.message = message
super().__init__(self.message)
def __str__(self):
        return f'{self.message}\nLog in to web WhatsApp by scanning the barcode and then try again.'
class MaxTimeOut(Exception):
"""
    Exception raised when some element or page takes more time than
    max_time to load, indicating something is wrong with the Internet connection.
"""
def __init__(self, message="Page took too much time to load."):
self.message = message
super().__init__(self.message)
def __str__(self):
return f'{self.message}\nTry checking your Internet connection.'
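# --- Hedged usage sketch (added; not part of the original module) ---
# Shows how calling code might raise and handle one of these errors; the
# "constructor" string is just an illustrative value for the `place` parameter.
#
# try:
#     raise ChromeDriverNotWorking(place="constructor")
# except ChromeDriverNotWorking as err:
#     print(err)  # prints the message built in __str__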
| 35.837838 | 189 | 0.687029 |
bd6c08c38c14b4bbb9ef087a4082b260f2cc08b3
| 3,019 |
py
|
Python
|
nautobot/users/filters.py
|
MatthewGP/nautobot
|
93b9d9c33a49e69b8ee86da8af4ef1265873014b
|
[
"Apache-2.0"
] | null | null | null |
nautobot/users/filters.py
|
MatthewGP/nautobot
|
93b9d9c33a49e69b8ee86da8af4ef1265873014b
|
[
"Apache-2.0"
] | null | null | null |
nautobot/users/filters.py
|
MatthewGP/nautobot
|
93b9d9c33a49e69b8ee86da8af4ef1265873014b
|
[
"Apache-2.0"
] | null | null | null |
import django_filters
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.db.models import Q
from nautobot.users.models import ObjectPermission, Token
from nautobot.utilities.filters import BaseFilterSet
__all__ = (
"GroupFilterSet",
"ObjectPermissionFilterSet",
"UserFilterSet",
)
class GroupFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = Group
fields = ["id", "name"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(name__icontains=value)
class UserFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
group_id = django_filters.ModelMultipleChoiceFilter(
field_name="groups",
queryset=Group.objects.all(),
label="Group",
)
group = django_filters.ModelMultipleChoiceFilter(
field_name="groups__name",
queryset=Group.objects.all(),
to_field_name="name",
label="Group (name)",
)
class Meta:
model = get_user_model()
fields = [
"id",
"username",
"first_name",
"last_name",
"email",
"is_staff",
"is_active",
]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(username__icontains=value)
| Q(first_name__icontains=value)
| Q(last_name__icontains=value)
| Q(email__icontains=value)
)
class TokenFilterSet(BaseFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = Token
fields = ["id", "key", "write_enabled", "created", "expires"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(description__icontains=value))
class ObjectPermissionFilterSet(BaseFilterSet):
user_id = django_filters.ModelMultipleChoiceFilter(
field_name="users",
queryset=get_user_model().objects.all(),
label="User",
)
user = django_filters.ModelMultipleChoiceFilter(
field_name="users__username",
queryset=get_user_model().objects.all(),
to_field_name="username",
label="User (name)",
)
group_id = django_filters.ModelMultipleChoiceFilter(
field_name="groups",
queryset=Group.objects.all(),
label="Group",
)
group = django_filters.ModelMultipleChoiceFilter(
field_name="groups__name",
queryset=Group.objects.all(),
to_field_name="name",
label="Group (name)",
)
class Meta:
model = ObjectPermission
fields = ["id", "name", "enabled", "object_types"]
| 26.252174 | 69 | 0.612454 |
52b93e149a5a1637bd07a108c5bb4301e6daa22f
| 6,577 |
py
|
Python
|
large_cohort/processes/q_ffl.py
|
isabella232/federated-1
|
834608ecad7be6772ecb753a4be72e89beec4b18
|
[
"Apache-2.0"
] | null | null | null |
large_cohort/processes/q_ffl.py
|
isabella232/federated-1
|
834608ecad7be6772ecb753a4be72e89beec4b18
|
[
"Apache-2.0"
] | 1 |
2022-01-29T11:44:07.000Z
|
2022-01-29T11:44:07.000Z
|
large_cohort/processes/q_ffl.py
|
isabella232/federated-1
|
834608ecad7be6772ecb753a4be72e89beec4b18
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the q-Fair Federated Learning (q-FFL) algorithm.
Based on the paper:
Fair Resource Allocation in Federated Learning.
Tian Li, Maziar Sanjabi, Ahmad Beirami, Virginia Smith. ICLR 2020.
https://arxiv.org/abs/1905.10497
Note that the primary distinction between this implementation and the algorithm
described in the paper above is that the paper weights each client by their loss
after training. This requires an extra pass over each client's dataset. In order
to reduce training time on clients, we use the loss computed as the client
trains to do the weighting in q-FFL.
"""
from typing import Any, Callable, Optional
import tensorflow as tf
import tensorflow_federated as tff
DEFAULT_SERVER_OPTIMIZER_FN = lambda: tf.keras.optimizers.SGD(learning_rate=1.0)
def build_keras_output_to_loss_fn(
metric_builder=Callable[[], tf.keras.metrics.Metric]):
"""Creates a function that computes the result of a `tf.keras` metric."""
def output_to_loss_fn(output):
loss_variables = output['loss']
metric = metric_builder()
tf.nest.map_structure(lambda a, b: a.assign(b), metric.variables,
loss_variables)
return metric.result()
return output_to_loss_fn
def build_q_ffl_process(
model_fn: Callable[[], tff.learning.Model],
fairness_parameter: tf.Tensor,
client_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer],
server_optimizer_fn: Callable[
[], tf.keras.optimizers.Optimizer] = DEFAULT_SERVER_OPTIMIZER_FN,
broadcast_process: Optional[tff.templates.MeasuredProcess] = None,
model_update_aggregation_factory: Optional[
tff.aggregators.WeightedAggregationFactory] = None,
use_experimental_simulation_loop: bool = False,
output_to_loss_fn: Optional[Callable[[Any], tf.Tensor]] = None,
) -> tff.templates.IterativeProcess:
"""Builds an iterative process that performs q-FFL.
This function creates a `tff.templates.IterativeProcess` that performs
a variant of federated averaging on client models, where client updates are
  weighted by their loss raised to the power `fairness_parameter`.
The iterative process has the following methods inherited from
`tff.templates.IterativeProcess`:
* `initialize`: A `tff.Computation` with the functional type signature
`( -> S@SERVER)`, where `S` is a `tff.learning.framework.ServerState`
representing the initial state of the server.
* `next`: A `tff.Computation` with the functional type signature
`(<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>)` where `S` is a
`tff.learning.framework.ServerState` whose type matches that of the output
of `initialize`, and `{B*}@CLIENTS` represents the client datasets, where
`B` is the type of a single batch. This computation returns a
`tff.learning.framework.ServerState` representing the updated server state
and metrics computed during training.
The iterative process also has the following method not inherited from
`tff.templates.IterativeProcess`:
* `get_model_weights`: A `tff.Computation` that takes as input the
a `tff.learning.framework.ServerState`, and returns a
`tff.learning.ModelWeights` containing the state's model weights.
The internal logic of the resulting iterative process is the same as
`tff.learning.build_federated_averaging_process`, but with a custom weighting
function.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`. This method
must *not* capture TensorFlow tensors or variables and use them. The model
      must be constructed entirely from scratch on each invocation; returning
      the same pre-constructed model on each call will result in an error.
fairness_parameter: A scalar tensor governing the exponent in the client
weights. Must be convertible to a scalar `tf.float32`.
client_optimizer_fn: A no-arg callable that returns a `tf.keras.Optimizer`.
server_optimizer_fn: A no-arg callable that returns a `tf.keras.Optimizer`.
By default, this uses `tf.keras.optimizers.SGD` with a learning rate of
1.0.
broadcast_process: a `tff.templates.MeasuredProcess` that broadcasts the
model weights on the server to the clients. It must support the signature
`(input_values@SERVER -> output_values@CLIENT)`. If set to default None,
the server model is broadcast to the clients using the default
tff.federated_broadcast.
model_update_aggregation_factory: An optional
`tff.aggregators.WeightedAggregationFactory` that constructs
`tff.templates.AggregationProcess` for aggregating the client model
updates on the server. If `None`, uses `tff.aggregators.MeanFactory`.
use_experimental_simulation_loop: Controls the reduce loop function for
input dataset. An experimental reduce loop is used for simulation. It is
currently necessary to set this flag to True for performant GPU
simulations.
output_to_loss_fn: An optional callable that takes the result of
`model_fn().report_local_unfinalized_metrics()` and returns a scalar
tensor representing the loss of the model. If set to `None`, this method
      will attempt to extract the loss from
`model_fn().report_local_unfinalized_metrics()['loss']`.
Returns:
A `tff.templates.IterativeProcess`.
"""
if output_to_loss_fn is None:
output_to_loss_fn = lambda x: x['loss']
def client_weighting(client_output):
loss = output_to_loss_fn(client_output)
return tf.math.pow(loss, fairness_parameter)
return tff.learning.build_federated_averaging_process(
model_fn=model_fn,
client_optimizer_fn=client_optimizer_fn,
server_optimizer_fn=server_optimizer_fn,
client_weighting=client_weighting,
broadcast_process=broadcast_process,
model_update_aggregation_factory=model_update_aggregation_factory,
use_experimental_simulation_loop=use_experimental_simulation_loop)
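# --- Hedged usage sketch (added; not part of the original module) ---
# Assumes `model_fn` builds a `tff.learning.Model` and `federated_train_data` is a
# list of per-client `tf.data.Dataset`s; both names and the fairness value of 2.0
# are illustrative only.
#
# q_ffl_process = build_q_ffl_process(
#     model_fn=model_fn,
#     fairness_parameter=tf.constant(2.0),
#     client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
# )
# state = q_ffl_process.initialize()
# state, metrics = q_ffl_process.next(state, federated_train_data)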
| 46.316901 | 80 | 0.753535 |
c49a2670308148737e276772541d42d2ebb4fe11
| 8,115 |
py
|
Python
|
tests/dates/test_ToDatetimeTransformer.py
|
binti59/LV
|
20994adfb3f311645bd513ab81dad761d3e3ab98
|
[
"BSD-3-Clause"
] | 32 |
2021-04-26T13:04:26.000Z
|
2022-03-18T16:22:13.000Z
|
tests/dates/test_ToDatetimeTransformer.py
|
binti59/LV
|
20994adfb3f311645bd513ab81dad761d3e3ab98
|
[
"BSD-3-Clause"
] | 15 |
2021-05-08T09:46:48.000Z
|
2021-11-23T11:40:15.000Z
|
tests/dates/test_ToDatetimeTransformer.py
|
binti59/LV
|
20994adfb3f311645bd513ab81dad761d3e3ab98
|
[
"BSD-3-Clause"
] | 6 |
2021-05-05T08:48:00.000Z
|
2021-08-17T12:31:32.000Z
|
import pytest
import test_aide as ta
import tests.test_data as d
import datetime
import pandas
import pandas as pd
import numpy as np
import tubular
from tubular.dates import ToDatetimeTransformer
class TestInit(object):
"""Tests for ToDatetimeTransformer.init()."""
def test_arguments(self):
"""Test that init has expected arguments."""
ta.functions.test_function_arguments(
func=ToDatetimeTransformer.__init__,
expected_arguments=[
"self",
"column",
"new_column_name",
"to_datetime_kwargs",
],
expected_default_values=({},),
)
def test_class_methods(self):
"""Test that ToDatetimeTransformer has fit and transform methods."""
to_dt = ToDatetimeTransformer(column="a", new_column_name="b")
ta.classes.test_object_method(
obj=to_dt, expected_method="transform", msg="transform"
)
def test_inheritance(self):
"""Test that ToDatetimeTransformer inherits from BaseTransformer."""
to_dt = ToDatetimeTransformer(column="a", new_column_name="b")
ta.classes.assert_inheritance(to_dt, tubular.base.BaseTransformer)
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {
0: {
"args": (),
"kwargs": {
"columns": ["a"],
"copy": True,
"verbose": False,
},
}
}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
ToDatetimeTransformer(
column="a", new_column_name="b", verbose=False, copy=True
)
def test_column_type_error(self):
"""Test that an exception is raised if column is not a str."""
with pytest.raises(
TypeError,
match="column should be a single str giving the column to transform to datetime",
):
ToDatetimeTransformer(
column=["a"],
new_column_name="a",
)
def test_new_column_name_type_error(self):
"""Test that an exception is raised if new_column_name is not a str."""
with pytest.raises(TypeError, match="new_column_name must be a str"):
ToDatetimeTransformer(column="b", new_column_name=1)
def test_to_datetime_kwargs_type_error(self):
"""Test that an exception is raised if to_datetime_kwargs is not a dict."""
with pytest.raises(
TypeError,
match=r"""to_datetime_kwargs should be a dict but got type \<class 'int'\>""",
):
ToDatetimeTransformer(column="b", new_column_name="a", to_datetime_kwargs=1)
def test_to_datetime_kwargs_key_type_error(self):
"""Test that an exception is raised if to_datetime_kwargs has keys which are not str."""
with pytest.raises(
TypeError,
match=r"""unexpected type \(\<class 'int'\>\) for to_datetime_kwargs key in position 1, must be str""",
):
ToDatetimeTransformer(
new_column_name="a",
column="b",
to_datetime_kwargs={"a": 1, 2: "b"},
)
def test_inputs_set_to_attribute(self):
"""Test that the values passed in init are set to attributes."""
to_dt = ToDatetimeTransformer(
column="b",
new_column_name="a",
to_datetime_kwargs={"a": 1, "b": 2},
)
ta.classes.test_object_attributes(
obj=to_dt,
expected_attributes={
"column": "b",
"columns": ["b"],
"new_column_name": "a",
"to_datetime_kwargs": {"a": 1, "b": 2},
},
msg="Attributes for ToDatetimeTransformer set in init",
)
class TestTransform(object):
"""Tests for ToDatetimeTransformer.transform()."""
def expected_df_1():
"""Expected output for test_expected_output."""
df = pd.DataFrame(
{
"a": [1950, 1960, 2000, 2001, np.NaN, 2010],
"b": [1, 2, 3, 4, 5, np.NaN],
"a_Y": [
datetime.datetime(1950, 1, 1),
datetime.datetime(1960, 1, 1),
datetime.datetime(2000, 1, 1),
datetime.datetime(2001, 1, 1),
pd.NaT,
datetime.datetime(2010, 1, 1),
],
"b_m": [
datetime.datetime(1900, 1, 1),
datetime.datetime(1900, 2, 1),
datetime.datetime(1900, 3, 1),
datetime.datetime(1900, 4, 1),
datetime.datetime(1900, 5, 1),
pd.NaT,
],
}
)
return df
def test_arguments(self):
"""Test that transform has expected arguments."""
ta.functions.test_function_arguments(
func=ToDatetimeTransformer.transform, expected_arguments=["self", "X"]
)
def test_super_transform_call(self, mocker):
"""Test the call to BaseTransformer.transform is as expected."""
df = d.create_datediff_test_df()
to_dt = ToDatetimeTransformer(column="a", new_column_name="Y")
expected_call_args = {0: {"args": (d.create_datediff_test_df(),), "kwargs": {}}}
with ta.functions.assert_function_call(
mocker,
tubular.base.BaseTransformer,
"transform",
expected_call_args,
return_value=d.create_datediff_test_df(),
):
to_dt.transform(df)
def test_to_datetime_call(self, mocker):
"""Test the call to pandas.to_datetime is as expected."""
df = d.create_to_datetime_test_df()
to_dt = ToDatetimeTransformer(
column="a", new_column_name="a_Y", to_datetime_kwargs={"format": "%Y"}
)
expected_call_args = {
0: {
"args": (d.create_to_datetime_test_df()["a"],),
"kwargs": {"format": "%Y"},
}
}
with ta.functions.assert_function_call(
mocker,
pandas,
"to_datetime",
expected_call_args,
return_value=pd.to_datetime(d.create_to_datetime_test_df()["a"]),
):
to_dt.transform(df)
def test_output_from_to_datetime_assigned_to_column(self, mocker):
"""Test that the output from pd.to_datetime is assigned to column with name new_column_name."""
df = d.create_to_datetime_test_df()
to_dt = ToDatetimeTransformer(
column="a", new_column_name="a_new", to_datetime_kwargs={"format": "%Y"}
)
to_datetime_output = [1, 2, 3, 4, 5, 6]
mocker.patch("pandas.to_datetime", return_value=to_datetime_output)
df_transformed = to_dt.transform(df)
assert (
df_transformed["a_new"].tolist() == to_datetime_output
), "unexpected values assigned to a_new column"
@pytest.mark.parametrize(
"df, expected",
ta.pandas.adjusted_dataframe_params(
d.create_to_datetime_test_df(), expected_df_1()
),
)
def test_expected_output(self, df, expected):
"""Test input data is transformed as expected."""
to_dt_1 = ToDatetimeTransformer(
column="a", new_column_name="a_Y", to_datetime_kwargs={"format": "%Y"}
)
to_dt_2 = ToDatetimeTransformer(
column="b", new_column_name="b_m", to_datetime_kwargs={"format": "%m"}
)
df_transformed = to_dt_1.transform(df)
df_transformed = to_dt_2.transform(df_transformed)
ta.equality.assert_equal_dispatch(
expected=expected,
actual=df_transformed,
msg="ToDatetimeTransformer.transform output",
)
| 31.211538 | 115 | 0.56451 |
615fe6493105766a0f65f2a10c70c6dbb988c4f2
| 3,474 |
py
|
Python
|
lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset.py
|
pedroamfarias-cpqd/magma
|
56ff0559d74ff39056f1102486a667a87e65960d
|
[
"BSD-3-Clause"
] | 1 |
2020-10-20T18:59:38.000Z
|
2020-10-20T18:59:38.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset.py
|
pedroamfarias-cpqd/magma
|
56ff0559d74ff39056f1102486a667a87e65960d
|
[
"BSD-3-Clause"
] | 104 |
2020-09-23T12:28:22.000Z
|
2022-03-21T05:24:42.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_enb_partial_reset.py
|
pedroamfarias-cpqd/magma
|
56ff0559d74ff39056f1102486a667a87e65960d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import time
import ctypes
from builtins import range
import s1ap_types
import s1ap_wrapper
class TestEnbPartialReset(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_enb_partial_reset(self):
""" attach 32 UEs """
ue_ids = []
num_ues = 1
self._s1ap_wrapper.configUEDevice(num_ues)
for _ in range(num_ues):
req = self._s1ap_wrapper.ue_req
print("************************* Calling attach for UE id ", req.ue_id)
self._s1ap_wrapper.s1_util.attach(
req.ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
ue_ids.append(req.ue_id)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
# Trigger eNB Reset
# Add delay to ensure S1APTester sends attach partial before sending
# eNB Reset Request
time.sleep(0.5)
print("************************* Sending eNB Partial Reset Request")
reset_req = s1ap_types.ResetReq()
reset_req.rstType = s1ap_types.resetType.PARTIAL_RESET.value
reset_req.cause = s1ap_types.ResetCause()
reset_req.cause.causeType = s1ap_types.NasNonDelCauseType.TFW_CAUSE_MISC.value
# Set the cause to MISC.hardware-failure
reset_req.cause.causeVal = 3
reset_req.r = s1ap_types.R()
reset_req.r.partialRst = s1ap_types.PartialReset()
reset_req.r.partialRst.numOfConn = num_ues
reset_req.r.partialRst.ueS1apIdPairList = (
(s1ap_types.UeS1apIdPair) * reset_req.r.partialRst.numOfConn
)()
for indx in range(reset_req.r.partialRst.numOfConn):
reset_req.r.partialRst.ueS1apIdPairList[indx].ueId = ue_ids[indx]
print(
"Reset_req.r.partialRst.ueS1apIdPairList[indx].ueId",
reset_req.r.partialRst.ueS1apIdPairList[indx].ueId,
indx,
)
print("ue_ids", ue_ids)
self._s1ap_wrapper.s1_util.issue_cmd(s1ap_types.tfwCmd.RESET_REQ, reset_req)
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(response.msg_type, s1ap_types.tfwCmd.RESET_ACK.value)
# Sleep for 3 seconds to ensure that MME has cleaned up all S1 state
# before proceeding
time.sleep(3)
# Trigger detach request
for ue in ue_ids:
print("************************* Calling detach for UE id ", ue)
# self._s1ap_wrapper.s1_util.detach(
# ue, detach_type, wait_for_s1)
self._s1ap_wrapper.s1_util.detach(
ue, s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value, True
)
if __name__ == "__main__":
unittest.main()
| 38.6 | 86 | 0.647956 |
e7d02c67b7ddbbee5fec0608308e90327ab91074
| 1,956 |
py
|
Python
|
storage/cloud-client/storage_upload_encrypted_file.py
|
yshalabi/python-docs-samples
|
591787c01d94102ba9205f998d95a05b39ccad2f
|
[
"Apache-2.0"
] | 5,938 |
2015-05-18T05:04:37.000Z
|
2022-03-31T20:16:39.000Z
|
storage/cloud-client/storage_upload_encrypted_file.py
|
yshalabi/python-docs-samples
|
591787c01d94102ba9205f998d95a05b39ccad2f
|
[
"Apache-2.0"
] | 4,730 |
2015-05-07T19:00:38.000Z
|
2022-03-31T21:59:41.000Z
|
storage/cloud-client/storage_upload_encrypted_file.py
|
yshalabi/python-docs-samples
|
591787c01d94102ba9205f998d95a05b39ccad2f
|
[
"Apache-2.0"
] | 6,734 |
2015-05-05T17:06:20.000Z
|
2022-03-31T12:02:51.000Z
|
#!/usr/bin/env python
# Copyright 2019 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START storage_upload_encrypted_file]
import base64
# [END storage_upload_encrypted_file]
import sys
# [START storage_upload_encrypted_file]
from google.cloud import storage
def upload_encrypted_blob(
bucket_name,
source_file_name,
destination_blob_name,
base64_encryption_key,
):
"""Uploads a file to a Google Cloud Storage bucket using a custom
encryption key.
The file will be encrypted by Google Cloud Storage and only
retrievable using the provided encryption key.
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
# Encryption key must be an AES256 key represented as a bytestring with
# 32 bytes. Since it's passed in as a base64 encoded string, it needs
# to be decoded.
encryption_key = base64.b64decode(base64_encryption_key)
blob = bucket.blob(
destination_blob_name, encryption_key=encryption_key
)
blob.upload_from_filename(source_file_name)
print(
"File {} uploaded to {}.".format(
source_file_name, destination_blob_name
)
)
# [END storage_upload_encrypted_file]
if __name__ == "__main__":
upload_encrypted_blob(
bucket_name=sys.argv[1],
source_file_name=sys.argv[2],
destination_blob_name=sys.argv[3],
base64_encryption_key=sys.argv[4],
)
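# --- Hedged example (added; not part of the original sample) ---
# One way to produce a base64-encoded AES256 key for the base64_encryption_key
# argument; the bucket and file names below are placeholders.
#
# import os
# encryption_key = os.urandom(32)  # 32 random bytes = AES256 key
# base64_key = base64.b64encode(encryption_key).decode("utf-8")
# upload_encrypted_blob("my-bucket", "local.txt", "remote.txt", base64_key)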
| 28.347826 | 75 | 0.725971 |
055265ee55a31b4e1884c5332e5a5aa333c6735b
| 2,857 |
py
|
Python
|
meraki/models/update_network_static_route_model.py
|
bossypants22/python-sdk-test
|
37701d62dc18c2abb910eb790ab978913adcaf7b
|
[
"MIT"
] | 37 |
2019-04-24T14:01:33.000Z
|
2022-01-28T01:37:21.000Z
|
meraki/models/update_network_static_route_model.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 10 |
2019-07-09T16:35:11.000Z
|
2021-12-07T03:47:53.000Z
|
meraki/models/update_network_static_route_model.py
|
ankita66666666/meraki-python-sdk
|
9894089eb013318243ae48869cc5130eb37f80c0
|
[
"MIT"
] | 17 |
2019-04-30T23:53:21.000Z
|
2022-02-07T22:57:44.000Z
|
# -*- coding: utf-8 -*-
"""
meraki
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdateNetworkStaticRouteModel(object):
"""Implementation of the 'updateNetworkStaticRoute' model.
TODO: type model description here.
Attributes:
name (string): The name of the static route
subnet (string): The subnet of the static route
gateway_ip (string): The gateway IP (next hop) of the static route
enabled (string): The enabled state of the static route
fixed_ip_assignments (string): The DHCP fixed IP assignments on the
static route
reserved_ip_ranges (string): The DHCP reserved IP ranges on the static
route
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"subnet":'subnet',
"gateway_ip":'gatewayIp',
"enabled":'enabled',
"fixed_ip_assignments":'fixedIpAssignments',
"reserved_ip_ranges":'reservedIpRanges'
}
def __init__(self,
name=None,
subnet=None,
gateway_ip=None,
enabled=None,
fixed_ip_assignments=None,
reserved_ip_ranges=None):
"""Constructor for the UpdateNetworkStaticRouteModel class"""
# Initialize members of the class
self.name = name
self.subnet = subnet
self.gateway_ip = gateway_ip
self.enabled = enabled
self.fixed_ip_assignments = fixed_ip_assignments
self.reserved_ip_ranges = reserved_ip_ranges
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
subnet = dictionary.get('subnet')
gateway_ip = dictionary.get('gatewayIp')
enabled = dictionary.get('enabled')
fixed_ip_assignments = dictionary.get('fixedIpAssignments')
reserved_ip_ranges = dictionary.get('reservedIpRanges')
# Return an object of this model
return cls(name,
subnet,
gateway_ip,
enabled,
fixed_ip_assignments,
reserved_ip_ranges)
| 31.744444 | 95 | 0.587679 |
139045be25ec9a5e2f1b6dc5f1677f9cabfd9a42
| 1,095 |
py
|
Python
|
AP1/Sorteio.py
|
GiovannaPazello/Projetos-em-Python
|
3cf7edbdf2a2350605a775389f7fe2cc7fe8032e
|
[
"MIT"
] | null | null | null |
AP1/Sorteio.py
|
GiovannaPazello/Projetos-em-Python
|
3cf7edbdf2a2350605a775389f7fe2cc7fe8032e
|
[
"MIT"
] | null | null | null |
AP1/Sorteio.py
|
GiovannaPazello/Projetos-em-Python
|
3cf7edbdf2a2350605a775389f7fe2cc7fe8032e
|
[
"MIT"
] | null | null | null |
'''Write a program in which the user guesses a randomly drawn number: the program must ask for
the upper limit of the draw (e.g. if the user enters 10, the program draws a number between 0
and 10, including 10). The program must draw the number and ask the user to guess it. If the
guess is wrong, the program must say whether the drawn number is greater or smaller than the one
the user entered. The program must keep asking for guesses until the user gets it right. At the
end, the program must report how many guesses were needed.'''
import random
nLimite = int(input('Enter the upper limit for the draw: '))
x = random.randint(0, nLimite)
cont = 0
# Keep asking until the user guesses the drawn number.
while True:
    n = int(input('Enter the number you think was drawn: '))
    cont += 1
    if n == x:
        print('Congratulations, you guessed the number.')
        break
    elif n > x:
        print('Your number is greater than the drawn one.')
    else:
        print('Your number is smaller than the drawn one.')
print('The number of attempts was {}'.format(cont))
| 45.625 | 117 | 0.722374 |
a3825d30b5173a4546ecbb4e093f6de0cf22a69c
| 12,668 |
py
|
Python
|
Arcpy Methods/first_arcpy_methodology.py
|
KSnyderCode/MunicipalMergerProject
|
2c5cc4879562fcb87b0e379731e83906802159f7
|
[
"MIT"
] | 3 |
2015-10-20T18:15:21.000Z
|
2017-06-20T15:51:04.000Z
|
Arcpy Methods/first_arcpy_methodology.py
|
KSnyderCode/MunicipalMergerProject
|
2c5cc4879562fcb87b0e379731e83906802159f7
|
[
"MIT"
] | 1 |
2017-04-10T20:50:25.000Z
|
2017-04-10T20:50:25.000Z
|
Arcpy Methods/first_arcpy_methodology.py
|
cfh294/GISTools
|
2c5cc4879562fcb87b0e379731e83906802159f7
|
[
"MIT"
] | 1 |
2021-04-11T13:19:14.000Z
|
2021-04-11T13:19:14.000Z
|
import arcpy
from arcpy import env
import sets
arcpy.env.overwriteOutput = True
muniParam = arcpy.GetParameterAsText(0)
muniList = muniParam.split(';')
popMin = arcpy.GetParameterAsText(1)
outputWorkspace = arcpy.GetParameterAsText(2)
arcpy.env.workspace = outputWorkspace
# Dictionary used in the raw() method
escape_dict={'\a':r'\a',
'\b':r'\b',
'\c':r'\c',
'\f':r'\f',
'\n':r'\n',
'\r':r'\r',
'\t':r'\t',
'\v':r'\v',
'\'':r'\'',
'\"':r'\"'}
# Credit to Brett Cannon on code.ActiveState.com, slight modifications by myself.
def raw(text):
# Returns a raw string representation of text and eliminates all special backslash characters
new_string=''
for char in text:
try:
new_string += escape_dict[char]
except KeyError:
new_string += char
return new_string
# Function that returns an array of border lengths for the given municipality. Parameter
# is a municipal code String.
def getAllBorders(munCode):
cursor = arcpy.SearchCursor(polyAnalysis)
lengths = []
for r in cursor:
if(r.getValue('src_MUN_CODE') == munCode):
border = r.getValue('LENGTH')
lengths.append(border)
del cursor
del r
return lengths
# Returns the merger partner, based on the source municipality's municipal code and the
# length of the border that the two municipality's share. Returns the merger partner's
# municipal code String.
def getFellowMerger(length, srcMunCode):
cursor = arcpy.SearchCursor(polyAnalysis)
mergerMunCode = ''
for r in cursor:
# Towns with longest borders are both merger candidates
if(r.getValue('src_MUN_CODE') == srcMunCode and r.getValue('LENGTH') == length and r.getValue('src_isCand') == 1 and r.getValue('nbr_isCand') == 1):
mergerMunCode = r.getValue('nbr_MUN_CODE')
# Doughnut hole: small municipality merger candidate entirely within a non candidate. Merge them.
elif(r.getValue('src_MUN_CODE') == srcMunCode and r.getValue('src_isCand') == 1 and r.getValue('nbr_isCand') == 0 and r.getValue('LENGTH') == length):
mergerMunCode = r.getValue('nbr_MUN_CODE')
return mergerMunCode
del cursor, r
# Renames the name of field in a feature class. Needed due to the 'Alter Field' tool not being available prior to 10.2. Credit to Josh Werts for
# this elegant solution at joshwerts.com
def rename_fields(table, out_table, new_name_by_old_name):
""" Renames specified fields in input feature class/table
:table: input table (fc, table, layer, etc)
:out_table: output table (fc, table, layer, etc)
:new_name_by_old_name: {'old_field_name':'new_field_name',...}
-> out_table
"""
existing_field_names = [field.name for field in arcpy.ListFields(table)]
field_mappings = arcpy.FieldMappings()
field_mappings.addTable(table)
for old_field_name, new_field_name in new_name_by_old_name.iteritems():
if(old_field_name not in existing_field_names):
message = "Field: {0} not in {1}".format(old_field_name, table)
else:
mapping_index = field_mappings.findFieldMapIndex(old_field_name)
field_map = field_mappings.fieldMappings[mapping_index]
output_field = field_map.outputField
output_field.name = new_field_name
output_field.aliasName = new_field_name
field_map.outputField = output_field
field_mappings.replaceFieldMap(mapping_index, field_map)
# use merge with single input just to use new field_mappings
arcpy.Merge_management(table, out_table, field_mappings)
return out_table
for muni in muniList:
# Removing the file path from the name
splitPath = raw(muni).split('\\')
muniName = splitPath[len(splitPath) - 1]
arcpy.AddMessage(muniName)
cursor = arcpy.SearchCursor(muni)
countyName = ''
for row in cursor:
countyName = row.getValue('COUNTY')
break
del row, cursor
iteration = 1
mergeCount = 1
iterMergeCount = 1
lastJoinMerge = 0
popReqMet = False
while popReqMet == False:
thisIterationsMerges = 0
# Getting the source municipality feature class. Checking to see if the isCand field exists.
if(mergeCount > 1):
arcpy.Rename_management('merge%s'%(mergeCount - 1), 'tempmerge%s'%(mergeCount - 1))
newNameByOldName = {'%s_isCand'%(muniName) : 'isCand', '%s_MUN_CODE'%(muniName) : r'MUN_CODE', '%s_MUN'%(muniName) : 'MUN', '%s_POP2010'%(muniName) : 'POP2010', 'merge%s_MUN_CODE'%(lastJoinMerge) : 'MUN_CODE', 'merge%s_MUN'%(lastJoinMerge) : 'MUN', 'merge%s_POP2010'%(lastJoinMerge) : 'POP2010', 'merge%s_isCand'%(lastJoinMerge) : 'isCand'}
if(iteration <= 2):
arcpy.DeleteField_management('tempmerge%s'%(iterMergeCount - 1), ['%s_isCand'%(muniName), 'mergeKey_OBJECTID', 'mergeKey_MUN', 'mergeKey_BESTMERGE'])
else:
arcpy.DeleteField_management('tempmerge%s'%(mergeCount - 1), ['merge%s_isCand'%(lastJoinMerge), 'mergeKey_OBJECTID', 'mergeKey_MUN', 'mergeKey_BESTMERGE'])
rename_fields('tempmerge%s'%(mergeCount - 1), 'merge%s'%(mergeCount - 1), newNameByOldName)
NJmunis = 'merge%s'%(mergeCount - 1)
arcpy.Delete_management('tempmerge%s'%(mergeCount - 1))
else:
NJmunis = muni
checkCursor = arcpy.SearchCursor(NJmunis)
indPops = set()
for checkRow in checkCursor:
muniPop = checkRow.getValue('POP2010')
indPops.add(muniPop)
del checkRow, checkCursor
popCounter = 0
for pop in indPops:
if(pop < int(popMin)):
popCounter += 1
if(popCounter == 0):
popReqMet = True
if(iteration == 1):
arcpy.AddMessage('%s Merge(s) Needed'%(str(popCounter)))
else:
arcpy.AddMessage('%s More Merge(s) Needed'%(str(popCounter)))
NJmunis_fields = arcpy.ListFields(NJmunis)
fieldName = [f.name for f in NJmunis_fields]
if('isCand' not in fieldName):
arcpy.AddField_management(NJmunis, 'isCand', 'LONG')
code = """def isCand(pop):
if(pop < %s):
return 1
else:
return 0"""%(popMin)
exp = 'isCand(!POP2010!)'
arcpy.CalculateField_management(NJmunis, 'isCand', exp, 'PYTHON_9.3', code)
# Executing the Polygon Neighbors tool and writing a new table to disk.
in_fields = [r'MUN_CODE', r'MUN', r'isCand']
polyAnalysis = arcpy.PolygonNeighbors_analysis(NJmunis, 'poly_analysis', in_fields)
# Adding all needed fields to the Polygon Neighbors table.
if('CANDBORDERS' not in fieldName):
arcpy.AddField_management(polyAnalysis, 'CANDBORDERS', 'LONG')
if('BESTMERGE' not in fieldName):
arcpy.AddField_management(polyAnalysis, 'BESTMERGE', 'TEXT')
# Updating the new fields in the Polygon Neighbors table.
cursor = arcpy.UpdateCursor(polyAnalysis)
row = cursor.next()
arcpy.AddMessage('Finding merge partners...')
while row:
srcMunCode = row.getValue('src_MUN_CODE')
borders = getAllBorders(srcMunCode)
longestBorder = 0
if(len(borders) > 1):
longestBorder = max(borders)
else:
longestBorder = borders[0]
row.setValue('BESTMERGE', getFellowMerger(longestBorder, srcMunCode))
cursor.updateRow(row)
row = cursor.next()
del cursor
del row
# Creating a table that will contain merge candidates and their best partners.
arcpy.AddMessage('Making key...')
if(mergeCount > 1):
arcpy.Delete_management('mergeKey')
path = outputWorkspace
name = 'mergeKey'
mergeKey = arcpy.CreateTable_management(path, name)
arcpy.AddField_management(mergeKey, 'MUN', 'TEXT')
arcpy.AddField_management(mergeKey, 'BESTMERGE', 'TEXT')
# Getting values for the key, eliminates extraneous information from the Polygon
# Neighbors table. Stores the pairs as tuples within a set. This prevents any duplicate
# scenarios.
mergerList = set()
cursor = arcpy.SearchCursor(polyAnalysis)
arcpy.AddMessage('Tuple-ing...')
for row in cursor:
srcMun = row.getValue('src_MUN_CODE')
mergeMun = row.getValue('BESTMERGE')
tup = (srcMun, mergeMun)
mergerList.add(tup)
del cursor
del row
totalMergers = len(mergerList)
# Populates the key table using the set of tuples constructed in the previous lines.
arcpy.AddMessage('Updating Key...')
cursor = arcpy.InsertCursor(mergeKey)
for row in range(len(mergerList)):
newRow = cursor.newRow()
pair = mergerList.pop()
mun = pair[0]
bestMerge = pair[1]
newRow.setValue('MUN', mun)
newRow.setValue('BESTMERGE', bestMerge)
cursor.insertRow(newRow)
del cursor
# Joining the municipalities feature class with the merger key, and exporting it as its
# own feature class.
if(mergeCount == 1):
baseMunis = muni
else:
baseMunis = 'merge%s'%(mergeCount - 1)
layer = arcpy.MakeFeatureLayer_management(baseMunis, 'lyr')
view = arcpy.MakeTableView_management(mergeKey, 'tbl')
arcpy.AddJoin_management(layer, 'MUN_CODE', view, 'MUN')
arcpy.Select_analysis(layer, 'join')
arcpy.AddMessage('Merging Munis...')
cursor = arcpy.SearchCursor('join')
for row in cursor:
if(iteration == 1):
if(mergeCount > 1):
layer = arcpy.MakeFeatureLayer_management('merge%s'%(mergeCount - 1), 'lyr')
else:
layer = arcpy.MakeFeatureLayer_management('join', 'lyr')
else:
if(mergeCount > iterMergeCount):
layer = arcpy.MakeFeatureLayer_management('merge%s'%(mergeCount - 1), 'lyr')
else:
layer = arcpy.MakeFeatureLayer_management('join', 'lyr')
            # Get this row's best merge municipality. Select this row, all other rows that share
# the same best merge, and the best merge municipality using a SQL statement.
if(row.getValue('mergeKey_BESTMERGE') != '' and row.getValue('mergeKey_BESTMERGE') != None and row.getValue('mergeKey_BESTMERGE') != ' '):
munCode = row.getValue('mergeKey_BESTMERGE')
sql = '"mergeKey_BESTMERGE" =' + " '" + munCode + "'"
arcpy.SelectLayerByAttribute_management(layer, 'NEW_SELECTION', sql)
sql = '"mergeKey_MUN" =' + " '" + munCode + "'"
arcpy.SelectLayerByAttribute_management(layer, 'ADD_TO_SELECTION', sql)
#Getting the population total for the new municipality.
arcpy.Select_analysis(layer, 'selected')
if(int(arcpy.GetCount_management('selected').getOutput(0)) != 0):
popCursor = arcpy.SearchCursor('selected')
totalPop = 0
if(iteration < 2):
field = '%s_POP2010'%(muniName)
else:
field = 'merge%s_POP2010'%(iterMergeCount - 1)
for popRow in popCursor:
totalPop += popRow.getValue(field)
del popRow, popCursor
# Dissolve the selection to simulate a merging of the selected municipalities.
arcpy.Dissolve_management(layer, 'dissolve')
arcpy.Delete_management('selected')
# With the pre-merger municipalities still selected, run the Delete Features tool
# in order to delete them.
arcpy.DeleteFeatures_management(layer)
# Merge the "chopped up" base layer with the new municipality to fill the void that
# the previous ones left when they were deleted. Add the new population figure to the POP2010 field.
arcpy.Merge_management([layer, 'dissolve'], 'merge%s'%(mergeCount))
nullCursor = arcpy.UpdateCursor('merge%s'%(mergeCount))
if(iteration == 1):
popVal = '%s_POP2010'%(muniName)
mcVal = '%s_MUN_CODE'%(muniName)
else:
popVal = 'merge%s_POP2010'%(iterMergeCount - 1)
mcVal = 'merge%s_MUN_CODE'%(iterMergeCount - 1)
for nullRow in nullCursor:
if(nullRow.isNull(popVal)):
nullRow.setValue(popVal, totalPop)
nullRow.setValue(mcVal, '%s'%(mergeCount))
nullCursor.updateRow(nullRow)
del nullRow, nullCursor
#Clean up the temp files.
if(mergeCount > 1):
arcpy.Delete_management('merge%s'%(mergeCount - 1))
arcpy.Delete_management('dissolve')
mergeCount += 1
thisIterationsMerges += 1
else:
arcpy.Delete_management('selected')
arcpy.AddMessage('%s Merges Complete'%(str(mergeCount - 1)))
iteration += 1
lastJoinMerge = iterMergeCount - 1
iterMergeCount += thisIterationsMerges
del cursor, row
# if(len(muniName) > 8):
# muniName = muniName[:8]
newFileName = '%s_merged'%(muniName) + '_' + str(popMin)
if(mergeCount >= 2):
arcpy.Rename_management('merge%s'%(mergeCount - 1), newFileName)
# Populating the County field
fields = [f.name for f in arcpy.ListFields(newFileName)]
fieldName = ''
for field in fields:
if('COUNTY' in field):
fieldName = field
break
cursor = arcpy.UpdateCursor(newFileName)
for row in cursor:
row.setValue(fieldName, countyName)
cursor.updateRow(row)
del row, cursor
# Cleaning up workspace
arcpy.Delete_management('join')
arcpy.Delete_management('mergeKey')
arcpy.Delete_management('poly_analysis')
| 35.188889 | 343 | 0.693874 |
e9f64b34d6210d0495fe806b05ceb2f47e93d560
| 16,201 |
py
|
Python
|
lib/streamlit/elements/slider.py
|
akrolsmir/streaml
|
968e7f5f720080c22e76912aff0861cf917bfeb4
|
[
"Apache-2.0"
] | null | null | null |
lib/streamlit/elements/slider.py
|
akrolsmir/streaml
|
968e7f5f720080c22e76912aff0861cf917bfeb4
|
[
"Apache-2.0"
] | 221 |
2020-10-16T08:15:02.000Z
|
2022-03-29T10:25:00.000Z
|
lib/streamlit/elements/slider.py
|
akrolsmir/streamlit
|
968e7f5f720080c22e76912aff0861cf917bfeb4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import date, time, datetime, timedelta, timezone
from typing import cast
import streamlit
from streamlit.errors import StreamlitAPIException
from streamlit.js_number import JSNumber
from streamlit.js_number import JSNumberBoundsException
from streamlit.proto.Slider_pb2 import Slider as SliderProto
from .utils import register_widget
class SliderMixin:
def slider(
self,
label,
min_value=None,
max_value=None,
value=None,
step=None,
format=None,
key=None,
):
"""Display a slider widget.
This supports int, float, date, time, and datetime types.
This also allows you to render a range slider by passing a two-element
tuple or list as the `value`.
The difference between `st.slider` and `st.select_slider` is that
`slider` only accepts numerical or date/time data and takes a range as
input, while `select_slider` accepts any datatype and takes an iterable
set of options.
Parameters
----------
label : str or None
A short label explaining to the user what this slider is for.
min_value : a supported type or None
The minimum permitted value.
Defaults to 0 if the value is an int, 0.0 if a float,
value - timedelta(days=14) if a date/datetime, time.min if a time
max_value : a supported type or None
The maximum permitted value.
Defaults to 100 if the value is an int, 1.0 if a float,
value + timedelta(days=14) if a date/datetime, time.max if a time
value : a supported type or a tuple/list of supported types or None
The value of the slider when it first renders. If a tuple/list
of two values is passed here, then a range slider with those lower
and upper bounds is rendered. For example, if set to `(1, 10)` the
slider will have a selectable range between 1 and 10.
Defaults to min_value.
step : int/float/timedelta or None
The stepping interval.
Defaults to 1 if the value is an int, 0.01 if a float,
timedelta(days=1) if a date/datetime, timedelta(minutes=15) if a time
(or if max_value - min_value < 1 day)
format : str or None
A printf-style format string controlling how the interface should
display numbers. This does not impact the return value.
Formatter for int/float supports: %d %e %f %g %i
Formatter for date/time/datetime uses Moment.js notation:
https://momentjs.com/docs/#/displaying/format/
key : str
An optional string to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. Multiple widgets of the same type may
not share the same key.
Returns
-------
int/float/date/time/datetime or tuple of int/float/date/time/datetime
The current value of the slider widget. The return type will match
the data type of the value parameter.
Examples
--------
>>> age = st.slider('How old are you?', 0, 130, 25)
>>> st.write("I'm ", age, 'years old')
And here's an example of a range slider:
>>> values = st.slider(
... 'Select a range of values',
... 0.0, 100.0, (25.0, 75.0))
>>> st.write('Values:', values)
This is a range time slider:
>>> from datetime import time
>>> appointment = st.slider(
... "Schedule your appointment:",
... value=(time(11, 30), time(12, 45)))
>>> st.write("You're scheduled for:", appointment)
Finally, a datetime slider:
>>> from datetime import datetime
>>> start_time = st.slider(
... "When do you start?",
... value=datetime(2020, 1, 1, 9, 30),
... format="MM/DD/YY - hh:mm")
>>> st.write("Start time:", start_time)
"""
# Set value default.
if value is None:
value = min_value if min_value is not None else 0
SUPPORTED_TYPES = {
int: SliderProto.INT,
float: SliderProto.FLOAT,
datetime: SliderProto.DATETIME,
date: SliderProto.DATE,
time: SliderProto.TIME,
}
TIMELIKE_TYPES = (SliderProto.DATETIME, SliderProto.TIME, SliderProto.DATE)
# Ensure that the value is either a single value or a range of values.
single_value = isinstance(value, tuple(SUPPORTED_TYPES.keys()))
range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
if not single_value and not range_value:
raise StreamlitAPIException(
"Slider value should either be an int/float/datetime or a list/tuple of "
"0 to 2 ints/floats/datetimes"
)
# Simplify future logic by always making value a list
if single_value:
value = [value]
def all_same_type(items):
return len(set(map(type, items))) < 2
if not all_same_type(value):
raise StreamlitAPIException(
"Slider tuple/list components must be of the same type.\n"
f"But were: {list(map(type, value))}"
)
if len(value) == 0:
data_type = SliderProto.INT
else:
data_type = SUPPORTED_TYPES[type(value[0])]
datetime_min = time.min
datetime_max = time.max
if data_type == SliderProto.TIME:
datetime_min = time.min.replace(tzinfo=value[0].tzinfo)
datetime_max = time.max.replace(tzinfo=value[0].tzinfo)
if data_type in (SliderProto.DATETIME, SliderProto.DATE):
datetime_min = value[0] - timedelta(days=14)
datetime_max = value[0] + timedelta(days=14)
DEFAULTS = {
SliderProto.INT: {
"min_value": 0,
"max_value": 100,
"step": 1,
"format": "%d",
},
SliderProto.FLOAT: {
"min_value": 0.0,
"max_value": 1.0,
"step": 0.01,
"format": "%0.2f",
},
SliderProto.DATETIME: {
"min_value": datetime_min,
"max_value": datetime_max,
"step": timedelta(days=1),
"format": "YYYY-MM-DD",
},
SliderProto.DATE: {
"min_value": datetime_min,
"max_value": datetime_max,
"step": timedelta(days=1),
"format": "YYYY-MM-DD",
},
SliderProto.TIME: {
"min_value": datetime_min,
"max_value": datetime_max,
"step": timedelta(minutes=15),
"format": "HH:mm",
},
}
if min_value is None:
min_value = DEFAULTS[data_type]["min_value"]
if max_value is None:
max_value = DEFAULTS[data_type]["max_value"]
if step is None:
step = DEFAULTS[data_type]["step"]
if (
data_type
in (
SliderProto.DATETIME,
SliderProto.DATE,
)
and max_value - min_value < timedelta(days=1)
):
step = timedelta(minutes=15)
if format is None:
format = DEFAULTS[data_type]["format"]
if step == 0:
raise StreamlitAPIException(
"Slider components cannot be passed a `step` of 0."
)
# Ensure that all arguments are of the same type.
args = [min_value, max_value, step]
int_args = all(map(lambda a: isinstance(a, int), args))
float_args = all(map(lambda a: isinstance(a, float), args))
# When min and max_value are the same timelike, step should be a timedelta
timelike_args = (
data_type in TIMELIKE_TYPES
and isinstance(step, timedelta)
and type(min_value) == type(max_value)
)
if not int_args and not float_args and not timelike_args:
raise StreamlitAPIException(
"Slider value arguments must be of matching types."
"\n`min_value` has %(min_type)s type."
"\n`max_value` has %(max_type)s type."
"\n`step` has %(step)s type."
% {
"min_type": type(min_value).__name__,
"max_type": type(max_value).__name__,
"step": type(step).__name__,
}
)
# Ensure that the value matches arguments' types.
all_ints = data_type == SliderProto.INT and int_args
all_floats = data_type == SliderProto.FLOAT and float_args
all_timelikes = data_type in TIMELIKE_TYPES and timelike_args
if not all_ints and not all_floats and not all_timelikes:
raise StreamlitAPIException(
"Both value and arguments must be of the same type."
"\n`value` has %(value_type)s type."
"\n`min_value` has %(min_type)s type."
"\n`max_value` has %(max_type)s type."
% {
"value_type": type(value).__name__,
"min_type": type(min_value).__name__,
"max_type": type(max_value).__name__,
}
)
# Ensure that min <= value(s) <= max, adjusting the bounds as necessary.
min_value = min(min_value, max_value)
max_value = max(min_value, max_value)
if len(value) == 1:
min_value = min(value[0], min_value)
max_value = max(value[0], max_value)
elif len(value) == 2:
start, end = value
if start > end:
# Swap start and end, since they seem reversed
start, end = end, start
value = start, end
min_value = min(start, min_value)
max_value = max(end, max_value)
else:
# Empty list, so let's just use the outer bounds
value = [min_value, max_value]
# Bounds checks. JSNumber produces human-readable exceptions that
# we simply re-package as StreamlitAPIExceptions.
# (We check `min_value` and `max_value` here; `value` and `step` are
# already known to be in the [min_value, max_value] range.)
try:
if all_ints:
JSNumber.validate_int_bounds(min_value, "`min_value`")
JSNumber.validate_int_bounds(max_value, "`max_value`")
elif all_floats:
JSNumber.validate_float_bounds(min_value, "`min_value`")
JSNumber.validate_float_bounds(max_value, "`max_value`")
elif all_timelikes:
# No validation yet. TODO: check between 0001-01-01 to 9999-12-31
pass
except JSNumberBoundsException as e:
raise StreamlitAPIException(str(e))
# Convert dates or times into datetimes
if data_type == SliderProto.TIME:
def _time_to_datetime(time):
# Note, here we pick an arbitrary date well after Unix epoch.
# This prevents pre-epoch timezone issues (https://bugs.python.org/issue36759)
                # We're dropping the date from the datetime later, anyway.
return datetime.combine(date(2000, 1, 1), time)
value = list(map(_time_to_datetime, value))
min_value = _time_to_datetime(min_value)
max_value = _time_to_datetime(max_value)
if data_type == SliderProto.DATE:
def _date_to_datetime(date):
return datetime.combine(date, time())
value = list(map(_date_to_datetime, value))
min_value = _date_to_datetime(min_value)
max_value = _date_to_datetime(max_value)
# Now, convert to microseconds (so we can serialize datetime to a long)
if data_type in TIMELIKE_TYPES:
SECONDS_TO_MICROS = 1000 * 1000
DAYS_TO_MICROS = 24 * 60 * 60 * SECONDS_TO_MICROS
def _delta_to_micros(delta):
return (
delta.microseconds
+ delta.seconds * SECONDS_TO_MICROS
+ delta.days * DAYS_TO_MICROS
)
UTC_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
def _datetime_to_micros(dt):
# If dt is naive, Python converts from local time
utc_dt = dt.astimezone(timezone.utc)
return _delta_to_micros(utc_dt - UTC_EPOCH)
# Restore times/datetimes to original timezone (dates are always naive)
orig_tz = (
value[0].tzinfo
if data_type in (SliderProto.TIME, SliderProto.DATETIME)
else None
)
def _micros_to_datetime(micros):
utc_dt = UTC_EPOCH + timedelta(microseconds=micros)
# Convert from utc back to original time (local time if naive)
return utc_dt.astimezone(orig_tz).replace(tzinfo=orig_tz)
value = list(map(_datetime_to_micros, value))
min_value = _datetime_to_micros(min_value)
max_value = _datetime_to_micros(max_value)
step = _delta_to_micros(step)
# It would be great if we could guess the number of decimal places from
# the `step` argument, but this would only be meaningful if step were a
# decimal. As a possible improvement we could make this function accept
# decimals and/or use some heuristics for floats.
slider_proto = SliderProto()
slider_proto.label = label
slider_proto.format = format
slider_proto.default[:] = value
slider_proto.min = min_value
slider_proto.max = max_value
slider_proto.step = step
slider_proto.data_type = data_type
slider_proto.options[:] = []
ui_value = register_widget("slider", slider_proto, user_key=key)
if ui_value:
current_value = getattr(ui_value, "data")
else:
            # Widget has not been used; fall back to the original value.
current_value = value
# The widget always returns a float array, so fix the return type if necessary
if data_type == SliderProto.INT:
current_value = list(map(int, current_value))
if data_type == SliderProto.DATETIME:
current_value = [_micros_to_datetime(int(v)) for v in current_value]
if data_type == SliderProto.DATE:
current_value = [_micros_to_datetime(int(v)).date() for v in current_value]
if data_type == SliderProto.TIME:
current_value = [
_micros_to_datetime(int(v)).time().replace(tzinfo=orig_tz)
for v in current_value
]
# If the original value was a list/tuple, so will be the output (and vice versa)
return_value = current_value[0] if single_value else tuple(current_value)
return self.dg._enqueue("slider", slider_proto, return_value)
@property
def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
"""Get our DeltaGenerator."""
return cast("streamlit.delta_generator.DeltaGenerator", self)
| 40.300995 | 94 | 0.580088 |
0050b1e349a313169627f4d688b2f73fee927aef
| 2,122 |
py
|
Python
|
neutron_lbaas/tests/unit/common/cert_manager/test_cert_manager.py
|
kayrus/neutron-lbaas
|
d582fc52c725584e83b01e33f617f11d49a165a8
|
[
"Apache-2.0"
] | 1 |
2017-11-13T13:24:12.000Z
|
2017-11-13T13:24:12.000Z
|
neutron_lbaas/tests/unit/common/cert_manager/test_cert_manager.py
|
kayrus/neutron-lbaas
|
d582fc52c725584e83b01e33f617f11d49a165a8
|
[
"Apache-2.0"
] | 2 |
2018-10-30T11:37:42.000Z
|
2020-09-01T12:08:36.000Z
|
neutron_lbaas/tests/unit/common/cert_manager/test_cert_manager.py
|
kayrus/neutron-lbaas
|
d582fc52c725584e83b01e33f617f11d49a165a8
|
[
"Apache-2.0"
] | 5 |
2018-09-21T07:56:14.000Z
|
2020-10-13T09:52:15.000Z
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron_lbaas.common import cert_manager
from neutron_lbaas.common.cert_manager import barbican_cert_manager as bcm
from neutron_lbaas.common.cert_manager import cert_manager as cmi
from neutron_lbaas.common.cert_manager import local_cert_manager as lcm
from neutron_lbaas.tests import base
class TestCertManager(base.BaseTestCase):
def setUp(self):
cert_manager._CERT_MANAGER_PLUGIN = None
super(TestCertManager, self).setUp()
def test_get_service_url(self):
# Format: <servicename>://<region>/<resource>/<object_id>
cfg.CONF.set_override('service_name',
'lbaas',
'service_auth')
cfg.CONF.set_override('region',
'RegionOne',
'service_auth')
self.assertEqual(
'lbaas://RegionOne/loadbalancer/LB-ID',
cmi.CertManager.get_service_url('LB-ID'))
def test_barbican_cert_manager(self):
cfg.CONF.set_override(
'cert_manager_type',
'barbican',
group='certificates')
self.assertEqual(cert_manager.get_backend().CertManager,
bcm.CertManager)
def test_local_cert_manager(self):
cfg.CONF.set_override(
'cert_manager_type',
'local',
group='certificates')
self.assertEqual(cert_manager.get_backend().CertManager,
lcm.CertManager)
| 37.22807 | 75 | 0.662582 |
cdd8f1cd6370177b70042d02f6a271376003e5cf
| 476 |
py
|
Python
|
data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_cure_disease_c.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20 |
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_cure_disease_c.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_cure_disease_c.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20 |
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/medicine/crafted/shared_medpack_cure_disease_c.iff"
result.attribute_template_id = 7
result.stfName("medicine_name","medic_cure_disease_c")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 28 | 87 | 0.743697 |
931dc61adcf2f07b5c05db01f88a0774d368b115
| 2,741 |
py
|
Python
|
utilities.py
|
janosg/derivatives
|
ee4640baa273093a04ef6bd7a482ba485b753bd2
|
[
"MIT"
] | null | null | null |
utilities.py
|
janosg/derivatives
|
ee4640baa273093a04ef6bd7a482ba485b753bd2
|
[
"MIT"
] | null | null | null |
utilities.py
|
janosg/derivatives
|
ee4640baa273093a04ef6bd7a482ba485b753bd2
|
[
"MIT"
] | null | null | null |
import numpy as np
def chol_params_to_lower_triangular_matrix(params):
dim = number_of_triangular_elements_to_dimension(len(params))
mat = np.zeros((dim, dim))
mat[np.tril_indices(dim)] = params
return mat
def cov_params_to_matrix(cov_params):
"""Build covariance matrix from 1d array with its lower triangular elements.
Args:
cov_params (np.array): 1d array with the lower triangular elements of a
covariance matrix (in C-order)
Returns:
cov (np.array): a covariance matrix
"""
lower = chol_params_to_lower_triangular_matrix(cov_params)
cov = lower + np.tril(lower, k=-1).T
return cov
def cov_matrix_to_params(cov):
return cov[np.tril_indices(len(cov))]
def sdcorr_params_to_sds_and_corr(sdcorr_params):
dim = number_of_triangular_elements_to_dimension(len(sdcorr_params))
sds = np.array(sdcorr_params[:dim])
corr = np.eye(dim)
corr[np.tril_indices(dim, k=-1)] = sdcorr_params[dim:]
corr += np.tril(corr, k=-1).T
return sds, corr
def sds_and_corr_to_cov(sds, corr):
diag = np.diag(sds)
return diag @ corr @ diag
def cov_to_sds_and_corr(cov):
sds = np.sqrt(np.diagonal(cov))
diag = np.diag(1 / sds)
corr = diag @ cov @ diag
return sds, corr
def sdcorr_params_to_matrix(sdcorr_params):
"""Build covariance matrix out of variances and correlations.
Args:
sdcorr_params (np.array): 1d array with parameters. The dimensions of the
covariance matrix are inferred automatically. The first dim parameters
are assumed to be the variances. The remainder are the lower triangular
elements (excluding the diagonal) of a correlation matrix.
Returns:
cov (np.array): a covariance matrix
"""
return sds_and_corr_to_cov(*sdcorr_params_to_sds_and_corr(sdcorr_params))
def cov_matrix_to_sdcorr_params(cov):
dim = len(cov)
sds, corr = cov_to_sds_and_corr(cov)
correlations = corr[np.tril_indices(dim, k=-1)]
return np.hstack([sds, correlations])
def number_of_triangular_elements_to_dimension(num):
"""Calculate the dimension of a square matrix from number of triangular elements.
Args:
num (int): The number of upper or lower triangular elements in the matrix.
Examples:
>>> number_of_triangular_elements_to_dimension(6)
3
>>> number_of_triangular_elements_to_dimension(10)
4
"""
return int(np.sqrt(8 * num + 1) / 2 - 0.5)
def dimension_to_number_of_triangular_elements(dim):
"""Calculate number of triangular elements from the dimension of a square matrix.
Args:
dim (int): Dimension of a square matrix.
"""
return int(dim * (dim + 1) / 2)
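# A minimal, self-contained sketch of how these helpers compose; this block is
# not part of the original module and the numbers below are arbitrary.
if __name__ == "__main__":
    params = np.array([1.0, 0.5, 2.0])
    cov = cov_params_to_matrix(params)  # -> [[1.0, 0.5], [0.5, 2.0]]
    assert np.allclose(cov_matrix_to_params(cov), params)
    sds, corr = cov_to_sds_and_corr(cov)
    assert np.allclose(sds_and_corr_to_cov(sds, corr), cov)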
| 30.797753 | 85 | 0.692813 |
4389c9c54fe252e12cb1d0dce083e334ab54fc75
| 12,397 |
py
|
Python
|
twitter_ads_v2/resources.py
|
smaeda-ks/twitter-python-ads-sdk-v2
|
2e9578c6f2e3ff431405677cf00d98b5f617d88c
|
[
"MIT"
] | null | null | null |
twitter_ads_v2/resources.py
|
smaeda-ks/twitter-python-ads-sdk-v2
|
2e9578c6f2e3ff431405677cf00d98b5f617d88c
|
[
"MIT"
] | null | null | null |
twitter_ads_v2/resources.py
|
smaeda-ks/twitter-python-ads-sdk-v2
|
2e9578c6f2e3ff431405677cf00d98b5f617d88c
|
[
"MIT"
] | null | null | null |
"""List of the supported API endpoints' resource path."""
RESOURCE_TABLE = {
'accounts': {
'endpoint_types': ['all', 'load'],
'RESOURCE': 'accounts/{id}',
'RESOURCE_COLLECTION': 'accounts'
},
'authenticated_user_access': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'accounts/{account_id}/authenticated_user_access'
},
'bidding_rules': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'bidding_rules'
},
'campaigns': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete', 'batch'],
'BATCH': 'batch/accounts/{account_id}/campaigns',
'RESOURCE': 'accounts/{account_id}/campaigns/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/campaigns'
},
'content_categories': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'content_categories'
},
'funding_instruments': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/funding_instruments/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/funding_instruments'
},
'iab_categories': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'iab_categories'
},
'line_items': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete', 'batch'],
'BATCH': 'batch/accounts/{account_id}/line_items',
'RESOURCE': 'accounts/{account_id}/line_items/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/line_items'
},
'line_item_placements': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'line_items/placements'
},
'line_item_apps': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/line_item_apps/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/line_item_apps'
},
'media_creatives': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/media_creatives/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/media_creatives'
},
'promoted_accounts': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/promoted_accounts/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/promoted_accounts'
},
'promoted_tweets': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/promoted_tweets/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/promoted_tweets'
},
'promotable_users': {
'endpoint_types': ['all', 'load'],
'RESOURCE': 'accounts/{account_id}/promotable_users/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/promotable_users'
},
'scheduled_promoted_tweets': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/scheduled_promoted_tweets/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/scheduled_promoted_tweets'
},
'targeting_criteria': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/targeting_criteria/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/targeting_criteria'
},
'targeting_suggestions': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'accounts/{account_id}/targeting_suggestions'
},
'targeting_criteria_app_store_categories': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/app_store_categories'
},
'targeting_criteria_behavior_taxonomies': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/behavior_taxonomies'
},
'targeting_criteria_behaviors': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/behaviors'
},
'targeting_criteria_conversations': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/conversations'
},
'targeting_criteria_devices': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/devices'
},
'targeting_criteria_events': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/events'
},
'targeting_criteria_interests': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/interests'
},
'targeting_criteria_languages': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/languages'
},
'targeting_criteria_locations': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/locations'
},
'targeting_criteria_network_operators': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/network_operators'
},
'targeting_criteria_platform_versions': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/platform_versions'
},
'targeting_criteria_platforms': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/platforms'
},
'targeting_criteria_tv_markets': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/tv_markets'
},
'targeting_criteria_tv_shows': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'targeting_criteria/tv_shows'
},
'reach_estimate': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'accounts/{account_id}/reach_estimate'
},
'tax_settings': {
'endpoint_types': ['all', 'update'],
'RESOURCE': 'accounts/{account_id}/tax_settings',
'RESOURCE_COLLECTION': 'accounts/{account_id}/tax_settings'
},
'user_settings': {
'endpoint_types': ['load', 'update'],
'RESOURCE': 'accounts/{account_id}/user_settings/{id}'
},
'account_media': {
'endpoint_types': ['all', 'load', 'delete'],
'RESOURCE': 'accounts/{account_id}/account_media/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/account_media'
},
'scheduled_tweets': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/scheduled_tweets/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/scheduled_tweets'
},
'draft_tweets': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/draft_tweets/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/draft_tweets'
},
'draft_tweets_preview': {
'endpoint_types': ['create'],
'RESOURCE': 'accounts/{account_id}/draft_tweets/preview/{id}',
},
'tweets': {
'endpoint_types': ['all', 'create'],
'RESOURCE_GET': 'accounts/{account_id}/tweets',
'RESOURCE_POST': 'accounts/{account_id}/tweet',
},
'tweet_previews': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'accounts/{account_id}/tweet_previews'
},
'image_app_download_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/image_app_download/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/image_app_download'
},
'image_conversation_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/image_conversation/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/image_conversation'
},
'video_app_download_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/video_app_download/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/video_app_download'
},
'video_conversation_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/video_conversation/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/video_conversation'
},
'video_website_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/video_website/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/video_website'
},
'website_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/website/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/website'
},
'image_direct_message_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/image_direct_message/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/image_direct_message'
},
'video_direct_message_cards': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/video_direct_message/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/video_direct_message'
},
'poll_cards': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/cards/poll/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/poll'
},
'cards_fetch': {
'endpoint_types': ['all', 'load'],
'RESOURCE': 'accounts/{account_id}/cards/all/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/cards/all'
},
'media_library': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/media_library/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/media_library'
},
'preroll_call_to_actions': {
'endpoint_types': ['all', 'load', 'create', 'update', 'delete'],
'RESOURCE': 'accounts/{account_id}/preroll_call_to_actions/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/preroll_call_to_actions'
},
'active_entities': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'stats/accounts/{account_id}/active_entities'
},
'analytics_sync_stats': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'stats/accounts/{account_id}'
},
'analytics_async_jobs': {
'endpoint_types': ['all', 'delete'],
'RESOURCE': 'stats/jobs/accounts/{account_id}/{id}',
'RESOURCE_COLLECTION': 'stats/jobs/accounts/{account_id}'
},
'analytics_async_create_job': {
'endpoint_types': ['create'],
'RESOURCE_COLLECTION': 'stats/jobs/accounts/{account_id}'
},
'reach_frequency_campaigns': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'stats/accounts/{account_id}/reach/campaigns'
},
'reach_frequency_funding_instruments': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'stats/accounts/{account_id}/reach/funding_instruments'
},
'tailored_audiences': {
'endpoint_types': ['all', 'load', 'create', 'delete'],
'RESOURCE': 'accounts/{account_id}/tailored_audiences/{id}',
'RESOURCE_COLLECTION': 'accounts/{account_id}/tailored_audiences'
},
'tailored_audiences_users': {
'endpoint_types': ['create'],
'RESOURCE': 'accounts/{account_id}/tailored_audiences/{id}/users'
},
'tailored_audience_permissions': {
'endpoint_types': ['all', 'create', 'delete'],
'RESOURCE': ('accounts/{account_id}/tailored_audiences/{tailored_audience_id}/'
'permissions/{tailored_audience_permission_id}'),
'RESOURCE_COLLECTION': ('accounts/{account_id}/tailored_audiences/{tailored_audience_id}/'
'permissions')
},
'insights': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'insights/accounts/{account_id}'
},
'insights_available_audiences': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'insights/accounts/{account_id}/available_audiences'
},
'keyword_insights': {
'endpoint_types': ['all'],
'RESOURCE_COLLECTION': 'insights/keywords/search'
},
}
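# A minimal sketch (not an existing helper of this package) of how the templated
# paths above can be expanded; the function name and arguments are illustrative
# assumptions only.
def _build_resource_path(resource, endpoint_key, **ids):
    """e.g. _build_resource_path('campaigns', 'RESOURCE', account_id='abc1', id='xyz2')
    returns 'accounts/abc1/campaigns/xyz2'."""
    return RESOURCE_TABLE[resource][endpoint_key].format(**ids)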
| 41.740741 | 98 | 0.62378 |
4abd5370bdd6b4a0f0c60c0e9ca356e45a325105
| 4,936 |
py
|
Python
|
jarvis.py
|
uwmadison-chm/factuator
|
649b1b4e42f97b8e833f3576e7bf2cf64c38a126
|
[
"MIT"
] | null | null | null |
jarvis.py
|
uwmadison-chm/factuator
|
649b1b4e42f97b8e833f3576e7bf2cf64c38a126
|
[
"MIT"
] | 8 |
2019-04-24T17:58:30.000Z
|
2021-11-29T20:47:31.000Z
|
jarvis.py
|
uwmadison-chm/factuator
|
649b1b4e42f97b8e833f3576e7bf2cf64c38a126
|
[
"MIT"
] | 1 |
2019-04-24T16:30:59.000Z
|
2019-04-24T16:30:59.000Z
|
import psycopg2
import psycopg2.extras
# NOTE: You will need to `kinit` a kerberos token to make this db connection work
class Jarvis:
def __init__(self):
self.db = psycopg2.connect("postgresql://togarashi.keck.waisman.wisc.edu/bi?krbsrvname=postgres")
def select(self, x):
cursor = self.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(x)
return cursor.fetchall()
def select_list(self, x):
cursor = self.db.cursor()
cursor.execute(x)
return cursor.fetchall()
def columns(self, table):
return self.select_list("select COLUMN_NAME from information_schema.COLUMNS where TABLE_NAME = '%s'" % table)
def tables(self):
return self.select("select relname from pg_class where relkind='r' and relname !~ '^(pg_|sql_)';")
def study(self, study_id):
return self.select("SELECT folder, name, current_subjects, total_subjects FROM studies WHERE id = %s" % study_id)
def quotas(self, study_id):
return self.select("SELECT * FROM quotas where startdate < current_date AND enddate > current_date AND study_id = %s" % study_id)
def total_active_quota(self, study_id):
return "{}gb".format(sum([quota['quotagb'] for quota in self.quotas(study_id)]))
def protocols(self, study_id):
return self.select("SELECT protocol, expiration FROM irb_protocols p JOIN irb_studies s ON p.id = s.irb_protocol_id WHERE s.study_id = %s" % study_id)
def irb_expirations(self, study_id):
irbs = self.protocols(study_id)
if len(irbs) == 1:
return str(irbs[0][1])
else:
return ", ".join(["{} expires {}".format(p[0], p[1]) for p in irbs])
def people(self, study_id):
return self.select("""SELECT p.id, p.first, p.last, ip.pi, ip.admin, ip.irb_alerts FROM irb_studies s
JOIN irb_people ip ON ip.irb_protocol_id = s.irb_protocol_id
JOIN people p on p.id = ip.person_id
WHERE s.study_id = %s
ORDER BY ip.pi DESC, ip.admin DESC, ip.irb_alerts DESC, ip.created_at ASC""" % study_id)
def personnel(self, study_id):
        # We want a table of people and whether they are a PI, admin, and/or IRB alert contact
# And now we also want groups
group_info = self.select("""SELECT concat(p.first, ' ', p.last), ag.name, ag.id FROM irb_studies s
JOIN irb_protocols irb ON s.irb_protocol_id = irb.id
JOIN irb_protocol_acgroups ipa ON irb.id = ipa.irb_protocol_id
JOIN account_groups ag on ipa.acgroup_id = ag.id
JOIN account_group_members gm on gm.group_id = ag.id
JOIN account_groups ag2 on ag2.id = gm.member_id
JOIN people p on ag2.person_id = p.id
WHERE NOT ag2.isgroup AND p.first IS NOT NULL AND p.first != '' AND study_id = %s
ORDER BY ag.id ASC, p.last ASC, p.first ASC""" % study_id)
group_map = {}
group_ids = {}
all_groups = []
people_map = {}
all_people = []
for p in self.people(study_id):
name = "{first} {last}".format(**p)
if not name in all_people:
all_people.append(name)
people_map[name] = p
for x in group_info:
name = x[0]
group = x[1]
group_ids[group] = x[2]
if not name in all_people:
all_people.append(name)
if not group in all_groups:
all_groups.append(group)
if name in group_map:
group_map[name].append(group)
else:
group_map[name] = [group]
table = """{| class="wikitable" style="text-align:left;"\n!Name\n!PI\n!Admin"""
for g in all_groups:
table += "\n![https://brainimaging.waisman.wisc.edu/members/jarvis/account_groups/{} {}]".format(group_ids[g], g)
for name in all_people:
table += "\n|-\n"
table += "\n|"
if name in people_map:
p = people_map[name]
table += "'''" + name + "'''"
table += "\n|"
if p['pi']:
table += "✓"
table += "\n|"
if p['admin']:
table += "✓"
else:
table += name
table += "\n|"
table += "\n|"
for g in all_groups:
table += "\n|"
if name in group_map:
if g in group_map[name]:
table += "✓"
table += "\n|}"
title = "=== JARVIS Personnel ==="
link = """This information is auto-populated from [https://brainimaging.waisman.wisc.edu/members/jarvis/studies/{} JARVIS].""".format(study_id)
return title + "\n\n" + link + "\n\n" + table + "\n\n"
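if __name__ == "__main__":
    # Illustrative usage sketch only: it needs a valid Kerberos ticket (`kinit`)
    # and network access to the database above; the study id is a placeholder.
    jarvis = Jarvis()
    print(jarvis.total_active_quota(1))
    print(jarvis.irb_expirations(1))
    print(jarvis.personnel(1))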
| 37.393939 | 158 | 0.561386 |
00dc2b87c78cf5db93ac8816cd06470ce14adf46
| 11,391 |
py
|
Python
|
app/recipe/tests/test_recipe_api.py
|
CrownKira/recipe-app-api
|
b948a66f09a5aa420483f6df806ea57315f966da
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
CrownKira/recipe-app-api
|
b948a66f09a5aa420483f6df806ea57315f966da
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_recipe_api.py
|
CrownKira/recipe-app-api
|
b948a66f09a5aa420483f6df806ea57315f966da
|
[
"MIT"
] | null | null | null |
# comes with python, allows you to generate temporary file
import tempfile
# create file name, check if file exists, etc
import os
# PIL: pillow requirement
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse("recipe:recipe-list")
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse("recipe:recipe-upload-image", args=[recipe_id])
def detail_url(recipe_id):
"""Return recipe detail URL"""
# recipe-detail: name of the endpoint defaultrouter creates for
# our viewset
# specify args with reverse function
# args is a list, since there might be multiple
# args for a single url
return reverse("recipe:recipe-detail", args=[recipe_id])
def sample_tag(user, name="Main course"):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name="Cinnamon"):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {"title": "Sample recipe", "time_minutes": 10, "price": 5.00}
# use update() method of dictionary
defaults.update(params)
# asterisk converts dict to param-arg pairs
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""Test unauthorized recipe API access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
"test@londonappdev.com", "testpass"
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by("-id")
# use serializer to convert from queryset to raw data
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
# what he sees == what's in the db
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
user2 = get_user_model().objects.create_user(
"other@londonappdev.com", "password123"
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
# response.data must be == serializer.data
# serializer.data is the data exposed to the client
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
"title": "Chocolate cheesecake",
"time_minutes": 30,
"price": 5.00,
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data["id"])
for key in payload.keys():
# getattr(recipe, key): get the value based on the key
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name="Vegan")
tag2 = sample_tag(user=self.user, name="Dessert")
payload = {
"title": "Avocado lime cheesecake",
"tags": [tag1.id, tag2.id],
"time_minutes": 60,
"price": 20.00,
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data["id"])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name="Prawns")
ingredient2 = sample_ingredient(user=self.user, name="Ginger")
payload = {
"title": "Thai prawn red curry",
"ingredients": [ingredient1.id, ingredient2.id],
"time_minutes": 20,
"price": 7.00,
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data["id"])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name="Curry")
payload = {"title": "Chicken tikka", "tags": [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
# refresh after update the db
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload["title"])
tags = recipe.tags.all()
# len & count() both ok
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
        # if tags are excluded from the payload,
        # the fully updated recipe will end up with no tags
payload = {
"title": "Spaghetti carbonara",
"time_minutes": 25,
"price": 5.00,
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload["title"])
self.assertEqual(recipe.time_minutes, payload["time_minutes"])
self.assertEqual(recipe.price, payload["price"])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
# add a new test class
# there is some common functionalities in the class
# that we need to repeat
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
"user@londonappdev.com", "testpass"
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
# will delete the image that exists in the recipe
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
"""Test uploading an email to recipe"""
url = image_upload_url(self.recipe.id)
# create a temp file in the system that we can write to
# file removed after exist the block
with tempfile.NamedTemporaryFile(suffix=".jpg") as ntf:
# creates a black square
img = Image.new("RGB", (10, 10))
# save to ntf temp file
# save as JPEG (format)
img.save(ntf, format="JPEG")
            # After img.save(), the file pointer is left at the end of the
            # file, so reading it back immediately would return nothing.
            # seek(0) moves the pointer back to the beginning of the file.
ntf.seek(0)
# pass in post data as dict
# multipart: form that consists of json object
res = self.client.post(url, {"image": ntf}, format="multipart")
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn("image", res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
# http://localhost:8000/api/recipe/recipes/1/upload-image
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {"image": "notimage"}, format="multipart")
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""Test returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title="Thai vegatable curry")
recipe2 = sample_recipe(user=self.user, title="Aubergine with tahini")
tag1 = sample_tag(user=self.user, name="Vegan")
tag2 = sample_tag(user=self.user, name="Vegetarian")
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title="Fish and chips")
res = self.client.get(RECIPES_URL, {"tags": f"{tag1.id},{tag2.id}"})
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
"""Test returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title="Posh beans on toast")
recipe2 = sample_recipe(user=self.user, title="Chicken cacciatore")
ingredient1 = sample_ingredient(user=self.user, name="Feta cheese")
ingredient2 = sample_ingredient(user=self.user, name="Chicken")
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title="Steak and mushrooms")
res = self.client.get(
RECIPES_URL, {"ingredients": f"{ingredient1.id},{ingredient2.id}"}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| 36.509615 | 78 | 0.650865 |
4cb944cb8c691d66b79e83439bb3b0b9628a5258
| 7,424 |
py
|
Python
|
bin/df_crossval_main.py
|
mrunibe/MIALab
|
82d3f0f4344620fd22384108b022730cde9c7215
|
[
"Apache-2.0"
] | 2 |
2018-12-05T09:03:28.000Z
|
2019-01-02T15:31:35.000Z
|
bin/df_crossval_main.py
|
riedj1/MIALab
|
82d3f0f4344620fd22384108b022730cde9c7215
|
[
"Apache-2.0"
] | null | null | null |
bin/df_crossval_main.py
|
riedj1/MIALab
|
82d3f0f4344620fd22384108b022730cde9c7215
|
[
"Apache-2.0"
] | 1 |
2018-10-20T21:27:55.000Z
|
2018-10-20T21:27:55.000Z
|
"""A medical image analysis pipeline.
The pipeline is used for brain tissue segmentation using a decision forest classifier.
"""
import argparse
import datetime
import os
import sys
import timeit
import SimpleITK as sitk
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..'))  # prepend the MIALab root directory to the Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import mialab.classifier.decision_forest as df
import mialab.data.conversion as conversion
import mialab.data.structure as structure
import mialab.data.loading as load
import mialab.utilities.file_access_utilities as futil
import mialab.utilities.pipeline_utilities as putil
import mialab.utilities.statistic_utilities as statistics
FLAGS = None # the program flags
IMAGE_KEYS = [structure.BrainImageTypes.T1, structure.BrainImageTypes.T2, structure.BrainImageTypes.GroundTruth] # the list of images we will load
TRAIN_BATCH_SIZE = 70 # 1..70, the higher the faster but more memory usage
TEST_BATCH_SIZE = 2 # 1..30, the higher the faster but more memory usage
def main(FLAGS,trees,nodes):
"""Brain tissue segmentation using decision forests.
The main routine executes the medical image analysis pipeline:
- Image loading
- Registration
- Pre-processing
- Feature extraction
- Decision forest classifier model building
- Segmentation using the decision forest classifier model on unseen images
- Post-processing of the segmentation
- Evaluation of the segmentation
"""
# load atlas images
putil.load_atlas_images(FLAGS.data_atlas_dir)
print('-' * 5, 'Training...')
# generate a model directory (use datetime to ensure that the directory is empty)
# we need an empty directory because TensorFlow will continue training an existing model if it is not empty
t = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
t='DF_trees_'+str(trees)+'_nodes_'+str(nodes)
model_dir = os.path.join(FLAGS.model_dir, t)
os.makedirs(model_dir, exist_ok=True)
# crawl the training image directories
crawler = load.FileSystemDataCrawler(FLAGS.data_train_dir,
IMAGE_KEYS,
futil.BrainImageFilePathGenerator(),
futil.DataDirectoryFilter())
data_items = list(crawler.data.items())
pre_process_params = {'zscore_pre': True,
'coordinates_feature': True,
'intensity_feature': True,
'gradient_intensity_feature': True}
# initialize decision forest parameters
df_params = df.DecisionForestParameters()
df_params.num_classes = 4
df_params.num_trees = trees
df_params.max_nodes = nodes
df_params.model_dir = model_dir
forest = None
start_time_total_train = timeit.default_timer()
for batch_index in range(0, len(data_items), TRAIN_BATCH_SIZE):
# slicing manages out of range; no need to worry
batch_data = dict(data_items[batch_index: batch_index+TRAIN_BATCH_SIZE])
# load images for training and pre-process
images = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
print('pre-processing done')
# generate feature matrix and label vector
data_train = np.concatenate([img.feature_matrix[0] for img in images])
labels_train = np.concatenate([img.feature_matrix[1] for img in images])
if forest is None:
df_params.num_features = data_train.shape[1]
print(df_params)
forest = df.DecisionForest(df_params)
start_time = timeit.default_timer()
forest.train(data_train, labels_train)
print(' Time elapsed:', timeit.default_timer() - start_time, 's')
time_total_train = timeit.default_timer() - start_time_total_train
print('-' * 5, 'Testing...')
result_dir = os.path.join(FLAGS.result_dir, t)
os.makedirs(result_dir, exist_ok=True)
# initialize evaluator
evaluator = putil.init_evaluator(result_dir)
# crawl the training image directories
crawler = load.FileSystemDataCrawler(FLAGS.data_test_dir,
IMAGE_KEYS,
futil.BrainImageFilePathGenerator(),
futil.DataDirectoryFilter())
data_items = list(crawler.data.items())
for batch_index in range(0, len(data_items), TEST_BATCH_SIZE):
# slicing manages out of range; no need to worry
batch_data = dict(data_items[batch_index: batch_index + TEST_BATCH_SIZE])
# load images for testing and pre-process
pre_process_params['training'] = False
images_test = putil.pre_process_batch(batch_data, pre_process_params, multi_process=True)
images_prediction = []
images_probabilities = []
for img in images_test:
print('-' * 10, 'Testing', img.id_)
start_time = timeit.default_timer()
probabilities, predictions = forest.predict(img.feature_matrix[0])
print(' Time elapsed:', timeit.default_timer() - start_time, 's')
# convert prediction and probabilities back to SimpleITK images
image_prediction = conversion.NumpySimpleITKImageBridge.convert(predictions.astype(np.uint8),
img.image_properties)
image_probabilities = conversion.NumpySimpleITKImageBridge.convert(probabilities, img.image_properties)
# evaluate segmentation without post-processing
evaluator.evaluate(image_prediction, img.images[structure.BrainImageTypes.GroundTruth], img.id_)
images_prediction.append(image_prediction)
images_probabilities.append(image_probabilities)
# post-process segmentation and evaluate with post-processing
post_process_params = {'crf_post': True}
images_post_processed = putil.post_process_batch(images_test, images_prediction, images_probabilities,
post_process_params, multi_process=True)
for i, img in enumerate(images_test):
evaluator.evaluate(images_post_processed[i], img.images[structure.BrainImageTypes.GroundTruth],
img.id_ + '-PP')
# save results
sitk.WriteImage(images_prediction[i], os.path.join(result_dir, images_test[i].id_ + '_SEG.mha'), True)
sitk.WriteImage(images_post_processed[i], os.path.join(result_dir, images_test[i].id_ + '_SEG-PP.mha'), True)
# write summary of parameters to results dir
with open(os.path.join(result_dir, 'summary.txt'), 'w') as summary_file:
print('Training data size: {}'.format(len(data_items)), file=summary_file)
print('Total training time: {:.1f}s'.format(time_total_train), file=summary_file)
print('Decision forest', file=summary_file)
print(df_params, file=summary_file)
stats = statistics.gather_statistics(os.path.join(result_dir, 'results.csv'))
print('Result statistics:', file=summary_file)
print(stats, file=summary_file)
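if __name__ == "__main__":
    # Minimal sketch of how main() could be driven over a small grid of forest
    # sizes; the directory defaults below are assumptions, not project defaults.
    parser = argparse.ArgumentParser(description='Decision forest cross-validation.')
    parser.add_argument('--data_atlas_dir', type=str, default='./data/atlas')
    parser.add_argument('--data_train_dir', type=str, default='./data/train')
    parser.add_argument('--data_test_dir', type=str, default='./data/test')
    parser.add_argument('--model_dir', type=str, default='./models')
    parser.add_argument('--result_dir', type=str, default='./results')
    FLAGS = parser.parse_args()
    for trees in (10, 50):
        for nodes in (100, 1000):
            main(FLAGS, trees, nodes)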
| 42.913295 | 147 | 0.672953 |
f858eabcb92a83334ed1184e5319a30f973e4512
| 177 |
py
|
Python
|
packs/consul/actions/query_node.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 164 |
2015-01-17T16:08:33.000Z
|
2021-08-03T02:34:07.000Z
|
packs/consul/actions/query_node.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 442 |
2015-01-01T11:19:01.000Z
|
2017-09-06T23:26:17.000Z
|
packs/consul/actions/query_node.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 202 |
2015-01-13T00:37:40.000Z
|
2020-11-07T11:30:10.000Z
|
from lib import action
class ConsulQueryNodeAction(action.ConsulBaseAction):
def run(self, node):
index, node = self.consul.catalog.node(node)
return node
| 22.125 | 53 | 0.706215 |
285e57c6fecfc215e3fbcb722e4fb8764dfba320
| 763 |
py
|
Python
|
frida/controller/mongodb.py
|
att/frida
|
a8cedffc5be79f7a4070f2e6324b33e91448520c
|
[
"MIT"
] | 6 |
2019-04-17T20:37:53.000Z
|
2021-10-30T23:47:15.000Z
|
frida/controller/mongodb.py
|
att/frida
|
a8cedffc5be79f7a4070f2e6324b33e91448520c
|
[
"MIT"
] | null | null | null |
frida/controller/mongodb.py
|
att/frida
|
a8cedffc5be79f7a4070f2e6324b33e91448520c
|
[
"MIT"
] | 3 |
2019-04-17T20:38:57.000Z
|
2019-11-01T17:40:58.000Z
|
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
import json
import frida.database as fdb
import frida.manifest.profile_mongodb as modb
from frida.controller.base import BaseController
from frida.adapter.factory import AdapterFactory
class MongoDbController(BaseController):
def __init__(self):
pass
def profile(self, target_db: fdb.Database):
"""
        Profiles a database and returns its technical metadata as a JSON string
"""
adapter = AdapterFactory.get_bound_adapter(database=target_db)
metadata = {"FRIDA.DATABASE": []}
for model in adapter.execute(modb._mongo_get_db_stats):
metadata["FRIDA.DATABASE"].append(model)
return json.dumps(metadata)
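# Illustrative usage sketch (how the target database object is obtained is an
# assumption; see frida.database for the real factory):
# controller = MongoDbController()
# print(controller.profile(target_db=my_mongo_database))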
| 30.52 | 77 | 0.706422 |
50687e3a683d8445028d8b5a78046a8ff018f89f
| 8,295 |
py
|
Python
|
test.py
|
peterhudec/gae_installer
|
c2fba0935c8a53c928099b344d1533313904ec1b
|
[
"MIT"
] | 6 |
2015-07-01T04:12:16.000Z
|
2017-03-20T08:45:25.000Z
|
test.py
|
authomatic/gae_installer
|
c2fba0935c8a53c928099b344d1533313904ec1b
|
[
"MIT"
] | 1 |
2016-01-07T10:13:57.000Z
|
2016-01-07T10:13:57.000Z
|
test.py
|
peterhudec/gae_installer
|
c2fba0935c8a53c928099b344d1533313904ec1b
|
[
"MIT"
] | 7 |
2015-01-27T15:10:05.000Z
|
2016-09-23T15:34:03.000Z
|
"""
Tests the setup.py script by running ``python setup.py install`` in a
temporarily activated virtual environment.
"""
import os
import re
import shutil
import subprocess
import sys
import unittest
import urllib2
import version
VENV_NAME = '_e'
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
VENV_PATH = os.path.join(BASE_PATH, VENV_NAME)
BUILD_PATH = 'build'
ACTIVATE_THIS_PATH = os.path.join(VENV_PATH, 'bin', 'activate_this.py')
def _which(program):
"""Returns path of command executable path."""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tearDownClass()
if _which('virtualenv'):
os.system('virtualenv {0} -q -p python2.7'.format(VENV_PATH))
if not os.path.isdir(VENV_PATH):
sys.exit('Failed to create virtual environment "{0}"!'
.format(VENV_PATH))
cls._activate_venv()
else:
sys.exit('Cannot run tests because the "virtualenv" '
'command is not installed on your system!'
'\nRead installation instructions '
'here:\nhttps://virtualenv.pypa.io/en/latest/virtualenv'
'.html#installation')
@classmethod
def tearDownClass(cls):
cls._remove_venv()
cls._remove_build()
@classmethod
def _remove_venv(cls):
"""Removes virtual environment"""
if os.path.isdir(VENV_PATH):
shutil.rmtree(VENV_PATH)
@classmethod
def _remove_build(cls):
"""Removes virtual environment"""
if os.path.isdir(BUILD_PATH):
shutil.rmtree(BUILD_PATH)
@classmethod
def _activate_venv(cls):
"""
Activates virtual environment
http://virtualenv.readthedocs.org/en/latest/virtualenv.html#using-virtualenv-without-bin-python
"""
execfile(ACTIVATE_THIS_PATH, dict(__file__=ACTIVATE_THIS_PATH))
def _install(self):
os.system('python {0} -q install'
.format(os.path.join(BASE_PATH, 'setup.py')))
def _import_gae(self):
import google.appengine
def test_import(self):
# GAE import should fail first
self.assertRaises(ImportError, self._import_gae)
# When we install it...
self._install()
# (we need to activate venv again, otherwise it won't work)
self._activate_venv()
# (remove shadowing google modules if any)
if 'google' in sys.modules:
del sys.modules['google']
# Now the import should not fail
import google.appengine
# Ensure that the imported module lives in our venv
assert VENV_PATH in google.appengine.__path__[0]
# Pattern for elimination of _e/lib/python2.7 and _e/local/lib/python2.7
# differences in scripts output
venv_lib_pattern = re.compile(r'(_e/).*(/python)')
venv_lib_replacement = r'\1...\2'
# The _get_gae_dir file should exist
get_gae_dir_path = os.path.join(VENV_PATH, 'bin', '_get_gae_dir')
self.assertTrue(os.path.isfile(get_gae_dir_path))
        # The _get_gae_dir command should return the path of the
# installed google_appengine SDK
gae_dir = google.appengine.__file__.split('/google/')[0]
output, error = subprocess.Popen(['_get_gae_dir'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True).communicate()
output = venv_lib_pattern.sub(venv_lib_replacement, output.strip())
gae_dir_clean = venv_lib_pattern.sub(venv_lib_replacement, gae_dir)
self.assertEquals(output, gae_dir_clean)
# Skip the run_tests.py file
original_commands = os.listdir(gae_dir)
original_commands.remove('run_tests.py')
        # Pattern for replacing time in output
time_pattern = re.compile(r'\d\d:\d\d:\d\d,\d\d\d')
for command in original_commands:
if command.endswith('.py') and command[0] != '_':
original_file = os.path.join(gae_dir, command)
name = command[:-3]
self.assertTrue(os.path.isfile(original_file),
"File {} doesn't exist!".format(original_file))
original_output, original_error = subprocess.Popen(
['python', original_file],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE
).communicate()
self.assertTrue(os.path.isfile(
os.path.join(VENV_PATH, 'bin', name)),
"File {} doesn't exist!".format(name)
)
output, error = subprocess.Popen(
[name],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True
).communicate()
# Output can contain varying time so we need to eliminate it
original_output = time_pattern.sub('', original_output)
original_error = time_pattern.sub('', original_error)
output = time_pattern.sub('', output)
error = time_pattern.sub('', error)
                # Eliminate _e/lib/python2.7 and _e/local/lib/python2.7
# differences
original_output = venv_lib_pattern.sub(venv_lib_replacement,
original_output)
original_error = venv_lib_pattern.sub(venv_lib_replacement,
original_error)
output = venv_lib_pattern.sub(venv_lib_replacement, output)
error = venv_lib_pattern.sub(venv_lib_replacement, error)
assert output == original_output
assert error == original_error
self.assertEquals(output, original_output,
"Stdouts of {} and {} don't match!"
.format(name, original_file))
self.assertEquals(error, original_error,
"Stderrs of {} and {} don't match!"
.format(name, original_file))
ok = output == original_output and error == original_error
print 'TESTING SCRIPT: {} {}'\
.format(name, 'OK' if ok else 'ERROR')
class TestNewVersion(unittest.TestCase):
def test_new_version(self):
"""
Tests whether the current version is the most recent one.
"""
prefix = 'google_appengine_'
major, minor, micro = map(int, version.version.split('.'))
bucket_list = urllib2.urlopen('https://storage.googleapis.com/'
'appengine-sdks/').read()
match = re.search(
pattern=r'{}({}\.\d+.\d+)'.format(prefix, major + 1),
string=bucket_list
)
if not match:
match = re.search(
pattern=r'{}({}\.{}.\d+)'.format(prefix, major, minor + 1),
string=bucket_list
)
if not match:
match = re.search(
pattern=r'{}({}\.{}.{})'
.format(prefix, major, minor, micro + 1),
string=bucket_list
)
self.assertIsNone(
obj=match,
msg='New GAE version {} available!'.format(match.groups()[0])
if match else ''
)
if __name__ == '__main__':
if sys.version_info.major == 2 and sys.version_info.minor >= 7:
unittest.main(failfast=True)
else:
sys.exit('GAE Installer requires Python 2.7 or higher!')
| 34.5625 | 103 | 0.559011 |
95f9eb683001c6dc7c306aeec30fef919bef5832
| 1,707 |
py
|
Python
|
runapp/lessons/6/run_clock.py
|
bryvogel/cgm-clock
|
62ace6ee312e6232282eeaaa69c01d261cfa3a52
|
[
"Apache-2.0"
] | null | null | null |
runapp/lessons/6/run_clock.py
|
bryvogel/cgm-clock
|
62ace6ee312e6232282eeaaa69c01d261cfa3a52
|
[
"Apache-2.0"
] | null | null | null |
runapp/lessons/6/run_clock.py
|
bryvogel/cgm-clock
|
62ace6ee312e6232282eeaaa69c01d261cfa3a52
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import time
import datetime
from Libs.Clock import Clock
from Libs.SevenSegment import Display
from Libs.Weather import Weather
from Libs.Input import Button
from Libs.GStreamer import Speaker
from config import configuration
# The weather station
station_name = configuration.get('weather_station')
weather_station = Weather(station_name)
# Connect to the internal machine clock
clock = Clock()
# Connect to the LED display
display = Display()
# Connect to the speaker
speaker = Speaker()
# Play some music
def playMusic():
speaker.playList(["TestTrack.ogg", "AmicusMeus.ogg"])
# Wake us up at 8:30 in the morning
clock.atTime(8, 30, playMusic)
# Show the current weather
def switchWeatherStations():
# Clear the display
display.setColon(False)
display.setEvening(False)
display.setHours(0)
display.setMinutes(0)
# Show the current temperature
current_temp = weather_station.getCurrentTemp()
display.setMinutes(current_temp)
# Wait for about three seconds
clock.waitAbout(3)
# What to do when you press a button
Button(24).whenPressed(switchWeatherStations)
# Show the current time
def showCurrentTime():
now = datetime.datetime.now()
# Set the hours
is_evening = now.hour > 12
display.setHours(now.hour if not is_evening else now.hour - 12)
# Set the indicator lights
display.setColon(True)
display.setEvening(is_evening)
# Set the minutes
display.setMinutes(now.minute)
# What to do when the internal clock ticks
clock.onTick(showCurrentTime)
# Set the brightness (0 to 15, 15 is the brightest)
display.setBrightness(1)
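# Illustrative sketch of the expected config module (the real contents of
# config.py are an assumption; only the 'weather_station' key is read above):
# configuration = {'weather_station': 'KMSN'}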
| 24.385714 | 68 | 0.71529 |
13c6e33c6c3c9cb17643facefe0c4e44d829f33f
| 552 |
py
|
Python
|
yesua1/startup.py
|
rishabhranawat/CrowdPlatform
|
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
|
[
"MIT"
] | 1 |
2020-07-23T21:35:40.000Z
|
2020-07-23T21:35:40.000Z
|
yesua1/startup.py
|
rishabhranawat/CrowdPlatform
|
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
|
[
"MIT"
] | 9 |
2021-02-08T20:32:35.000Z
|
2022-03-02T14:58:07.000Z
|
yesua1/startup.py
|
rishabhranawat/CrowdPlatform
|
1de2ad7e70fbf6cbf2e29bc9368341134b4f7e0d
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.contrib import admin
def autoload(submodules):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
for submodule in submodules:
try:
import_module("{0}.{1}".format(app, submodule))
except:
if module_has_submodule(mod, submodule):
raise
def run():
autoload(["receivers"])
admin.autodiscover()
| 25.090909 | 63 | 0.648551 |
6db5fc0bc4b5b2a0fed8fe2d0fd2e9dd654184c1
| 1,321 |
py
|
Python
|
utilities/loss_function.py
|
Musketeer-Liu/Machine_Learning_Lab
|
cba2ee116011e87d4f7df525f1a1ac4c87f7b4e4
|
[
"MIT"
] | null | null | null |
utilities/loss_function.py
|
Musketeer-Liu/Machine_Learning_Lab
|
cba2ee116011e87d4f7df525f1a1ac4c87f7b4e4
|
[
"MIT"
] | null | null | null |
utilities/loss_function.py
|
Musketeer-Liu/Machine_Learning_Lab
|
cba2ee116011e87d4f7df525f1a1ac4c87f7b4e4
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
from data_operation import accuracy_score
class Loss(object):
# def __init__(self):
# pass
def loss(self, y_true, y_pred):
        raise NotImplementedError()
def gradient(self, y_true, y_pred):
        raise NotImplementedError()
def accuracy(self, y_true, y_pred):
return 0
class SquareLoss(Loss):
# def __init__(self):
# pass
def loss(self, y_true, y_pred):
return 0.5 * np.power((y_true - y_pred), 2)
def gradient(self, y_true, y_pred):
return -(y_true - y_pred)
class SoftLoss(Loss):
# def __init__(self):
# pass
def gradient(self, y_true, y_pred):
return y_true - y_pred
class CrossEntropy(Loss):
# def __init__(self):
# pass
def loss(self, y_true, y_pred):
        # Avoid division by zero
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
return - y_true*np.log(y_pred) - (1-y_true)*np.log(1-y_pred)
def accuracy(self, y_true, y_pred):
return accuracy_score(np.argmax(y_true, axis=1), np.argmax(y_pred, axis=1))
def gradient(self, y_true, y_pred):
        # Avoid division by zero
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
return - y_true/y_pred + (1-y_true)/(1-y_pred)
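if __name__ == "__main__":
    # Small numerical sanity check, illustrative only (running this file still
    # requires the local data_operation module imported above).
    y_true = np.array([1.0, 0.0, 1.0])
    y_pred = np.array([0.9, 0.2, 0.8])
    print(SquareLoss().loss(y_true, y_pred))        # 0.5 * (y_true - y_pred)**2, elementwise
    print(CrossEntropy().loss(y_true, y_pred))      # binary cross-entropy, elementwise
    print(CrossEntropy().gradient(y_true, y_pred))  # -y/p + (1 - y)/(1 - p)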
| 22.016667 | 83 | 0.610901 |
cb189270b8769418ae30639c59e3aa1c694409b7
| 151 |
py
|
Python
|
tests/test_fun.py
|
happy-developer-fr/minio-test
|
16a83b79d5cfe3e703bbafcbe1ab7d57243d631e
|
[
"MIT"
] | null | null | null |
tests/test_fun.py
|
happy-developer-fr/minio-test
|
16a83b79d5cfe3e703bbafcbe1ab7d57243d631e
|
[
"MIT"
] | null | null | null |
tests/test_fun.py
|
happy-developer-fr/minio-test
|
16a83b79d5cfe3e703bbafcbe1ab7d57243d631e
|
[
"MIT"
] | null | null | null |
import src.my_package.fun
def test_is_it_funny():
src.my_package.fun.is_it_funny()
    assert src.my_package.fun.is_it_funny() == "oh yeah"
| 21.571429 | 61 | 0.748344 |
ef203cd1cf109606fec4f5d057ce25290fc3db40
| 21,520 |
py
|
Python
|
pyNastran/op2/vector_utils.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/vector_utils.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | 1 |
2021-06-07T16:33:59.000Z
|
2021-06-07T16:33:59.000Z
|
pyNastran/op2/vector_utils.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | 1 |
2021-12-17T10:45:08.000Z
|
2021-12-17T10:45:08.000Z
|
"""
defines some methods for working with arrays:
- filter1d(a, b=None, zero_tol=0.001)
- where_searchsorted(a, v, side='left', x=None, y=None)
- sortedsum1d(ids, values, axis=None)
- iformat(format_old, precision=2)
- abs_max_min_global(values)
- abs_max_min_vector(values)
- abs_max_min(values, global_abs_max=True)
- principal_3d(o11, o22, o33, o12, o23, o13)
- transform_force(force_in_local,
coord_out, coords,
nid_cd, i_transform)
- transform_force_moment(force_in_local, moment_in_local,
coord_out, coords,
nid_cd, i_transform,
xyz_cid0, summation_point_cid0=None,
consider_rxf=True,
debug=False, log=None)
- transform_force_moment_sum(force_in_local, moment_in_local,
coord_out, coords,
nid_cd, i_transform,
xyz_cid0, summation_point_cid0=None,
consider_rxf=True,
debug=False, log=None)
"""
from __future__ import annotations
from struct import calcsize
from itertools import count
from typing import Optional, Dict, TYPE_CHECKING
import numpy as np
from numpy import arccos, sqrt, pi, in1d, cos, unique, cross, ndarray
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import CORDx # , CORD1R, CORD1C, CORD1S, CORD2R, CORD2C, CORD2S
def filter1d(a: ndarray, b: Optional[ndarray]=None, zero_tol: float=0.001):
"""
Filters a 1d numpy array of values near 0.
Parameters
----------
a : (n, ) float ndarray
a vector to compare
b : (n, ) float ndarray; default=None
another vector to compare
If b is defined, both a and b must be near 0 for a value to be removed.
zero_tol : float; default=0.001
the zero tolerance value
Returns
-------
k : (m, ) int ndarray
the indices of the removed values
    Examples
    --------
    >>> a = [1., 2., 0.1]
    >>> filter1d(a, zero_tol=0.5)
    array([0, 1])
    >>> a = [1., 2., 0.1]
    >>> b = [1., -0.1, 0.1]
    >>> filter1d(a, b, zero_tol=0.5)
    array([0, 1])
"""
a = np.asarray(a)
i = np.where(np.abs(a) > zero_tol)[0]
if b is None:
return i
b = np.asarray(b)
assert a.shape == b.shape, 'a.shape=%s b.shape=%s' % (str(a.shape), str(b.shape))
assert a.size == b.size, 'a.size=%s b.size=%s' % (str(a.size), str(b.size))
j = np.where(np.abs(b) > zero_tol)[0]
k = np.unique(np.hstack([i, j]))
return k
def where_searchsorted(a, v, side='left', x=None, y=None):
"""Implements a np.where that assumes a sorted array set."""
# TODO: take advantage of searchsorted
assert x is None, x
assert y is None, y
assert side == 'left', side
    i = np.where(in1d(a, v))
return i
def sortedsum1d(ids, values, axis=None):
"""
Sums the values in a sorted 1d/2d array.
Parameters
----------
ids : (n, ) int ndarray
the integer values in a sorted order that indicate what is to
be summed
values : (n, m) float ndarray
the values to be summed
Presumably m is 2 because this function is intended to be used
for summing forces and moments
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default
(axis=None) is perform a sum over all the dimensions of the
input array. axis may be negative, in which case it counts
from the last to the first axis.
If this is a tuple of ints, a sum is performed on multiple
axes, instead of a single axis or all the axes as before.
Not actually supported...
Returns
-------
out : (nunique, m) float ndarray
the summed values
Examples
--------
**1D Example**
For an set of arrays, there are 5 values in sorted order.
..code-block :: python
ids = [1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5]
values = 1.0 * ids
We want to sum the values such that:
..code-block :: python
out = [2.0, 4.0, 9.0, 4.0, 15.0]
**2D Example**
..code-block :: python
ids = [1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5]
values = [
[1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
[1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
]
values = 1.0 * ids
For 2D
.. todo:: This could probably be more efficient
.. todo:: Doesn't support axis
"""
uids = unique(ids)
i1 = np.searchsorted(ids, uids, side='left') # left is the default
i2 = np.searchsorted(ids, uids, side='right')
out = np.zeros(values.shape, dtype=values.dtype)
for i, i1i, i2i in zip(count(), i1, i2):
out[i, :] = values[i1i:i2i, :].sum(axis=axis)
return out
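# Illustrative example of sortedsum1d() on a small, already-sorted 2D input:
# ids = np.array([1, 1, 2]); values = np.array([[1., 2.], [3., 4.], [5., 6.]])
# sortedsum1d(ids, values)[:len(np.unique(ids))] -> [[4., 6.], [5., 6.]]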
def iformat(format_old: str, precision: int=2) -> str:
"""
Converts binary data types to size vector arrays.
Parameters
----------
format_old : str
the int/float data types in single precision format
precision : int; default=2
the precision to convert to
1 : single precision (no conversion)
2 : double precision
Returns
-------
    format_new : str
        the int/float data types in single/double precision format
    ndata : int
        the size in bytes of the new format (from struct.calcsize)
Examples
--------
>>> iformat('8i6f10s', precision=1)
'8i6f10s'
>>> iformat('8i6f10s', precision=2)
'8l6d10q'
"""
if precision == 2: # double
format_new = format_old.replace('i', 'l').replace('f', 'd')
elif precision == 1: # single
format_new = format_old.replace('l', 'i').replace('d', 'f')
else:
raise NotImplementedError(precision)
ndata = calcsize(format_new)
return format_new, ndata
def abs_max_min_global(values):
"""
This is useful for figuring out absolute max or min principal stresses
across single/multiple elements and finding a global max/min value.
Parameters
----------
    values: ndarray/list/tuple
an ND-array of values;
common NDARRAY/list/tuple shapes:
1. [nprincipal_stresses]
2. [nelements, nprincipal_stresses]
Returns
-------
abs_max_mins: int/float
an array of the max or min principal stress
don't input mixed types
nvalues >= 1
>>> element1 = [0.0, -1.0, 2.0] # 2.0
>>> element2 = [0.0, -3.0, 2.0] # -3.0
>>> values = abs_max_min_global([element1, element2])
>>> values
-3.0
>>> element1 = [0.0, -1.0, 2.0] # 2.0
>>> values = abs_max_min_global([element1])
>>> values
2.0
.. note:: [3.0, 2.0, -3.0] will return 3.0, and
[-3.0, 2.0, 3.0] will return 3.0
"""
# support lists/tuples
values = np.asarray(values)
# find the [max,
# min]
# we organize it as [max, min], which is why the note applies
# we could make both of the edge cases return -3.0, but if you're using
# this function it shouldn't matter
values2 = np.array([values.max(), values.min()])
# we figure out the absolute max/min
abs_vals = np.abs(values2)
abs_val = abs_vals.max()
# find the location of the absolute max value
# 1. we take the first value (the where[0]) to chop the return value
# since there is no else conditional
# 2. we take the first value (the where[0][0]) to only get the max
# value if 2+ values are returned
j = np.where(abs_val == abs_vals)[0][0]
    # get the raw value from the absolute value, so:
# value = npabs(raw_value)
return values2[j]
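# Illustrative check of abs_max_min_global (demo helper, not part of the
# original module): the value with the largest magnitude is returned with its
# sign preserved.
def _abs_max_min_global_example():
    assert abs_max_min_global([0.0, -1.0, 2.0]) == 2.0
    assert abs_max_min_global([[0.0, -1.0, 2.0],
                               [0.0, -3.0, 2.0]]) == -3.0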
def abs_max_min_vector(values):
"""
This is useful for figuring out principal stresses across multiple
elements.
Parameters
----------
    values: ndarray/list/tuple
        an array of values, where the rows are iterated over
and the columns are going to be compressed
common NDARRAY/list/tuple shapes:
1. [nprincipal_stresses]
2. [nelements, nprincipal_stresses]
Returns
-------
abs_max_mins: NDARRAY shape=[nelements] with dtype=values.dtype
an array of the max or min principal stress
don't input mixed types
::
>>> element1 = [0.0, 1.0, 2.0] # 2.0
>>> element2 = [0.0, -1.0, 2.0] # 2.0
>>> element3 = [0.0, -3.0, 2.0] # -3.0
    >>> values = [element1, element2, element3]
    >>> values0 = abs_max_min_vector(values)
>>> values0
[2.0, 2.0, -3.0]
.. note:: [3.0, 2.0, -3.0] will return 3.0, and
[-3.0, 2.0, 3.0] will return 3.0
"""
# support lists/tuples
values = np.asarray(values)
# find the [maxs,
# mins]
# we organize it as [maxs, mins], which is why the note applies
# we could make both of the edge cases return -3.0, but if you're using
# this function it shouldn't matter
maxs_mins = np.array([values.max(axis=1), values.min(axis=1)])
# we figure out the absolute max/min for each row
abs_vals = np.abs(maxs_mins)
absolute_maxs = abs_vals.max(axis=0)
outs = np.zeros(absolute_maxs.shape[0], dtype=values.dtype)
for i, absolute_max in enumerate(absolute_maxs):
# find the location of the absolute max value
# 1. we take the first value (the where[0]) to chop the return value
# since there is no else conditional
# 2. we take the first value (the where[0][0]) to only get the max
# value if 2+ values are returned
j = np.where(absolute_max == abs_vals[:, i])[0][0]
        # get the raw value from the absolute value, so:
# value = npabs(raw_value)
outs[i] = maxs_mins[j, i]
return outs
def abs_max_min(values, global_abs_max=True):
"""
Gets the maximum value of x and -x.
This is used for getting the max/min principal stress.
"""
if global_abs_max:
return abs_max_min_global(values)
return abs_max_min_vector(values)
# def principal_2d(o11, o22, o12):
# oxx = 5
# return oxx, oy
def principal_3d(o11, o22, o33, o12, o23, o13):
"""http://www.continuummechanics.org/cm/principalstrain.html"""
# e = a
i1 = o11 + o22 + o33
i2 = o11*o22 + o22*o33 + o11*o33 - o12**2 - o13**2 - o23**2
i3 = o11*o22*o33 - o11*o23**2 - o22*o13**2 + 2*o12*o13*o23
    Q = (3. * i2 - i1**2) / 9.  # the 1/9 factor follows the cited reference
R = (2*i1**3 - 9*i1*i2 + 27*i3) / 54.
theta = arccos(R / sqrt(-Q**3))
q2 = 2 * sqrt(-Q)
i13 = 1./3. * i1
p1 = q2 * cos(theta/3) + i13
p2 = q2 * cos(theta/3 + 2*pi/3.) + i13
p3 = q2 * cos(theta/3 + 4*pi/3.) + i13
max_min_mid = np.array([p1, p2, p3])
pmax = max_min_mid.max(axis=0)
pmin = max_min_mid.min(axis=0)
return pmax, pmin
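# Quick sanity sketch for principal_3d (demo helper, not part of the original
# module): for a purely diagonal stress state the principal stresses are the
# diagonal terms themselves, so pmax/pmin should be 3.0 and 1.0 here.
def _principal_3d_example():
    pmax, pmin = principal_3d(1.0, 2.0, 3.0, 0.0, 0.0, 0.0)
    assert np.allclose([pmax, pmin], [3.0, 1.0])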
def transform_force(force_in_local,
coord_out: CORDx, coords: Dict[int, CORDx],
                    nid_cd: ndarray, unused_icd_transform):
"""
Transforms force/moment from global to local and returns all the forces.
Supports cylindrical/spherical coordinate systems.
Parameters
----------
    force_in_local : (N, 3) ndarray
        forces in the local frame
    coord_out : CORDx
the desired local frame
coords : dict[int] = CORDx
all the coordinate systems
key : int
value : CORDx
nid_cd : (M, 2) int ndarray
the (BDF.point_ids, cd) array
icd_transform : dict[cd] = (Mi, ) int ndarray
the mapping for nid_cd
.. warning:: the function signature will change...
.. todo:: sum of moments about a point must have an rxF term to get the
same value as Patran.
Fglobal = Flocal @ T
Flocal = T.T @ Fglobal
Flocal2 = T2.T @ (Flocal1 @ T1)
"""
force_out = np.zeros(force_in_local.shape, dtype=force_in_local.dtype)
#nids = nid_cd[:, 0]
cds = nid_cd[:, 1]
ucds = unique(cds)
coord_out_cid = coord_out.cid
coord_out_T = coord_out.beta()
for cd in ucds:
i = np.where(cds == cd)[0]
#nidsi = nids[i]
analysis_coord = coords[cd]
cd_T = analysis_coord.beta()
# rxF from local_in to global to local_out
force_in_locali = force_in_local[i, :]
force_in_globali = force_in_locali @ cd_T
force_outi = (coord_out_T @ force_in_globali.T).T
force_out[i, :] = force_outi
return -force_out
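# A minimal sketch of transform_force using stand-in coordinate objects whose
# beta() is the identity matrix (the _IdentityCoord class and all values below
# are invented for illustration; real usage passes pyNastran CORDx objects and
# a (nnodes, 2) nid_cd array taken from the BDF).
class _IdentityCoord:
    cid = 0
    @staticmethod
    def beta():
        return np.eye(3)
def _transform_force_example():
    coord0 = _IdentityCoord()
    coords = {0: coord0}
    nid_cd = np.array([[1, 0], [2, 0]])  # (node id, cd) pairs, both in frame 0
    forces = np.array([[1., 0., 0.],
                       [0., 2., 0.]])
    # with identity frames the forces come back unchanged apart from the
    # sign flip applied by transform_force
    out = transform_force(forces, coord0, coords, nid_cd, None)
    assert np.allclose(out, -forces)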
def transform_force_moment(force_in_local, moment_in_local,
coord_out: CORDx, coords: Dict[int, CORDx],
                           nid_cd: ndarray, icd_transform: Dict[int, ndarray],
xyz_cid0: ndarray,
summation_point_cid0: Optional[ndarray]=None,
consider_rxf: bool=True,
debug: bool=False, log=None):
"""
Transforms force/moment from global to local and returns all the forces.
Parameters
----------
force_in_local : (N, 3) ndarray
forces in the local frame
moment_in_local : (N, 3) ndarray
moments in the local frame
coord_out : CORDx()
the desired local frame
coords : dict[int] = CORDx
all the coordinate systems
key : int
value : CORDx
nid_cd : (M, 2) int ndarray
the (BDF.point_ids, cd) array
icd_transform : dict[cd] = (Mi, ) int ndarray
the mapping for nid_cd
xyz_cid0 : (n, 3) ndarray
the nodes in the global frame
summation_point_cid0 : (3, ) ndarray
the summation point in the global frame???
consider_rxf : bool; default=True
considers the r x F term
debug : bool; default=False
debugging flag
log : log; default=None
a log object that gets used when debug=True
Returns
-------
force_out : (n, 3) float ndarray
the ith float components in the coord_out coordinate frame
moment_out : (n, 3) float ndarray
the ith moment components about the summation point in the
coord_out coordinate frame
.. todo:: doesn't seem to handle cylindrical/spherical systems
https://flexiblelearning.auckland.ac.nz/sportsci303/13_3/forceplate_manual.pdf
xyz0 = T_1_to_0 @ xyz1
xyz1 = T_1_to_0.T @ xyz0
xyz2 = T_2_to_0.T @ xyz0
xyz2 = T_2_to_0.T @ T_1_to_0 @ xyz1
xyz_g = T_a2g @ xyz_a
xyz_g = T_b2g @ xyz_b
T_b2g @ xyz_b = T_a2g @ xyz_a
xyz_b = T_b2g.T @ T_a2g @ xyz_a = T_g2b @ T_a2g @ xyz_a
"""
#print('consider_rxf =', consider_rxf)
#debug = True
assert log is not None
assert nid_cd.shape[0] == force_in_local.shape[0]
dtype = force_in_local.dtype
#dtype = 'float64'
force_in_local_sum = force_in_local.sum(axis=0)
force_out = np.zeros(force_in_local.shape, dtype=dtype)
moment_out = np.zeros(force_in_local.shape, dtype=dtype)
nids = nid_cd[:, 0]
cds = nid_cd[:, 1] #* 0
ucds = unique(cds)
#coord_out_cid = coord_out.cid
beta_out = coord_out.beta().T
if debug:
log.debug('beta_out =\n%s' % beta_out)
log.debug(coord_out)
if consider_rxf:
for ii in range(xyz_cid0.shape[0]):
log.debug('***i=%s xyz=%s nid=%s cd=%s' % (
ii, xyz_cid0[ii, :], nid_cd[ii, 0], nid_cd[ii, 1]))
log.debug('------------')
log.debug('ucds = %s' % ucds)
if consider_rxf and summation_point_cid0 is None:
summation_point_cid0 = np.array([0., 0., 0.])
#eye = np.eye(3, dtype=beta_cd.dtype)
for cd in ucds:
#log.debug('cd = %s' % cd)
i = np.where(cds == cd)[0]
nidsi = nids[i]
analysis_coord = coords[cd]
beta_cd = analysis_coord.beta()
force_in_locali = -force_in_local[i, :]
moment_in_locali = -moment_in_local[i, :]
#if 0 and np.array_equal(beta_cd, eye):
force_in_globali = force_in_locali
moment_in_globali = moment_in_locali
if debug:
#log.debug('analysis_coord =\n%s' % analysis_coord)
log.debug('beta_cd =\n%s' % beta_cd)
log.debug('i = %s' % i)
log.debug('force_in_local = %s' % force_in_local)
log.debug('force_in_local.sum() = %s' % force_in_local_sum)
#force_in_locali.astype('float64')
#moment_in_locali.astype('float64')
# rotate loads from an arbitrary coordinate system to local xyz
if 0:
log.debug(analysis_coord)
log.debug(force_in_locali)
force_in_locali = analysis_coord.coord_to_xyz_array(force_in_locali)
moment_in_locali = analysis_coord.coord_to_xyz_array(moment_in_locali)
if debug:
log.debug('i = %s' % i)
log.debug('nids = %s' % nidsi)
log.debug('force_input = %s' % force_in_locali)
# rotate the forces/moments into a coordinate system coincident
# with the local frame and with the same primary directions
# as the global frame
force_in_globali = force_in_locali.dot(beta_cd)
moment_in_globali = moment_in_locali.dot(beta_cd)
# rotate the forces and moments into a coordinate system coincident
# with the output frame and with the same primary directions
# as the output frame
#if 0 and np.array_equal(beta_out, eye):
#force_outi = force_in_globali
#moment_outi = moment_in_globali
force_outi = force_in_globali.dot(beta_out)
moment_outi = moment_in_globali.dot(beta_out)
if debug:
#if show_local:
log.debug('force_in_locali = \n%s' % force_in_locali.T)
log.debug('force_in_globali = \n%s' % force_in_globali.T)
log.debug('force_in_locali.sum() = %s' % force_in_locali.sum(axis=0))
log.debug('force_in_globali.sum() = %s' % force_in_globali.sum(axis=0))
log.debug('force_outi = %s' % force_outi)
#log.debug('moment_outi = %s' % moment_outi)
# these are in the local XYZ coordinates
# we'll do the final transform later
force_out[i, :] = force_outi
moment_out[i, :] = moment_outi
# Now we need to consider the "r x F" term.
# We calculate it in the global xyz frame about the summation
# point. Then we transform it to the output XYZ frame
if consider_rxf:
delta = xyz_cid0[i, :] - summation_point_cid0[np.newaxis, :]
rxf = cross(delta, force_in_globali)
rxf_in_cid = rxf.dot(beta_out)
if debug:
log.debug('delta_moment = %s' % delta)
#log.debug('rxf = %s' % rxf.T)
#log.debug('rxf_in_cid = %s' % rxf_in_cid)
moment_out[i, :] += rxf_in_cid
# rotate loads from the local XYZ to an arbitrary coordinate system
# flip the sign of the output to be consistent with Patran
#force_out2 = -coord_out.xyz_to_coord_array(force_out)
#moment_out2 = -coord_out.xyz_to_coord_array(moment_out)
#return force_out2, moment_out2
return force_out, moment_out
def transform_force_moment_sum(force_in_local, moment_in_local,
coord_out, coords,
nid_cd, icd_transform,
xyz_cid0, summation_point_cid0=None,
consider_rxf=True,
debug=False, log=None):
"""
Transforms force/moment from global to local and returns a sum of forces/moments.
Parameters
----------
force_in_local : (N, 3) ndarray
forces in the local frame
moment_in_local : (N, 3) ndarray
moments in the local frame
coord_out : CORDx()
the desired local frame
coords : dict[int] = CORDx
all the coordinate systems
key : int
value : CORDx
nid_cd : (M, 2) int ndarray
the (BDF.point_ids, cd) array
icd_transform : dict[cd] = (Mi, ) int ndarray
the mapping for nid_cd
xyz_cid0 : (nnodes + nspoints + nepoints, 3) ndarray
the grid locations in coordinate system 0
summation_point_cid0 : (3, ) ndarray
the summation point in the global frame
consider_rxf : bool; default=True
considers the r x F term
debug : bool; default=False
debugging flag
log : log; default=None
a log object that gets used when debug=True
Returns
-------
force_out : (n, 3) float ndarray
the ith float components in the coord_out coordinate frame
moment_out : (n, 3) float ndarray
the ith moment components about the summation point in the coord_out coordinate frame
force_out_sum : (3, ) float ndarray
the sum of forces in the coord_out coordinate frame
moment_out_sum : (3, ) float ndarray
the sum of moments about the summation point in the coord_out coordinate frame
.. todo:: doesn't seem to handle cylindrical/spherical systems
"""
assert log is not None
out = transform_force_moment(
force_in_local, moment_in_local,
coord_out, coords, nid_cd,
icd_transform, xyz_cid0,
summation_point_cid0=summation_point_cid0, consider_rxf=consider_rxf,
debug=debug, log=log)
force_out, moment_out = out
if debug:
log.debug('force_sum = %s' % force_out.sum(axis=0))
if consider_rxf:
log.debug('moment_sum = %s' % moment_out.sum(axis=0))
return force_out, moment_out, force_out.sum(axis=0), moment_out.sum(axis=0)
| 33.006135 | 93 | 0.595632 |
aaaa285ef305a12c09e817b55f7f8a65c425d1ba
| 1,774 |
py
|
Python
|
app/utils/helpers.py
|
bossenti/OutdoorTripFinder-Backend
|
6818cd19640e3826cca42fa797b5d1d7a264f856
|
[
"Apache-2.0"
] | null | null | null |
app/utils/helpers.py
|
bossenti/OutdoorTripFinder-Backend
|
6818cd19640e3826cca42fa797b5d1d7a264f856
|
[
"Apache-2.0"
] | null | null | null |
app/utils/helpers.py
|
bossenti/OutdoorTripFinder-Backend
|
6818cd19640e3826cca42fa797b5d1d7a264f856
|
[
"Apache-2.0"
] | null | null | null |
import math
import string
import random
# in km
EARTH_RADIUS = 6371
def deg_to_radian(val, backwards=False):
"""
converts values in degree to radians and vice versa
    :param backwards: indicates that the conversion goes in the other direction (radians to degrees)
:param val: value in degrees
:return: converted value as radian value
"""
if not backwards:
return val * math.pi / 180
else:
return val * 180 / math.pi
def distance_between_coordinates(lat1, long1, lat2, long2):
"""
calculates the distance between two geo-coordinates with the Haversine formula
reference:
https://en.wikipedia.org/wiki/Haversine_formula
:param lat1: latitude of position one
:param long1: longitude of position one
    :param lat2: latitude of position two
:param long2: longitude of position two
    :return: distance between both coordinates in km, rounded up to the next integer
"""
diff_lat = deg_to_radian(lat2 - lat1)
diff_long = deg_to_radian(long2 - long1)
# separate calculation into both terms of sum in the square root
left_form = math.sin(diff_lat / 2) ** 2
right_form = math.cos(deg_to_radian(lat1)) * math.cos(deg_to_radian(lat2)) * math.sin(diff_long / 2) ** 2
return math.ceil(2 * EARTH_RADIUS * math.asin(math.sqrt(left_form + right_form)))
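# Worked example for distance_between_coordinates (demo helper, not part of
# the original module): Paris (48.8566 N, 2.3522 E) to Berlin (52.5200 N,
# 13.4050 E) is roughly 880 km, so the ceiled result should land in that range.
def _distance_example():
    d = distance_between_coordinates(48.8566, 2.3522, 52.5200, 13.4050)
    assert 850 <= d <= 900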
def rand_alphanumeric(ln=16):
return ''.join(random.choices(string.ascii_letters + string.digits, k=ln))
def sort_by_dist(dic):
return dic['dist']
def intersection(ids, keys_used):
lst3 = [v for v in keys_used if v not in ids]
return lst3
def sort_dict(items):
return {k: v for k, v in sorted(items, key=lambda item: item[1], reverse=True)}
| 28.612903 | 109 | 0.694476 |
2c880ceb8f5f418b7c59a809d9f36547054607eb
| 2,950 |
py
|
Python
|
sample_setups/pcmdi_parameter_files/variability_modes/alternative_obs/myParam_NAO_cmip3.py
|
jasonb5/pcmdi_metrics
|
0c23d8d247da24d0ab9deb04d8db9619af628680
|
[
"BSD-3-Clause"
] | null | null | null |
sample_setups/pcmdi_parameter_files/variability_modes/alternative_obs/myParam_NAO_cmip3.py
|
jasonb5/pcmdi_metrics
|
0c23d8d247da24d0ab9deb04d8db9619af628680
|
[
"BSD-3-Clause"
] | null | null | null |
sample_setups/pcmdi_parameter_files/variability_modes/alternative_obs/myParam_NAO_cmip3.py
|
jasonb5/pcmdi_metrics
|
0c23d8d247da24d0ab9deb04d8db9619af628680
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import os
# =================================================
# Background Information
# -------------------------------------------------
mip = "cmip3"
exp = "20c3m"
frequency = "mo"
realm = "atm"
# =================================================
# Analysis Options
# -------------------------------------------------
variability_mode = "NAO" # Available domains: NAM, NAO, SAM, PNA, PDO
seasons = [
"DJF",
"MAM",
"JJA",
"SON",
] # Available seasons: DJF, MAM, JJA, SON, monthly, yearly
RemoveDomainMean = True # Remove Domain Mean from each time step (default=True)
EofScaling = False # Convert EOF pattern as unit variance (default=False)
landmask = False # Mask out land region, thus considering only the ocean grid (default=False)
ConvEOF = True # Calculate conventional EOF for model
CBF = True # Calculate Common Basis Function (CBF) for model
# =================================================
# Miscellaneous
# -------------------------------------------------
update_json = True # False
debug = False # False
# =================================================
# Observation
# -------------------------------------------------
reference_data_name = "ERA20C"
reference_data_path = os.path.join(
"/p/user_pub/PCMDIobs/PCMDIobs2/atmos/mon/psl/ERA-20C/gn/v20200707",
"psl_mon_ERA-20C_BE_gn_v20200707_190001-201012.nc",
)
varOBS = "psl"
ObsUnitsAdjust = (True, "divide", 100.0) # Pa to hPa; or (False, 0, 0)
osyear = 1900
oeyear = 2005
eofn_obs = 1
# =================================================
# Models
# -------------------------------------------------
modpath = os.path.join(
"/p/user_pub/pmp/pmp_results/pmp_v1.1.2/additional_xmls/%(mip)/%(exp)",
"%(variable).%(model).%(realization).xml",
)
modnames = [
"bccr_bcm2_0",
"cccma_cgcm3_1",
"cccma_cgcm3_1_t63",
"cnrm_cm3",
"gfdl_cm2_0",
"gfdl_cm2_1",
"giss_aom",
"giss_model_e_h",
"giss_model_e_r",
"iap_fgoals1_0_g",
"ingv_echam4",
"inmcm3_0",
"ipsl_cm4",
"miroc3_2_hires",
"miroc3_2_medres",
"miub_echo_g",
"mpi_echam5",
"mri_cgcm2_3_2a",
"ukmo_hadcm3",
"ukmo_hadgem1",
]
modnames = ["all"]
# modnames = ['CSIRO-Mk3-6-0']
realization = "*" # realizations
# realization = 'run1'
varModel = "psl"
ModUnitsAdjust = (True, "divide", 100.0) # Pa to hPa
msyear = 1900
meyear = 2005
eofn_mod = 1
# =================================================
# Output
# -------------------------------------------------
case_id = "{:v%Y%m%d}".format(datetime.datetime.now())
pmprdir = "/p/user_pub/pmp/pmp_results/pmp_v1.1.2"
if debug:
pmprdir = "/work/lee1043/imsi/result_test"
results_dir = os.path.join(
pmprdir,
"%(output_type)",
"variability_modes",
"%(mip)",
"%(exp)",
"%(case_id)",
"%(variability_mode)",
"%(reference_data_name)",
)
nc_out = True # Write output in NetCDF
plot = True # Create map graphics
| 25 | 85 | 0.521356 |
a533d5cff0c6530d9f36b6a5f5db6c785cd8b252
| 3,868 |
py
|
Python
|
quantization/lsq/qnn.py
|
creaiter/Quantization-Implementations
|
8e97ae9b91dbd3f02ebeccc05d12aee241d252d4
|
[
"MIT"
] | 2 |
2021-10-12T06:09:55.000Z
|
2022-01-28T08:17:13.000Z
|
quantization/lsq/qnn.py
|
creaiter/Quantization-Implementations
|
8e97ae9b91dbd3f02ebeccc05d12aee241d252d4
|
[
"MIT"
] | null | null | null |
quantization/lsq/qnn.py
|
creaiter/Quantization-Implementations
|
8e97ae9b91dbd3f02ebeccc05d12aee241d252d4
|
[
"MIT"
] | null | null | null |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from .quantizer import LSQ
class QuantConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
nbits=32, symmetric=False, **kwargs):
super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, padding_mode)
assert nbits > 0 and nbits < 33
self.nbits = nbits
if symmetric:
self.q_n = -1. * 2. ** (self.nbits - 1) + 1
self.q_p = 2. ** (self.nbits - 1) - 1
else:
self.q_n = -1. * 2. ** (self.nbits - 1)
self.q_p = 2. ** (self.nbits - 1) - 1
if self.nbits != 32:
self.quantizer = LSQ(self.nbits, self.q_n, self.q_p)
def forward(self, x):
if self.nbits == 32:
quantized_weight = self.weight
else:
quantized_weight = self.quantizer(self.weight)
return self._conv_forward(x, quantized_weight)
class QuantLinear(nn.Linear):
def __init__(self, in_features, out_features, bias=True, nbits=32, symmetric=False, **kwargs):
super(QuantLinear, self).__init__(in_features, out_features, bias)
assert nbits > 0 and nbits < 33
self.nbits = nbits
if symmetric:
self.q_n = -1. * 2. ** (self.nbits - 1) + 1
self.q_p = 2. ** (self.nbits - 1) - 1
else:
self.q_n = -1. * 2. ** (self.nbits - 1)
self.q_p = 2. ** (self.nbits - 1) - 1
if self.nbits != 32:
self.quantizer = LSQ(self.nbits, self.q_n, self.q_p)
def forward(self, x):
if self.nbits == 32:
quantized_weight = self.weight
else:
quantized_weight = self.quantizer(self.weight)
return F.linear(x, quantized_weight, self.bias)
class QuantReLU(nn.ReLU):
def __init__(self, inplace=False, nbits=32, **kwargs):
super(QuantReLU, self).__init__(inplace)
assert nbits > 0 and nbits < 33
self.nbits = nbits
self.q_n = 0.
self.q_p = 2. ** self.nbits - 1
if self.nbits != 32:
self.quantizer = LSQ(self.nbits, self.q_n, self.q_p)
def forward(self, x):
if self.nbits == 32:
output = F.relu(x, inplace=self.inplace)
else:
output = self.quantizer(x)
return output
class QuantReLU6(nn.ReLU6):
def __init__(self, inplace=False, nbits=32, **kwargs):
super(QuantReLU6, self).__init__(inplace)
assert nbits > 0 and nbits < 33
self.nbits = nbits
self.q_n = 0.
self.q_p = 2. ** self.nbits - 1
if self.nbits != 32:
self.quantizer = LSQ(self.nbits, self.q_n, self.q_p)
def forward(self, x):
output = F.hardtanh(x, self.min_val, self.max_val, self.inplace)
if self.nbits != 32:
output = self.quantizer(output)
return output
class QuantIdentity(nn.Identity):
def __init__(self, nbits=32, symmetric=True, **kwargs):
super(QuantIdentity, self).__init__()
assert nbits > 0 and nbits < 33
self.nbits = nbits
if symmetric:
self.q_n = -1. * 2. ** (self.nbits - 1) + 1
self.q_p = 2. ** (self.nbits - 1) - 1
else:
self.q_n = -1. * 2. ** (self.nbits - 1)
self.q_p = 2. ** (self.nbits - 1) - 1
if self.nbits != 32:
self.quantizer = LSQ(self.nbits, self.q_n, self.q_p)
def forward(self, x):
if self.nbits == 32:
output = x
else:
output = self.quantizer(x)
return output
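# A minimal usage sketch (added for illustration, not part of the original
# module): swap a float Conv2d for its quantized counterpart and run a dummy
# forward pass. The layer sizes are made up, and this assumes the LSQ quantizer
# in .quantizer initializes its own step size and that the installed PyTorch
# matches the version this repo targets (its _conv_forward call takes two args).
def _quant_conv_example():
    layer = QuantConv2d(3, 8, kernel_size=3, padding=1, bias=False, nbits=4)
    x = torch.randn(1, 3, 16, 16)
    y = layer(x)    # weights are fake-quantized to 4 bits before the convolution
    return y.shape  # expected: torch.Size([1, 8, 16, 16])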
| 30.456693 | 98 | 0.553516 |
0e0a52ac5c95c6299e8abbd422696e513983383a
| 291 |
py
|
Python
|
brainspace/datasets/__init__.py
|
josemariamoreira/BrainSpace
|
d7e8e65c6463a81146e7fcfcca902feef04d329d
|
[
"BSD-3-Clause"
] | null | null | null |
brainspace/datasets/__init__.py
|
josemariamoreira/BrainSpace
|
d7e8e65c6463a81146e7fcfcca902feef04d329d
|
[
"BSD-3-Clause"
] | null | null | null |
brainspace/datasets/__init__.py
|
josemariamoreira/BrainSpace
|
d7e8e65c6463a81146e7fcfcca902feef04d329d
|
[
"BSD-3-Clause"
] | null | null | null |
from .base import (load_conte69, load_mask, load_group_fc, load_parcellation,
load_gradient, load_marker)
__all__ = ['load_conte69',
'load_mask',
'load_group_fc',
'load_gradient',
'load_parcellation',
'load_marker']
| 29.1 | 77 | 0.587629 |
0ab9e34a8d3e2b972aac50a04fcb569442318cc0
| 30,426 |
py
|
Python
|
sphinxcontrib/jupyter/writers/translate_all.py
|
AnjuJoon/sphinxcontrib-jupyter
|
ac828e0b165fa81bc5b4073ec604faee965ceabc
|
[
"BSD-3-Clause"
] | null | null | null |
sphinxcontrib/jupyter/writers/translate_all.py
|
AnjuJoon/sphinxcontrib-jupyter
|
ac828e0b165fa81bc5b4073ec604faee965ceabc
|
[
"BSD-3-Clause"
] | null | null | null |
sphinxcontrib/jupyter/writers/translate_all.py
|
AnjuJoon/sphinxcontrib-jupyter
|
ac828e0b165fa81bc5b4073ec604faee965ceabc
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import re
import nbformat.v4
from docutils import nodes, writers
from .translate_code import JupyterCodeTranslator
from .utils import JupyterOutputCellGenerators
from shutil import copyfile
import os
class JupyterTranslator(JupyterCodeTranslator, object):
""" Jupyter Translator for Text and Code
"""
SPLIT_URI_ID_REGEX = re.compile(r"([^\#]*)\#?(.*)")
def __init__(self, builder, document):
super(JupyterTranslator, self).__init__(builder, document)
# Settings
self.sep_lines = " \n"
self.sep_paras = "\n\n"
self.indent_char = " "
self.indent = self.indent_char * 4
self.default_ext = ".ipynb"
self.html_ext = ".html"
self.urlpath = builder.urlpath
# Variables used in visit/depart
self.in_code_block = False # if False, it means in markdown_cell
self.in_block_quote = False
self.block_quote_type = "block-quote"
self.in_note = False
self.in_attribution = False
self.in_rubric = False
self.in_footnote = False
self.in_footnote_reference = False
self.in_download_reference = False
self.in_inpage_reference = False
self.in_caption = False
self.in_toctree = False
self.in_list = False
self.in_math = False
self.in_math_block = False
self.code_lines = []
self.markdown_lines = []
self.indents = []
self.section_level = 0
self.bullets = []
self.list_item_starts = []
self.in_topic = False
self.reference_text_start = 0
self.in_reference = False
self.list_level = 0
self.in_citation = False
self.math_block_label = None
self.images = []
self.files = []
self.table_builder = None
# Slideshow option
self.metadata_slide = False #False is the value by default for all the notebooks
self.slide = "slide" #value by default
# specific visit and depart methods
# ---------------------------------
def visit_document(self, node):
"""at start
"""
JupyterCodeTranslator.visit_document(self, node)
def depart_document(self, node):
"""at end
Almost the exact same implementation as that of the superclass.
Notes
-----
        [1] copyfile is not graceful; we should catch the exception if the file is not found and issue a warning in sphinx
[2] should this be moved to CodeTranslator for support files when producing code only notebooks?
"""
self.add_markdown_cell()
#Parse .. jupyter-dependency::
if len(self.files) > 0:
for fl in self.files:
src_fl = os.path.join(self.builder.srcdir, fl)
out_fl = os.path.join(self.builder.outdir, os.path.basename(fl)) #copy file to same location as notebook (remove dir structure)
#Check if output directory exists
out_dir = os.path.dirname(out_fl)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print("Copying {} to {}".format(src_fl, out_fl))
copyfile(src_fl, out_fl)
JupyterCodeTranslator.depart_document(self, node)
# =========
# Sections
# =========
def visit_only(self, node):
pass
def depart_only(self, node):
pass
def visit_topic(self, node):
self.in_topic = True
def depart_topic(self, node):
self.in_topic = False
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
#=================
# Inline elements
#=================
def visit_Text(self, node):
text = node.astext()
#Escape Special markdown chars except in code block
if self.in_code_block == False:
text = text.replace("$", "\$")
if self.in_math:
text = '$ {} $'.format(text.strip())
elif self.in_math_block and self.math_block_label:
text = "$$\n{0}{1}$${2}".format(
text.strip(), self.math_block_label, self.sep_paras
)
self.math_block_label = None
elif self.in_math_block:
text = "$$\n{0}\n$${1}".format(text.strip(), self.sep_paras)
if self.in_code_block:
self.code_lines.append(text)
elif self.table_builder:
self.table_builder['line_pending'] += text
elif self.in_block_quote or self.in_note:
if self.block_quote_type == "epigraph":
self.markdown_lines.append(text.replace("\n", "\n> ")) #Ensure all lines are indented
else:
self.markdown_lines.append(text)
elif self.in_caption and self.in_toctree:
self.markdown_lines.append("# {}".format(text))
else:
self.markdown_lines.append(text)
def depart_Text(self, node):
pass
def visit_attribution(self, node):
self.in_attribution = True
self.markdown_lines.append("> ")
def depart_attribution(self, node):
self.in_attribution = False
self.markdown_lines.append("\n")
# image
def visit_image(self, node):
"""
Notes
-----
1. Should this use .has_attrs()?
2. the scale, height and width properties are not combined in this
implementation as is done in http://docutils.sourceforge.net/docs/ref/rst/directives.html#image
"""
uri = node.attributes["uri"]
self.images.append(uri) #TODO: list of image files
if self.jupyter_download_nb_image_urlpath:
for file_path in self.jupyter_static_file_path:
if file_path in uri:
uri = uri.replace(file_path +"/", self.jupyter_download_nb_image_urlpath)
break #don't need to check other matches
attrs = node.attributes
if self.jupyter_images_markdown:
#-Construct MD image
            image = "".format(uri)
else:
# Construct HTML image
image = '<img src="{}" '.format(uri)
if "alt" in attrs.keys():
image += 'alt="{}" '.format(attrs["alt"])
style = ""
if "width" in attrs.keys():
style += "width:{};".format(attrs["width"])
if "height" in attrs.keys():
style += "height:{};".format(attrs["height"])
if "scale" in attrs.keys():
style = "width:{0}%;height:{0}%".format(attrs["scale"])
image += 'style="{}" '.format(style)
if "align" in attrs.keys():
image += 'align="{}"'.format(attrs["align"])
image = image.rstrip() + ">\n\n" #Add double space for html
self.markdown_lines.append(image)
# math
def visit_math(self, node):
"""inline math"""
# With sphinx < 1.8, a math node has a 'latex' attribute, from which the
# formula can be obtained and added to the text.
# With sphinx >= 1.8, a math node has no 'latex' attribute, which mean
# that a flag has to be raised, so that the in visit_Text() we know that
# we are dealing with a formula.
try: # sphinx < 1.8
math_text = node.attributes["latex"].strip()
except KeyError:
# sphinx >= 1.8
self.in_math = True
# the flag is raised, the function can be exited.
return
formatted_text = "$ {} $".format(math_text)
if self.table_builder:
self.table_builder['line_pending'] += formatted_text
else:
self.markdown_lines.append(formatted_text)
def depart_math(self, node):
self.in_math = False
def visit_displaymath(self, node):
"""directive math"""
# displaymath is called with sphinx < 1.8 only
math_text = node.attributes["latex"].strip()
if self.in_list and node["label"]:
self.markdown_lines.pop() #remove entry \n from table builder
if self.list_level == 0:
formatted_text = "$$\n{0}\n$${1}".format(
math_text, self.sep_paras)
else:
formatted_text = "$$\n{0}\n$${1}".format(
math_text, self.sep_paras)
#check for labelled math
if node["label"]:
#Use \tags in the LaTeX environment
if self.jupyter_target_pdf:
#pdf should have label following tag and removed html id tags in visit_target
referenceBuilder = " \\tag{" + str(node["number"]) + "}" + "\\label{" + node["ids"][0] + "}\n"
else:
referenceBuilder = " \\tag{" + str(node["number"]) + "}\n" #node["ids"] should always exist for labelled displaymath
formatted_text = formatted_text.rstrip("$$\n") + referenceBuilder + "$${}".format(self.sep_paras)
self.markdown_lines.append(formatted_text)
def depart_displaymath(self, node):
if self.in_list:
self.markdown_lines[-1] = self.markdown_lines[-1][:-1] #remove excess \n
def visit_math_block(self, node):
"""directive math"""
# visit_math_block is called only with sphinx >= 1.8
self.in_math_block = True
if self.in_list and node["label"]:
self.markdown_lines.pop() #remove entry \n from table builder
#check for labelled math
if node["label"]:
#Use \tags in the LaTeX environment
if self.jupyter_target_pdf:
#pdf should have label following tag and removed html id tags in visit_target
referenceBuilder = " \\tag{" + str(node["number"]) + "}" + "\\label{" + node["ids"][0] + "}\n"
else:
referenceBuilder = " \\tag{" + str(node["number"]) + "}\n"
#node["ids"] should always exist for labelled displaymath
self.math_block_label = referenceBuilder
def depart_math_block(self, node):
if self.in_list:
self.markdown_lines[-1] = self.markdown_lines[-1][:-1] #remove excess \n
self.in_math_block = False
def visit_table(self, node):
self.table_builder = dict()
self.table_builder['column_widths'] = []
self.table_builder['lines'] = []
self.table_builder['line_pending'] = ""
if 'align' in node:
self.table_builder['align'] = node['align']
else:
self.table_builder['align'] = "center"
def depart_table(self, node):
table_lines = "".join(self.table_builder['lines'])
self.markdown_lines.append(table_lines)
self.table_builder = None
def visit_thead(self, node):
""" Table Header """
self.table_builder['current_line'] = 0
def depart_thead(self, node):
""" create the header line which contains the alignment for each column """
header_line = "|"
for col_width in self.table_builder['column_widths']:
header_line += self.generate_alignment_line(
col_width, self.table_builder['align'])
header_line += "|"
self.table_builder['lines'].append(header_line + "\n")
def generate_alignment_line(self, line_length, alignment):
left = ":" if alignment != "right" else "-"
right = ":" if alignment != "left" else "-"
return left + "-" * (line_length - 2) + right
def visit_colspec(self, node):
self.table_builder['column_widths'].append(node['colwidth'])
def visit_row(self, node):
self.table_builder['line_pending'] = "|"
def depart_row(self, node):
finished_line = self.table_builder['line_pending'] + "\n"
self.table_builder['lines'].append(finished_line)
def visit_entry(self, node):
pass
def depart_entry(self, node):
self.table_builder['line_pending'] += "|"
def visit_raw(self, node):
pass
def visit_rubric(self, node):
self.in_rubric = True
self.add_markdown_cell()
if len(node.children) == 1 and node.children[0].astext() in ['Footnotes']:
self.markdown_lines.append('**{}**\n\n'.format(node.children[0].astext()))
raise nodes.SkipNode
def depart_rubric(self, node):
self.add_markdown_cell()
self.in_rubric = False
def visit_footnote_reference(self, node):
self.in_footnote_reference = True
refid = node.attributes['refid']
ids = node.astext()
if self.jupyter_target_html:
link = "<sup><a href=#{} id={}-link>[{}]</a></sup>".format(refid, refid, ids)
else:
link = "<sup>[{}](#{})</sup>".format(ids, refid)
self.markdown_lines.append(link)
raise nodes.SkipNode
def depart_footnote_reference(self, node):
self.in_footnote_reference = False
def visit_footnote(self, node):
self.in_footnote = True
def depart_footnote(self, node):
self.in_footnote = False
def visit_download_reference(self, node):
self.in_download_reference = True
html = "<a href={} download>".format(node["reftarget"])
self.markdown_lines.append(html)
def depart_download_reference(self, node):
self.markdown_lines.append("</a>")
self.in_download_reference = False
#================
# markdown cells
#================
# general paragraph
def visit_paragraph(self, node):
pass
def depart_paragraph(self, node):
if self.list_level > 0:
self.markdown_lines.append(self.sep_lines)
elif self.table_builder:
pass
elif self.block_quote_type == "epigraph":
try:
attribution = node.parent.children[1]
self.markdown_lines.append("\n>\n") #Continue block for attribution
except:
self.markdown_lines.append(self.sep_paras)
else:
self.markdown_lines.append(self.sep_paras)
# title(section)
def visit_title(self, node):
JupyterCodeTranslator.visit_title(self, node)
self.add_markdown_cell()
if self.in_topic:
self.markdown_lines.append(
"{} ".format("#" * (self.section_level + 1)))
elif self.table_builder:
self.markdown_lines.append(
"### {}\n".format(node.astext()))
else:
self.markdown_lines.append(
"{} ".format("#" * self.section_level))
def depart_title(self, node):
if not self.table_builder:
self.markdown_lines.append(self.sep_paras)
# emphasis(italic)
def visit_emphasis(self, node):
self.markdown_lines.append("*")
def depart_emphasis(self, node):
self.markdown_lines.append("*")
# strong(bold)
def visit_strong(self, node):
self.markdown_lines.append("**")
def depart_strong(self, node):
self.markdown_lines.append("**")
def visit_literal(self, node):
if self.in_download_reference:
return
self.markdown_lines.append("`")
def depart_literal(self, node):
if self.in_download_reference:
return
self.markdown_lines.append("`")
# figures
def visit_figure(self, node):
pass
def depart_figure(self, node):
self.markdown_lines.append(self.sep_lines)
# reference
def visit_reference(self, node):
"""anchor link"""
self.in_reference = True
if self.jupyter_target_pdf:
if "refid" in node:
if 'equation-' in node['refid']:
self.markdown_lines.append("\eqref{")
elif self.in_topic:
pass
else:
self.markdown_lines.append("\hyperlink{")
else:
self.markdown_lines.append("\hyperlink{")
else:
self.markdown_lines.append("[")
self.reference_text_start = len(self.markdown_lines)
def depart_reference(self, node):
if self.in_topic:
# Jupyter Notebook uses the target text as its id
uri_text = "".join(
self.markdown_lines[self.reference_text_start:]).strip()
uri_text = re.sub(
self.URI_SPACE_REPLACE_FROM, self.URI_SPACE_REPLACE_TO, uri_text)
if self.jupyter_target_html:
                #Adjust contents (toc) text when targeting html to prevent nbconvert from breaking html on )
uri_text = uri_text.replace("(", "%28")
uri_text = uri_text.replace(")", "%29")
#Format end of reference in topic
if self.jupyter_target_pdf:
uri_text = uri_text.lower()
SPECIALCHARS = [r"!", r"@", r"#", r"$", r"%", r"^", r"&", r"*", r"(", r")", r"[", r"]", r"{",
r"}", r"|", r":", r";", r","]
for CHAR in SPECIALCHARS:
uri_text = uri_text.replace(CHAR,"")
formatted_text = " \\ref{" + uri_text + "}" #Use Ref and Plain Text titles
else:
formatted_text = "](#{})".format(uri_text)
self.markdown_lines.append(formatted_text)
else:
# if refuri exists, then it includes id reference(#hoge)
if "refuri" in node.attributes:
refuri = node["refuri"]
# add default extension(.ipynb)
if "internal" in node.attributes and node.attributes["internal"] == True:
if self.jupyter_target_html:
refuri = self.add_extension_to_inline_link(refuri, self.html_ext)
## add url path if it is set
if self.urlpath is not None:
refuri = self.urlpath + refuri
elif self.jupyter_target_pdf and 'references#' in refuri:
label = refuri.split("#")[-1]
bibtex = self.markdown_lines.pop()
if "hyperlink" in self.markdown_lines[-1]:
self.markdown_lines.pop()
refuri = "reference-\\cite{" + label
else:
refuri = self.add_extension_to_inline_link(refuri, self.default_ext)
else:
# in-page link
if "refid" in node:
refid = node["refid"]
self.in_inpage_reference = True
if not self.jupyter_target_pdf:
#markdown doesn't handle closing brackets very well so will replace with %28 and %29
#ignore adjustment when targeting pdf as pandoc doesn't parse %28 correctly
refid = refid.replace("(", "%28")
refid = refid.replace(")", "%29")
if self.jupyter_target_pdf:
refuri = refid
else:
#markdown target
refuri = "#{}".format(refid)
# error
else:
self.error("Invalid reference")
refuri = ""
#TODO: review if both %28 replacements necessary in this function?
# Propose delete above in-link refuri
if not self.jupyter_target_pdf:
#ignore adjustment when targeting pdf as pandoc doesn't parse %28 correctly
refuri = refuri.replace("(", "%28") #Special case to handle markdown issue with reading first )
refuri = refuri.replace(")", "%29")
if self.jupyter_target_pdf and 'reference-' in refuri:
self.markdown_lines.append(refuri.replace("reference-","") + "}")
elif self.jupyter_target_pdf and self.in_inpage_reference:
labeltext = self.markdown_lines.pop()
# Check for Equations as they do not need labetext
if 'equation-' in refuri:
self.markdown_lines.append(refuri + "}")
else:
self.markdown_lines.append(refuri + "}{" + labeltext + "}")
# if self.jupyter_target_pdf and self.in_toctree:
# #TODO: this will become an internal link when making a single unified latex file
# formatted_text = " \\ref{" + refuri + "}"
# self.markdown_lines.append(formatted_text)
else:
self.markdown_lines.append("]({})".format(refuri))
if self.in_toctree:
self.markdown_lines.append("\n")
self.in_reference = False
# target: make anchor
def visit_target(self, node):
if "refid" in node.attributes:
refid = node.attributes["refid"]
if self.jupyter_target_pdf:
if 'equation' in refid:
#no html targets when computing notebook to target pdf in labelled math
pass
else:
#set hypertargets for non math targets
self.markdown_lines.append("\n\\hypertarget{" + refid + "}{}\n\n")
else:
self.markdown_lines.append("\n<a id='{}'></a>\n".format(refid))
# list items
def visit_bullet_list(self, node):
self.list_level += 1
# markdown does not have option changing bullet chars
self.bullets.append("-")
self.indents.append(len(self.bullets[-1] * 2)) #add two per level
def depart_bullet_list(self, node):
self.list_level -= 1
if self.list_level == 0:
self.markdown_lines.append(self.sep_paras)
if self.in_topic:
self.add_markdown_cell()
self.bullets.pop()
self.indents.pop()
def visit_enumerated_list(self, node):
self.list_level += 1
# markdown does not have option changing bullet chars
self.bullets.append("1.")
self.indents.append(len(self.bullets[-1]))
def depart_enumerated_list(self, node):
self.list_level -= 1
if self.list_level == 0:
self.markdown_lines.append(self.sep_paras)
self.bullets.pop()
self.indents.pop()
def visit_list_item(self, node):
self.in_list = True
head = "{} ".format(self.bullets[-1])
self.markdown_lines.append(head)
self.list_item_starts.append(len(self.markdown_lines))
def depart_list_item(self, node):
self.in_list = False
list_item_start = self.list_item_starts.pop()
indent = self.indent_char * self.indents[-1]
br_removed_flag = False
# remove last breakline
if self.markdown_lines and self.markdown_lines[-1][-1] == "\n":
br_removed_flag = True
self.markdown_lines[-1] = self.markdown_lines[-1][:-1]
for i in range(list_item_start, len(self.markdown_lines)):
self.markdown_lines[i] = self.markdown_lines[i].replace(
"\n", "\n{}".format(indent))
# add breakline
if br_removed_flag:
self.markdown_lines.append("\n")
# definition list
def visit_definition_list(self, node):
self.markdown_lines.append("\n<dl style='margin: 20px 0;'>\n")
def depart_definition_list(self, node):
self.markdown_lines.append("\n</dl>{}".format(self.sep_paras))
def visit_term(self, node):
self.markdown_lines.append("<dt>")
def depart_term(self, node):
self.markdown_lines.append("</dt>\n")
def visit_definition(self, node):
self.markdown_lines.append("<dd>\n")
def depart_definition(self, node):
self.markdown_lines.append("</dd>\n")
# field list
def visit_field_list(self, node):
self.visit_definition_list(node)
def depart_field_list(self, node):
self.depart_definition_list(node)
def visit_field_name(self, node):
self.visit_term(node)
def depart_field_name(self, node):
self.depart_term(node)
def visit_field_body(self, node):
self.visit_definition(node)
def depart_field_body(self, node):
self.depart_definition(node)
# citation
def visit_citation(self, node):
self.in_citation = True
if "ids" in node.attributes:
ids = node.attributes["ids"]
id_text = ""
for id_ in ids:
id_text += "{} ".format(id_)
else:
id_text = id_text[:-1]
self.markdown_lines.append(
"<a id='{}'></a>\n".format(id_text))
def depart_citation(self, node):
self.in_citation = False
# label
def visit_label(self, node):
if self.in_footnote:
ids = node.parent.attributes["ids"]
id_text = ""
for id_ in ids:
id_text += "{} ".format(id_)
else:
id_text = id_text[:-1]
if self.jupyter_target_html:
self.markdown_lines.append("<p><a id={} href=#{}-link><strong>[{}]</strong></a> ".format(id_text, id_text, node.astext()))
else:
self.markdown_lines.append("<a id='{}'></a>\n**[{}]** ".format(id_text, node.astext()))
raise nodes.SkipNode
if self.in_citation:
self.markdown_lines.append("\[")
def depart_label(self, node):
if self.in_citation:
self.markdown_lines.append("\] ")
# ===============================================
# code blocks are implemented in the superclass
# ===============================================
def visit_block_quote(self, node):
if self.in_list: #allow for 4 spaces interpreted as block_quote
self.markdown_lines.append("\n")
return
self.in_block_quote = True
if "epigraph" in node.attributes["classes"]:
self.block_quote_type = "epigraph"
self.markdown_lines.append("> ")
def depart_block_quote(self, node):
if "epigraph" in node.attributes["classes"]:
self.block_quote_type = "block-quote"
self.markdown_lines.append("\n")
self.in_block_quote = False
def visit_literal_block(self, node):
JupyterCodeTranslator.visit_literal_block(self, node)
if self.in_code_block:
self.add_markdown_cell()
def depart_literal_block(self, node):
JupyterCodeTranslator.depart_literal_block(self, node)
def visit_note(self, node):
self.in_note = True
self.markdown_lines.append(">**Note**\n>\n>")
def depart_note(self, node):
self.in_note = False
def depart_raw(self, node):
self.markdown_lines.append("\n\n")
# =============
# Jupyter Nodes
# =============
def visit_jupyter_node(self, node):
try:
if 'cell-break' in node.attributes:
self.add_markdown_cell()
if 'slide' in node.attributes:
self.metadata_slide = node['slide'] # this activates the slideshow metadata for the notebook
if 'slide-type' in node.attributes:
if "fragment" in node['slide-type']:
self.add_markdown_cell(slide_type=node['slide-type']) #start a new cell
self.slide = node['slide-type'] # replace the default value
except:
pass
#Parse jupyter_dependency directive (TODO: Should this be a separate node type?)
try:
self.files.append(node['uri'])
except:
pass
def depart_jupyter_node(self, node):
if 'cell-break' in node.attributes:
pass
if 'slide' in node.attributes:
pass
if 'slide-type' in node.attributes:
pass
def visit_comment(self, node):
raise nodes.SkipNode
def visit_compact_paragraph(self, node):
try:
if node.attributes['toctree']:
self.in_toctree = True
except:
pass #Should this execute visit_compact_paragragh in BaseTranslator?
def depart_compact_paragraph(self, node):
try:
if node.attributes['toctree']:
self.in_toctree = False
except:
pass
def visit_caption(self, node):
self.in_caption = True
def depart_caption(self, node):
self.in_caption = False
if self.in_toctree:
self.markdown_lines.append("\n")
# ================
# general methods
# ================
def add_markdown_cell(self, slide_type="slide"):
"""split a markdown cell here
* add the slideshow metadata
* append `markdown_lines` to notebook
* reset `markdown_lines`
"""
line_text = "".join(self.markdown_lines)
formatted_line_text = self.strip_blank_lines_in_end_of_block(line_text)
slide_info = {'slide_type': self.slide}
if len(formatted_line_text.strip()) > 0:
new_md_cell = nbformat.v4.new_markdown_cell(formatted_line_text)
if self.metadata_slide: # modify the slide metadata on each cell
new_md_cell.metadata["slideshow"] = slide_info
self.slide = slide_type
self.output["cells"].append(new_md_cell)
self.markdown_lines = []
@classmethod
def split_uri_id(cls, uri):
return re.search(cls.SPLIT_URI_ID_REGEX, uri).groups()
@classmethod
def add_extension_to_inline_link(cls, uri, ext):
if "." not in uri:
if len(uri) > 0 and uri[0] == "#":
return uri
uri, id_ = cls.split_uri_id(uri)
if len(id_) == 0:
return "{}{}".format(uri, ext)
else:
return "{}{}#{}".format(uri, ext, id_)
#adjust relative references
elif "../" in uri:
uri = uri.replace("../", "")
uri, id_ = cls.split_uri_id(uri)
if len(id_) == 0:
return "{}{}".format(uri, ext)
else:
return "{}{}#{}".format(uri, ext, id_)
return uri
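# Small illustrative checks for the pure URI helpers above (added for
# illustration; not part of the original writer and not used by Sphinx):
def _uri_helper_examples():
    # plain page references get the notebook extension appended
    assert JupyterTranslator.add_extension_to_inline_link("intro", ".ipynb") == "intro.ipynb"
    # in-page anchors are preserved after the extension
    assert JupyterTranslator.add_extension_to_inline_link("intro#section", ".ipynb") == "intro.ipynb#section"
    # pure anchors are left untouched
    assert JupyterTranslator.add_extension_to_inline_link("#section", ".ipynb") == "#section"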
| 36.007101 | 149 | 0.564156 |
ef61706dc805b4fdc539524783ec8f8823b8ee46
| 6,044 |
py
|
Python
|
gamestate-changes/change_statistics/basicUtils.py
|
phylib/MinecraftNDN-RAFNET19
|
c7bfa7962707af367fafe9d879bc63637c06aec7
|
[
"MIT"
] | 1 |
2020-05-18T15:55:09.000Z
|
2020-05-18T15:55:09.000Z
|
gamestate-changes/change_statistics/basicUtils.py
|
phylib/MinecraftNDN-RAFNET19
|
c7bfa7962707af367fafe9d879bc63637c06aec7
|
[
"MIT"
] | null | null | null |
gamestate-changes/change_statistics/basicUtils.py
|
phylib/MinecraftNDN-RAFNET19
|
c7bfa7962707af367fafe9d879bc63637c06aec7
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from enum import Enum
ChangeType = Enum('ChangeType', 'block tile_entity entity status')
def getType(value):
if value == 'status':
return ChangeType.status
elif value == 'BLOCK':
return ChangeType.block
elif value.startswith('TILE_ENTITY'):
return ChangeType.tile_entity
else:
return ChangeType.entity
def to_date(str):
return datetime.strptime(str, '%d-%m-%Y_%H:%M:%S:%f')
# data structure for storing change information
# takes lines/rows as lists of string values and saves them (some values converted to more useful data types) in order
class IntervalData:
def __init__(self):
self.entries = [] # all change entries can be found in this list
self.status_entries = [] # additional list only storing the status information entries
def addLogRowItems(self, rowItems):
logTime = to_date(rowItems[0]) # wall clock time of log entry creation
worldFullTime = int(rowItems[1]) # MC server full time (ticks since startup)
type = getType(rowItems[2]) # Category/Type of change (ChangeType.(status|block|tile_entity|entity)
# store basic information (common to entries of all types)
entry = {'logTime': logTime, 'worldFullTime': worldFullTime, 'type': type, 'typeStr': rowItems[2]}
if type == ChangeType.status: # information specific to status entries
# 20-10-2018_19:34:40:724 time type="status" #loadedChunks #changedChunks #tileEntities #changedTileEntities #entities #changedEntities #onlinePlayers totalStateDiffTime
loadedChunks = int(rowItems[3]) # total number of loaded chunks
changedChunks = int(rowItems[4]) # number of chunks that changed (indicated by Events)
tileEntities = int(rowItems[5]) # total number of tile/block-entities
changedTileEntities = int(rowItems[6]) # number of tile entities that changed
entities = int(rowItems[7]) # total number of entities
changedEntities = int(rowItems[8]) # number of entities that changed
onlinePlayers = int(rowItems[9]) # total number of players logged in to the server
totalStateDiffTime = float(rowItems[10].replace('ms','')) # time it took the measurement plugin to compare the current with the last state (comparing "dirty" chunks as indicated by Events)
# update dictionary with type-specific information
entry.update({"loadedChunks": loadedChunks, 'changedChunks': changedChunks, 'tileEntities': tileEntities, 'changedTileEntities': changedTileEntities, 'entities': entities
, 'changedEntities': changedEntities, 'onlinePlayers': onlinePlayers, 'totalStateDiffTime': totalStateDiffTime})
# store change entry (in all lists)
self.entries.append(entry)
self.status_entries.append(entry)
else:
# change must be involving a block, tile/block-entity or entity, which all share the following properties
xpos = rowItems[3] # global coordinate system (block x coordinate)
ypos = rowItems[4] # global coordinate system (block y coordinate)
zpos = rowItems[5] # global coordinate system (block z coordinate)
world = rowItems[6] # name of the world (e.g. "world", "world_nether", "world_the_end"
chunk = rowItems[7] # x,z coordinates of the chunk that the change happened in
section = rowItems[8] # section number (0-15) of the section that the change happened in (inside the chunk)
# add properties common to block and (tile) entity
entry.update({'xpos': xpos, 'ypos': ypos, 'zpos': zpos, 'world': world, 'chunk': chunk, 'section': section})
if type == ChangeType.entity or type == ChangeType.tile_entity:
# change involves tile/block-entity or entity
# 20-10-2018_19:34:40:724 time type="entity" xpos ypos zpos world chunk section uuid [changed attributes]
uuid = rowItems[9] # all entities and tileEntities have an identifier (uuid)
changes = rowItems[10] # the NBT diff of the previous and current state of the (tile) entity
# update dict with (tile-)entity specific infos
entry.update({'uuid': uuid, 'changes': changes})
# store change entry
self.entries.append(entry)
elif type == ChangeType.block:
# change involves a block
# 20-10-2018_19:34:40:724 time type="block" xpos ypos zpos world chunk section material skylight emittedLight BlockData
material = rowItems[9] # the material a block consists of
skylight = rowItems[10] # the transparency regarding light from above (sun/moon)
emittedLight = rowItems[11] # light emitted/reflected by/from the block itself
blockData = rowItems[12] # additional data (<= one byte)
# update dictionary with block specific information
entry.update({'material' : material, 'skylight': skylight, 'emittedLight': emittedLight, 'blockData': blockData})
# store change entry
self.entries.append(entry)
else:
                raise ValueError("type '{}' is not handled!".format(type))  # handle a type that is not handled otherwise
def clearEntries(self):
del(self.entries)
self.entries=[]
def getNumStatusEntries(self):
return len(self.status_entries)
def append(self, other):
if type(other) != type(self):
raise ValueError("Object types do not match up!")
self.entries += other.entries
self.status_entries += other.status_entries
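# A minimal sketch of feeding one parsed log row into IntervalData (demo
# helper, not part of the original module; the row values are invented but
# follow the status layout documented in addLogRowItems).
def _interval_data_example():
    data = IntervalData()
    status_row = ['20-10-2018_19:34:40:724', '12345', 'status',
                  '100', '5', '40', '2', '300', '7', '3', '1.5ms']
    data.addLogRowItems(status_row)
    assert data.getNumStatusEntries() == 1
    assert data.entries[0]['onlinePlayers'] == 3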
| 62.309278 | 202 | 0.627399 |
28a42507f1646fa07299a387a05c4a957da93bb9
| 1,888 |
py
|
Python
|
data/tair/add_gene.py
|
SilkeAllmannLab/gwas
|
ced0000477890316f3c3fd9ec6b327aeb9364b25
|
[
"Apache-2.0"
] | 1 |
2021-08-28T07:02:15.000Z
|
2021-08-28T07:02:15.000Z
|
data/tair/add_gene.py
|
SilkeAllmannLab/gwas
|
ced0000477890316f3c3fd9ec6b327aeb9364b25
|
[
"Apache-2.0"
] | 3 |
2020-01-30T15:02:51.000Z
|
2020-03-31T15:53:39.000Z
|
data/tair/add_gene.py
|
SilkeAllmannLab/gwas
|
ced0000477890316f3c3fd9ec6b327aeb9364b25
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Usage: python add_gene.py chrom_pos_marker.tsv chr01_start_end_gene.tsv chr01_genes_with_marker.tsv
# In[1]:
import sys
import pandas as pd
# file with chromosome position marker id
chrom_pos_marker_id_file_path = sys.argv[1]
# file with chrom start end gene
chrom_start_end_gene_file_path = sys.argv[2]
# out file
outfile = sys.argv[3]
# In[2]:
chrom_pos_marker = pd.read_csv(chrom_pos_marker_id_file_path,
sep="\t",
names=["chrom","pos","marker_id"])
# In[3]:
genes = pd.read_csv(chrom_start_end_gene_file_path,
sep="\t",
names=["chrom","start","end","gene"])
# to collect gene as key and marker as values
gene_to_marker_dict = {}
# two loops
# outer loop iterates over each Arabidopsis gene
# inner loop iterates over each marker
n_genes = genes.shape[0]
for i in range(0,n_genes,1):
# collect the chromosome + positions of gene number i
row = genes.iloc[i,:]
chrom_gene = row["chrom"]
start_gene = row["start"]
end_gene = row["end"]
gene = row["gene"]
print("working on gene:",gene)
# find chromosome of gene and filter the chrom_pos_marker dataframe
chrom_pos_marker_filtered = chrom_pos_marker.query('chrom == @chrom_gene')
# is the SNP marker position between start and end position of the gene number i?
for j in range(0,chrom_pos_marker_filtered.shape[0],1):
marker_j = chrom_pos_marker_filtered.iloc[j,:]
if start_gene <= marker_j["pos"] <= end_gene:
gene_to_marker_dict[gene] = marker_j["marker_id"]
# In[ ]:
genes_with_marker = pd.DataFrame.from_dict(gene_to_marker_dict, orient = "index",columns=["marker_id"])
genes_with_marker.reset_index(level = 0, inplace = True)
genes_with_marker = genes_with_marker.rename(columns={"index":"gene"})
# In[ ]:
genes_with_marker.to_csv(outfile, sep = "\t")
| 25.173333 | 103 | 0.700212 |
9f024035a9a25ef15387f973fc545e10795504d8
| 1,097 |
py
|
Python
|
h/views/home.py
|
julien-cheng/h
|
36c8ec044725720cf36f0986cdf025395aca8929
|
[
"BSD-2-Clause"
] | 2 |
2019-08-04T07:22:11.000Z
|
2020-07-17T05:01:41.000Z
|
h/views/home.py
|
fuelpress/i.fuel.press
|
af7b25895d813af0fef656dcf483afe852a99d76
|
[
"BSD-2-Clause"
] | 4 |
2020-03-24T17:38:24.000Z
|
2022-03-02T05:45:01.000Z
|
h/views/home.py
|
fuelpress/i.fuel.press
|
af7b25895d813af0fef656dcf483afe852a99d76
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Views serving the homepage and related endpoints."""
from __future__ import unicode_literals
from pyramid import httpexceptions
from pyramid.view import view_config
@view_config(route_name="via_redirect", request_method="GET")
def via_redirect(context, request):
url = request.params.get("url")
if url is None:
raise httpexceptions.HTTPBadRequest('"url" parameter missing')
via_link = "https://via.hypothes.is/{}".format(url)
raise httpexceptions.HTTPFound(location=via_link)
@view_config(route_name="index", request_method="GET")
def index_redirect(context, request):
try:
redirect = request.registry.settings["h.homepage_redirect_url"]
except KeyError:
# When the redirect URL isn't explicitly configured, we send people to
# the main activity stream.
redirect = request.route_url("activity.search")
if request.user is not None:
redirect = request.route_url(
"activity.user_search", username=request.user.username
)
raise httpexceptions.HTTPFound(redirect)
| 29.648649 | 78 | 0.711942 |
af07a2462ed7aa7fcc9b71d3ea441e37aeb5e6a1
| 3,987 |
py
|
Python
|
train-model/scripts/preprocess.py
|
leijianqiao3/web-audio-recognition
|
c2c9f8b2d1af43dcc637335c62a0653f5b12e38b
|
[
"Apache-2.0"
] | 80 |
2017-12-22T12:38:08.000Z
|
2021-08-31T00:14:46.000Z
|
train-model/scripts/preprocess.py
|
leijianqiao3/web-audio-recognition
|
c2c9f8b2d1af43dcc637335c62a0653f5b12e38b
|
[
"Apache-2.0"
] | null | null | null |
train-model/scripts/preprocess.py
|
leijianqiao3/web-audio-recognition
|
c2c9f8b2d1af43dcc637335c62a0653f5b12e38b
|
[
"Apache-2.0"
] | 31 |
2017-12-25T20:34:40.000Z
|
2021-09-05T10:03:45.000Z
|
#!/usr/bin/env python
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Concatenate all training examples into one large, conjoined wav file. This is
necessary to reduce the number of XHRs we need to make to load all of the data.
Ensure that each sample is exactly DURATION * SAMPLE_RATE samples (one second) long."""
import argparse
import numpy as np
import os
import random
import scipy.io.wavfile
OUT_PATH = 'out'
DATA_PATH = '../speech_commands_v0.01/'
DURATION = 1
SAMPLE_RATE = 16000
EXAMPLES_PER_LABEL = 10000
BACKGROUND_LABEL = '_background_noise_'
parser = argparse.ArgumentParser(description='Combine labeled wavs into one.')
parser.add_argument('--labels', nargs='+', help='Which labels to process',
default=[])
parser.add_argument('--examples_per_label', type=int, default=EXAMPLES_PER_LABEL,
help='How many examples to take from each label')
parser.add_argument('--examples_per_other_label', type=int,
default=EXAMPLES_PER_LABEL * 4,
                    help='How many examples to take in total for the combined "other" label')
parser.add_argument('--out', type=str, default=OUT_PATH,
help='Directory to write the files to.')
parser.add_argument('--other', action='store_true', default=True)
parser.add_argument('--background', action='store_true', default=True)
args = parser.parse_args()
def GetAllLabels():
subdirs = [x[0].split('/')[-1] for x in os.walk(DATA_PATH)]
return subdirs[2:]
def GetAllExamplePaths(label):
path = os.path.join(DATA_PATH, label)
examples = [x[2] for x in os.walk(path)][0]
return [os.path.join(label, eg) for eg in examples if eg.endswith('.wav')]
def GetAllExamplePathsForLabels(labels):
out = []
for label in labels:
out += GetAllExamplePaths(label)
return out
def ReadWav(path):
sr, y = scipy.io.wavfile.read(path)
return y
def PadBuffer(buf, length):
if len(buf) > length:
return buf[:length]
elif len(buf) == length:
return buf
else:
# Array is too short, zero-pad it.
return buf + ([0] * (length - len(buf)))
def WriteWav(buf, path):
# Ensure directory exists.
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.mkdir(dir)
scipy.io.wavfile.write(path, SAMPLE_RATE, buf)
if __name__ == '__main__':
labels = args.labels or GetAllLabels()
if args.background:
labels += [BACKGROUND_LABEL]
print('Loading %d labels' % len(labels))
for label in labels:
example_paths = GetAllExamplePaths(label)
bufs = example_paths[:args.examples_per_label]
print('Writing %d examples for label %s.' % (len(bufs), label))
combined_buf = []
for path in bufs:
buf = list(ReadWav(os.path.join(DATA_PATH, path)))
if label != BACKGROUND_LABEL:
buf = PadBuffer(buf, DURATION * SAMPLE_RATE)
combined_buf += buf
arr = np.array(combined_buf, dtype=np.int16)
WriteWav(arr, os.path.join(args.out, '%s.wav' % label))
if args.other:
# Get a bunch of non-specified examples and put them all into other.wav.
other_labels = set(GetAllLabels()).difference(labels)
other_paths = GetAllExamplePathsForLabels(other_labels)[:args.examples_per_other_label]
random.shuffle(other_paths)
combined_buf = []
for path in other_paths:
buf = list(ReadWav(os.path.join(DATA_PATH, path)))
buf = PadBuffer(buf, DURATION * SAMPLE_RATE)
combined_buf += buf
arr = np.array(combined_buf, dtype=np.int16)
print('Writing %s examples to other.wav' % len(other_paths))
WriteWav(arr, os.path.join(args.out, 'other.wav'))
| 32.950413 | 91 | 0.712315 |
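# A quick standalone check, assuming list-of-int buffers as in the script above, of the
# truncate-or-zero-pad rule that PadBuffer applies so every clip is exactly one second.
SAMPLE_RATE = 16000

def pad_buffer(buf, length):
    if len(buf) > length:
        return buf[:length]                       # too long: truncate
    return buf + [0] * (length - len(buf))        # too short (or exact): zero-pad as needed

assert len(pad_buffer([1, 2, 3], SAMPLE_RATE)) == SAMPLE_RATE
assert pad_buffer([1, 2, 3], SAMPLE_RATE)[:3] == [1, 2, 3]
assert len(pad_buffer(list(range(SAMPLE_RATE + 100)), SAMPLE_RATE)) == SAMPLE_RATE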
ba7d23fbe1e9ad416691dc1d184793b13a3f1ae8
| 3,982 |
py
|
Python
|
win64-postgresql/pgAdmin 4/web/pgadmin/browser/server_groups/servers/types.py
|
vnetcon/curvy
|
ed3749bd5d298c7ab6c0625de91c211d6da4c762
|
[
"Apache-2.0"
] | null | null | null |
win64-postgresql/pgAdmin 4/web/pgadmin/browser/server_groups/servers/types.py
|
vnetcon/curvy
|
ed3749bd5d298c7ab6c0625de91c211d6da4c762
|
[
"Apache-2.0"
] | 3 |
2021-09-02T15:51:44.000Z
|
2022-03-02T09:53:17.000Z
|
win64-postgresql/pgAdmin 4/web/pgadmin/browser/server_groups/servers/types.py
|
vnetcon/curvy
|
ed3749bd5d298c7ab6c0625de91c211d6da4c762
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
import sys
from flask import render_template
from flask_babelex import gettext as _
from pgadmin.utils.preferences import Preferences
import config
class ServerType(object):
"""
Server Type
    Create an instance of this class to define a new type of server support.
    To add support for a new kind of server, override this class and its
    instanceOf function, which identifies the server type based on the
    version.
"""
registry = dict()
UTILITY_PATH_LABEL = _("PostgreSQL Binary Path")
UTILITY_PATH_HELP = _(
"Path to the directory containing the PostgreSQL utility programs"
" (pg_dump, pg_restore etc)."
)
def __init__(self, server_type, description, priority):
self.stype = server_type
self.desc = description
self.spriority = priority
self.utility_path = None
assert (server_type not in ServerType.registry)
ServerType.registry[server_type] = self
@property
def icon(self):
return "%s.svg" % self.stype
@property
def server_type(self):
return self.stype
@property
def description(self):
return self.desc
@classmethod
def register_preferences(cls):
paths = Preferences('paths', _('Paths'))
for key in cls.registry:
st = cls.registry[key]
default_path = config.DEFAULT_BINARY_PATHS.get(st.stype, "")
st.utility_path = paths.register(
'bin_paths', st.stype + '_bin_dir',
st.UTILITY_PATH_LABEL,
'text', default_path, category_label=_('Binary paths'),
help_str=st.UTILITY_PATH_HELP
)
@property
def priority(self):
return self.spriority
def __str__(self):
return "Type: {0}, Description:{1}, Priority: {2}".format(
self.stype, self.desc, self.spriority
)
def instanceOf(self, version):
return True
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"css/server_type.css",
server_type=self.stype,
icon=self.icon
)
]
@classmethod
def types(cls):
return sorted(
ServerType.registry.values(),
key=lambda x: x.priority,
reverse=True
)
def utility(self, operation, sversion):
res = None
if operation == 'backup':
res = 'pg_dump'
elif operation == 'backup_server':
res = 'pg_dumpall'
elif operation == 'restore':
res = 'pg_restore'
elif operation == 'sql':
res = 'psql'
else:
raise Exception(
_("Could not find the utility for the operation '%s'".format(
operation
))
)
bin_path = self.utility_path.get()
if "$DIR" in bin_path:
            # When running as a WSGI application, we will not find the
# '__file__' attribute for the '__main__' module.
main_module_file = getattr(
sys.modules['__main__'], '__file__', None
)
if main_module_file is not None:
bin_path = bin_path.replace(
"$DIR", os.path.dirname(main_module_file)
)
return os.path.abspath(os.path.join(
bin_path,
(res if os.name != 'nt' else (res + '.exe'))
))
# Default Server Type
ServerType('pg', _("PostgreSQL"), -1)
| 27.846154 | 78 | 0.55324 |
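# A minimal, self-contained sketch (hypothetical class names, no Flask/pgAdmin imports)
# of the registry + instanceOf pattern the ServerType class above uses to pick the most
# specific server type for a given version banner.
class BaseType(object):
    registry = {}

    def __init__(self, stype, priority):
        self.stype, self.priority = stype, priority
        BaseType.registry[stype] = self

    def instanceOf(self, version):
        return True  # the base type accepts any version string

class AdvancedType(BaseType):
    def instanceOf(self, version):
        return "EnterpriseDB" in version  # only match Advanced Server banners

BaseType("pg", -1)
AdvancedType("ppas", 3)

def pick(version):
    # highest-priority type whose instanceOf accepts the banner wins
    candidates = sorted(BaseType.registry.values(), key=lambda t: t.priority, reverse=True)
    return next(t.stype for t in candidates if t.instanceOf(version))

print(pick("PostgreSQL 12.4"))              # -> pg
print(pick("EnterpriseDB 12.4 on x86_64"))  # -> ppas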
91c9e1eb3c8a8d2c32d9aa8cd9a07688c100fc89
| 838 |
py
|
Python
|
python/controls/stack/horizontal_stack_vertical_alignments.py
|
pglet/pglet-samples
|
ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9
|
[
"MIT"
] | null | null | null |
python/controls/stack/horizontal_stack_vertical_alignments.py
|
pglet/pglet-samples
|
ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9
|
[
"MIT"
] | null | null | null |
python/controls/stack/horizontal_stack_vertical_alignments.py
|
pglet/pglet-samples
|
ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9
|
[
"MIT"
] | null | null | null |
import pglet
from pglet import Stack, Text
with pglet.page("horizontal-stack-vertical-alignments") as page:
bg_color = '#ddddee'
page.horizontal_align = 'stretch'
def items(count):
items = []
for i in range(1, count + 1):
items.append(Text(value=i, align='center', vertical_align='center', width=30, height=30, bgcolor='BlueMagenta10', color='white', padding=5))
return items
page.add(
Text('start'),
Stack(horizontal=True, vertical_align='start', height=100, bgcolor=bg_color, gap=20, controls=items(3)),
Text('center'),
Stack(horizontal=True, vertical_align='center', height=100, bgcolor=bg_color, gap=20, controls=items(3)),
Text('end'),
Stack(horizontal=True, vertical_align='end', height=100, bgcolor=bg_color, gap=20, controls=items(3)))
input()
| 33.52 | 146 | 0.668258 |
da5d2d4fede35dcc8bcdb31a9f0d855e0d57ecda
| 51,134 |
py
|
Python
|
python_modules/libraries/dagster-aws/dagster_aws/emr/configs.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-aws/dagster_aws/emr/configs.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-aws/dagster_aws/emr/configs.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
from dagster import Bool, Dict, Field, Float, Int, List, PermissiveDict, String
from .types import (
EbsVolumeType,
EmrActionOnFailure,
EmrAdjustmentType,
EmrComparisonOperator,
EmrInstanceRole,
EmrMarket,
EmrRepoUpgradeOnBoot,
EmrScaleDownBehavior,
EmrStatistic,
EmrSupportedProducts,
EmrTimeoutAction,
EmrUnit,
)
def _define_configurations():
return Field(
List(
Dict(
fields={
'Classification': Field(
String,
description='The classification within a configuration.',
is_optional=True,
),
'Configurations': Field(
List(PermissiveDict()),
description='''A list of additional configurations to apply within a
configuration object.''',
is_optional=True,
),
'Properties': Field(
PermissiveDict(),
description='''A set of properties specified within a configuration
classification.''',
is_optional=True,
),
}
)
),
description='''For Amazon EMR releases 4.0 and later. The list of configurations supplied
for the EMR cluster you are creating.
An optional configuration specification to be used when provisioning cluster instances,
which can include configurations for applications and software bundled with Amazon EMR. A
configuration consists of a classification, properties, and optional nested configurations.
A classification refers to an application-specific configuration file. Properties are the
settings you want to change in that file. For more information, see the EMR Configuring
Applications guide.''',
is_optional=True,
)
def _define_steps():
name = Field(String, description='The name of the step.', is_optional=False)
actionOnFailure = Field(
EmrActionOnFailure,
description='''The action to take when the cluster step fails. Possible values are
TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE. TERMINATE_JOB_FLOW is provided for
backward compatibility. We recommend using TERMINATE_CLUSTER instead.''',
is_optional=True,
)
hadoopJarStep = Field(
Dict(
fields={
'Properties': Field(
List(Dict(fields={'Key': Field(String), 'Value': Field(String)})),
description='''A list of Java properties that are set when the step runs. You
can use these properties to pass key value pairs to your main function.''',
is_optional=True,
),
'Jar': Field(
String,
description='A path to a JAR file run during the step.',
is_optional=False,
),
'MainClass': Field(
String,
description='''The name of the main class in the specified Java file. If not
specified, the JAR file should specify a Main-Class in its manifest file.''',
is_optional=True,
),
'Args': Field(
List(String),
description='''A list of command line arguments passed to the JAR file's main
function when executed.''',
is_optional=True,
),
}
),
description='The JAR file used for the step.',
)
return Field(
List(
Dict(
fields={
'Name': name,
'ActionOnFailure': actionOnFailure,
'HadoopJarStep': hadoopJarStep,
}
)
),
description='A list of steps to run.',
)
def _define_bootstrap_actions():
name = Field(String, description='The name of the bootstrap action.', is_optional=False)
path = Field(
String,
description='''Location of the script to run during a bootstrap action. Can be either a
location in Amazon S3 or on a local file system.''',
is_optional=False,
)
args = Field(
List(String),
description='A list of command line arguments to pass to the bootstrap action script.',
is_optional=True,
)
bootstrap_action = Dict(
fields={
'Name': name,
'ScriptBootstrapAction': Field(
Dict(fields={'Path': path, 'Args': args}),
description='The script run by the bootstrap action.',
is_optional=False,
),
}
)
return Field(
List(bootstrap_action),
description='''A list of bootstrap actions to run before Hadoop starts on the cluster
nodes.''',
is_optional=True,
)
def _define_ebs_configuration():
volume_specification = Field(
Dict(
fields={
'VolumeType': Field(
EbsVolumeType,
description='''The volume type. Volume types supported are gp2, io1,
standard.''',
is_optional=False,
),
'Iops': Field(
Int,
description='''The number of I/O operations per second (IOPS) that the volume
supports.''',
is_optional=True,
),
'SizeInGB': Field(
Int,
description='''The volume size, in gibibytes (GiB). This can be a number from
1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.''',
is_optional=False,
),
}
),
description='''EBS volume specifications such as volume type, IOPS, and size (GiB) that will
be requested for the EBS volume attached to an EC2 instance in the cluster.''',
is_optional=False,
)
volumes_per_instance = Field(
Int,
description='''Number of EBS volumes with a specific volume configuration that will be
associated with every instance in the instance group''',
is_optional=True,
)
return Field(
Dict(
fields={
'EbsBlockDeviceConfigs': Field(
List(
Dict(
fields={
'VolumeSpecification': volume_specification,
'VolumesPerInstance': volumes_per_instance,
}
)
),
description='''An array of Amazon EBS volume specifications attached to a
cluster instance.''',
is_optional=True,
),
'EbsOptimized': Field(
Bool,
description='Indicates whether an Amazon EBS volume is EBS-optimized.',
is_optional=True,
),
}
),
description='''EBS configurations that will be attached to each EC2 instance in the
instance group.''',
is_optional=True,
)
def _define_auto_scaling_policy():
simple_scaling_policy_configuration = Field(
Dict(
fields={
'AdjustmentType': Field(
EmrAdjustmentType,
description='''The way in which EC2 instances are added (if ScalingAdjustment
is a positive number) or terminated (if ScalingAdjustment is a negative number)
each time the scaling activity is triggered. CHANGE_IN_CAPACITY is the default.
CHANGE_IN_CAPACITY indicates that the EC2 instance count increments or
decrements by ScalingAdjustment , which should be expressed as an integer.
PERCENT_CHANGE_IN_CAPACITY indicates the instance count increments or decrements
by the percentage specified by ScalingAdjustment , which should be expressed as
an integer. For example, 20 indicates an increase in 20% increments of cluster
capacity. EXACT_CAPACITY indicates the scaling activity results in an instance
group with the number of EC2 instances specified by ScalingAdjustment , which
should be expressed as a positive integer.''',
is_optional=True,
),
'ScalingAdjustment': Field(
Int,
description='''The amount by which to scale in or scale out, based on the
specified AdjustmentType . A positive value adds to the instance group's EC2
instance count while a negative number removes instances. If AdjustmentType is
set to EXACT_CAPACITY , the number should only be a positive integer. If
AdjustmentType is set to PERCENT_CHANGE_IN_CAPACITY , the value should express
the percentage as an integer. For example, -20 indicates a decrease in 20%
increments of cluster capacity.''',
is_optional=False,
),
'CoolDown': Field(
Int,
description='''The amount of time, in seconds, after a scaling activity
completes before any further trigger-related scaling activities can start. The
default value is 0.''',
is_optional=True,
),
}
),
description='''The type of adjustment the automatic scaling activity makes when
triggered, and the periodicity of the adjustment.''',
is_optional=False,
)
action = Field(
Dict(
fields={
'Market': Field(
EmrMarket,
description='''Not available for instance groups. Instance groups use the market
type specified for the group.''',
is_optional=True,
),
'SimpleScalingPolicyConfiguration': simple_scaling_policy_configuration,
}
),
description='The conditions that trigger an automatic scaling activity.',
is_optional=False,
)
dimensions = Field(
List(
Dict(
fields={
'Key': Field(String, description='The dimension name.', is_optional=False),
'Value': Field(String, description='The dimension value.', is_optional=True),
}
)
),
description='''A CloudWatch metric dimension.''',
is_optional=True,
)
trigger = Field(
Dict(
fields={
'CloudWatchAlarmDefinition': Field(
Dict(
fields={
'ComparisonOperator': Field(
EmrComparisonOperator,
description='''Determines how the metric specified by MetricName is
compared to the value specified by Threshold.''',
is_optional=False,
),
'EvaluationPeriods': Field(
Int,
description='''The number of periods, expressed in seconds using
Period, during which the alarm condition must exist before the alarm
triggers automatic scaling activity. The default value is 1.''',
is_optional=True,
),
'MetricName': Field(
String,
description='''The name of the CloudWatch metric that is watched to
determine an alarm condition.''',
is_optional=True,
),
'Namespace': Field(
String,
description='''The namespace for the CloudWatch metric. The default
is AWS/ElasticMapReduce.''',
is_optional=True,
),
'Period': Field(
Int,
description='''The period, in seconds, over which the statistic is
applied. EMR CloudWatch metrics are emitted every five minutes (300
seconds), so if an EMR CloudWatch metric is specified, specify
300.''',
is_optional=False,
),
'Statistic': Field(
EmrStatistic,
description='''The statistic to apply to the metric associated with
the alarm. The default is AVERAGE.''',
is_optional=True,
),
'Threshold': Field(
Float,
description='''The value against which the specified statistic is
compared.''',
is_optional=False,
),
'Unit': Field(
EmrUnit,
description='''The unit of measure associated with the CloudWatch
metric being watched. The value specified for Unit must correspond
to the units specified in the CloudWatch metric.''',
is_optional=True,
),
'Dimensions': dimensions,
}
),
description='''The definition of a CloudWatch metric alarm. When the defined
alarm conditions are met along with other trigger parameters, scaling activity
begins.''',
is_optional=False,
)
}
),
description='''The CloudWatch alarm definition that determines when automatic scaling
activity is triggered.''',
is_optional=False,
)
return Field(
Dict(
fields={
'Constraints': Field(
Dict(
fields={
'MinCapacity': Field(
Int,
description='''The lower boundary of EC2 instances in an instance
group below which scaling activities are not allowed to shrink.
Scale-in activities will not terminate instances below this
boundary.''',
is_optional=False,
),
'MaxCapacity': Field(
Int,
description='''The upper boundary of EC2 instances in an instance
group beyond which scaling activities are not allowed to grow.
Scale-out activities will not add instances beyond this
boundary.''',
is_optional=False,
),
}
),
description='''The upper and lower EC2 instance limits for an automatic scaling
policy. Automatic scaling activity will not cause an instance group to grow
above or below these limits.''',
is_optional=False,
),
'Rules': Field(
List(
Dict(
fields={
'Name': Field(
String,
description='''The name used to identify an automatic
scaling rule. Rule names must be unique within a scaling
policy.''',
),
'Description': Field(
String,
description='''A friendly, more verbose description of the
automatic scaling rule.''',
),
'Action': action,
'Trigger': trigger,
}
)
),
description='''A scale-in or scale-out rule that defines scaling activity,
including the CloudWatch metric alarm that triggers activity, how EC2 instances
are added or removed, and the periodicity of adjustments. The automatic scaling
policy for an instance group can comprise one or more automatic scaling
rules.''',
is_optional=False,
),
}
),
description='''An automatic scaling policy for a core instance group or task instance group
in an Amazon EMR cluster. The automatic scaling policy defines how an instance group
dynamically adds and terminates EC2 instances in response to the value of a CloudWatch
metric. See the EMR PutAutoScalingPolicy docs.
''',
is_optional=True,
)
def _define_instance_groups():
return Field(
List(
Dict(
fields={
'Name': Field(
String,
description='Friendly name given to the instance group.',
is_optional=True,
),
'Market': Field(
EmrMarket,
description='''Market type of the EC2 instances used to create a cluster
node.''',
is_optional=True,
),
'InstanceRole': Field(
EmrInstanceRole,
description='The role of the instance group in the cluster.',
is_optional=False,
),
'BidPrice': Field(
String,
description='''The maximum Spot price your are willing to pay for EC2
instances.
An optional, nullable field that applies if the MarketType for the
instance group is specified as SPOT . Specify the maximum spot price in
USD. If the value is NULL and SPOT is specified, the maximum Spot price
is set equal to the On-Demand price.''',
is_optional=True,
),
'InstanceType': Field(
String,
description='''The EC2 instance type for all instances in the instance
group.''',
is_optional=False,
),
'InstanceCount': Field(
Int,
description='Target number of instances for the instance group.',
is_optional=False,
),
'Configurations': _define_configurations(),
'EbsConfiguration': _define_ebs_configuration(),
'AutoScalingPolicy': _define_auto_scaling_policy(),
}
)
),
description='Configuration for the instance groups in a cluster.',
is_optional=True,
)
def _define_instance_fleets():
target_on_demand_capacity = Field(
Int,
description='''The target capacity of On-Demand units for the instance fleet, which
determines how many On-Demand instances to provision. When the instance fleet launches,
Amazon EMR tries to provision On-Demand instances as specified by InstanceTypeConfig. Each
instance configuration has a specified WeightedCapacity. When an On-Demand instance is
provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR
provisions instances until the target capacity is totally fulfilled, even if this results
in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon
EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is
provisioned, and the target capacity is exceeded by 3 units.
Note: If not specified or set to 0, only Spot instances are provisioned for the instance
fleet using TargetSpotCapacity. At least one of TargetSpotCapacity and
TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of
TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.
''',
is_optional=False,
)
target_spot_capacity = Field(
Int,
description='''The target capacity of Spot units for the instance fleet, which determines
how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to
provision Spot instances as specified by InstanceTypeConfig . Each instance configuration
has a specified WeightedCapacity . When a Spot instance is provisioned, the WeightedCapacity
units count toward the target capacity. Amazon EMR provisions instances until the target
capacity is totally fulfilled, even if this results in an overage. For example, if there are
2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a
WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is
exceeded by 3 units.
Note: If not specified or set to 0, only On-Demand instances are provisioned for the
instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be
greater than 0. For a master instance fleet, only one of TargetSpotCapacity and
TargetOnDemandCapacity can be specified, and its value must be 1.
''',
is_optional=True,
)
instance_type_configs = Field(
List(
Dict(
fields={
'InstanceType': Field(
String,
description='An EC2 instance type, such as m3.xlarge.',
is_optional=False,
),
'WeightedCapacity': Field(
Int,
description='''The number of units that a provisioned instance of this
type provides toward fulfilling the target capacities defined in
InstanceFleetConfig. This value is 1 for a master instance fleet, and
must be 1 or greater for core and task instance fleets. Defaults to 1
if not specified.''',
is_optional=True,
),
'BidPrice': Field(
String,
description='''The bid price for each EC2 Spot instance type as defined
by InstanceType. Expressed in USD. If neither BidPrice nor
BidPriceAsPercentageOfOnDemandPrice is provided,
BidPriceAsPercentageOfOnDemandPrice defaults to 100%.''',
),
'BidPriceAsPercentageOfOnDemandPrice': Field(
Float,
description='''The bid price, as a percentage of On-Demand price, for
each EC2 Spot instance as defined by InstanceType . Expressed as a
number (for example, 20 specifies 20%). If neither BidPrice nor
BidPriceAsPercentageOfOnDemandPrice is provided,
BidPriceAsPercentageOfOnDemandPrice defaults to 100%.''',
is_optional=True,
),
'EbsConfiguration': _define_ebs_configuration(),
'Configurations': _define_configurations(),
}
)
),
description='''An instance type configuration for each instance type in an instance fleet,
which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and
Spot target capacities. There can be a maximum of 5 instance type configurations in a
fleet.''',
is_optional=True,
)
launch_specifications = Field(
Dict(
fields={
'SpotSpecification': Field(
Dict(
fields={
'TimeoutDurationMinutes': Field(
Int,
description='''The spot provisioning timeout period in minutes. If
Spot instances are not provisioned within this time period, the
TimeOutAction is taken. Minimum value is 5 and maximum value is
1440. The timeout applies only during initial provisioning, when the
cluster is first created.''',
is_optional=False,
),
'TimeoutAction': Field(
EmrTimeoutAction,
description='''The action to take when TargetSpotCapacity has not
been fulfilled when the TimeoutDurationMinutes has expired; that is,
when all Spot instances could not be provisioned within the Spot
provisioning timeout. Valid values are TERMINATE_CLUSTER and
SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies that if no Spot
instances are available, On-Demand Instances should be provisioned
to fulfill any remaining Spot capacity.''',
is_optional=False,
),
'BlockDurationMinutes': Field(
Int,
description='''The defined duration for Spot instances (also known
as Spot blocks) in minutes. When specified, the Spot instance does
not terminate before the defined duration expires, and defined
duration pricing for Spot instances applies. Valid values are 60,
120, 180, 240, 300, or 360. The duration period starts as soon as a
Spot instance receives its instance ID. At the end of the duration,
Amazon EC2 marks the Spot instance for termination and provides a
Spot instance termination notice, which gives the instance a
two-minute warning before it terminates.''',
is_optional=True,
),
}
),
description='''The launch specification for Spot instances in the fleet, which
determines the defined duration and provisioning timeout behavior.''',
is_optional=False,
)
}
),
description='The launch specification for the instance fleet.',
is_optional=True,
)
return Field(
List(
Dict(
fields={
'Name': Field(
String,
description='The friendly name of the instance fleet.',
is_optional=True,
),
'InstanceFleetType': Field(
EmrInstanceRole,
description='''The node type that the instance fleet hosts. Valid values
are MASTER,CORE,and TASK.''',
),
'TargetOnDemandCapacity': target_on_demand_capacity,
'TargetSpotCapacity': target_spot_capacity,
'InstanceTypeConfigs': instance_type_configs,
'LaunchSpecifications': launch_specifications,
}
)
),
description='''Describes the EC2 instances and instance configurations for clusters that use
the instance fleet configuration.''',
is_optional=True,
)
def define_emr_run_job_flow_config():
name = Field(String, description='The name of the job flow.', is_optional=False)
log_uri = Field(
String,
description='''The location in Amazon S3 to write the log files of the job flow. If a value
is not provided, logs are not created.''',
is_optional=True,
)
additional_info = Field(
String, description='A JSON string for selecting additional features.', is_optional=True
)
ami_version = Field(
String,
description='''Applies only to Amazon EMR AMI versions 3.x and 2.x. For Amazon EMR releases
4.0 and later, ReleaseLabel is used. To specify a custom AMI, use CustomAmiID.''',
is_optional=True,
)
release_label = Field(
String,
description='''The Amazon EMR release label, which determines the version of open-source
application packages installed on the cluster. Release labels are in the form emr-x.x.x,
where x.x.x is an Amazon EMR release version, for example, emr-5.14.0 . For more information
about Amazon EMR release versions and included application versions and features, see
https://docs.aws.amazon.com/emr/latest/ReleaseGuide/. The release label applies only to
Amazon EMR releases versions 4.x and later. Earlier versions use AmiVersion.''',
is_optional=True,
)
instances = Field(
Dict(
fields={
'MasterInstanceType': Field(
String,
description='The EC2 instance type of the master node.',
is_optional=True,
),
'SlaveInstanceType': Field(
String,
description='The EC2 instance type of the core and task nodes.',
is_optional=True,
),
'InstanceCount': Field(
Int, description='The number of EC2 instances in the cluster.', is_optional=True
),
'InstanceGroups': _define_instance_groups(),
'InstanceFleets': _define_instance_fleets(),
'Ec2KeyName': Field(
String,
description='''The name of the EC2 key pair that can be used to ssh to the
master node as the user called "hadoop."''',
is_optional=True,
),
'Placement': Field(
Dict(
fields={
'AvailabilityZone': Field(
String,
description='''The Amazon EC2 Availability Zone for the cluster.
AvailabilityZone is used for uniform instance groups, while
AvailabilityZones (plural) is used for instance fleets.''',
is_optional=True,
),
'AvailabilityZones': Field(
List(String),
description='''When multiple Availability Zones are specified,
Amazon EMR evaluates them and launches instances in the optimal
Availability Zone. AvailabilityZones is used for instance fleets,
while AvailabilityZone (singular) is used for uniform instance
groups.''',
is_optional=True,
),
}
),
description='The Availability Zone in which the cluster runs.',
is_optional=True,
),
'KeepJobFlowAliveWhenNoSteps': Field(
Bool,
description='''Specifies whether the cluster should remain available after
completing all steps.''',
is_optional=True,
),
'TerminationProtected': Field(
Bool,
description='''Specifies whether to lock the cluster to prevent the Amazon EC2
instances from being terminated by API call, user intervention, or in the event
of a job-flow error.''',
is_optional=True,
),
'HadoopVersion': Field(
String,
description='''Applies only to Amazon EMR release versions earlier than 4.0. The
Hadoop version for the cluster. Valid inputs are "0.18" (deprecated), "0.20"
(deprecated), "0.20.205" (deprecated), "1.0.3", "2.2.0", or "2.4.0". If you do
not set this value, the default of 0.18 is used, unless the AmiVersion parameter
is set in the RunJobFlow call, in which case the default version of Hadoop for
that AMI version is used.''',
is_optional=True,
),
'Ec2SubnetId': Field(
String,
description='''Applies to clusters that use the uniform instance group
configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon
VPC), set this parameter to the identifier of the Amazon VPC subnet where you
want the cluster to launch. If you do not specify this value, the cluster
launches in the normal Amazon Web Services cloud, outside of an Amazon VPC, if
the account launching the cluster supports EC2 Classic networks in the region
where the cluster launches.
Amazon VPC currently does not support cluster compute quadruple extra large
(cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance type
for clusters launched in an Amazon VPC.''',
is_optional=True,
),
'Ec2SubnetIds': Field(
List(String),
description='''Applies to clusters that use the instance fleet configuration.
When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and
launches instances in the optimal subnet.''',
is_optional=True,
),
'EmrManagedMasterSecurityGroup': Field(
String,
description='''The identifier of the Amazon EC2 security group for the master
node.''',
is_optional=True,
),
'EmrManagedSlaveSecurityGroup': Field(
String,
description='''The identifier of the Amazon EC2 security group for the core and
task nodes.''',
is_optional=True,
),
'ServiceAccessSecurityGroup': Field(
String,
description='''The identifier of the Amazon EC2 security group for the Amazon
EMR service to access clusters in VPC private subnets.''',
is_optional=True,
),
'AdditionalMasterSecurityGroups': Field(
List(String),
description='''A list of additional Amazon EC2 security group IDs for the master
node.''',
is_optional=True,
),
'AdditionalSlaveSecurityGroups': Field(
List(String),
description='''A list of additional Amazon EC2 security group IDs for the core
and task nodes.''',
is_optional=True,
),
}
),
description='A specification of the number and type of Amazon EC2 instances.',
is_optional=False,
)
supported_products = Field(
List(EmrSupportedProducts),
description='''A list of strings that indicates third-party software to use. For
more information, see the Amazon EMR Developer Guide. Currently supported
values are:
- "mapr-m3" - launch the job flow using MapR M3 Edition.
- "mapr-m5" - launch the job flow using MapR M5 Edition.
''',
is_optional=True,
)
new_supported_products = Field(
List(
Dict(
fields={
'Name': Field(String, is_optional=False),
'Args': Field(List(String), description='The list of user-supplied arguments.'),
}
)
),
description='''
The list of supported product configurations which allow user-supplied arguments. EMR
accepts these arguments and forwards them to the corresponding installation script as
bootstrap action arguments.
A list of strings that indicates third-party software to use with the job flow that accepts
a user argument list. EMR accepts and forwards the argument list to the corresponding
installation script as bootstrap action arguments. For more information, see "Launch a Job
Flow on the MapR Distribution for Hadoop" in the Amazon EMR Developer Guide.
Supported values are:
- "mapr-m3" - launch the cluster using MapR M3 Edition.
- "mapr-m5" - launch the cluster using MapR M5 Edition.
- "mapr" with the user arguments specifying "--edition,m3" or "--edition,m5" - launch the
job flow using MapR M3 or M5 Edition respectively.
- "mapr-m7" - launch the cluster using MapR M7 Edition.
- "hunk" - launch the cluster with the Hunk Big Data Analtics Platform.
- "hue"- launch the cluster with Hue installed.
- "spark" - launch the cluster with Apache Spark installed.
- "ganglia" - launch the cluster with the Ganglia Monitoring System installed.''',
is_optional=True,
)
applications = Field(
List(
Dict(
fields={
'Name': Field(
String, description='The name of the application.', is_optional=False
),
'Version': Field(
String, description='The version of the application.', is_optional=True
),
'Args': Field(
List(String),
description='Arguments for Amazon EMR to pass to the application.',
is_optional=True,
),
'AdditionalInfo': Field(
PermissiveDict(),
description='''This option is for advanced users only. This is meta
information about third-party applications that third-party vendors use
for testing purposes.''',
is_optional=True,
),
}
)
),
description='''Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of
applications for Amazon EMR to install and configure when launching the cluster. For a list
of applications available for each Amazon EMR release version, see the Amazon EMR Release
Guide.
With Amazon EMR release version 4.0 and later, the only accepted parameter is the
application name. To pass arguments to applications, you use configuration classifications
specified using configuration JSON objects. For more information, see the EMR Configuring
Applications guide.
With earlier Amazon EMR releases, the application is any Amazon or third-party software that
you can add to the cluster. This structure contains a list of strings that indicates the
software to use with the cluster and accepts a user argument list. Amazon EMR accepts and
forwards the argument list to the corresponding installation script as bootstrap action
argument.''',
is_optional=True,
)
visible_to_all_users = Field(
Bool,
description='''Whether the cluster is visible to all IAM users of the AWS account associated
with the cluster. If this value is set to True, all IAM users of that AWS account can view
and (if they have the proper policy permissions set) manage the cluster. If it is set to
False, only the IAM user that created the cluster can view and manage it.''',
is_optional=True,
default_value=True,
)
job_flow_role = Field(
String,
description='''Also called instance profile and EC2 role. An IAM role for an EMR cluster.
The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole.
In order to use the default role, you must have already created it using the CLI or console.
''',
is_optional=True,
)
service_role = Field(
String,
description='''The IAM role that will be assumed by the Amazon EMR service to access AWS
resources on your behalf.''',
is_optional=True,
)
tags = Field(
List(
Dict(
fields={
'Key': Field(
String,
description='''A user-defined key, which is the minimum required information
for a valid tag. For more information, see the EMR Tag guide.''',
is_optional=False,
),
'Value': Field(
String,
description='''A user-defined value, which is optional in a tag. For more
information, see the EMR Tag Clusters guide.''',
is_optional=True,
),
}
)
),
description='''A list of tags to associate with a cluster and propagate to Amazon EC2
instances.
A key/value pair containing user-defined metadata that you can associate with an Amazon EMR
resource. Tags make it easier to associate clusters in various ways, such as grouping
clusters to track your Amazon EMR resource allocation costs. For more information, see the
EMR Tag Clusters guide.''',
is_optional=True,
)
security_configuration = Field(
String,
description='The name of a security configuration to apply to the cluster.',
is_optional=True,
)
auto_scaling_role = Field(
String,
description='''An IAM role for automatic scaling policies. The default role is
EMR_AutoScaling_DefaultRole. The IAM role provides permissions that the automatic scaling
feature requires to launch and terminate EC2 instances in an instance group.''',
is_optional=True,
)
scale_down_behavior = Field(
EmrScaleDownBehavior,
description='''Specifies the way that individual Amazon EC2 instances terminate when an
automatic scale-in activity occurs or an instance group is resized.
TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour
boundary, regardless of when the request to terminate the instance was submitted. This
option is only available with Amazon EMR 5.1.0 and later and is the default for clusters
created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR
blacklists and drains tasks from nodes before terminating the Amazon EC2 instances,
regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least
active nodes first and blocks instance termination if it could lead to HDFS corruption.
TERMINATE_AT_TASK_COMPLETION available only in Amazon EMR version 4.1.0 and later, and is
the default for versions of Amazon EMR earlier than 5.1.0.''',
is_optional=True,
)
custom_ami_id = Field(
String,
description='''Available only in Amazon EMR version 5.7.0 and later. The ID of a custom
Amazon EBS-backed Linux AMI. If specified, Amazon EMR uses this AMI when it launches cluster
EC2 instances. For more information about custom AMIs in Amazon EMR, see Using a Custom AMI
in the Amazon EMR Management Guide. If omitted, the cluster uses the base Linux AMI for the
ReleaseLabel specified. For Amazon EMR versions 2.x and 3.x, use AmiVersion instead.
For information about creating a custom AMI, see Creating an Amazon EBS-Backed Linux AMI in
the Amazon Elastic Compute Cloud User Guide for Linux Instances. For information about
finding an AMI ID, see Finding a Linux AMI.''',
is_optional=True,
)
repo_upgrade_on_boot = Field(
EmrRepoUpgradeOnBoot,
description='''Applies only when CustomAmiID is used. Specifies which updates from the
Amazon Linux AMI package repositories to apply automatically when the instance boots using
the AMI. If omitted, the default is SECURITY , which indicates that only security updates
are applied. If NONE is specified, no updates are applied, and all updates must be applied
manually.''',
is_optional=True,
)
kerberos_attributes = Field(
Dict(
fields={
'Realm': Field(
String,
description='''The name of the Kerberos realm to which all nodes in a cluster
belong. For example, EC2.INTERNAL.''',
is_optional=False,
),
'KdcAdminPassword': Field(
String,
description='''The password used within the cluster for the kadmin service on
the cluster-dedicated KDC, which maintains Kerberos principals, password
policies, and keytabs for the cluster.''',
is_optional=False,
),
'CrossRealmTrustPrincipalPassword': Field(
String,
description='''Required only when establishing a cross-realm trust with a KDC in
a different realm. The cross-realm principal password, which must be identical
across realms.''',
is_optional=True,
),
'ADDomainJoinUser': Field(
String,
description='''Required only when establishing a cross-realm trust with an
Active Directory domain. A user with sufficient privileges to join resources to
the domain.''',
is_optional=True,
),
'ADDomainJoinPassword': Field(
String,
description='''The Active Directory password for ADDomainJoinUser.''',
is_optional=True,
),
}
),
description='''Attributes for Kerberos configuration when Kerberos authentication is enabled
using a security configuration. For more information see Use Kerberos Authentication in the
EMR Management Guide .''',
is_optional=True,
)
return Field(
Dict(
fields={
'Name': name,
'LogUri': log_uri,
'AdditionalInfo': additional_info,
'AmiVersion': ami_version,
'ReleaseLabel': release_label,
'Instances': instances,
'Steps': _define_steps(),
'BootstrapActions': _define_bootstrap_actions(),
'SupportedProducts': supported_products,
'NewSupportedProducts': new_supported_products,
'Applications': applications,
'Configurations': _define_configurations(),
'VisibleToAllUsers': visible_to_all_users,
'JobFlowRole': job_flow_role,
'ServiceRole': service_role,
'Tags': tags,
'SecurityConfiguration': security_configuration,
'AutoScalingRole': auto_scaling_role,
'ScaleDownBehavior': scale_down_behavior,
'CustomAmiId': custom_ami_id,
'EbsRootVolumeSize': Field(
Int,
description='''The size, in GiB, of the EBS root device volume of the Linux AMI
that is used for each EC2 instance. Available in Amazon EMR version 4.x and
later.''',
is_optional=True,
),
'RepoUpgradeOnBoot': repo_upgrade_on_boot,
'KerberosAttributes': kerberos_attributes,
}
),
description='AWS EMR run job flow configuration',
)
| 47.390176 | 100 | 0.529393 |
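# A hedged illustration (all values invented) of the kind of run-config dictionary that the
# schema returned by define_emr_run_job_flow_config() above is meant to validate.
example_emr_run_job_flow_config = {
    "Name": "example-cluster",
    "ReleaseLabel": "emr-5.14.0",
    "Instances": {
        "MasterInstanceType": "m4.large",
        "SlaveInstanceType": "m4.large",
        "InstanceCount": 3,
        "KeepJobFlowAliveWhenNoSteps": False,
    },
    "VisibleToAllUsers": True,
    "Tags": [{"Key": "team", "Value": "data-eng"}],
}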
72660f9a9c193ad6d220c4cede48bc128e4ee792
| 830 |
py
|
Python
|
docsearch/mapper/default.py
|
brpaz/ulauncher-docsearch
|
3ec40ebc18c97787e8fb2b8380178d37667601fd
|
[
"MIT"
] | 21 |
2020-03-04T00:27:52.000Z
|
2022-03-15T09:48:35.000Z
|
docsearch/mapper/default.py
|
brpaz/ulauncher-docsearch
|
3ec40ebc18c97787e8fb2b8380178d37667601fd
|
[
"MIT"
] | 7 |
2020-03-14T02:05:33.000Z
|
2021-05-09T14:53:40.000Z
|
docsearch/mapper/default.py
|
brpaz/ulauncher-docsearch
|
3ec40ebc18c97787e8fb2b8380178d37667601fd
|
[
"MIT"
] | 5 |
2020-03-10T10:48:54.000Z
|
2021-12-01T10:27:47.000Z
|
""" Default mapper for Algoia Docsearch responses """
class DefaultMapper():
def map(self, docset, hit):
title, description = self.map_description(hit)
if not description:
description = hit['url']
return {
'url': hit['url'],
'title': title,
'icon': docset['icon'],
'category': description
}
def map_description(self, hit):
""" Returns the text to display as result item title """
hierarchy = hit['hierarchy'].values()
# Filter the list by removing the empty values
res = [i for i in hierarchy if i]
if len(res) < 2:
return res[0], ""
# The last element found will be the description and the previous one the title.
return res[-1], ' -> '.join(res[:-1])
| 26.774194 | 88 | 0.551807 |
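# A hedged usage sketch: the docset/hit dictionaries below are invented but shaped like
# Algolia DocSearch responses, and DefaultMapper refers to the class defined above.
docset = {"icon": "images/python.png"}
hit = {
    "url": "https://docs.example.org/library/json.html",
    "hierarchy": {"lvl0": "Library", "lvl1": "json", "lvl2": "Basic Usage",
                  "lvl3": None, "lvl4": None, "lvl5": None},
}
print(DefaultMapper().map(docset, hit))
# -> {'url': '...json.html', 'title': 'Basic Usage', 'icon': 'images/python.png',
#     'category': 'Library -> json'}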
83587ebbb9a80f700b3bfcdb950d4988775ec8dd
| 1,797 |
py
|
Python
|
xue/prepopulate/querier.py
|
team-xue/xue
|
e6bd9539803a2bf902f48b65a9df86356b5d46b2
|
[
"BSD-3-Clause"
] | 1 |
2015-11-23T02:33:07.000Z
|
2015-11-23T02:33:07.000Z
|
xue/prepopulate/querier.py
|
team-xue/xue
|
e6bd9539803a2bf902f48b65a9df86356b5d46b2
|
[
"BSD-3-Clause"
] | null | null | null |
xue/prepopulate/querier.py
|
team-xue/xue
|
e6bd9539803a2bf902f48b65a9df86356b5d46b2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# simple prepopulated tables querier -- querier object
from __future__ import unicode_literals, division
from functools import wraps
from os.path import join as pathjoin
from os.path import abspath, dirname
from django.conf import settings
from xue.prepopulate.parse import parse_file
MYPATH = settings.XUE_PREPOPULATE_DIR
INPUT_FILES = settings.XUE_PREPOPULATE_FILES
def _prepare_db():
result_dct = {}
for fn in INPUT_FILES:
path = pathjoin(MYPATH, fn)
result_dct.update(parse_file(path))
return result_dct
class Querier(object):
def __init__(self):
self._db = _prepare_db()
def have(self, num):
return (num in self._db)
def lookup(self, num):
try:
return self._db[unicode(num)]
except KeyError:
raise ValueError('no user matching number \'%s\' exists' % num)
def extract_org(self, id_num):
# TODO: refactor to use something generic, maybe file-based
# XXX What is written below is TOTALLY hack and ABSOLUTELY NOT
# portable across schools, so clean this up ASAP!!
year, major_code, cls_seq = None, None, None
if id_num[0] == '0':
# old format
major_code = id_num[:4]
yr_2digit = int(id_num[4:6])
cls_seq = int(id_num[6:8])
year = (1900 if yr_2digit > 50 else 2000) + yr_2digit
else:
# new format in use beginning from 2012
# it's like 1030512101... also 10 digits
major_code = id_num[1:5]
yr_2digit = int(id_num[5:7])
cls_seq = int(id_num[7])
year = 2000 + yr_2digit
return year, major_code, cls_seq
querier = Querier()
# vim:set ai et ts=4 sw=4 sts=4 fenc=utf-8:
| 26.820896 | 75 | 0.62493 |
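# A standalone re-statement (student numbers below are made up) of the two ID layouts that
# Querier.extract_org above decodes: the pre-2012 format and the 2012+ format.
def extract_org(id_num):
    if id_num[0] == '0':              # old format: 4-digit major, 2-digit year, 2-digit class
        major_code = id_num[:4]
        yr = int(id_num[4:6])
        cls_seq = int(id_num[6:8])
        year = (1900 if yr > 50 else 2000) + yr
    else:                             # 2012+ format: leading digit, then major/year/class
        major_code = id_num[1:5]
        yr = int(id_num[5:7])
        cls_seq = int(id_num[7])
        year = 2000 + yr
    return year, major_code, cls_seq

print(extract_org('0301091201'))   # -> (2009, '0301', 12)
print(extract_org('1030512101'))   # -> (2012, '0305', 1)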
d267a0c93d48446063a5d78f7ee6ad837dace4ab
| 117 |
py
|
Python
|
music/admin.py
|
Ragh2018/website
|
f797ccd4412b4335f5ee45484159cec459492da3
|
[
"MIT"
] | null | null | null |
music/admin.py
|
Ragh2018/website
|
f797ccd4412b4335f5ee45484159cec459492da3
|
[
"MIT"
] | null | null | null |
music/admin.py
|
Ragh2018/website
|
f797ccd4412b4335f5ee45484159cec459492da3
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Album, song
admin.site.register(Album)
admin.site.register(song)
| 23.4 | 32 | 0.820513 |
3199bf1dcd98944482c1eca22846b0dcfa6deba0
| 3,875 |
py
|
Python
|
exp/cxif5315/proc-cxif5315-r0169-peaks-fit-file-v1.py
|
slaclab/lclslib
|
dbb11cf5f0f671fa0ec16c54b6346e9d1df3b0e0
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
exp/cxif5315/proc-cxif5315-r0169-peaks-fit-file-v1.py
|
slaclab/lclslib
|
dbb11cf5f0f671fa0ec16c54b6346e9d1df3b0e0
|
[
"BSD-3-Clause-LBNL"
] | 1 |
2016-06-23T22:51:33.000Z
|
2016-06-23T23:27:51.000Z
|
exp/cxif5315/proc-cxif5315-r0169-peaks-fit-file-v1.py
|
slaclab/lclslib
|
dbb11cf5f0f671fa0ec16c54b6346e9d1df3b0e0
|
[
"BSD-3-Clause-LBNL"
] | 1 |
2020-12-13T01:51:19.000Z
|
2020-12-13T01:51:19.000Z
|
#!/usr/bin/env python
##-----------------------------
"""Example of analysis script:
processes cxif5315-r0169 peak list with phi-beta fit info;
loop over all entries of the peak-fit file and create histograms for fit parameters
"""
##-----------------------------
import sys
import os
import math
import numpy as np
from time import time # strftime, localtime #, gmtime, time
from pyimgalgos.TDFileContainer import TDFileContainer
from pyimgalgos.TDPeakRecord import TDPeakRecord
from pyimgalgos.GlobalGraphics import hist1d, show, move, save #, move_fig, save_fig, fig_axes, plot_img, plot_peaks_on_img
#------------------------------
class Storage :
"""Store for shared parameters."""
prefix = './his-equ-fit' # file name prefix for histograms
DO_HIST = True # save histograms
def __init__(self) :
self.h1d_phi = []
self.h1d_beta = []
self.h1d_phi_err = []
self.h1d_beta_err = []
self.h1d_prob = []
sp = Storage()
#------------------------------
def h1d(hlst, bins=None, amp_range=None, weights=None, color=None, show_stat=True, log=False,\
figsize=(6,5), axwin=(0.15, 0.12, 0.78, 0.80), title='Title', xlabel='x', ylabel='y', titwin=None, fnm='fnm.png') :
"""Wrapper for hist1d, move, and save methods, using common store parameters
"""
fig, axhi, hi = hist1d(np.array(hlst), bins, amp_range, weights, color, show_stat,\
log, figsize, axwin, title, xlabel, ylabel, titwin)
#move(sp.hwin_x0y0[0], sp.hwin_x0y0[1])
save('%s-%s' % (sp.prefix, fnm), sp.DO_HIST)
return fig, axhi, hi
#------------------------------
def plot_histograms() :
#---------
h1d(np.array(sp.h1d_phi), bins=80, amp_range=(-40.,40.), \
title ='Equ: fit phi angle', xlabel='$\phi$ (deg)', ylabel='Events', fnm='phi.png')
#---------
h1d(np.array(sp.h1d_beta), bins=100, amp_range=(-60.,40.), \
title ='Equ: fit beta angle', xlabel='beta (deg)', ylabel='Events', fnm='beta.png')
#---------
h1d(np.array(sp.h1d_phi_err), bins=100, amp_range=(0.,10.), \
title ='Equ: fit phi angle', xlabel='error of $\phi$ (deg)', ylabel='Events', fnm='phi-err.png')
#---------
h1d(np.array(sp.h1d_beta_err), bins=100, amp_range=(0.,50.), \
title ='Equ: fit beta angle', xlabel='error of beta (deg)', ylabel='Events', fnm='beta-err.png')
#---------
h1d(np.array(sp.h1d_prob), bins=100, amp_range=(0.,1.), \
title ='Equ: fit probability', xlabel='probability', ylabel='Events', fnm='prob.png')
#---------
show()
#------------------------------
def proc_file(fname) :
"""Process file with peak records extended by the phi-beta fit info
"""
t0_sec = time()
sp.fc = TDFileContainer(fname, indhdr='Evnum', objtype=TDPeakRecord) #, pbits=256)
print 'File load time = %7.3f sec' % (time()-t0_sec)
#sp.fc.print_content(nlines=20)
for evnum in sp.fc.group_num_iterator() :
event = sp.fc.next()
lst_peaks = event.get_objs()
pk0 = lst_peaks[0]
print '%s\nEvent# %6d exp=%s:run=%d %s %s' % (80*'_', evnum, pk0.exp, pk0.run, pk0.date, pk0.time)
print ' %s\n%s' % (sp.fc.hdr, pk0.line)
        #for pk in lst_peaks : print 'x=%.6f, y=%.6f' % (pk.x, pk.y)
# fill histogram arrays for event
sp.h1d_phi .append(pk0.fit_phi)
sp.h1d_beta .append(pk0.fit_beta)
sp.h1d_phi_err .append(pk0.fit_phi_err)
sp.h1d_beta_err.append(pk0.fit_beta_err)
sp.h1d_prob .append(pk0.fit_prob)
if sp.DO_HIST : plot_histograms()
#------------------------------
if __name__ == "__main__" :
proc_file('/reg/neh/home1/dubrovin/LCLS/rel-mengning/work/pfv2-fit-cxif5315-r0169-2016-04-06T08:32:23.txt')
sys.exit('Processing is completed')
#------------------------------
| 35.227273 | 123 | 0.578065 |
a98e0ef0e2d025a102e86846b89c05b0eac808c8
| 11,885 |
py
|
Python
|
networking_cisco/plugins/cisco/db/device_manager/hosting_devices_db.py
|
CiscoSystems/networking-cisco
|
ed18627faf90caa1c0d1b7fe00f240a57901dee4
|
[
"Apache-2.0"
] | 8 |
2016-02-12T01:25:29.000Z
|
2019-01-13T14:19:25.000Z
|
networking_cisco/plugins/cisco/db/device_manager/hosting_devices_db.py
|
CiscoSystems/networking-cisco
|
ed18627faf90caa1c0d1b7fe00f240a57901dee4
|
[
"Apache-2.0"
] | 25 |
2016-01-28T12:33:41.000Z
|
2016-07-28T21:18:03.000Z
|
networking_cisco/plugins/cisco/db/device_manager/hosting_devices_db.py
|
CiscoSystems/networking-cisco
|
ed18627faf90caa1c0d1b7fe00f240a57901dee4
|
[
"Apache-2.0"
] | 9 |
2015-05-07T02:47:55.000Z
|
2019-10-18T15:25:27.000Z
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc
from neutron.db import common_db_mixin
from neutron.plugins.common import constants as svc_constants
from networking_cisco.plugins.cisco.db.device_manager import hd_models
from networking_cisco.plugins.cisco.extensions import ciscohostingdevicemanager
LOG = logging.getLogger(__name__)
AUTO_DELETE_DEFAULT = ciscohostingdevicemanager.AUTO_DELETE_DEFAULT
class HostingDeviceDBMixin(
ciscohostingdevicemanager.CiscoHostingDevicePluginBase,
common_db_mixin.CommonDbMixin):
"""A class implementing DB functionality for hosting devices."""
def create_hosting_device(self, context, hosting_device):
LOG.debug("create_hosting_device() called")
hd = hosting_device['hosting_device']
tenant_id = self._get_tenant_id_for_create(context, hd)
with context.session.begin(subtransactions=True):
credentials_id = hd.get('credentials_id')
if credentials_id is None:
hdt_db = self._get_hosting_device_template(context,
hd['template_id'])
credentials_id = hdt_db['default_credentials_id']
hd_db = hd_models.HostingDevice(
id=self._get_id(hd),
complementary_id=hd.get('complementary_id'),
tenant_id=tenant_id,
template_id=hd['template_id'],
credentials_id=credentials_id,
name=hd.get('name'),
description=hd.get('description'),
device_id=hd.get('device_id'),
admin_state_up=hd.get('admin_state_up', True),
management_ip_address=hd['management_ip_address'],
management_port_id=hd['management_port_id'],
protocol_port=hd.get('protocol_port'),
cfg_agent_id=hd.get('cfg_agent_id'),
created_at=hd.get('created_at', timeutils.utcnow()),
status=hd.get('status', svc_constants.ACTIVE),
tenant_bound=hd.get('tenant_bound'),
auto_delete=hd.get('auto_delete', AUTO_DELETE_DEFAULT))
context.session.add(hd_db)
return self._make_hosting_device_dict(hd_db)
def update_hosting_device(self, context, id, hosting_device):
LOG.debug("update_hosting_device() called")
hd = hosting_device['hosting_device']
with context.session.begin(subtransactions=True):
#TODO(bobmel): handle tenant_bound changes
hd_query = context.session.query(hd_models.HostingDevice)
if not hd_query.filter_by(id=id).update(hd):
raise ciscohostingdevicemanager.HostingDeviceNotFound(id=id)
#TODO(bobmel): notify_agent on changes to credentials,
# admin_state_up, tenant_bound
return self.get_hosting_device(context, id)
def delete_hosting_device(self, context, id):
LOG.debug("delete_hosting_device() called")
try:
with context.session.begin(subtransactions=True):
hd_query = context.session.query(hd_models.HostingDevice)
if not hd_query.filter_by(id=id).delete():
raise ciscohostingdevicemanager.HostingDeviceNotFound(
id=id)
except db_exc.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
raise ciscohostingdevicemanager.HostingDeviceInUse(id=id)
def get_hosting_device(self, context, id, fields=None):
LOG.debug("get_hosting_device() called")
hd_db = self._get_hosting_device(context, id)
return self._make_hosting_device_dict(hd_db)
def get_hosting_devices(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
LOG.debug("get_hosting_devices() called")
return self._get_collection(context, hd_models.HostingDevice,
self._make_hosting_device_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker,
page_reverse=page_reverse)
def create_hosting_device_template(self, context, hosting_device_template):
LOG.debug("create_hosting_device_template() called")
hdt = hosting_device_template['hosting_device_template']
tenant_id = self._get_tenant_id_for_create(context, hdt)
#TODO(bobmel): check service types
with context.session.begin(subtransactions=True):
hdt_db = hd_models.HostingDeviceTemplate(
id=self._get_id(hdt),
tenant_id=tenant_id,
name=hdt.get('name'),
enabled=hdt.get('enabled', True),
host_category=hdt['host_category'],
service_types=hdt.get('service_types'),
image=hdt.get('image'),
flavor=hdt.get('flavor'),
default_credentials_id=hdt.get('default_credentials_id'),
configuration_mechanism=hdt.get('configuration_mechanism'),
protocol_port=hdt.get('protocol_port'),
booting_time=hdt.get('booting_time'),
slot_capacity=hdt['slot_capacity'],
desired_slots_free=hdt['desired_slots_free'],
tenant_bound=':'.join(hdt['tenant_bound']),
device_driver=hdt['device_driver'],
plugging_driver=hdt['plugging_driver'])
context.session.add(hdt_db)
return self._make_hosting_device_template_dict(hdt_db)
def update_hosting_device_template(self, context,
id, hosting_device_template):
LOG.debug("update_hosting_device_template() called")
hdt = hosting_device_template['hosting_device_template']
tenant_bound = hdt.get('tenant_bound')
if tenant_bound is not None:
hdt['tenant_bound'] = ':'.join(tenant_bound)
with context.session.begin(subtransactions=True):
hdt_query = context.session.query(hd_models.HostingDeviceTemplate)
if not hdt_query.filter_by(id=id).update(hdt):
raise ciscohostingdevicemanager.HostingDeviceTemplateNotFound(
id=id)
return self.get_hosting_device_template(context, id)
def delete_hosting_device_template(self, context, id):
LOG.debug("delete_hosting_device_template() called")
try:
with context.session.begin(subtransactions=True):
hdt_query = context.session.query(
hd_models.HostingDeviceTemplate)
if not hdt_query.filter_by(id=id).delete():
raise (ciscohostingdevicemanager.
HostingDeviceTemplateNotFound(id=id))
except db_exc.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
raise (ciscohostingdevicemanager.
HostingDeviceTemplateInUse(id=id))
def get_hosting_device_template(self, context, id, fields=None):
LOG.debug("get_hosting_device_template() called")
hdt_db = self._get_hosting_device_template(context, id)
return self._make_hosting_device_template_dict(hdt_db)
def get_hosting_device_templates(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
LOG.debug("get_hosting_device_templates() called")
return self._get_collection(context, hd_models.HostingDeviceTemplate,
self._make_hosting_device_template_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker,
page_reverse=page_reverse)
def _get_id(self, res):
uuid = res.get('id')
if uuid:
return uuid
return uuidutils.generate_uuid()
def _get_hosting_device(self, context, id):
try:
return self._get_by_id(context, hd_models.HostingDevice, id)
except exc.NoResultFound:
raise ciscohostingdevicemanager.HostingDeviceNotFound(id=id)
def _make_hosting_device_dict(self, hd, fields=None):
res = {'id': hd['id'],
'complementary_id': hd['complementary_id'],
'tenant_id': hd['tenant_id'],
'template_id': hd['template_id'],
'credentials_id': hd['credentials_id'],
'name': hd['name'],
'description': hd['description'],
'device_id': hd['device_id'],
'admin_state_up': hd['admin_state_up'],
'management_ip_address': hd['management_ip_address'],
'management_port_id': hd['management_port_id'],
'protocol_port': hd['protocol_port'],
'cfg_agent_id': hd['cfg_agent_id'],
'created_at': hd['created_at'],
'status': hd['status'],
'tenant_bound': hd['tenant_bound'],
'auto_delete': hd['auto_delete']}
return self._fields(res, fields)
def _get_hosting_device_template(self, context, id):
try:
return self._get_by_id(context, hd_models.HostingDeviceTemplate,
id)
except exc.NoResultFound:
raise ciscohostingdevicemanager.HostingDeviceTemplateNotFound(
id=id)
def _make_hosting_device_template_dict(self, hdt, fields=None):
tb = hdt['tenant_bound'].split(':') if len(hdt['tenant_bound']) else []
res = {'id': hdt['id'],
'tenant_id': hdt['tenant_id'],
'name': hdt['name'],
'enabled': hdt['enabled'],
'host_category': hdt['host_category'],
'service_types': hdt['service_types'],
'image': hdt['image'],
'flavor': hdt['flavor'],
'default_credentials_id': hdt['default_credentials_id'],
'configuration_mechanism': hdt['configuration_mechanism'],
'protocol_port': hdt['protocol_port'],
'booting_time': hdt['booting_time'],
'slot_capacity': hdt['slot_capacity'],
'desired_slots_free': hdt['desired_slots_free'],
'tenant_bound': tb,
'device_driver': hdt['device_driver'],
'plugging_driver': hdt['plugging_driver']}
return self._fields(res, fields)
| 48.510204 | 79 | 0.613799 |
c4409e6022c96881f96f61d9bbc07b9005839581
| 19,921 |
py
|
Python
|
mezzanine/core/tests.py
|
abendig/mezzanine
|
3219ac9ba2d6d94ce63e8b2a747c3b264b13beec
|
[
"BSD-2-Clause"
] | null | null | null |
mezzanine/core/tests.py
|
abendig/mezzanine
|
3219ac9ba2d6d94ce63e8b2a747c3b264b13beec
|
[
"BSD-2-Clause"
] | null | null | null |
mezzanine/core/tests.py
|
abendig/mezzanine
|
3219ac9ba2d6d94ce63e8b2a747c3b264b13beec
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import unicode_literals
import re
try:
# Python 3
from urllib.parse import urlencode
except ImportError:
# Python 2
from urllib import urlencode
from django import VERSION
from django.contrib.admin import AdminSite
from django.contrib.admin.options import InlineModelAdmin
from django.contrib.sites.models import Site
from django.core import mail
from django.core.urlresolvers import reverse
from django.db import models
from django.forms import Textarea
from django.forms.models import modelform_factory
from django.templatetags.static import static
from django.test.utils import override_settings
from django.utils.html import strip_tags
from django.utils.unittest import skipUnless
from mezzanine.conf import settings
from mezzanine.core.admin import BaseDynamicInlineAdmin
from mezzanine.core.fields import RichTextField
from mezzanine.core.managers import DisplayableManager
from mezzanine.core.models import (CONTENT_STATUS_DRAFT,
CONTENT_STATUS_PUBLISHED)
from mezzanine.forms.admin import FieldAdmin
from mezzanine.forms.models import Form
from mezzanine.pages.models import RichTextPage
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.tests import (TestCase, run_pyflakes_for_package,
run_pep8_for_package)
from mezzanine.utils.html import TagCloser
class CoreTests(TestCase):
def test_tagcloser(self):
"""
Test tags are closed, and tags that shouldn't be closed aren't.
"""
self.assertEqual(TagCloser("<p>Unclosed paragraph").html,
"<p>Unclosed paragraph</p>")
self.assertEqual(TagCloser("Line break<br>").html,
"Line break<br>")
@skipUnless("mezzanine.mobile" in settings.INSTALLED_APPS and
"mezzanine.pages" in settings.INSTALLED_APPS,
"mobile and pages apps required")
def test_device_specific_template(self):
"""
Test that an alternate template is rendered when a mobile
device is used.
"""
ua = settings.DEVICE_USER_AGENTS[0][1][0]
kwargs = {"slug": "device-test"}
url = reverse("page", kwargs=kwargs)
kwargs["status"] = CONTENT_STATUS_PUBLISHED
RichTextPage.objects.get_or_create(**kwargs)
default = self.client.get(url)
mobile = self.client.get(url, HTTP_USER_AGENT=ua)
self.assertNotEqual(default.template_name[0], mobile.template_name[0])
def test_syntax(self):
"""
Run pyflakes/pep8 across the code base to check for potential errors.
"""
warnings = []
warnings.extend(run_pyflakes_for_package("mezzanine"))
warnings.extend(run_pep8_for_package("mezzanine"))
if warnings:
self.fail("Syntax warnings!\n\n%s" % "\n".join(warnings))
def test_utils(self):
"""
        Miscellaneous tests for the ``mezzanine.utils`` package.
"""
self.assertRaises(ImportError, import_dotted_path, "mezzanine")
self.assertRaises(ImportError, import_dotted_path, "mezzanine.NO")
self.assertRaises(ImportError, import_dotted_path, "mezzanine.core.NO")
try:
import_dotted_path("mezzanine.core")
except ImportError:
self.fail("mezzanine.utils.imports.import_dotted_path"
"could not import \"mezzanine.core\"")
@skipUnless("mezzanine.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_description(self):
"""
Test generated description is text version of the first line
of content.
"""
description = "<p>How now brown cow</p>"
page = RichTextPage.objects.create(title="Draft",
content=description * 3)
self.assertEqual(page.description, strip_tags(description))
@skipUnless("mezzanine.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_draft(self):
"""
        Test that a draft object is only viewable by a staff member.
"""
self.client.logout()
draft = RichTextPage.objects.create(title="Draft",
status=CONTENT_STATUS_DRAFT)
response = self.client.get(draft.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 404)
self.client.login(username=self._username, password=self._password)
response = self.client.get(draft.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 200)
def test_searchable_manager_search_fields(self):
"""
Test that SearchableManager can get appropriate params.
"""
manager = DisplayableManager()
self.assertFalse(manager._search_fields)
manager = DisplayableManager(search_fields={'foo': 10})
self.assertTrue(manager._search_fields)
@skipUnless("mezzanine.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_search(self):
"""
Objects with status "Draft" should not be within search results.
"""
RichTextPage.objects.all().delete()
published = {"status": CONTENT_STATUS_PUBLISHED}
first = RichTextPage.objects.create(title="test page",
status=CONTENT_STATUS_DRAFT).id
second = RichTextPage.objects.create(title="test another test page",
**published).id
# Draft shouldn't be a result.
results = RichTextPage.objects.search("test")
self.assertEqual(len(results), 1)
RichTextPage.objects.filter(id=first).update(**published)
results = RichTextPage.objects.search("test")
self.assertEqual(len(results), 2)
# Either word.
results = RichTextPage.objects.search("another test")
self.assertEqual(len(results), 2)
# Must include first word.
results = RichTextPage.objects.search("+another test")
self.assertEqual(len(results), 1)
# Mustn't include first word.
results = RichTextPage.objects.search("-another test")
self.assertEqual(len(results), 1)
if results:
self.assertEqual(results[0].id, first)
# Exact phrase.
results = RichTextPage.objects.search('"another test"')
self.assertEqual(len(results), 1)
if results:
self.assertEqual(results[0].id, second)
# Test ordering.
results = RichTextPage.objects.search("test")
self.assertEqual(len(results), 2)
if results:
self.assertEqual(results[0].id, second)
# Test the actual search view.
response = self.client.get(reverse("search") + "?q=test")
self.assertEqual(response.status_code, 200)
def _create_page(self, title, status):
return RichTextPage.objects.create(title=title, status=status)
def _test_site_pages(self, title, status, count):
# test _default_manager
pages = RichTextPage._default_manager.all()
self.assertEqual(pages.count(), count)
self.assertTrue(title in [page.title for page in pages])
# test objects manager
pages = RichTextPage.objects.all()
self.assertEqual(pages.count(), count)
self.assertTrue(title in [page.title for page in pages])
# test response status code
code = 200 if status == CONTENT_STATUS_PUBLISHED else 404
pages = RichTextPage.objects.filter(status=status)
response = self.client.get(pages[0].get_absolute_url(), follow=True)
self.assertEqual(response.status_code, code)
@skipUnless("mezzanine.pages" in settings.INSTALLED_APPS,
"pages app required")
def test_multisite(self):
from django.conf import settings
# setup
try:
old_site_id = settings.SITE_ID
        except AttributeError:
old_site_id = None
site1 = Site.objects.create(domain="site1.com")
site2 = Site.objects.create(domain="site2.com")
# create pages under site1, which should be only accessible
# when SITE_ID is site1
settings.SITE_ID = site1.pk
site1_page = self._create_page("Site1", CONTENT_STATUS_PUBLISHED)
self._test_site_pages("Site1", CONTENT_STATUS_PUBLISHED, count=1)
# create pages under site2, which should only be accessible
# when SITE_ID is site2
settings.SITE_ID = site2.pk
self._create_page("Site2", CONTENT_STATUS_PUBLISHED)
self._test_site_pages("Site2", CONTENT_STATUS_PUBLISHED, count=1)
# original page should 404
response = self.client.get(site1_page.get_absolute_url(), follow=True)
self.assertEqual(response.status_code, 404)
# change back to site1, and only the site1 pages should be retrieved
settings.SITE_ID = site1.pk
self._test_site_pages("Site1", CONTENT_STATUS_PUBLISHED, count=1)
# insert a new record, see the count change
self._create_page("Site1 Draft", CONTENT_STATUS_DRAFT)
self._test_site_pages("Site1 Draft", CONTENT_STATUS_DRAFT, count=2)
self._test_site_pages("Site1 Draft", CONTENT_STATUS_PUBLISHED, count=2)
# change back to site2, and only the site2 pages should be retrieved
settings.SITE_ID = site2.pk
self._test_site_pages("Site2", CONTENT_STATUS_PUBLISHED, count=1)
# insert a new record, see the count change
self._create_page("Site2 Draft", CONTENT_STATUS_DRAFT)
self._test_site_pages("Site2 Draft", CONTENT_STATUS_DRAFT, count=2)
self._test_site_pages("Site2 Draft", CONTENT_STATUS_PUBLISHED, count=2)
# tear down
if old_site_id:
settings.SITE_ID = old_site_id
else:
del settings.SITE_ID
site1.delete()
site2.delete()
def _static_proxy(self, querystring):
self.client.login(username=self._username, password=self._password)
proxy_url = '%s?%s' % (reverse('static_proxy'), querystring)
response = self.client.get(proxy_url)
self.assertEqual(response.status_code, 200)
@override_settings(STATIC_URL='/static/')
def test_static_proxy(self):
querystring = urlencode([('u', static("test/image.jpg"))])
self._static_proxy(querystring)
@override_settings(STATIC_URL='http://testserver/static/')
def test_static_proxy_with_host(self):
querystring = urlencode(
[('u', static("test/image.jpg"))])
self._static_proxy(querystring)
@override_settings(STATIC_URL='http://testserver:8000/static/')
def test_static_proxy_with_static_url_with_full_host(self):
from django.templatetags.static import static
querystring = urlencode([('u', static("test/image.jpg"))])
self._static_proxy(querystring)
def _get_csrftoken(self, response):
csrf = re.findall(
b'\<input type\=\'hidden\' name\=\'csrfmiddlewaretoken\' '
b'value\=\'([^"\']+)\' \/\>',
response.content
)
self.assertEqual(len(csrf), 1, 'No csrfmiddlewaretoken found!')
return csrf[0]
def _get_formurl(self, response):
action = re.findall(
b'\<form action\=\"([^\"]*)\" method\=\"post\"\>',
response.content
)
self.assertEqual(len(action), 1, 'No form with action found!')
if action[0] == b'':
action = response.request['PATH_INFO']
return action
@skipUnless('mezzanine.pages' in settings.INSTALLED_APPS,
'pages app required')
@override_settings(LANGUAGE_CODE="en")
def test_password_reset(self):
"""
Test sending of password-reset mails and evaluation of the links.
"""
self.client.logout()
del mail.outbox[:]
# Go to admin-login, search for reset-link
response = self.client.get('/admin/', follow=True)
self.assertContains(response, u'Forgot password?')
url = re.findall(
b'\<a href\=["\']([^\'"]+)["\']\>Forgot password\?\<\/a\>',
response.content
)
self.assertEqual(len(url), 1)
url = url[0]
# Go to reset-page, submit form
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
csrf = self._get_csrftoken(response)
url = self._get_formurl(response)
response = self.client.post(url, {
'csrfmiddlewaretoken': csrf,
'email': self._emailaddress
})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
# Get reset-link, submit form
url = re.findall(
r'http://example.com((?:/\w{2,3})?/reset/[^/]+/[^/]+/)',
mail.outbox[0].body
)[0]
response = self.client.get(url)
csrf = self._get_csrftoken(response)
url = self._get_formurl(response)
response = self.client.post(url, {
'csrfmiddlewaretoken': csrf,
'new_password1': 'newdefault',
'new_password2': 'newdefault'
}, follow=True)
self.assertEqual(response.status_code, 200)
def test_richtext_widget(self):
"""
Test that the RichTextField gets its widget type correctly from
settings, and is able to be overridden in a form's Meta.
"""
class RichTextModel(models.Model):
text_default = RichTextField()
text_overridden = RichTextField()
form_class = modelform_factory(
RichTextModel,
fields=('text_default', 'text_overridden'),
widgets={'text_overridden': Textarea})
form = form_class()
richtext_widget = import_dotted_path(settings.RICHTEXT_WIDGET_CLASS)
self.assertIsInstance(form.fields['text_default'].widget,
richtext_widget)
self.assertIsInstance(form.fields['text_overridden'].widget,
Textarea)
def test_admin_sites_dropdown(self):
"""
Ensures the site selection dropdown appears in the admin.
"""
self.client.login(username=self._username, password=self._password)
response = self.client.get('/admin/', follow=True)
set_site_url = reverse("set_site")
# Set site URL shouldn't appear without multiple sites.
self.assertNotContains(response, set_site_url)
site1 = Site.objects.create(domain="test-site-dropdown1.com",
name="test-site-dropdown1.com")
site2 = Site.objects.create(domain="test-site-dropdown2.com",
name="test-site-dropdown2.com")
response = self.client.get('/admin/', follow=True)
self.assertContains(response, set_site_url)
self.assertContains(response, site1.name)
self.assertContains(response, site2.name)
site1.delete()
site2.delete()
def test_dynamic_inline_admins(self):
"""
Verifies that ``BaseDynamicInlineAdmin`` properly adds the ``_order``
field for admins of ``Orderable`` subclasses.
"""
request = self._request_factory.get('/admin/')
request.user = self._user
field_admin = FieldAdmin(Form, AdminSite())
fieldsets = field_admin.get_fieldsets(request)
self.assertEqual(fieldsets[0][1]['fields'][-1], '_order')
if VERSION >= (1, 7):
fields = field_admin.get_fields(request)
self.assertEqual(fields[-1], '_order')
def test_dynamic_inline_admins_fields_tuple(self):
"""
Checks if moving the ``_order`` field works with immutable sequences.
"""
class MyModelInline(BaseDynamicInlineAdmin, InlineModelAdmin):
# Any model would work since we're only instantiating the class and
# not actually using it.
model = RichTextPage
fields = ('a', '_order', 'b')
request = self._request_factory.get('/admin/')
inline = MyModelInline(None, AdminSite())
fields = inline.get_fieldsets(request)[0][1]['fields']
self.assertSequenceEqual(fields, ('a', 'b', '_order'))
def test_dynamic_inline_admins_fields_without_order(self):
"""
Checks that ``_order`` field will be added if ``fields`` are listed
without it.
"""
class MyModelInline(BaseDynamicInlineAdmin, InlineModelAdmin):
model = RichTextPage
fields = ('a', 'b')
request = self._request_factory.get('/admin/')
inline = MyModelInline(None, AdminSite())
fields = inline.get_fieldsets(request)[0][1]['fields']
self.assertSequenceEqual(fields, ('a', 'b', '_order'))
def test_dynamic_inline_admins_fieldsets(self):
"""
Tests if ``_order`` is moved to the end of the last fieldsets fields.
"""
class MyModelInline(BaseDynamicInlineAdmin, InlineModelAdmin):
model = RichTextPage
fieldsets = (("Fieldset 1", {'fields': ('a',)}),
("Fieldset 2", {'fields': ('_order', 'b')}),
("Fieldset 3", {'fields': ('c')}))
request = self._request_factory.get('/admin/')
inline = MyModelInline(None, AdminSite())
fieldsets = inline.get_fieldsets(request)
self.assertEqual(fieldsets[-1][1]["fields"][-1], '_order')
self.assertNotIn('_order', fieldsets[1][1]["fields"])
@skipUnless("mezzanine.pages" in settings.INSTALLED_APPS,
"pages app required")
class SiteRelatedTestCase(TestCase):
def test_update_site(self):
from django.conf import settings
from mezzanine.utils.sites import current_site_id
# setup
try:
old_site_id = settings.SITE_ID
        except AttributeError:
old_site_id = None
site1 = Site.objects.create(domain="site1.com")
site2 = Site.objects.create(domain="site2.com")
# default behaviour, page gets assigned current site
settings.SITE_ID = site2.pk
self.assertEqual(settings.SITE_ID, current_site_id())
page = RichTextPage()
page.save()
self.assertEqual(page.site_id, site2.pk)
# Subsequent saves do not update site to current site
page.site = site1
page.save()
self.assertEqual(page.site_id, site1.pk)
# resave w/ update_site=True, page gets assigned current site
settings.SITE_ID = site1.pk
page.site = site2
page.save(update_site=True)
self.assertEqual(page.site_id, site1.pk)
# resave w/ update_site=False, page does not update site
settings.SITE_ID = site2.pk
page.save(update_site=False)
self.assertEqual(page.site_id, site1.pk)
# When update_site=True, new page gets assigned current site
settings.SITE_ID = site2.pk
page = RichTextPage()
page.site = site1
page.save(update_site=True)
self.assertEqual(page.site_id, site2.pk)
# When update_site=False, new page keeps current site
settings.SITE_ID = site2.pk
page = RichTextPage()
page.site = site1
page.save(update_site=False)
self.assertEqual(page.site_id, site1.pk)
# When site explicitly assigned, new page keeps assigned site
settings.SITE_ID = site2.pk
page = RichTextPage()
page.site = site1
page.save()
self.assertEqual(page.site_id, site1.pk)
# tear down
if old_site_id:
settings.SITE_ID = old_site_id
else:
del settings.SITE_ID
site1.delete()
site2.delete()
| 38.984344 | 79 | 0.63039 |
0e80ce5c588f6e623effcc181d5baa28701e6308
| 2,104 |
py
|
Python
|
main/templatetags/ranking_filter.py
|
sapuri/srandom.com
|
e0a7843886a97329f9022d4889ffafa3de708448
|
[
"MIT"
] | 3 |
2019-05-04T08:22:38.000Z
|
2019-12-14T13:07:49.000Z
|
main/templatetags/ranking_filter.py
|
sapuri/srandom.com
|
e0a7843886a97329f9022d4889ffafa3de708448
|
[
"MIT"
] | 49 |
2019-07-02T15:17:09.000Z
|
2022-03-21T20:11:59.000Z
|
main/templatetags/ranking_filter.py
|
sapuri/srandom.com
|
e0a7843886a97329f9022d4889ffafa3de708448
|
[
"MIT"
] | null | null | null |
from django import template
register = template.Library()
# Returns the average BAD count for the given song
@register.filter
def bad_count_avg(bad_count_list, music):
if not bad_count_list:
return None
    bad_count_sum = 0  # total of BAD counts
    bad_count_num = 0  # number of BAD count records
for bad_count in bad_count_list:
if bad_count.music.id == music.id:
            bad_count_sum += bad_count.bad_count
bad_count_num += 1
    # Compute the average BAD count (rounded); guard against division by zero
    if bad_count_num == 0:
        return None
    bad_count_avg = round(bad_count_sum / bad_count_num)
return bad_count_avg
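# Hypothetical template usage of the filter above (illustrative only):
#   {{ bad_count_list|bad_count_avg:music }}
# e.g. renders 3 when the song's registered BAD counts are 2, 3 and 4.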
# Returns the current user's rank for the given song
@register.filter
def bad_count_rank(bad_count_list_ordered, music_id_and_myself_id):
    '''
    Returns the current user's rank for the given song.
    @param bad_count_list_ordered: BAD count list sorted in ascending order
    @param music_id_and_myself_id: music ID and own user ID joined by a comma
    '''
if not bad_count_list_ordered:
return '-'
    # Split on the comma
music_id_and_myself_id = music_id_and_myself_id.split(",")
music_id = int(music_id_and_myself_id[0])
myself_id = int(music_id_and_myself_id[1])
    bad_count_num = 0  # number of BAD count records
    bad_count_now = -1  # BAD count currently being examined
    myrank = -1  # the current user's rank
    found = False  # True once the user's own BAD count has been found
for bad_count in bad_count_list_ordered:
if bad_count.music.id == music_id:
bad_count_before = bad_count_now
bad_count_now = bad_count.bad_count
            # If this BAD count ties with the previous one
            if bad_count_now == bad_count_before:
                # If the given user's record is found, store the rank in myrank
if bad_count.user.id == myself_id:
found = True
myrank = tmp_rank
bad_count_num += 1
            # If this BAD count does not tie with the previous one
            else:
                bad_count_num += 1
                # Update the provisional rank
                tmp_rank = bad_count_num
                # If the user's own record is found, store the rank in myrank
if bad_count.user.id == myself_id:
found = True
myrank = bad_count_num
if found:
return '%d / %d' % (myrank, bad_count_num)
elif bad_count_num != 0:
return '- / %d' % (bad_count_num)
else:
return '- / -'
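# Hypothetical template usage of bad_count_rank (illustrative only; the
# argument is "<music_id>,<user_id>"):
#   {{ bad_count_list_ordered|bad_count_rank:"3,42" }}  ->  e.g. "2 / 10"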
| 27.324675 | 67 | 0.602662 |
ca57524649791d4e2b49c32bfc1def0c7a5b47ff
| 8,326 |
py
|
Python
|
dreamcoder/PCFG/program.py
|
nathanael-fijalkow/ec
|
c20fd84ca3944f904f5c999ed5df46eb6b6f8ef5
|
[
"MIT"
] | 1 |
2021-06-11T08:20:25.000Z
|
2021-06-11T08:20:25.000Z
|
dreamcoder/PCFG/program.py
|
nathanael-fijalkow/ec
|
c20fd84ca3944f904f5c999ed5df46eb6b6f8ef5
|
[
"MIT"
] | null | null | null |
dreamcoder/PCFG/program.py
|
nathanael-fijalkow/ec
|
c20fd84ca3944f904f5c999ed5df46eb6b6f8ef5
|
[
"MIT"
] | null | null | null |
from dreamcoder.PCFG.type_system import *
from dreamcoder.PCFG.cons_list import index
import logging
# evaluation: a dictionary { environment number : value }
# environment: a cons list
# list = None | (value, list)
# probability: a dictionary {(G.__hash__(), S) : probability}
# such that P.probability[S] is the probability that P is generated
# from the non-terminal S when the underlying PCFG is G.
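# Illustrative sketch added for clarity (not part of the original module),
# assuming index(env, i) walks i cons cells before returning the head:
#   env = (v1, (v0, None))   # a cons-list environment holding two values
#   index(env, 0)  ->  v1
#   index(env, 1)  ->  v0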
class Program:
"""
Object that represents a program: a lambda term with basic primitives
"""
def __eq__(self, other):
return (
isinstance(self, Program)
and isinstance(other, Program)
and self.type.__eq__(other.type)
and self.typeless_eq(other)
)
def typeless_eq(self, other, verbose=False):
if verbose:
print(
"checking:\n 1:{}\nclass: {}\n 2:{}\nclass: {}".format(
self,
self.__class__.__name__,
other,
other.__class__.__name__,
)
)
b = isinstance(self, Program) and isinstance(other, Program)
b2 = (
isinstance(self, Variable)
and isinstance(other, Variable)
and self.variable == other.variable
)
b2 = b2 or (
isinstance(self, Function)
and isinstance(other, Function)
and self.function.typeless_eq(other.function, verbose)
and len(self.arguments) == len(other.arguments)
and all(
[
x.typeless_eq(y, verbose)
for x, y in zip(self.arguments, other.arguments)
]
)
)
b2 = b2 or (
isinstance(self, Lambda)
and isinstance(other, Lambda)
and self.body.typeless_eq(other.body)
)
b2 = b2 or (
isinstance(self, BasicPrimitive)
and isinstance(other, BasicPrimitive)
and self.primitive == other.primitive
)
b2 = b2 or (
isinstance(self, New)
and isinstance(other, New)
and (self.body).typeless_eq(other.body, verbose)
)
return b and b2
    def __gt__(self, other):
        return True
    def __lt__(self, other):
        return False
    def __ge__(self, other):
        return True
    def __le__(self, other):
        return False
def __hash__(self):
return self.hash
class Variable(Program):
def __init__(self, variable, type_=UnknownType(), probability={}):
# self.variable is a natural number
# assert isinstance(variable, int)
self.variable = variable
# assert isinstance(type_, Type)
self.type = type_
self.hash = variable
self.probability = probability
self.evaluation = {}
def __repr__(self):
return "var" + format(self.variable)
def eval(self, dsl, environment, i):
if i in self.evaluation:
# logging.debug('Already evaluated')
return self.evaluation[i]
# logging.debug('Not yet evaluated')
try:
result = index(environment, self.variable)
self.evaluation[i] = result
return result
except (IndexError, ValueError, TypeError):
self.evaluation[i] = None
return None
class Function(Program):
def __init__(self, function, arguments, type_=UnknownType(), probability={}):
# assert isinstance(function, Program)
self.function = function
# assert isinstance(arguments, list)
self.arguments = arguments
self.type = type_
self.hash = hash(tuple([arg.hash for arg in self.arguments] + [self.function.hash]))
self.probability = probability
self.evaluation = {}
def __repr__(self):
if len(self.arguments) == 0:
return format(self.function)
else:
s = "(" + format(self.function)
for arg in self.arguments:
s += " " + format(arg)
return s + ")"
def eval(self, dsl, environment, i):
if i in self.evaluation:
# logging.debug('Already evaluated')
return self.evaluation[i]
# logging.debug('Not yet evaluated')
try:
if len(self.arguments) == 0:
return self.function.eval(dsl, environment, i)
else:
evaluated_arguments = []
for j in range(len(self.arguments)):
e = self.arguments[j].eval(dsl, environment, i)
evaluated_arguments.append(e)
result = self.function.eval(dsl, environment, i)
for evaluated_arg in evaluated_arguments:
result = result(evaluated_arg)
self.evaluation[i] = result
return result
except (IndexError, ValueError, TypeError):
self.evaluation[i] = None
return None
class Lambda(Program):
def __init__(self, body, type_=UnknownType(), probability={}):
# assert isinstance(body, Program)
self.body = body
# assert isinstance(type_, Type)
self.type = type_
self.hash = hash(94135 + body.hash)
self.probability = probability
self.evaluation = {}
def __repr__(self):
s = "(lambda " + format(self.body) + ")"
return s
def eval(self, dsl, environment, i):
if i in self.evaluation:
# logging.debug('Already evaluated')
return self.evaluation[i]
# logging.debug('Not yet evaluated')
try:
result = lambda x: self.body.eval(dsl, (x, environment), i)
self.evaluation[i] = result
return result
except (IndexError, ValueError, TypeError):
self.evaluation[i] = None
return None
class BasicPrimitive(Program):
def __init__(self, primitive, type_=UnknownType(), probability={}):
# assert isinstance(primitive, str)
self.primitive = primitive
# assert isinstance(type_, Type)
self.type = type_
self.hash = hash(primitive) + self.type.hash
self.probability = probability
self.evaluation = {}
def __repr__(self):
return format(self.primitive)
def eval(self, dsl, environment, i):
return dsl.semantics[self.primitive]
class New(Program):
def __init__(self, body, type_=UnknownType(), probability={}):
self.body = body
self.type = type_
self.hash = hash(783712 + body.hash) + type_.hash
self.probability = probability
self.evaluation = {}
def __repr__(self):
return format(self.body)
def eval(self, dsl, environment, i):
if i in self.evaluation:
# logging.debug('Already evaluated')
return self.evaluation[i]
# logging.debug('Not yet evaluated')
try:
result = self.body.eval(dsl, environment, i)
self.evaluation[i] = result
return result
except (IndexError, ValueError, TypeError):
self.evaluation[i] = None
return None
def reconstruct_from_list(program_as_list, target_type):
if len(program_as_list) == 1:
return program_as_list.pop()
else:
P = program_as_list.pop()
if isinstance(P, (New, BasicPrimitive)):
list_arguments = P.type.ends_with(target_type)
arguments = [None] * len(list_arguments)
for i in range(len(list_arguments)):
arguments[len(list_arguments) - i - 1] = reconstruct_from_list(
program_as_list, list_arguments[len(list_arguments) - i - 1]
)
return Function(P, arguments)
if isinstance(P, Variable):
return P
assert False
def reconstruct_from_compressed(program, target_type):
program_as_list = []
list_from_compressed(program, program_as_list)
program_as_list.reverse()
return reconstruct_from_list(program_as_list, target_type)
def list_from_compressed(program, program_as_list=None):
    if program_as_list is None:  # avoid sharing a mutable default list between calls
        program_as_list = []
    (P, sub_program) = program
if sub_program:
list_from_compressed(sub_program, program_as_list)
program_as_list.append(P)
| 31.657795 | 92 | 0.575306 |
82371bf8e8cf85dc10bc4167d71e962a2c01a661
| 17,749 |
py
|
Python
|
Python/read_bbox.py
|
xyj77/MRIDataLoader
|
dbbe6a77b3c96d66e374705370ac38a518cdce0f
|
[
"MIT"
] | 2 |
2018-10-10T13:12:10.000Z
|
2019-04-18T13:11:55.000Z
|
Python/read_bbox.py
|
xyj77/MRIDataLoader
|
dbbe6a77b3c96d66e374705370ac38a518cdce0f
|
[
"MIT"
] | null | null | null |
Python/read_bbox.py
|
xyj77/MRIDataLoader
|
dbbe6a77b3c96d66e374705370ac38a518cdce0f
|
[
"MIT"
] | 3 |
2018-06-21T10:06:34.000Z
|
2018-10-10T13:18:16.000Z
|
#-*- coding:utf-8 -*-
import os
import re
import pandas
import pickle
import numpy as np
from PIL import Image
import scipy.io as sio
import scipy.misc as misc
BBOX_DATA_DIR = '../../Data/BboxAug'
MAT_DATA_DIR = '/media/lab304/J52/Dataset/Mat'
LABEL_PATH = '../../Data/labels.csv'
SAVE_DIR = '../../Data'
sampleSet = set()
def readLabel(asIndex = 'modalNo', index = 'A'):
'''Read labels
Read labels as request.
Args:
asIndex: String, Index type.
index: String, Index.
Returns:
labels: A DataFrame of reorganized labels.
For example:
serNo Location Center meanSize \
tumourNo modalNo
1 A 7 [257,167,301,236,5,8] [279,201,7] [42,64]
B 8 [255,178,300,237,6,8] [277,207,8] [42,64]
...
J 22 [247,170,287,234,14,24] [267,202,22] [42,64]
2 A 8 [214,182,254,229,6,9] [234,205,8] [35,42]
B 8 [218,183,253,227,7,9] [235,205,8] [35,42]
...
J 25 [206,186,240,227,23,28] [223,206,25] [35,42]
3 A 9 [281,142,303,166,8,9] [292,154,9] [19,22]
...
Or:
serNo Location Center meanSize \
patientNo tumourNo
'00070993' 1 8 [223,176,284,242,6,10] [253,209,8] [62,65]
'00090960' 1 5 [191,139,224,184,5,5] [207,161,5] [31,42]
'00159253' 1 13 [231,149,288,206,11,15] [259,177,13] [56,57]
'00190415' 1 7 [257,167,301,236,5,8] [279,201,7] [42,64]
2 8 [214,182,254,229,6,9] [234,205,8] [35,42]
3 9 [281,142,303,166,8,9] [292,154,9] [19,22]
'00431620' 1 15 [245,64,348,172,9,17] [296,118,15] [97,103]
'00525849' 1 9 [153,216,183,242,9,9] [168,229,9] [28,25]
'00582685' 1 12 [264,104,317,159,10,13] [290,131,12] [56,59]
...
Raises:
None
Usage:
readLabel(asIndex = 'patientNo', index = '00190415')
readLabel(asIndex = 'modalNo', index = 'A')
'''
    # Read the csv file
if asIndex == 'modalNo':
labels = pandas.read_csv(LABEL_PATH, index_col=[2,0,1])
elif asIndex == 'patientNo':
index = '\'' + index + '\''
labels = pandas.read_csv(LABEL_PATH, index_col=[0,1,2])
labels = labels.fillna('null')
    ''' DataFrame usage:
# print(labels.dtypes)
# print(labels.iloc[0])
# print(labels.loc[('\'00190415\'', 2, 'B'), :])
# print(labels.loc[('\'00190415\'', 2, 'B'), 'WHO'])
# print(labels.loc['\'00190415\'', 2, 'B']['WHO'])
'''
return labels.loc[index]
def readBbox(liverVolume, tumourInfo, saveNeg=False):
pattern = re.compile(r'[\d]')
tumourLoc = [int(x) for x in tumourInfo['Location'][1:-1].split(',') if pattern.search(x)]
tumourCenter = [int(x) for x in tumourInfo['Center'][1:-1].split(',') if pattern.search(x)]
tumourSize = [int(x) for x in tumourInfo['meanSize'][1:-1].split(',') if pattern.search(x)]
if saveNeg:
tumourD = [int(x) for x in tumourInfo['d'][1:-1].split(',') if pattern.search(x)]
tumourCenter[0], tumourCenter[1] = tumourCenter[0] + tumourD[0], tumourCenter[1] + tumourD[1]
return liverVolume[tumourCenter[0]-tumourSize[0]/2:tumourCenter[0]+tumourSize[0]/2+1,
tumourCenter[1]-tumourSize[1]/2:tumourCenter[1]+tumourSize[1]/2+1,
tumourLoc[4]:tumourLoc[5]+1]
def saveSliceToFile(Bbox, index, savePath):
    '''
    # Method 1: use numpy's rot90 and flip
    '''
    # Save the original image
image = np.squeeze(Bbox[:, :, index])
misc.imsave(savePath + '.jpg', image)
    # Save the augmented images
    ############## rotate volume ##############
    rot90 = np.rot90(Bbox) # rotate 90 degrees counter-clockwise
image = np.squeeze(rot90[:, :, index])
misc.imsave(savePath + '_90.jpg', image)
    rot180 = np.rot90(Bbox, 2) # rotate 180 degrees counter-clockwise
image = np.squeeze(rot180[:, :, index])
misc.imsave(savePath + '_180.jpg', image)
    rot270 = np.rot90(Bbox, 3) # rotate 270 degrees counter-clockwise
image = np.squeeze(rot270[:, :, index])
misc.imsave(savePath + '_270.jpg', image)
############### flip volume ###############
    lr = np.fliplr(Bbox) # flip left-right
image = np.squeeze(lr[:, :, index])
misc.imsave(savePath + '_lr.jpg', image)
    ud = np.flipud(Bbox) # flip up-down
image = np.squeeze(ud[:, :, index])
misc.imsave(savePath + '_ud.jpg', image)
print(savePath+' saved!')
'''
    # Method 2: use PIL Image's transpose and rotate
    # Only works for single-channel grayscale and three-channel color images
    roi = Bbox[:, :, index]
    # Augmentation
    image = np.squeeze(roi)
    image = Image.fromarray(image)
    # Resize data that is particularly small
    w, h = image.size
    m = min(w, h)
    if m < 32:
        w, h = int(32.0*w/m), int(32.0*h/m)
        image = image.resize((w, h)) # reset width and height
    print(w, h)
    # Save the original image
    misc.imsave(savePath + '.jpg', roi)
    # Save the augmented images
    # dst5 = img.rotate(45) # rotate 45 degrees counter-clockwise
    img = image.transpose(Image.ROTATE_90) # rotate 90 degrees counter-clockwise
    misc.imsave(savePath + '_90.jpg', img)
    img = image.transpose(Image.ROTATE_180) # rotate 180 degrees counter-clockwise
    misc.imsave(savePath + '_180.jpg', img)
    img = image.transpose(Image.ROTATE_270) # rotate 270 degrees counter-clockwise
    misc.imsave(savePath + '_270.jpg', img)
    img = image.transpose(Image.FLIP_LEFT_RIGHT) # flip left-right
    misc.imsave(savePath + '_lr.jpg', img)
    img = image.transpose(Image.FLIP_TOP_BOTTOM) # flip up-down
misc.imsave(savePath + '_ud.jpg', img)
print(savePath+' saved!')
'''
def saveFusionToFile(picMat, savePath, saveTpye):
if saveTpye == '.jpg':
        # Save the original image
        misc.imsave(savePath+saveTpye, picMat)
        # Save the augmented images
        ############## rotate volume ##############
        rot90 = np.rot90(picMat) # rotate 90 degrees counter-clockwise
        misc.imsave(savePath + '_90' + saveTpye, rot90)
        rot180 = np.rot90(picMat, 2) # rotate 180 degrees counter-clockwise
        misc.imsave(savePath + '_180' + saveTpye, rot180)
        rot270 = np.rot90(picMat, 3) # rotate 270 degrees counter-clockwise
        misc.imsave(savePath + '_270' + saveTpye, rot270)
        ############### flip volume ###############
        lr = np.fliplr(picMat) # flip left-right
        misc.imsave(savePath + '_lr' + saveTpye, lr)
        ud = np.flipud(picMat) # flip up-down
misc.imsave(savePath + '_ud' + saveTpye, ud)
else:
        # Save the original image
        np.save(savePath+saveTpye, picMat)
        # Save the augmented images
        ############## rotate volume ##############
        rot90 = np.rot90(picMat) # rotate 90 degrees counter-clockwise
        np.save(savePath + '_90' + saveTpye, rot90)
        rot180 = np.rot90(picMat, 2) # rotate 180 degrees counter-clockwise
        np.save(savePath + '_180' + saveTpye, rot180)
        rot270 = np.rot90(picMat, 3) # rotate 270 degrees counter-clockwise
        np.save(savePath + '_270' + saveTpye, rot270)
        ############### flip volume ###############
        lr = np.fliplr(picMat) # flip left-right
        np.save(savePath + '_lr' + saveTpye, lr)
        ud = np.flipud(picMat) # flip up-down
np.save(savePath + '_ud' + saveTpye, ud)
print(savePath+' saved!')
def saveSlice(patientNo, tumourNo, modal, Bbox, tumourInfo, standard, saveNeg=False):
if saveNeg and patientNo in ['00431620', '03930451']:
return
pattern = re.compile(r'[\d]')
tumourLoc = [int(x) for x in tumourInfo['Location'][1:-1].split(',') if pattern.search(x)]
serNo = tumourInfo['serNo']
tumourWHO = tumourInfo['WHO']
tumourEd = int(tumourInfo['Edmondson'])
    # Determine the save directory
saveDir = os.path.join(os.path.join(SAVE_DIR, standard), modal)
if standard == 'Binary':
if saveNeg:
saveDir = os.path.join(saveDir, '0')
else:
saveDir = os.path.join(saveDir, '1')
else:
if standard == 'WHO':
saveDir = os.path.join(saveDir, str(tumourWHO-1))
else:
saveDir = os.path.join(saveDir, str(tumourEd-1))
if not os.path.exists(saveDir):
os.makedirs(saveDir)
up, bottom = serNo-tumourLoc[4], tumourLoc[5]-serNo
up, bottom = int(up*0.75+0.5), int(bottom*0.75+0.5)
    # Save the middle slice
sampleName = patientNo + '_' + str(tumourNo+1) + '_0_' + modal\
+ '_' + str(tumourWHO) + '_' + str(tumourEd)
savePath = os.path.join(saveDir, sampleName)
    saveSliceToFile(Bbox, up, savePath)  # save to file
    sampleSet.add(patientNo + '_' + str(tumourNo+1) + '_0')  # record the sample ID
    # Save the slices above the middle one
for i in range(up):
sampleName = patientNo + '_' + str(tumourNo+1) + '_' + str(-1-i) + '_' + modal\
+ '_' + str(tumourWHO) + '_' + str(tumourEd)
savePath = os.path.join(saveDir, sampleName)
        saveSliceToFile(Bbox, up-i-1, savePath)  # save to file
sampleSet.add(patientNo + '_' + str(tumourNo+1) + '_' + str(-1-i))
    # Save the slices below the middle one
for i in range(bottom):
sampleName = patientNo + '_' + str(tumourNo+1) + '_' +str(i+1) + '_' + modal\
+ '_' + str(tumourWHO) + '_' + str(tumourEd)
savePath = os.path.join(saveDir, sampleName)
        saveSliceToFile(Bbox, up+i+1, savePath)  # save to file
sampleSet.add(patientNo + '_' + str(tumourNo+1) + '_' +str(i+1))
def saveFusion(patientNo, tumourNo, Bboxs, BboxInfo, fusionName, standard, saveTpye, saveNeg=False):
global sampleSet
if saveNeg and patientNo in ['00431620', '03930451']:
return
tumourWHO = BboxInfo[0]['WHO']
tumourEd = int(BboxInfo[0]['Edmondson'])
    # Determine the save directory
saveDir = os.path.join(os.path.join(SAVE_DIR, standard), fusionName)
if standard == 'Binary':
if saveNeg:
saveDir = os.path.join(saveDir, '0')
else:
saveDir = os.path.join(saveDir, '1')
else:
if standard == 'WHO':
saveDir = os.path.join(saveDir, str(tumourWHO-1))
else:
saveDir = os.path.join(saveDir, str(tumourEd-1))
if not os.path.exists(saveDir):
os.makedirs(saveDir)
tumourSize = Bboxs[0].shape
    # Save the fused image
    picMat = np.zeros((tumourSize[0], tumourSize[1], len(BboxInfo)))
    # Compute how many slices can be saved above and below the middle slice
    upSlice, bottomSlice = [], []  # middle-slice index per modality (= number of slices above it), number of slices below it
for info in BboxInfo:
tumourLoc = [int(x) for x in info['Location'][1:-1].split(',')]
serNo, Z1, Z2 = info['serNo'], tumourLoc[4], tumourLoc[5]
upSlice.append(serNo-Z1)
bottomSlice.append(Z2-serNo)
up, bottom = min(upSlice), min(bottomSlice)
print(upSlice, bottomSlice, up, bottom, int(up*0.75+0.5), int(bottom*0.75+0.5))
    # Discard slices that are too close to the tumour edges
up, bottom = int(up*0.75+0.5), int(bottom*0.75+0.5)
    # Save the middle slice
for index, info in enumerate(BboxInfo):
picMat[:, :, index] = Bboxs[index][:, :, upSlice[index]]
sampleName = patientNo + '_' + str(tumourNo+1) + '_0_' + fusionName\
+ '_' + str(tumourWHO) + '_' + str(tumourEd)
savePath = os.path.join(saveDir, sampleName)
saveFusionToFile(picMat, savePath, saveTpye)
sampleSet.add(patientNo + '_' + str(tumourNo+1) + '_0')
    # Save the slices above the middle one
for i in range(up):
for index, info in enumerate(BboxInfo):
picMat[:, :, index] = Bboxs[index][:, :, upSlice[index]-i-1]
sampleName = patientNo + '_' + str(tumourNo+1) + '_' +str(-1-i) + '_' + fusionName\
+ '_' + str(tumourWHO) + '_' + str(tumourEd)
savePath = os.path.join(saveDir, sampleName)
saveFusionToFile(picMat, savePath, saveTpye)
sampleSet.add(patientNo + '_' + str(tumourNo+1) + '_' +str(-1-i))
    # Save the slices below the middle one
for i in range(bottom):
for index, info in enumerate(BboxInfo):
picMat[:, :, index] = Bboxs[index][:, :, upSlice[index]+i+1]
sampleName = patientNo + '_' + str(tumourNo+1) + '_' + str(i+1) + '_' + fusionName\
+ '_' + str(tumourWHO) + '_' + str(tumourEd)
savePath = os.path.join(saveDir, sampleName)
saveFusionToFile(picMat, savePath, saveTpye)
sampleSet.add(patientNo + '_' + str(tumourNo+1) + '_' + str(i+1))
def readModalData(modal = 'A', standard = 'WHO'):
    '''
    Read data for the specified modality.
    '''
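    # Illustrative call (assumed defaults, added for clarity):
    #   readModalData(modal='A', standard='WHO')
    # saves augmented .jpg slices under ../../Data/WHO/A/<WHO grade - 1>/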
if modal == 'K':
labels = readLabel(asIndex = 'modalNo', index = 'B')
else:
labels = readLabel(asIndex = 'modalNo', index = modal)
patientList = os.listdir(MAT_DATA_DIR)
for patientNo in patientList:
        # Read the MRI volume data (512x512xS)
dataDir = os.path.join(MAT_DATA_DIR, patientNo)
dataPath = os.path.join(dataDir, modal+'.mat')
liverVolume = sio.loadmat(dataPath)['D']
        # Read the tumour information
patientInfo = labels.loc['\'' + patientNo + '\'']
for tumourNo in range(len(patientInfo)):
# print(patientNo, tumourNo)
            # Read this tumour's information
tumourInfo = patientInfo.iloc[tumourNo]
            # ROI region
posBbox = readBbox(liverVolume, tumourInfo)
saveSlice(patientNo, tumourNo, modal, posBbox, tumourInfo, standard)
# print(posBbox.shape)
            if standard == 'Binary':
                # background region
negBbox = readBbox(liverVolume, tumourInfo, saveNeg=True)
saveSlice(patientNo, tumourNo, modal, negBbox, tumourInfo, standard, saveNeg=True)
def readPatientData(Fusion = ['A', 'B', 'K'], standard = 'WHO', saveTpye = '.jpg'):
    '''
    Read data from multiple modalities for each patient.
    '''
patientList = os.listdir(MAT_DATA_DIR)
for patientNo in patientList:
        # Read the patient's annotation information
labels = readLabel(asIndex = 'patientNo', index = patientNo)
for tumourNo in range(len(labels)/8):
Info = labels.loc[tumourNo+1]
posBboxs, negBboxs, BboxInfo, fusionName = [], [], [], ''
for modal in Fusion:
                # Read the MRI volume data (512x512xS)
fusionName = fusionName + modal
dataDir = os.path.join(MAT_DATA_DIR, patientNo)
dataPath = os.path.join(dataDir, modal+'.mat')
liverVolume = sio.loadmat(dataPath)['D']
if modal == 'K':
tumourInfo = Info.loc['B']
else:
tumourInfo = Info.loc[modal]
                # Read the Bbox
posBbox = readBbox(liverVolume, tumourInfo)
posBboxs.append(posBbox)
BboxInfo.append(tumourInfo)
if standard == 'Binary':
negBbox = readBbox(liverVolume, tumourInfo, saveNeg=True)
negBboxs.append(negBbox)
saveFusion(patientNo, tumourNo, posBboxs, BboxInfo, fusionName,
standard, saveTpye)
if standard == 'Binary':
saveFusion(patientNo, tumourNo, negBboxs, BboxInfo, fusionName,
standard, saveTpye, saveNeg=True)
def main():
# modalList = ['A', 'B', 'K', 'E', 'F', 'G', 'H', 'I', 'J']
# fusionList = ['ABK', 'EGJ']
    # Read per modality
# for modal in modalList:
# readModalData(modal=modal, standard = 'WHO')
    # # Read per patient
# for fusion in fusionList:
# readPatientData(Fusion = list(fusion), standard = 'WHO')
    # # Read per modality
# for modal in modalList:
# readModalData(modal=modal, standard = 'Edmondson')
    # # Read per patient
# for fusion in fusionList:
# readPatientData(Fusion = list(fusion), standard = 'Edmondson')
    # # Read per modality
# for modal in modalList:
# readModalData(modal=modal, standard = 'Binary')
    # # Read per patient
# for fusion in fusionList:
# readPatientData(Fusion = list(fusion), standard = 'Binary')
########################################################################################
fusionList = ['EFGHIJ', 'ABKEFGHIJ']
global sampleSet
    # Read per patient
for fusion in fusionList:
sampleSet.clear()
readPatientData(Fusion = list(fusion), standard = 'WHO', saveTpye = '.npy')
binPath = os.path.join(os.path.join(os.path.join(SAVE_DIR, 'WHO'), fusion), 'sample.bin')
with open(binPath, 'ab') as fp:
            pickle.dump(sampleSet, fp)  # store the sample set sequentially
        # with open(binPath, 'rb') as fp:
        #     data=pickle.load(fp)  # load the variable back
        #     print(data)
for fusion in fusionList:
sampleSet.clear()
readPatientData(Fusion = list(fusion), standard = 'Edmondson', saveTpye = '.npy')
binPath = os.path.join(os.path.join(os.path.join(SAVE_DIR, 'Edmondson'), fusion), 'sample.bin')
with open(binPath, 'ab') as fp:
            pickle.dump(sampleSet, fp)  # store the sample set sequentially
for fusion in fusionList:
sampleSet.clear()
readPatientData(Fusion = list(fusion), standard = 'Binary', saveTpye = '.npy')
binPath = os.path.join(os.path.join(os.path.join(SAVE_DIR, 'Binary'), fusion), 'sample.bin')
with open(binPath, 'ab') as fp:
            pickle.dump(sampleSet, fp)  # store the sample set sequentially
if __name__ == "__main__":
main()
| 40.065463 | 103 | 0.524199 |
ce0976dc870364860a0bbb8119085c428542536a
| 284 |
py
|
Python
|
setup.py
|
WaylonWalker/unsync
|
04a57892dde57472c45a9ca4092fb57d8a222871
|
[
"MIT"
] | null | null | null |
setup.py
|
WaylonWalker/unsync
|
04a57892dde57472c45a9ca4092fb57d8a222871
|
[
"MIT"
] | null | null | null |
setup.py
|
WaylonWalker/unsync
|
04a57892dde57472c45a9ca4092fb57d8a222871
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='unsync',
version='1.2.1',
packages=['unsync'],
url='https://github.com/alex-sherman/unsync',
license='MIT',
author='Alex-Sherman',
author_email='asherman1024@gmail.com',
description='Unsynchronize asyncio',
)
| 21.846154 | 49 | 0.661972 |
5aa3c08023b3a097a4d7ba965f41077b3395192f
| 701 |
py
|
Python
|
108.py
|
sunilmhta/UVA
|
397e58fb88ba44c542f19d082c448987281e84ee
|
[
"MIT"
] | 2 |
2017-03-06T21:19:57.000Z
|
2017-03-08T10:13:43.000Z
|
108.py
|
sunilmhta/UVA
|
397e58fb88ba44c542f19d082c448987281e84ee
|
[
"MIT"
] | null | null | null |
108.py
|
sunilmhta/UVA
|
397e58fb88ba44c542f19d082c448987281e84ee
|
[
"MIT"
] | null | null | null |
# Author: Sunil Kumar
# Problem : Maximum sum of a subrectangle [in a matrix], ID: 108, Judge: UVA
# Technique used: Kadane's algorithm
# Time Complexity: O(n^3)
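# Illustrative example (not from the original submission): for the classic
# UVA 108 sample, the 4x4 matrix
#    0 -2 -7  0
#    9  2 -6  2
#   -4  1 -4  1
#   -1  8  0 -2
# has maximum subrectangle sum 15, from the 3x2 block [[9,2],[-4,1],[-1,8]].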
ans=-15000000
def kadane(start,end,matrix):
storelist=[sum(matrix[i][start:end+1]) for i in range(0,len(matrix))]
maxendinghere=maxtillnow = storelist[0]
for x in range(1,len(storelist)):
maxendinghere = max(maxendinghere+storelist[x],storelist[x])
maxtillnow = max(maxtillnow,maxendinghere)
global ans
ans=max(ans,maxtillnow)
return
num=int(input())
matrix=[[int(x) for x in input().split()]for i in range(0,num)]
newlist=[[kadane(startcol,endcol,matrix) for endcol in range(0,num)]for startcol in range(0,num)]
print(ans)
| 36.894737 | 97 | 0.727532 |
e8f8d02f03b32ef6d12488e59a5df21636ab899f
| 130,988 |
py
|
Python
|
ai_flow/protobuf/message_pb2.py
|
flink-extended/ai-flow
|
d1427a243097d94d77fedbe1966500ae26975a13
|
[
"Apache-2.0"
] | 79 |
2021-10-15T07:32:27.000Z
|
2022-03-28T04:10:19.000Z
|
ai_flow/protobuf/message_pb2.py
|
flink-extended/ai-flow
|
d1427a243097d94d77fedbe1966500ae26975a13
|
[
"Apache-2.0"
] | 153 |
2021-10-15T05:23:46.000Z
|
2022-02-23T06:07:10.000Z
|
ai_flow/protobuf/message_pb2.py
|
flink-extended/ai-flow
|
d1427a243097d94d77fedbe1966500ae26975a13
|
[
"Apache-2.0"
] | 23 |
2021-10-15T02:36:37.000Z
|
2022-03-17T02:59:27.000Z
|
#
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='message.proto',
package='ai_flow',
syntax='proto3',
serialized_options=b'\n\027org.aiflow.client.protoZ\010/ai_flow\210\001\001\220\001\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\rmessage.proto\x12\x07\x61i_flow\x1a\x1egoogle/protobuf/wrappers.proto\"K\n\x0bSchemaProto\x12\x11\n\tname_list\x18\x01 \x03(\t\x12)\n\ttype_list\x18\x02 \x03(\x0e\x32\x16.ai_flow.DataTypeProto\"\xc6\x05\n\x0c\x44\x61tasetProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x39\n\nproperties\x18\x03 \x03(\x0b\x32%.ai_flow.DatasetProto.PropertiesEntry\x12$\n\x06schema\x18\x04 \x01(\x0b\x32\x14.ai_flow.SchemaProto\x12\x31\n\x0b\x64\x61ta_format\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03uri\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0b\x64\x65scription\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x63reate_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\x0bupdate_time\x18\t \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\x0c\x63\x61talog_name\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x63\x61talog_type\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x36\n\x10\x63\x61talog_database\x18\x0c \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12<\n\x16\x63\x61talog_connection_uri\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rcatalog_table\x18\x0e \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"a\n\x12ModelRelationProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\"\x8b\x01\n\nModelProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\nmodel_desc\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xb2\x01\n\x19ModelVersionRelationProto\x12,\n\x07version\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12-\n\x08model_id\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x13project_snapshot_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\"\xa8\x03\n\x11ModelVersionProto\x12,\n\x07version\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12-\n\x08model_id\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x13project_snapshot_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\nmodel_path\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_type\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cversion_desc\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rcurrent_stage\x18\x08 \x01(\x0e\x32\x1a.ai_flow.ModelVersionStage\x12\x31\n\x0b\x63reate_time\x18\t \x01(\x0b\x32\x1c.google.protobuf.DoubleValue\"\xd6\x04\n\x08JobProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12:\n\x15workflow_execution_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12&\n\tjob_state\x18\x04 \x01(\x0e\x32\x13.ai_flow.StateProto\x12\x35\n\nproperties\x18\x05 \x03(\x0b\x32!.ai_flow.JobProto.PropertiesEntry\x12,\n\x06job_id\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\nstart_time\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08\x65nd_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x07log_uri\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tsignature\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12;\n\x12workflow_execution\x18\x0b \x01(\x0b\x32\x1f.ai_flow.WorkflowExecutionProto\x12\x35\n\x0f\x65xecution_label\x18\x0c 
\x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe2\x01\n\rWorkflowProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x33\n\rworkflow_json\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12:\n\nproperties\x18\x04 \x03(\x0b\x32&.ai_flow.WorkflowProto.PropertiesEntry\x12\x11\n\tnamespace\x18\x05 \x01(\t\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xef\x04\n\x16WorkflowExecutionProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12,\n\x0f\x65xecution_state\x18\x04 \x01(\x0e\x32\x13.ai_flow.StateProto\x12\x43\n\nproperties\x18\x05 \x03(\x0b\x32/.ai_flow.WorkflowExecutionProto.PropertiesEntry\x12/\n\nstart_time\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08\x65nd_time\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x07log_uri\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\rworkflow_json\x18\t \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tsignature\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x14\n\x0c\x65xecution_id\x18\x0b \x01(\t\x12(\n\x08workflow\x18\x0c \x01(\x0b\x32\x16.ai_flow.WorkflowProto\x12-\n\x07\x63ontext\x18\r \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xc3\x01\n\x0cProjectProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x39\n\nproperties\x18\x03 \x03(\x0b\x32%.ai_flow.ProjectProto.PropertiesEntry\x12)\n\x03uri\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x88\x03\n\x11WorkflowMetaProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12/\n\nproject_id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\nproperties\x18\x04 \x03(\x0b\x32*.ai_flow.WorkflowMetaProto.PropertiesEntry\x12\x30\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\x0bupdate_time\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\"\n\x1a\x63ontext_extractor_in_bytes\x18\x07 \x01(\x0c\x12+\n\x05graph\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe5\x01\n\x15WorkflowSnapshotProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x30\n\x0bworkflow_id\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12)\n\x03uri\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\tsignature\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\"\x91\x03\n\rArtifactProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12:\n\nproperties\x18\x03 \x03(\x0b\x32&.ai_flow.ArtifactProto.PropertiesEntry\x12\x33\n\rartifact_type\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12)\n\x03uri\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0b\x64\x65scription\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x30\n\x0bupdate_time\x18\x08 
\x01(\x0b\x32\x1b.google.protobuf.Int64Value\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"z\n\x14RegisteredModelParam\x12\x30\n\nmodel_name\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_desc\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xde\x01\n\x11ModelVersionParam\x12\x30\n\nmodel_path\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_type\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cversion_desc\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\rcurrent_stage\x18\x04 \x01(\x0e\x32\x1a.ai_flow.ModelVersionStage\"v\n\x0eModelMetaParam\x12\x30\n\nmodel_name\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\rmodel_version\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\"A\n\x08Response\x12\x13\n\x0breturn_code\x18\x01 \x01(\t\x12\x12\n\nreturn_msg\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\t\"[\n\x13RegisteredModelMeta\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x30\n\nmodel_desc\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"\xd2\x02\n\x10ModelVersionMeta\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x15\n\rmodel_version\x18\x02 \x01(\x05\x12\x30\n\nmodel_path\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_type\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cversion_desc\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x33\n\x0eversion_status\x18\x07 \x01(\x0e\x32\x1b.ai_flow.ModelVersionStatus\x12\x31\n\rcurrent_stage\x18\x08 \x01(\x0e\x32\x1a.ai_flow.ModelVersionStage\x12\x13\n\x0b\x63reate_time\x18\t \x01(\x03\"\x88\x01\n\x15RegisteredModelDetail\x12\x36\n\x10registered_model\x18\x01 \x01(\x0b\x32\x1c.ai_flow.RegisteredModelMeta\x12\x37\n\x14latest_model_version\x18\x02 \x01(\x0b\x32\x19.ai_flow.ModelVersionMeta\"O\n\x14RegisteredModelMetas\x12\x37\n\x11registered_models\x18\x01 \x03(\x0b\x32\x1c.ai_flow.RegisteredModelMeta\"J\n\x0bResultProto\x12$\n\x06status\x18\x01 \x01(\x0e\x32\x14.ai_flow.StatusProto\x12\x15\n\rerror_message\x18\x02 \x01(\t\"\x98\x05\n\x0fMetricMetaProto\x12\x31\n\x0bmetric_name\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x0bmetric_type\x18\x02 \x01(\x0e\x32\x18.ai_flow.MetricTypeProto\x12\x31\n\x0bmetric_desc\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cproject_name\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0c\x64\x61taset_name\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmodel_name\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08job_name\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\nstart_time\x18\x08 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12-\n\x08\x65nd_time\x18\t \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12)\n\x03uri\x18\n \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12*\n\x04tags\x18\x0b \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12<\n\nproperties\x18\x0c \x03(\x0b\x32(.ai_flow.MetricMetaProto.PropertiesEntry\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xde\x02\n\x12MetricSummaryProto\x12\x0c\n\x04uuid\x18\x01 \x01(\x03\x12\x31\n\x0bmetric_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x30\n\nmetric_key\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0cmetric_value\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x35\n\x10metric_timestamp\x18\x05 
\x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x32\n\rmodel_version\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int32Value\x12\x36\n\x10job_execution_id\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue*\xc0\x03\n\nReturnCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x12\n\x0eINTERNAL_ERROR\x10\x01\x12\x1b\n\x17TEMPORARILY_UNAVAILABLE\x10\x02\x12\x0c\n\x08IO_ERROR\x10\x03\x12\x0f\n\x0b\x42\x41\x44_REQUEST\x10\x04\x12\x1c\n\x17INVALID_PARAMETER_VALUE\x10\xe8\x07\x12\x17\n\x12\x45NDPOINT_NOT_FOUND\x10\xe9\x07\x12\x16\n\x11MALFORMED_REQUEST\x10\xea\x07\x12\x12\n\rINVALID_STATE\x10\xeb\x07\x12\x16\n\x11PERMISSION_DENIED\x10\xec\x07\x12\x15\n\x10\x46\x45\x41TURE_DISABLED\x10\xed\x07\x12\x1a\n\x15\x43USTOMER_UNAUTHORIZED\x10\xee\x07\x12\x1b\n\x16REQUEST_LIMIT_EXCEEDED\x10\xef\x07\x12\x1c\n\x17RESOURCE_ALREADY_EXISTS\x10\xd1\x0f\x12\x1c\n\x17RESOURCE_DOES_NOT_EXIST\x10\xd2\x0f\x12\x13\n\x0eQUOTA_EXCEEDED\x10\xb9\x17\x12\x1c\n\x17MAX_BLOCK_SIZE_EXCEEDED\x10\xba\x17\x12\x1b\n\x16MAX_READ_SIZE_EXCEEDED\x10\xbb\x17* \n\x0bStatusProto\x12\x06\n\x02OK\x10\x00\x12\t\n\x05\x45RROR\x10\x01*\xd6\x01\n\rDataTypeProto\x12\x19\n\x15\x44\x41TA_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05INT32\x10\x01\x12\t\n\x05INT64\x10\x02\x12\x0b\n\x07\x46LOAT32\x10\x03\x12\x0b\n\x07\x46LOAT64\x10\x04\x12\n\n\x06STRING\x10\x05\x12\x0e\n\nINT32ARRAY\x10\x06\x12\x0e\n\nINT64ARRAY\x10\x07\x12\x10\n\x0c\x46lOAT32ARRAY\x10\x08\x12\x10\n\x0c\x46LOAT64ARRAY\x10\t\x12\x0f\n\x0bSTRINGARRAY\x10\n\x12\t\n\x05\x42YTES\x10\x0b\x12\x0e\n\nBYTESARRAY\x10\x0c*{\n\nStateProto\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x08\n\x04INIT\x10\x01\x12\x0c\n\x08STARTING\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\x0c\n\x08\x46INISHED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\x0b\n\x07KILLING\x10\x06\x12\n\n\x06KILLED\x10\x07*F\n\rExecutionMode\x12\x1e\n\x1a\x45XECUTION_MODE_UNSPECIFIED\x10\x00\x12\t\n\x05\x42\x41TCH\x10\x01\x12\n\n\x06STREAM\x10\x02*}\n\x12ModelVersionStatus\x12\x18\n\x14PENDING_REGISTRATION\x10\x00\x12\x17\n\x13\x46\x41ILED_REGISTRATION\x10\x01\x12\t\n\x05READY\x10\x03\x12\x14\n\x10PENDING_DELETION\x10\x04\x12\x13\n\x0f\x46\x41ILED_DELETION\x10\x05*\\\n\x11ModelVersionStage\x12\r\n\tGENERATED\x10\x00\x12\r\n\tVALIDATED\x10\x01\x12\x0c\n\x08\x44\x45PLOYED\x10\x02\x12\x0e\n\nDEPRECATED\x10\x03\x12\x0b\n\x07\x44\x45LETED\x10\x04*)\n\x0fMetricTypeProto\x12\x0b\n\x07\x44\x41TASET\x10\x00\x12\t\n\x05MODEL\x10\x01\x42)\n\x17org.aiflow.client.protoZ\x08/ai_flow\x88\x01\x01\x90\x01\x01\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
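
# Enum descriptors and their module-level EnumTypeWrapper objects follow.
# The wrappers let callers refer to values either by attribute access
# (e.g. ReturnCode.SUCCESS) or via the standard Name()/Value() helpers.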
_RETURNCODE = _descriptor.EnumDescriptor(
name='ReturnCode',
full_name='ai_flow.ReturnCode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INTERNAL_ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TEMPORARILY_UNAVAILABLE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IO_ERROR', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BAD_REQUEST', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_PARAMETER_VALUE', index=5, number=1000,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENDPOINT_NOT_FOUND', index=6, number=1001,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MALFORMED_REQUEST', index=7, number=1002,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INVALID_STATE', index=8, number=1003,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PERMISSION_DENIED', index=9, number=1004,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FEATURE_DISABLED', index=10, number=1005,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_UNAUTHORIZED', index=11, number=1006,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REQUEST_LIMIT_EXCEEDED', index=12, number=1007,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOURCE_ALREADY_EXISTS', index=13, number=2001,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RESOURCE_DOES_NOT_EXIST', index=14, number=2002,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='QUOTA_EXCEEDED', index=15, number=3001,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MAX_BLOCK_SIZE_EXCEEDED', index=16, number=3002,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MAX_READ_SIZE_EXCEEDED', index=17, number=3003,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=6669,
serialized_end=7117,
)
_sym_db.RegisterEnumDescriptor(_RETURNCODE)
ReturnCode = enum_type_wrapper.EnumTypeWrapper(_RETURNCODE)
_STATUSPROTO = _descriptor.EnumDescriptor(
name='StatusProto',
full_name='ai_flow.StatusProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ERROR', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7119,
serialized_end=7151,
)
_sym_db.RegisterEnumDescriptor(_STATUSPROTO)
StatusProto = enum_type_wrapper.EnumTypeWrapper(_STATUSPROTO)
_DATATYPEPROTO = _descriptor.EnumDescriptor(
name='DataTypeProto',
full_name='ai_flow.DataTypeProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='DATA_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT32', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT64', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLOAT32', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLOAT64', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STRING', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT32ARRAY', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INT64ARRAY', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FlOAT32ARRAY', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FLOAT64ARRAY', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STRINGARRAY', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BYTES', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BYTESARRAY', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7154,
serialized_end=7368,
)
_sym_db.RegisterEnumDescriptor(_DATATYPEPROTO)
DataTypeProto = enum_type_wrapper.EnumTypeWrapper(_DATATYPEPROTO)
_STATEPROTO = _descriptor.EnumDescriptor(
name='StateProto',
full_name='ai_flow.StateProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='INIT', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STARTING', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FINISHED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='KILLING', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='KILLED', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7370,
serialized_end=7493,
)
_sym_db.RegisterEnumDescriptor(_STATEPROTO)
StateProto = enum_type_wrapper.EnumTypeWrapper(_STATEPROTO)
_EXECUTIONMODE = _descriptor.EnumDescriptor(
name='ExecutionMode',
full_name='ai_flow.ExecutionMode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='EXECUTION_MODE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BATCH', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='STREAM', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7495,
serialized_end=7565,
)
_sym_db.RegisterEnumDescriptor(_EXECUTIONMODE)
ExecutionMode = enum_type_wrapper.EnumTypeWrapper(_EXECUTIONMODE)
_MODELVERSIONSTATUS = _descriptor.EnumDescriptor(
name='ModelVersionStatus',
full_name='ai_flow.ModelVersionStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='PENDING_REGISTRATION', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED_REGISTRATION', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='READY', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING_DELETION', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='FAILED_DELETION', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7567,
serialized_end=7692,
)
_sym_db.RegisterEnumDescriptor(_MODELVERSIONSTATUS)
ModelVersionStatus = enum_type_wrapper.EnumTypeWrapper(_MODELVERSIONSTATUS)
_MODELVERSIONSTAGE = _descriptor.EnumDescriptor(
name='ModelVersionStage',
full_name='ai_flow.ModelVersionStage',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='GENERATED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='VALIDATED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DEPLOYED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DEPRECATED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DELETED', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7694,
serialized_end=7786,
)
_sym_db.RegisterEnumDescriptor(_MODELVERSIONSTAGE)
ModelVersionStage = enum_type_wrapper.EnumTypeWrapper(_MODELVERSIONSTAGE)
_METRICTYPEPROTO = _descriptor.EnumDescriptor(
name='MetricTypeProto',
full_name='ai_flow.MetricTypeProto',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='DATASET', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MODEL', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=7788,
serialized_end=7829,
)
_sym_db.RegisterEnumDescriptor(_METRICTYPEPROTO)
MetricTypeProto = enum_type_wrapper.EnumTypeWrapper(_METRICTYPEPROTO)
SUCCESS = 0
INTERNAL_ERROR = 1
TEMPORARILY_UNAVAILABLE = 2
IO_ERROR = 3
BAD_REQUEST = 4
INVALID_PARAMETER_VALUE = 1000
ENDPOINT_NOT_FOUND = 1001
MALFORMED_REQUEST = 1002
INVALID_STATE = 1003
PERMISSION_DENIED = 1004
FEATURE_DISABLED = 1005
CUSTOMER_UNAUTHORIZED = 1006
REQUEST_LIMIT_EXCEEDED = 1007
RESOURCE_ALREADY_EXISTS = 2001
RESOURCE_DOES_NOT_EXIST = 2002
QUOTA_EXCEEDED = 3001
MAX_BLOCK_SIZE_EXCEEDED = 3002
MAX_READ_SIZE_EXCEEDED = 3003
OK = 0
ERROR = 1
DATA_TYPE_UNSPECIFIED = 0
INT32 = 1
INT64 = 2
FLOAT32 = 3
FLOAT64 = 4
STRING = 5
INT32ARRAY = 6
INT64ARRAY = 7
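# The lowercase "l" in FlOAT32ARRAY below reproduces the spelling carried in the
# serialized descriptor above; renaming it here would desynchronize the Python
# constants from the embedded proto definition.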
FlOAT32ARRAY = 8
FLOAT64ARRAY = 9
STRINGARRAY = 10
BYTES = 11
BYTESARRAY = 12
STATE_UNSPECIFIED = 0
INIT = 1
STARTING = 2
RUNNING = 3
FINISHED = 4
FAILED = 5
KILLING = 6
KILLED = 7
EXECUTION_MODE_UNSPECIFIED = 0
BATCH = 1
STREAM = 2
PENDING_REGISTRATION = 0
FAILED_REGISTRATION = 1
READY = 3
PENDING_DELETION = 4
FAILED_DELETION = 5
GENERATED = 0
VALIDATED = 1
DEPLOYED = 2
DEPRECATED = 3
DELETED = 4
DATASET = 0
MODEL = 1
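
# The assignments above re-export every enum value as a module-level constant,
# mirroring the proto3 top-level enum declarations. A minimal, illustrative
# usage sketch of the generated enum wrappers; it only runs when this module is
# executed directly and relies solely on the standard EnumTypeWrapper helpers:
if __name__ == '__main__':
    # Translate between symbolic names and numeric values.
    assert StateProto.Name(RUNNING) == 'RUNNING'
    assert ReturnCode.Value('RESOURCE_ALREADY_EXISTS') == RESOURCE_ALREADY_EXISTS
    # Enumerate the model version lifecycle stages as (name, number) pairs.
    for stage_name, stage_number in ModelVersionStage.items():
        print(stage_name, stage_number)

# Message descriptors for the message types declared in the proto file follow.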
_SCHEMAPROTO = _descriptor.Descriptor(
name='SchemaProto',
full_name='ai_flow.SchemaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name_list', full_name='ai_flow.SchemaProto.name_list', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type_list', full_name='ai_flow.SchemaProto.type_list', index=1,
number=2, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=133,
)
_DATASETPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.DatasetProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.DatasetProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.DatasetProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_DATASETPROTO = _descriptor.Descriptor(
name='DatasetProto',
full_name='ai_flow.DatasetProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.DatasetProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.DatasetProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.DatasetProto.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schema', full_name='ai_flow.DatasetProto.schema', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_format', full_name='ai_flow.DatasetProto.data_format', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.DatasetProto.uri', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='ai_flow.DatasetProto.description', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.DatasetProto.create_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_time', full_name='ai_flow.DatasetProto.update_time', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_name', full_name='ai_flow.DatasetProto.catalog_name', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_type', full_name='ai_flow.DatasetProto.catalog_type', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_database', full_name='ai_flow.DatasetProto.catalog_database', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_connection_uri', full_name='ai_flow.DatasetProto.catalog_connection_uri', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='catalog_table', full_name='ai_flow.DatasetProto.catalog_table', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_DATASETPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=846,
)
_MODELRELATIONPROTO = _descriptor.Descriptor(
name='ModelRelationProto',
full_name='ai_flow.ModelRelationProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ModelRelationProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ModelRelationProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.ModelRelationProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=848,
serialized_end=945,
)
_MODELPROTO = _descriptor.Descriptor(
name='ModelProto',
full_name='ai_flow.ModelProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ModelProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ModelProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.ModelProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_desc', full_name='ai_flow.ModelProto.model_desc', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=948,
serialized_end=1087,
)
_MODELVERSIONRELATIONPROTO = _descriptor.Descriptor(
name='ModelVersionRelationProto',
full_name='ai_flow.ModelVersionRelationProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='ai_flow.ModelVersionRelationProto.version', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_id', full_name='ai_flow.ModelVersionRelationProto.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_snapshot_id', full_name='ai_flow.ModelVersionRelationProto.project_snapshot_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1090,
serialized_end=1268,
)
_MODELVERSIONPROTO = _descriptor.Descriptor(
name='ModelVersionProto',
full_name='ai_flow.ModelVersionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='ai_flow.ModelVersionProto.version', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_id', full_name='ai_flow.ModelVersionProto.model_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_snapshot_id', full_name='ai_flow.ModelVersionProto.project_snapshot_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_path', full_name='ai_flow.ModelVersionProto.model_path', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_type', full_name='ai_flow.ModelVersionProto.model_type', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_desc', full_name='ai_flow.ModelVersionProto.version_desc', index=5,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='current_stage', full_name='ai_flow.ModelVersionProto.current_stage', index=6,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.ModelVersionProto.create_time', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1271,
serialized_end=1695,
)
_JOBPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.JobProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.JobProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.JobProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_JOBPROTO = _descriptor.Descriptor(
name='JobProto',
full_name='ai_flow.JobProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.JobProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.JobProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_execution_id', full_name='ai_flow.JobProto.workflow_execution_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_state', full_name='ai_flow.JobProto.job_state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.JobProto.properties', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_id', full_name='ai_flow.JobProto.job_id', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='ai_flow.JobProto.start_time', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='ai_flow.JobProto.end_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_uri', full_name='ai_flow.JobProto.log_uri', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='ai_flow.JobProto.signature', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_execution', full_name='ai_flow.JobProto.workflow_execution', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='execution_label', full_name='ai_flow.JobProto.execution_label', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_JOBPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1698,
serialized_end=2296,
)
_WORKFLOWPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.WorkflowProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.WorkflowProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.WorkflowProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_WORKFLOWPROTO = _descriptor.Descriptor(
name='WorkflowProto',
full_name='ai_flow.WorkflowProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.WorkflowProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_json', full_name='ai_flow.WorkflowProto.workflow_json', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.WorkflowProto.properties', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='ai_flow.WorkflowProto.namespace', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WORKFLOWPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2299,
serialized_end=2525,
)
_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.WorkflowExecutionProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.WorkflowExecutionProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.WorkflowExecutionProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_WORKFLOWEXECUTIONPROTO = _descriptor.Descriptor(
name='WorkflowExecutionProto',
full_name='ai_flow.WorkflowExecutionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowExecutionProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.WorkflowExecutionProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.WorkflowExecutionProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='execution_state', full_name='ai_flow.WorkflowExecutionProto.execution_state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.WorkflowExecutionProto.properties', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='ai_flow.WorkflowExecutionProto.start_time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='ai_flow.WorkflowExecutionProto.end_time', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='log_uri', full_name='ai_flow.WorkflowExecutionProto.log_uri', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_json', full_name='ai_flow.WorkflowExecutionProto.workflow_json', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='ai_flow.WorkflowExecutionProto.signature', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='execution_id', full_name='ai_flow.WorkflowExecutionProto.execution_id', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow', full_name='ai_flow.WorkflowExecutionProto.workflow', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context', full_name='ai_flow.WorkflowExecutionProto.context', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2528,
serialized_end=3151,
)
_PROJECTPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.ProjectProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.ProjectProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.ProjectProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_PROJECTPROTO = _descriptor.Descriptor(
name='ProjectProto',
full_name='ai_flow.ProjectProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ProjectProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ProjectProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.ProjectProto.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.ProjectProto.uri', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_PROJECTPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3154,
serialized_end=3349,
)
_WORKFLOWMETAPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.WorkflowMetaProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.WorkflowMetaProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.WorkflowMetaProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_WORKFLOWMETAPROTO = _descriptor.Descriptor(
name='WorkflowMetaProto',
full_name='ai_flow.WorkflowMetaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowMetaProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.WorkflowMetaProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_id', full_name='ai_flow.WorkflowMetaProto.project_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.WorkflowMetaProto.properties', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.WorkflowMetaProto.create_time', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_time', full_name='ai_flow.WorkflowMetaProto.update_time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context_extractor_in_bytes', full_name='ai_flow.WorkflowMetaProto.context_extractor_in_bytes', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='graph', full_name='ai_flow.WorkflowMetaProto.graph', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_WORKFLOWMETAPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3352,
serialized_end=3744,
)
_WORKFLOWSNAPSHOTPROTO = _descriptor.Descriptor(
name='WorkflowSnapshotProto',
full_name='ai_flow.WorkflowSnapshotProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.WorkflowSnapshotProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow_id', full_name='ai_flow.WorkflowSnapshotProto.workflow_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.WorkflowSnapshotProto.uri', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='ai_flow.WorkflowSnapshotProto.signature', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.WorkflowSnapshotProto.create_time', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3747,
serialized_end=3976,
)
_ARTIFACTPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.ArtifactProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.ArtifactProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.ArtifactProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_ARTIFACTPROTO = _descriptor.Descriptor(
name='ArtifactProto',
full_name='ai_flow.ArtifactProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.ArtifactProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='ai_flow.ArtifactProto.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.ArtifactProto.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='artifact_type', full_name='ai_flow.ArtifactProto.artifact_type', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.ArtifactProto.uri', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='ai_flow.ArtifactProto.description', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.ArtifactProto.create_time', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='update_time', full_name='ai_flow.ArtifactProto.update_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_ARTIFACTPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3979,
serialized_end=4380,
)
_REGISTEREDMODELPARAM = _descriptor.Descriptor(
name='RegisteredModelParam',
full_name='ai_flow.RegisteredModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.RegisteredModelParam.model_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_desc', full_name='ai_flow.RegisteredModelParam.model_desc', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4382,
serialized_end=4504,
)
_MODELVERSIONPARAM = _descriptor.Descriptor(
name='ModelVersionParam',
full_name='ai_flow.ModelVersionParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_path', full_name='ai_flow.ModelVersionParam.model_path', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_type', full_name='ai_flow.ModelVersionParam.model_type', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_desc', full_name='ai_flow.ModelVersionParam.version_desc', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='current_stage', full_name='ai_flow.ModelVersionParam.current_stage', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4507,
serialized_end=4729,
)
_MODELMETAPARAM = _descriptor.Descriptor(
name='ModelMetaParam',
full_name='ai_flow.ModelMetaParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.ModelMetaParam.model_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_version', full_name='ai_flow.ModelMetaParam.model_version', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4731,
serialized_end=4849,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='ai_flow.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='return_code', full_name='ai_flow.Response.return_code', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='return_msg', full_name='ai_flow.Response.return_msg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='ai_flow.Response.data', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4851,
serialized_end=4916,
)
_REGISTEREDMODELMETA = _descriptor.Descriptor(
name='RegisteredModelMeta',
full_name='ai_flow.RegisteredModelMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.RegisteredModelMeta.model_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_desc', full_name='ai_flow.RegisteredModelMeta.model_desc', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4918,
serialized_end=5009,
)
_MODELVERSIONMETA = _descriptor.Descriptor(
name='ModelVersionMeta',
full_name='ai_flow.ModelVersionMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.ModelVersionMeta.model_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_version', full_name='ai_flow.ModelVersionMeta.model_version', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_path', full_name='ai_flow.ModelVersionMeta.model_path', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_type', full_name='ai_flow.ModelVersionMeta.model_type', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_desc', full_name='ai_flow.ModelVersionMeta.version_desc', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_status', full_name='ai_flow.ModelVersionMeta.version_status', index=5,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='current_stage', full_name='ai_flow.ModelVersionMeta.current_stage', index=6,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='create_time', full_name='ai_flow.ModelVersionMeta.create_time', index=7,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5012,
serialized_end=5350,
)
_REGISTEREDMODELDETAIL = _descriptor.Descriptor(
name='RegisteredModelDetail',
full_name='ai_flow.RegisteredModelDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='registered_model', full_name='ai_flow.RegisteredModelDetail.registered_model', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='latest_model_version', full_name='ai_flow.RegisteredModelDetail.latest_model_version', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5353,
serialized_end=5489,
)
_REGISTEREDMODELMETAS = _descriptor.Descriptor(
name='RegisteredModelMetas',
full_name='ai_flow.RegisteredModelMetas',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='registered_models', full_name='ai_flow.RegisteredModelMetas.registered_models', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5491,
serialized_end=5570,
)
_RESULTPROTO = _descriptor.Descriptor(
name='ResultProto',
full_name='ai_flow.ResultProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ai_flow.ResultProto.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error_message', full_name='ai_flow.ResultProto.error_message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5572,
serialized_end=5646,
)
_METRICMETAPROTO_PROPERTIESENTRY = _descriptor.Descriptor(
name='PropertiesEntry',
full_name='ai_flow.MetricMetaProto.PropertiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ai_flow.MetricMetaProto.PropertiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='ai_flow.MetricMetaProto.PropertiesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=846,
)
_METRICMETAPROTO = _descriptor.Descriptor(
name='MetricMetaProto',
full_name='ai_flow.MetricMetaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='metric_name', full_name='ai_flow.MetricMetaProto.metric_name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_type', full_name='ai_flow.MetricMetaProto.metric_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_desc', full_name='ai_flow.MetricMetaProto.metric_desc', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='project_name', full_name='ai_flow.MetricMetaProto.project_name', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dataset_name', full_name='ai_flow.MetricMetaProto.dataset_name', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_name', full_name='ai_flow.MetricMetaProto.model_name', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_name', full_name='ai_flow.MetricMetaProto.job_name', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_time', full_name='ai_flow.MetricMetaProto.start_time', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='end_time', full_name='ai_flow.MetricMetaProto.end_time', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='ai_flow.MetricMetaProto.uri', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='ai_flow.MetricMetaProto.tags', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='ai_flow.MetricMetaProto.properties', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_METRICMETAPROTO_PROPERTIESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5649,
serialized_end=6313,
)
_METRICSUMMARYPROTO = _descriptor.Descriptor(
name='MetricSummaryProto',
full_name='ai_flow.MetricSummaryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='ai_flow.MetricSummaryProto.uuid', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_name', full_name='ai_flow.MetricSummaryProto.metric_name', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_key', full_name='ai_flow.MetricSummaryProto.metric_key', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_value', full_name='ai_flow.MetricSummaryProto.metric_value', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metric_timestamp', full_name='ai_flow.MetricSummaryProto.metric_timestamp', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='model_version', full_name='ai_flow.MetricSummaryProto.model_version', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='job_execution_id', full_name='ai_flow.MetricSummaryProto.job_execution_id', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6316,
serialized_end=6666,
)
_SCHEMAPROTO.fields_by_name['type_list'].enum_type = _DATATYPEPROTO
_DATASETPROTO_PROPERTIESENTRY.containing_type = _DATASETPROTO
_DATASETPROTO.fields_by_name['properties'].message_type = _DATASETPROTO_PROPERTIESENTRY
_DATASETPROTO.fields_by_name['schema'].message_type = _SCHEMAPROTO
_DATASETPROTO.fields_by_name['data_format'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['description'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_DATASETPROTO.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_DATASETPROTO.fields_by_name['catalog_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_database'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_connection_uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DATASETPROTO.fields_by_name['catalog_table'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELRELATIONPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELPROTO.fields_by_name['model_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONRELATIONPROTO.fields_by_name['version'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_MODELVERSIONRELATIONPROTO.fields_by_name['model_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONRELATIONPROTO.fields_by_name['project_snapshot_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONPROTO.fields_by_name['version'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_MODELVERSIONPROTO.fields_by_name['model_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONPROTO.fields_by_name['project_snapshot_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_MODELVERSIONPROTO.fields_by_name['model_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['model_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['version_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPROTO.fields_by_name['current_stage'].enum_type = _MODELVERSIONSTAGE
_MODELVERSIONPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._DOUBLEVALUE
_JOBPROTO_PROPERTIESENTRY.containing_type = _JOBPROTO
_JOBPROTO.fields_by_name['workflow_execution_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_JOBPROTO.fields_by_name['job_state'].enum_type = _STATEPROTO
_JOBPROTO.fields_by_name['properties'].message_type = _JOBPROTO_PROPERTIESENTRY
_JOBPROTO.fields_by_name['job_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_JOBPROTO.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_JOBPROTO.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_JOBPROTO.fields_by_name['log_uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_JOBPROTO.fields_by_name['signature'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_JOBPROTO.fields_by_name['workflow_execution'].message_type = _WORKFLOWEXECUTIONPROTO
_JOBPROTO.fields_by_name['execution_label'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWPROTO_PROPERTIESENTRY.containing_type = _WORKFLOWPROTO
_WORKFLOWPROTO.fields_by_name['workflow_json'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWPROTO.fields_by_name['properties'].message_type = _WORKFLOWPROTO_PROPERTIESENTRY
_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY.containing_type = _WORKFLOWEXECUTIONPROTO
_WORKFLOWEXECUTIONPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['execution_state'].enum_type = _STATEPROTO
_WORKFLOWEXECUTIONPROTO.fields_by_name['properties'].message_type = _WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY
_WORKFLOWEXECUTIONPROTO.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['log_uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['workflow_json'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['signature'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWEXECUTIONPROTO.fields_by_name['workflow'].message_type = _WORKFLOWPROTO
_WORKFLOWEXECUTIONPROTO.fields_by_name['context'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_PROJECTPROTO_PROPERTIESENTRY.containing_type = _PROJECTPROTO
_PROJECTPROTO.fields_by_name['properties'].message_type = _PROJECTPROTO_PROPERTIESENTRY
_PROJECTPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWMETAPROTO_PROPERTIESENTRY.containing_type = _WORKFLOWMETAPROTO
_WORKFLOWMETAPROTO.fields_by_name['project_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWMETAPROTO.fields_by_name['properties'].message_type = _WORKFLOWMETAPROTO_PROPERTIESENTRY
_WORKFLOWMETAPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWMETAPROTO.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWMETAPROTO.fields_by_name['graph'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWSNAPSHOTPROTO.fields_by_name['workflow_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_WORKFLOWSNAPSHOTPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWSNAPSHOTPROTO.fields_by_name['signature'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_WORKFLOWSNAPSHOTPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ARTIFACTPROTO_PROPERTIESENTRY.containing_type = _ARTIFACTPROTO
_ARTIFACTPROTO.fields_by_name['properties'].message_type = _ARTIFACTPROTO_PROPERTIESENTRY
_ARTIFACTPROTO.fields_by_name['artifact_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ARTIFACTPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ARTIFACTPROTO.fields_by_name['description'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ARTIFACTPROTO.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_ARTIFACTPROTO.fields_by_name['update_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_REGISTEREDMODELPARAM.fields_by_name['model_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_REGISTEREDMODELPARAM.fields_by_name['model_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['model_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['model_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['version_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONPARAM.fields_by_name['current_stage'].enum_type = _MODELVERSIONSTAGE
_MODELMETAPARAM.fields_by_name['model_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELMETAPARAM.fields_by_name['model_version'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_REGISTEREDMODELMETA.fields_by_name['model_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['model_path'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['model_type'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['version_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_MODELVERSIONMETA.fields_by_name['version_status'].enum_type = _MODELVERSIONSTATUS
_MODELVERSIONMETA.fields_by_name['current_stage'].enum_type = _MODELVERSIONSTAGE
_REGISTEREDMODELDETAIL.fields_by_name['registered_model'].message_type = _REGISTEREDMODELMETA
_REGISTEREDMODELDETAIL.fields_by_name['latest_model_version'].message_type = _MODELVERSIONMETA
_REGISTEREDMODELMETAS.fields_by_name['registered_models'].message_type = _REGISTEREDMODELMETA
_RESULTPROTO.fields_by_name['status'].enum_type = _STATUSPROTO
_METRICMETAPROTO_PROPERTIESENTRY.containing_type = _METRICMETAPROTO
_METRICMETAPROTO.fields_by_name['metric_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['metric_type'].enum_type = _METRICTYPEPROTO
_METRICMETAPROTO.fields_by_name['metric_desc'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['project_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['dataset_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['model_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['job_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['start_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_METRICMETAPROTO.fields_by_name['end_time'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_METRICMETAPROTO.fields_by_name['uri'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICMETAPROTO.fields_by_name['properties'].message_type = _METRICMETAPROTO_PROPERTIESENTRY
_METRICSUMMARYPROTO.fields_by_name['metric_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['metric_key'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['metric_value'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_METRICSUMMARYPROTO.fields_by_name['metric_timestamp'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_METRICSUMMARYPROTO.fields_by_name['model_version'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE
_METRICSUMMARYPROTO.fields_by_name['job_execution_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['SchemaProto'] = _SCHEMAPROTO
DESCRIPTOR.message_types_by_name['DatasetProto'] = _DATASETPROTO
DESCRIPTOR.message_types_by_name['ModelRelationProto'] = _MODELRELATIONPROTO
DESCRIPTOR.message_types_by_name['ModelProto'] = _MODELPROTO
DESCRIPTOR.message_types_by_name['ModelVersionRelationProto'] = _MODELVERSIONRELATIONPROTO
DESCRIPTOR.message_types_by_name['ModelVersionProto'] = _MODELVERSIONPROTO
DESCRIPTOR.message_types_by_name['JobProto'] = _JOBPROTO
DESCRIPTOR.message_types_by_name['WorkflowProto'] = _WORKFLOWPROTO
DESCRIPTOR.message_types_by_name['WorkflowExecutionProto'] = _WORKFLOWEXECUTIONPROTO
DESCRIPTOR.message_types_by_name['ProjectProto'] = _PROJECTPROTO
DESCRIPTOR.message_types_by_name['WorkflowMetaProto'] = _WORKFLOWMETAPROTO
DESCRIPTOR.message_types_by_name['WorkflowSnapshotProto'] = _WORKFLOWSNAPSHOTPROTO
DESCRIPTOR.message_types_by_name['ArtifactProto'] = _ARTIFACTPROTO
DESCRIPTOR.message_types_by_name['RegisteredModelParam'] = _REGISTEREDMODELPARAM
DESCRIPTOR.message_types_by_name['ModelVersionParam'] = _MODELVERSIONPARAM
DESCRIPTOR.message_types_by_name['ModelMetaParam'] = _MODELMETAPARAM
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
DESCRIPTOR.message_types_by_name['RegisteredModelMeta'] = _REGISTEREDMODELMETA
DESCRIPTOR.message_types_by_name['ModelVersionMeta'] = _MODELVERSIONMETA
DESCRIPTOR.message_types_by_name['RegisteredModelDetail'] = _REGISTEREDMODELDETAIL
DESCRIPTOR.message_types_by_name['RegisteredModelMetas'] = _REGISTEREDMODELMETAS
DESCRIPTOR.message_types_by_name['ResultProto'] = _RESULTPROTO
DESCRIPTOR.message_types_by_name['MetricMetaProto'] = _METRICMETAPROTO
DESCRIPTOR.message_types_by_name['MetricSummaryProto'] = _METRICSUMMARYPROTO
DESCRIPTOR.enum_types_by_name['ReturnCode'] = _RETURNCODE
DESCRIPTOR.enum_types_by_name['StatusProto'] = _STATUSPROTO
DESCRIPTOR.enum_types_by_name['DataTypeProto'] = _DATATYPEPROTO
DESCRIPTOR.enum_types_by_name['StateProto'] = _STATEPROTO
DESCRIPTOR.enum_types_by_name['ExecutionMode'] = _EXECUTIONMODE
DESCRIPTOR.enum_types_by_name['ModelVersionStatus'] = _MODELVERSIONSTATUS
DESCRIPTOR.enum_types_by_name['ModelVersionStage'] = _MODELVERSIONSTAGE
DESCRIPTOR.enum_types_by_name['MetricTypeProto'] = _METRICTYPEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SchemaProto = _reflection.GeneratedProtocolMessageType('SchemaProto', (_message.Message,), {
'DESCRIPTOR' : _SCHEMAPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.SchemaProto)
})
_sym_db.RegisterMessage(SchemaProto)
DatasetProto = _reflection.GeneratedProtocolMessageType('DatasetProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _DATASETPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.DatasetProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _DATASETPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.DatasetProto)
})
_sym_db.RegisterMessage(DatasetProto)
_sym_db.RegisterMessage(DatasetProto.PropertiesEntry)
ModelRelationProto = _reflection.GeneratedProtocolMessageType('ModelRelationProto', (_message.Message,), {
'DESCRIPTOR' : _MODELRELATIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelRelationProto)
})
_sym_db.RegisterMessage(ModelRelationProto)
ModelProto = _reflection.GeneratedProtocolMessageType('ModelProto', (_message.Message,), {
'DESCRIPTOR' : _MODELPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelProto)
})
_sym_db.RegisterMessage(ModelProto)
ModelVersionRelationProto = _reflection.GeneratedProtocolMessageType('ModelVersionRelationProto', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONRELATIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionRelationProto)
})
_sym_db.RegisterMessage(ModelVersionRelationProto)
ModelVersionProto = _reflection.GeneratedProtocolMessageType('ModelVersionProto', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionProto)
})
_sym_db.RegisterMessage(ModelVersionProto)
JobProto = _reflection.GeneratedProtocolMessageType('JobProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _JOBPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.JobProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _JOBPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.JobProto)
})
_sym_db.RegisterMessage(JobProto)
_sym_db.RegisterMessage(JobProto.PropertiesEntry)
WorkflowProto = _reflection.GeneratedProtocolMessageType('WorkflowProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _WORKFLOWPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowProto)
})
_sym_db.RegisterMessage(WorkflowProto)
_sym_db.RegisterMessage(WorkflowProto.PropertiesEntry)
WorkflowExecutionProto = _reflection.GeneratedProtocolMessageType('WorkflowExecutionProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowExecutionProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _WORKFLOWEXECUTIONPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowExecutionProto)
})
_sym_db.RegisterMessage(WorkflowExecutionProto)
_sym_db.RegisterMessage(WorkflowExecutionProto.PropertiesEntry)
ProjectProto = _reflection.GeneratedProtocolMessageType('ProjectProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _PROJECTPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ProjectProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _PROJECTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ProjectProto)
})
_sym_db.RegisterMessage(ProjectProto)
_sym_db.RegisterMessage(ProjectProto.PropertiesEntry)
WorkflowMetaProto = _reflection.GeneratedProtocolMessageType('WorkflowMetaProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWMETAPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowMetaProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _WORKFLOWMETAPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowMetaProto)
})
_sym_db.RegisterMessage(WorkflowMetaProto)
_sym_db.RegisterMessage(WorkflowMetaProto.PropertiesEntry)
WorkflowSnapshotProto = _reflection.GeneratedProtocolMessageType('WorkflowSnapshotProto', (_message.Message,), {
'DESCRIPTOR' : _WORKFLOWSNAPSHOTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.WorkflowSnapshotProto)
})
_sym_db.RegisterMessage(WorkflowSnapshotProto)
ArtifactProto = _reflection.GeneratedProtocolMessageType('ArtifactProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ArtifactProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _ARTIFACTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ArtifactProto)
})
_sym_db.RegisterMessage(ArtifactProto)
_sym_db.RegisterMessage(ArtifactProto.PropertiesEntry)
RegisteredModelParam = _reflection.GeneratedProtocolMessageType('RegisteredModelParam', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELPARAM,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelParam)
})
_sym_db.RegisterMessage(RegisteredModelParam)
ModelVersionParam = _reflection.GeneratedProtocolMessageType('ModelVersionParam', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONPARAM,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionParam)
})
_sym_db.RegisterMessage(ModelVersionParam)
ModelMetaParam = _reflection.GeneratedProtocolMessageType('ModelMetaParam', (_message.Message,), {
'DESCRIPTOR' : _MODELMETAPARAM,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelMetaParam)
})
_sym_db.RegisterMessage(ModelMetaParam)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _RESPONSE,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.Response)
})
_sym_db.RegisterMessage(Response)
RegisteredModelMeta = _reflection.GeneratedProtocolMessageType('RegisteredModelMeta', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELMETA,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelMeta)
})
_sym_db.RegisterMessage(RegisteredModelMeta)
ModelVersionMeta = _reflection.GeneratedProtocolMessageType('ModelVersionMeta', (_message.Message,), {
'DESCRIPTOR' : _MODELVERSIONMETA,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ModelVersionMeta)
})
_sym_db.RegisterMessage(ModelVersionMeta)
RegisteredModelDetail = _reflection.GeneratedProtocolMessageType('RegisteredModelDetail', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELDETAIL,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelDetail)
})
_sym_db.RegisterMessage(RegisteredModelDetail)
RegisteredModelMetas = _reflection.GeneratedProtocolMessageType('RegisteredModelMetas', (_message.Message,), {
'DESCRIPTOR' : _REGISTEREDMODELMETAS,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.RegisteredModelMetas)
})
_sym_db.RegisterMessage(RegisteredModelMetas)
ResultProto = _reflection.GeneratedProtocolMessageType('ResultProto', (_message.Message,), {
'DESCRIPTOR' : _RESULTPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.ResultProto)
})
_sym_db.RegisterMessage(ResultProto)
MetricMetaProto = _reflection.GeneratedProtocolMessageType('MetricMetaProto', (_message.Message,), {
'PropertiesEntry' : _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), {
'DESCRIPTOR' : _METRICMETAPROTO_PROPERTIESENTRY,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.MetricMetaProto.PropertiesEntry)
})
,
'DESCRIPTOR' : _METRICMETAPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.MetricMetaProto)
})
_sym_db.RegisterMessage(MetricMetaProto)
_sym_db.RegisterMessage(MetricMetaProto.PropertiesEntry)
MetricSummaryProto = _reflection.GeneratedProtocolMessageType('MetricSummaryProto', (_message.Message,), {
'DESCRIPTOR' : _METRICSUMMARYPROTO,
'__module__' : 'message_pb2'
# @@protoc_insertion_point(class_scope:ai_flow.MetricSummaryProto)
})
_sym_db.RegisterMessage(MetricSummaryProto)
DESCRIPTOR._options = None
_DATASETPROTO_PROPERTIESENTRY._options = None
_JOBPROTO_PROPERTIESENTRY._options = None
_WORKFLOWPROTO_PROPERTIESENTRY._options = None
_WORKFLOWEXECUTIONPROTO_PROPERTIESENTRY._options = None
_PROJECTPROTO_PROPERTIESENTRY._options = None
_WORKFLOWMETAPROTO_PROPERTIESENTRY._options = None
_ARTIFACTPROTO_PROPERTIESENTRY._options = None
_METRICMETAPROTO_PROPERTIESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 47.805839 | 12,989 | 0.769887 |
ba08393a82b8c3f297e9c51f13c5fa53c10a7ff8
| 286 |
py
|
Python
|
InternetOfThings101/hello.py
|
Guanatos/theiotlearninginitiative
|
2ea81180f0911b5915ca1ba9af695d7c1330a349
|
[
"Apache-2.0"
] | null | null | null |
InternetOfThings101/hello.py
|
Guanatos/theiotlearninginitiative
|
2ea81180f0911b5915ca1ba9af695d7c1330a349
|
[
"Apache-2.0"
] | null | null | null |
InternetOfThings101/hello.py
|
Guanatos/theiotlearninginitiative
|
2ea81180f0911b5915ca1ba9af695d7c1330a349
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
my_str = "Hello World!!!"
split = my_str.split()
print(split)
A = list(range(10))
print(A)
B = [11,12,13,14]
print(B)
A.append(12)
print("Append to A",A)
age = 55
if age == 35:
print("Age is equal to 35!")
for x in range(3,16):
if x % 2 == 0:
print(x)
| 16.823529 | 31 | 0.594406 |
90798118444d755b3011d6760d284aecf1c6a3ac
| 286 |
py
|
Python
|
app/constants/letter_type.py
|
joelbcastillo/NYCOpenRecords
|
001dfa21cc33d75a9067cae78752c5ba5734718b
|
[
"Apache-2.0"
] | 37 |
2016-01-21T18:33:56.000Z
|
2021-10-24T01:43:20.000Z
|
app/constants/letter_type.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 179 |
2016-01-21T21:33:31.000Z
|
2022-02-15T21:31:35.000Z
|
app/constants/letter_type.py
|
joelbcastillo/NYCOpenRecords
|
001dfa21cc33d75a9067cae78752c5ba5734718b
|
[
"Apache-2.0"
] | 13 |
2017-05-19T17:27:31.000Z
|
2020-07-05T00:55:29.000Z
|
from app.constants import (
determination_type,
response_type
)
letter_type = [
determination_type.ACKNOWLEDGMENT,
determination_type.EXTENSION,
determination_type.CLOSING,
determination_type.DENIAL,
determination_type.REOPENING,
response_type.LETTER
]
| 20.428571 | 38 | 0.762238 |
accfdd21b9fc322814e78dd12f667976516bc06e
| 16,865 |
py
|
Python
|
YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106020535.py
|
jphacks/D_2003
|
60a5684d549862e85bdf758069518702d9925a48
|
[
"MIT"
] | 1 |
2020-11-07T07:58:13.000Z
|
2020-11-07T07:58:13.000Z
|
YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106020535.py
|
jphacks/D_2003
|
60a5684d549862e85bdf758069518702d9925a48
|
[
"MIT"
] | null | null | null |
YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106020535.py
|
jphacks/D_2003
|
60a5684d549862e85bdf758069518702d9925a48
|
[
"MIT"
] | 4 |
2020-11-02T02:51:45.000Z
|
2020-11-07T02:54:47.000Z
|
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
import requests
from requests.auth import HTTPDigestAuth
import io
from PIL import Image, ImageDraw, ImageFilter
import play
import csv
import itertools
import math  # needed by cosineTheorem() and combinations_count() below
with open('csv/Lidar.csv', 'r', encoding="utf-8_sig", newline = '') as f:
l = csv.reader(f)
LiDAR = [row for row in l]
# for row in LiDAR:
# print(row)
print("LiDAR_len", len(LiDAR))
def prep_image(img, inp_dim):
    # Resize and reorder the image so it can be fed to the CNN
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim
def count(x, img, count):
    # Draw the detection result on the image
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
label = "{0}".format(classes[cls])
print("label:\n", label)
    # Count detections labelled 'no-mask'
if(label=='no-mask'):
count+=1
print(count)
return count
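# write() draws a single detection on the frame: bounding-box corners, class
# label, a running count of 'no-mask' detections, and the box centre appended
# to point[camId]. It expects module-level `count` and `point` names to exist.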
def write(x, img,camId):
global count
global point
p = [0,0]
    # Draw the detection result on the image
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
print(camId, "_c0:",c1)
print(camId, "_c1:",c2)
label = "{0}".format(classes[cls])
print("label:", label)
    # Count detections labelled 'no-mask'
if(label=='no-mask'):
count+=1
print(count)
p[0] = (c2[0]+c1[0])/2
p[1] = (c2[1]+c1[1])/2
point[camId].append(p)
color = random.choice(colors)
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
return img
def arg_parse():
    # Build the command-line arguments for this module
    parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # set up the arguments with ArgumentParser
    parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.25)
    # confidence: minimum detection confidence used to filter predictions
    parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshold", default = 0.4)
    # nms_thresh: non-maximum suppression threshold
    parser.add_argument("--reso", dest = 'reso', help =
                        "Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default = "160", type = str)
    # reso: CNN input resolution; larger values improve accuracy but reduce speed
    return parser.parse_args() # parse the arguments and return them
def cvpaste(img, imgback, x, y, angle, scale):
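    # paste img onto imgback with the given offset, rotation and scale, using a mask so only the pasted region is replaced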
# x and y are the distance from the center of the background image
r = img.shape[0]
c = img.shape[1]
rb = imgback.shape[0]
cb = imgback.shape[1]
hrb=round(rb/2)
hcb=round(cb/2)
hr=round(r/2)
hc=round(c/2)
# Copy the forward image and move to the center of the background image
imgrot = np.zeros((rb,cb,3),np.uint8)
imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]
# Rotation and scaling
M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Translation
M = np.float32([[1,0,x],[0,1,y]])
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Makeing mask
imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of the forward image in the background image
img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)
# Take only region of the forward image.
img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)
# Paste the forward image on the background image
imgpaste = cv2.add(img1_bg,img2_fg)
return imgpaste
def cosineTheorem(Lidar, radian1, radian2):
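    # law of cosines: returns the squared distance between the two LiDAR readings
    # (no square root is applied, so callers effectively compare squared distances)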
theta = abs(radian1-radian2)
distance = Lidar[radian1][1] ** 2 + Lidar[radian2][1] ** 2 - 2 * Lidar[radian1][1] * Lidar[radian2][1] * math.cos(abs(radian2 - radian1))
return distance
def combinations_count(n, r):
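    # nCr: number of ways to choose r items out of n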
return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))
# def beep(freq, dur=100):
# winsound.Beep(freq, dur)
if __name__ == '__main__':
    # YOLO before fine-tuning
    # cfgfile = "cfg/yolov3.cfg" # config file
    # weightsfile = "weight/yolov3.weights" # weights file
    # classes = load_classes('data/coco.names') # list of class names
    # YOLO fine-tuned on the mask dataset
    cfgfile = "cfg/mask.cfg" # config file
    weightsfile = "weight/mask_1500.weights" # weights file
    classes = load_classes('data/mask.names') # list of class names
    num_classes = 80 # number of classes
    args = arg_parse() # get the command-line arguments
    confidence = float(args.confidence) # get the confidence setting
    nms_thesh = float(args.nms_thresh) # get the NMS threshold
    start = 0
    CUDA = torch.cuda.is_available() # whether CUDA is available
    num_classes = 80 # number of classes
    bbox_attrs = 5 + num_classes
    max = 0 # maximum allowed number of people
    num_camera = 1 # number of cameras
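    # per-camera state: model, input resolution, capture handle, frames/images and their dimensions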
model = [[] for i in range(num_camera)]
inp_dim = [[] for i in range(num_camera)]
cap = [[] for i in range(num_camera)]
ret = [[] for i in range(num_camera)]
frame = [[] for i in range(num_camera)]
img = [[] for i in range(num_camera)]
orig_im = [[] for i in range(num_camera)]
dim = [[] for i in range(num_camera)]
# output = [[] for i in range(num_camera)]
# output = torch.tensor(output)
# print("output_shape\n", output.shape)
for i in range(num_camera):
        model[i] = Darknet(cfgfile) # create the model
        model[i].load_weights(weightsfile) # load the weights into the model
model[i].net_info["height"] = args.reso
inp_dim[i] = int(model[i].net_info["height"])
assert inp_dim[i] % 32 == 0
assert inp_dim[i] > 32
    #mixer.init() # initialise the sound mixer
    if CUDA:
        for i in range(num_camera):
            model[i].cuda() # run on CUDA when it is available
for i in range(num_camera):
model[i].eval()
    cap[0] = cv2.VideoCapture(1) # select the camera (USB connection)
    # cap[1] = cv2.VideoCapture(1) # select the camera (USB connection)
# cap = cv2.VideoCapture("movies/sample.mp4")
#cap = cv2.VideoCapture("movies/one_v2.avi")
# Use the next line if your camera has a username and password
# cap = cv2.VideoCapture('protocol://username:password@IP:port/1')
    #cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/1') #(network connection)
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/80')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4:80/video')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/camera-cgi/admin/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.5:80/snapshot.jpg?user=admin&pwd=admin&strm=0')
print('-1')
    #assert cap.isOpened(), 'Cannot capture source' # check that the camera started
img1 = cv2.imread("images/phase_1.jpg")
img2 = cv2.imread("images/phase_2.jpg")
img3 = cv2.imread("images/phase_2_red.jpg")
img4 = cv2.imread("images/phase_3.jpg")
#mixer.music.load("voice/voice_3.m4a")
#print(img1)
frames = 0
    count_frame = 0 # frame counter
    flag = 0 # crowding state (0: not crowded, 1: crowded)
start = time.time()
print('-1')
    while all(cap[i].isOpened() for i in range(num_camera)): # while the cameras are running
        count=0 # count the number of people
        point = [[] for i in range(num_camera)]
        for i in range(num_camera):
            ret[i], frame[i] = cap[i].read() # grab the captured frame
        if all(ret[i] for i in range(num_camera)):
            # preprocess the captured frames for inference
            for i in range(num_camera):
                img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])
            if CUDA:
                for i in range(num_camera):
                    # im_dim is never defined in this version of the script, so only the image tensor is moved to the GPU
                    img[i] = img[i].cuda()
for i in range(num_camera):
# output[i] = model[i](Variable(img[i]), CUDA)
output = model[i](Variable(img[i]), CUDA)
#print("output:\n", output)
# output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)
output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
# print("output", i, ":\n", output[i])
print(output.shape)
"""
            # display the FPS
if (type(output[i]) == int for i in range(num_camera)):
print("表示")
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                # pressing the q key ends the FPS display
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[i][:,[1,3]] *= frame[i].shape[1]
output[i][:,[2,4]] *= frame[i].shape[0]
"""
            # display the FPS
if type(output) == int:
print("表示")
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
                # pressing the q key ends the FPS display
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[:,[1,3]] *= frame[i].shape[1]
output[:,[2,4]] *= frame[i].shape[0]
colors = pkl.load(open("pallete", "rb"))
            #count = lambda x: count(x, orig_im, count) # count people
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i]), output[i]))
print("count:\n",count)
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i], i), output))
print("count:\n",count)
print("count_frame", count_frame)
print("framex", frame[0].shape[1])
print("framey", frame[0].shape[0])
print("point0",point[0])
            # identify people from the LiDAR data
radian_lists = []
close_list = [0] * 4
dense_list = [0] * 4
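            # walk the LiDAR scan: readings are grouped into four 90-sample sectors,
            # and camera detections are matched to LiDAR angles within a +/-5 sample window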
for k, (radian, length) in enumerate(LiDAR):
radian_cam = [[] for i in range(len(point))]
num_person = 0
# print("k:", k)
if k % 90 == 0:
# print("hahahah")
if not k == 0:
radian_lists.append(radian_list)
radian_list = []
if k < 90:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
for radi_num in range(len(radian_cam)):
if int(radian)+dif-5 == int(radian_cam[radi_num]):
num_person += 1
radian_list.append(radian)
if num_person > 1:
close_list[0] = 1
if num_person > 2:
dense_list[0] = 1
elif k < 180:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
for radi_num in range(len(radian_cam)):
if int(radian)+dif-5 == int(radian_cam[radi_num]):
num_person += 1
radian_list.append(radian)
if num_person > 1:
close_list[1] = 1
if num_person > 2:
dense_list[1] = 1
elif k < 270:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
for radi_num in range(len(radian_cam)):
if int(radian)+dif-5 == int(radian_cam[radi_num]):
num_person += 1
radian_list.append(radian)
if num_person > 1:
close_list[2] = 1
if num_person > 2:
dense_list[2] = 1
else:
for num, p in enumerate(point[0]):
radian_cam[num] = p[0] / frame[0].shape[1] * 100
for dif in range(10):
for radi_num in range(len(radian_cam)):
if int(radian)+dif-5 == int(radian_cam[radi_num]):
num_person += 1
radian_list.append(radian)
if num_person > 1:
close_list[3] = 1
if num_person > 2:
dense_list[3] = 1
print("radian_lists_len", len(radian_lists))
            # distance calculation
dis_list = []
for direction in range(4):
if len(radian_lists[direction]) > 1:
# n = combinations_k(len(radian_lists[direction]), 2)
dis_combination = list(itertools.combinations(radian_lists[direction], 2))
distance = [[] for i in range(len(dis_combination))]
print(type(LiDAR))
for num_dis, com_list in enumerate(dis_combination):
distance[num_dis] = cosineTheorem(LiDAR, int(com_list[0]), int(com_list[1]))
dis_list.append(distance)
            # crowding judgement
for direction in range(4):
                close = 0 # number of close pairs
                dense = 0 # number of dense pairs
                for dis in distance:
if dis < 2:
close += 1
close_list[direction] = 1
if close > 1:
dense_list[direction] = 1
print("close_list", close_list)
print("dense_list", dense_list)
# print("point1",point[1])
if count > max:
count_frame += 1
#print("-1")
if count_frame <= 50:
x=0
y=0
angle=20
scale=1.5
for i in range(num_camera):
imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)
if flag == 1:
play.googlehome()
flag += 1
#mixer.music.play(1)
elif count_frame <= 100:
x=-30
y=10
angle=20
scale=1.1
if count_frame%2==1:
for i in range(num_camera):
imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)
else:
for i in range(num_camera):
imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)
if flag == 2:
play.googlehome()
flag += 1
else:
x=-30
y=0
angle=20
scale=1.5
for i in range(num_camera):
imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)
                    if count_frame > 101: # <-- offset by two frames
                        print("\007") # warning sound
time.sleep(3)
if flag == 3:
play.googlehome()
flag += 1
cv2.imshow("frame", imgpaste)
else:
count_frame = 0
flag = 0
#print("-2")
for i in range(num_camera):
cv2.imshow("frame", orig_im[i])
# play.googlehome()
key = cv2.waitKey(1)
            # pressing the q key ends the video display
if key & 0xFF == ord('q'):
break
frames += 1
print("count_frame:\n", count_frame)
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
else:
break
| 36.190987 | 141 | 0.50756 |
3060ef061106c53f208fb5a26ba571ef0987dbb6
| 966 |
py
|
Python
|
venv/Lib/site-packages/nipype/algorithms/tests/test_splitmerge.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 585 |
2015-01-12T16:06:47.000Z
|
2022-03-26T14:51:08.000Z
|
nipype/algorithms/tests/test_splitmerge.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 2,329 |
2015-01-01T09:56:41.000Z
|
2022-03-30T14:24:49.000Z
|
nipype/algorithms/tests/test_splitmerge.py
|
tamires-consulting/nipype
|
b7879d75a63b6500b2e7d2c3eba5aa7670339274
|
[
"Apache-2.0"
] | 487 |
2015-01-20T01:04:52.000Z
|
2022-03-21T21:22:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nipype.testing import example_data
def test_split_and_merge(tmpdir):
import numpy as np
import nibabel as nb
import os.path as op
import os
from nipype.algorithms.misc import split_rois, merge_rois
in_mask = example_data("tpms_msk.nii.gz")
dwfile = tmpdir.join("dwi.nii.gz").strpath
mask_img = nb.load(in_mask)
mskdata = np.asanyarray(mask_img.dataobj)
aff = mask_img.affine
dwshape = (mskdata.shape[0], mskdata.shape[1], mskdata.shape[2], 6)
dwdata = np.random.normal(size=dwshape)
tmpdir.chdir()
nb.Nifti1Image(dwdata.astype(np.float32), aff, None).to_filename(dwfile)
resdw, resmsk, resid = split_rois(dwfile, in_mask, roishape=(20, 20, 2))
merged = merge_rois(resdw, resid, in_mask)
dwmerged = nb.load(merged).get_fdata(dtype=np.float32)
dwmasked = dwdata * mskdata[:, :, :, np.newaxis]
assert np.allclose(dwmasked, dwmerged)
| 29.272727 | 76 | 0.690476 |
bbc881559703ba0607fd5b6445ca6ff7c47ade54
| 1,594 |
py
|
Python
|
vox/show_notices.py
|
drocco007/vox_linux
|
9807bc3af8bc35f8f6634019ec4bc22b5e3b2e33
|
[
"MIT"
] | 5 |
2015-03-25T11:56:10.000Z
|
2018-02-03T21:11:27.000Z
|
vox/show_notices.py
|
drocco007/vox_linux
|
9807bc3af8bc35f8f6634019ec4bc22b5e3b2e33
|
[
"MIT"
] | null | null | null |
vox/show_notices.py
|
drocco007/vox_linux
|
9807bc3af8bc35f8f6634019ec4bc22b5e3b2e33
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys
from glib import GError
import pynotify
import bus
from commands import SHOW_NOTIFICATION
from process_utils import spawn_daemon_process
def notifier():
pynotify.init(sys.argv[0])
title = 'Dragon Naturally Speaking'
notice = pynotify.Notification(title, '')
notice.show()
def show_notice(message, retry=True, notice=notice):
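        # the notification object is bound as a default argument so the closure
        # keeps reusing (and updating) a single notification across calls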
try:
if not pynotify.is_initted():
pynotify.init(sys.argv[0])
if not notice:
notice = pynotify.Notification(title, message)
else:
notice.update(title, message)
notice.show()
except GError:
# libnotify's documentation is… awful. This song and dance is required
# because at some point in the process lifecycle pynotify loses its
# connection and raises. Disconnecting and reestablishing the
# connection gets things back on track.
notice = None
pynotify.uninit()
if retry:
show_notice(message, False)
return show_notice
def show_notice_worker(host='localhost'):
socket = bus.connect_subscribe(host=host,
subscriptions=(SHOW_NOTIFICATION,))
show_notice = notifier()
while True:
try:
message = socket.recv()[1:]
show_notice(message)
except:
print 'Error showing notice:', message
def show_notices(host='localhost'):
return [spawn_daemon_process(show_notice_worker, call_kw={'host': host})]
| 26.566667 | 82 | 0.614806 |
f54bb984f0fd3558aee69bf3b6754c275c1a33e0
| 10,066 |
py
|
Python
|
monai/metrics/metric.py
|
vijayakumargopal/MONAI
|
10ffd1f1424e1dcafbce8d71481bed9c7a250a89
|
[
"Apache-2.0"
] | 2,971 |
2019-10-16T23:53:16.000Z
|
2022-03-31T20:58:24.000Z
|
monai/metrics/metric.py
|
vijayakumargopal/MONAI
|
10ffd1f1424e1dcafbce8d71481bed9c7a250a89
|
[
"Apache-2.0"
] | 2,851 |
2020-01-10T16:23:44.000Z
|
2022-03-31T22:14:53.000Z
|
monai/metrics/metric.py
|
vijayakumargopal/MONAI
|
10ffd1f1424e1dcafbce8d71481bed9c7a250a89
|
[
"Apache-2.0"
] | 614 |
2020-01-14T19:18:01.000Z
|
2022-03-31T14:06:14.000Z
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, List, Optional
import torch
from monai.config import TensorOrList
from monai.utils import evenly_divisible_all_gather
class Metric(ABC):
"""
Base class of all Metrics interface.
`__call__` is designed to execute metric computation.
"""
@abstractmethod
def __call__(self, *args: Any, **kwds: Any):
"""
API to execute the metric computation.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
class IterationMetric(Metric):
"""
Base class of Metrics interface for computation on a batch of tensors, usually the data of 1 iteration.
`__call__` is supposed to compute independent logic for several samples of `y_pred` and `y`(optional).
Usually, subclass only needs to implement the `_compute_tensor` function for computation process.
The input data shape should be `list of channel-first tensors` or a `batch-first tensor`.
"""
def __call__(self, y_pred: TensorOrList, y: Optional[TensorOrList] = None): # type: ignore
"""
Execute basic computation for model prediction and ground truth.
It can support both `list of channel-first Tensor` and `batch-first Tensor`.
And users can execute on every batch of data, then accumulate the results, or
accumulate the original `y_pred` and `y`, then execute on the accumulated data.
Args:
y_pred: the model prediction data to compute, must be a list of `channel-first` Tensor
or a `batch-first` Tensor.
y: the ground truth to compute, must be a list of `channel-first` Tensor
or a `batch-first` Tensor.
"""
ret: TensorOrList
if isinstance(y_pred, (list, tuple)) or isinstance(y, (list, tuple)):
# if y_pred or y is a list of channel-first data, add batch dim and compute metric
ret = self._compute_list(y_pred, y)
elif isinstance(y_pred, torch.Tensor):
y_ = y.detach() if y is not None and isinstance(y, torch.Tensor) else None
ret = self._compute_tensor(y_pred.detach(), y_)
else:
raise ValueError("y_pred or y must be a list of `channel-first` Tensors or a `batch-first` Tensor.")
return ret
def _compute_list(self, y_pred: TensorOrList, y: Optional[TensorOrList] = None):
"""
        Execute the computation for the y_pred and y items of an iteration, where the data is in list form.
Will concat the results to guarantee the output shape of ret is BCHW[D], otherwise it's list of batch-first,
which is against our principle that data in metrics should be BCHW[D] or list of channel-first.
Note: subclass may enhance the operation with multi-threads to accelerate.
"""
ret: TensorOrList
if y is not None:
ret = [self._compute_tensor(p.detach().unsqueeze(0), y_.detach().unsqueeze(0)) for p, y_ in zip(y_pred, y)]
else:
ret = [self._compute_tensor(p_.detach().unsqueeze(0), None) for p_ in y_pred]
# concat the list of results
if isinstance(ret[0], torch.Tensor):
ret = torch.cat(ret, dim=0)
elif isinstance(ret[0], (list, tuple)) and all(isinstance(i, torch.Tensor) for i in ret[0]):
# if _compute_tensor() returned not only 1 Tensor, concat them separately
ret = [torch.cat([k[i] for k in ret], dim=0) for i in range(len(ret[0]))]
return ret
@abstractmethod
def _compute_tensor(self, y_pred: torch.Tensor, y: Optional[torch.Tensor] = None):
"""
        Computation logic for the y_pred and y of an iteration; the data should be `batch-first` Tensors.
Every subclass metric should implement its own computation logic according to its algorithm.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
class Cumulative(ABC):
"""
Utility class for the typical cumulative computation process based on PyTorch Tensors.
It cumulates tensors in the buffer, then sync across distributed ranks and aggregate.
    To speed up computation with multi-processing, PyTorch programs usually split data across distributed ranks
    with `DistributedSampler` before an epoch; every rank then computes only on its own part of the data and
    `add`s it to the buffers in its process. Eventually, the values of all ranks are synced to compute the final results.
    Note: the data list should have the same length every time `add()` is called in a round;
    buffers are created automatically according to the length of the data list.
Typically, this class is expected to execute the steps referring to below examples::
cum = Cumulative()
cum.add(x, y)
cum.add(a, b)
cum.add(c, d)
cum.aggregate()
result = cum.get_buffer() # optional
cum.reset()
"""
def __init__(self):
self.buffer_num: int = 0
self._buffers: Optional[List[List[torch.Tensor]]] = None
self._synced_tensors: Optional[List[Optional[torch.Tensor]]] = None
self._synced: bool = False
def reset(self):
"""
Reset the buffers for cumulative tensors and the synced results.
"""
self._buffers = None
self._synced_tensors = None
self._synced = False
def add(self, *data: torch.Tensor):
"""
Add samples to the cumulative buffers.
Args:
data: list of input tensor, make sure the input data order is always the same in a round.
every item of data will be added to the corresponding buffer.
"""
data_len = len(data)
if self._buffers is None:
self._buffers = [[] for _ in range(data_len)]
elif len(self._buffers) != data_len:
raise ValueError(f"data length: {data_len} doesn't match buffers length: {len(self._buffers)}.")
if self._synced_tensors is None:
self._synced_tensors = [None for _ in range(data_len)]
for i, d in enumerate(data):
if not isinstance(d, torch.Tensor):
raise ValueError(f"the data to cumulate in a buffer must be PyTorch Tensor, but got: {type(d)}.")
self._buffers[i].append(d)
self._synced = False
@abstractmethod
def aggregate(self, *args: Any, **kwds: Any):
"""
Aggregate final results based on the buffers.
"""
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
def _sync(self):
"""
All gather the buffers across distributed ranks for aggregating.
Every buffer will be concatenated as a PyTorch Tensor.
"""
self._synced_tensors = [evenly_divisible_all_gather(torch.cat(b, dim=0), concat=True) for b in self._buffers]
self._synced = True
def get_buffer(self):
"""
Get the synced buffers list.
A typical usage is to generate the metrics report based on the raw metric details.
"""
if not self._synced:
self._sync()
return self._synced_tensors[0] if len(self._synced_tensors) == 1 else self._synced_tensors
class CumulativeIterationMetric(Cumulative, IterationMetric):
"""
Base class of cumulative metric which computes on batch data of every iteration and aggregate.
Typically, it computes some intermediate results for every iteration, cumulates in buffers,
then syncs across all the distributed ranks and aggregates for the final result when epoch completed.
For example, `MeanDice` inherits this class and the usage:
.. code-block:: python
dice_metric = DiceMetric(include_background=True, reduction="mean")
for val_data in val_loader:
val_outputs = model(val_data["img"])
val_outputs = [postprocessing_transform(i) for i in decollate_batch(val_outputs)]
# compute metric for current iteration
dice_metric(y_pred=val_outputs, y=val_data["seg"])
# aggregate the final mean dice result
metric = dice_metric.aggregate().item()
# reset the status for next computation round
dice_metric.reset()
And to load `predictions` and `labels` from files, then compute metrics with multi-processing, please refer to:
https://github.com/Project-MONAI/tutorials/blob/master/modules/compute_metric.py.
"""
def __call__(self, y_pred: TensorOrList, y: Optional[TensorOrList] = None): # type: ignore
"""
Execute basic computation for model prediction and ground truth.
It can support both `list of channel-first Tensor` and `batch-first Tensor`.
Users call this API to execute computation on every batch of data, then accumulate the results,
or accumulate the original `y_pred` and `y`, then execute on the accumulated data.
Args:
y_pred: the model prediction data to compute, must be a list of `channel-first` Tensor
or a `batch-first` Tensor.
y: the ground truth to compute, must be a list of `channel-first` Tensor
or a `batch-first` Tensor.
"""
ret = super().__call__(y_pred=y_pred, y=y)
if isinstance(ret, (tuple, list)):
self.add(*ret)
else:
self.add(ret)
return ret
| 41.254098 | 119 | 0.661534 |
d2e75b9cd4d7a4ea745a88029f049fb214739b7b
| 283 |
py
|
Python
|
Backend/Tools for Pictures/copyfiles.py
|
RaresTeodor/Hacklytics2021
|
d329880efa062ca3d85592f51bca5f4e91618d2e
|
[
"MIT"
] | null | null | null |
Backend/Tools for Pictures/copyfiles.py
|
RaresTeodor/Hacklytics2021
|
d329880efa062ca3d85592f51bca5f4e91618d2e
|
[
"MIT"
] | null | null | null |
Backend/Tools for Pictures/copyfiles.py
|
RaresTeodor/Hacklytics2021
|
d329880efa062ca3d85592f51bca5f4e91618d2e
|
[
"MIT"
] | null | null | null |
import shutil
with open("../names.txt") as f:
content = f.readlines()
content = [x.strip() for x in content]
for superstar in content:
newname = superstar.replace(" ", "")
directory = "./" + newname + "/"
shutil.copy(directory + newname + ".png", './')
| 25.727273 | 55 | 0.575972 |
cd6b1eaf10be5e0429bcdbb1b2ff218d05d45adb
| 6,304 |
py
|
Python
|
System_of_Beams/testing_collection/test_runner.py
|
ChairOfStructuralMechanicsTUM/Mechanics_Apps
|
b064a42d4df3fa9bde62a5cff9cb27ca61b0127c
|
[
"MIT"
] | 11 |
2017-05-06T17:05:29.000Z
|
2020-11-12T09:26:47.000Z
|
System_of_Beams/testing_collection/test_runner.py
|
ChairOfStructuralMechanicsTUM/Mechanics_Apps
|
b064a42d4df3fa9bde62a5cff9cb27ca61b0127c
|
[
"MIT"
] | 49 |
2017-04-20T11:26:11.000Z
|
2020-05-29T13:18:06.000Z
|
System_of_Beams/testing_collection/test_runner.py
|
ChairOfStructuralMechanicsTUM/Mechanics_Apps
|
b064a42d4df3fa9bde62a5cff9cb27ca61b0127c
|
[
"MIT"
] | 4 |
2017-02-14T12:55:34.000Z
|
2022-01-12T15:07:07.000Z
|
from testing_collection import visualisation_tests as visu_tests
from Classes.CurrentDocument import CurrentDoc
def run_tests(curr_doc: CurrentDoc):
"""
    1.) Write the test case and add it to the file test_cases.py
    2.) Call it in this function (run_tests() will be called in 'System_of_Beams\main.py')
    3.) Make sure the variable 'run_tests' in the file main.py is set to True
    4.) Only the most recently run test can be plotted (there is currently no way to run several in a row)
    5.) Results will be visualized on the Bokeh server
"""
"""
VISUALISATION TESTS
"""
# print("Single beam lineload test")
# visu_tests.single_beam_lineload_visu(curr_doc)
# print("Final Software lab structure")
# visu_tests.final_structure_software_lab(curr_doc)
print('Test example Quirin') #19.11
visu_tests.example_unterlagen_visu(curr_doc)
# print("Visualise all possible nodedep elements")
# visu_tests.vis_all_possible_nodedep_ele(curr_doc)
"""
CALCULATION TESTS
"""
# print("Single beam lineload test") #24.11
# test_cases.single_beam_lineload_test(curr_doc)
# print('normal line load') #24.11
# test_cases.single_beam_normal_lineload_test(curr_doc)
# print("Single beam clamping test") #24.11
# test_cases.single_clamping_left_side(curr_doc)
# print("Two beam lineload test") #17.11
# test_cases.two_beam_lineload_test(curr_doc)
# print("Two beam lineload overdefined test") #17.11
# test_cases.single_beam_lineload_test_overdefined(curr_doc)
# print("Single beam lineload test underdefined") #24.11
# test_cases.single_beam_lineload_test_underdefined(curr_doc)
# print('Big beam out of free elements') #17.11
# test_cases.two_beam_combined_to_one_complete_lineload_test(curr_doc)
# print('Big beam out of free elements 2 l') #17.11
# test_cases.two_beam_combined_to_one_complete_lineload_test_2l(curr_doc)
# print('Single load in the middle') #17.11
# test_cases.two_beam_combined_to_one_single_load_middle(curr_doc)
# print('Seperated elements') #17.11
# test_cases.single_beam_lineload_test_seperated_elements(curr_doc)
    # print('Joint test') #18.11
# test_cases.two_beam_combined_to_one_single_load_middle_joint(curr_doc)
#
# print('Clamping with single load test') #17.11
# test_cases.single_clamping_left_side_single_load(curr_doc)
# print('TM example') #17.11
# test_cases.example_from_sheet_2_4(curr_doc)
# print('Trapezlast') #17.11
# test_cases.single_beam_trapezload_test(curr_doc)
# print('Temperature test') #17.11
# test_cases.single_beam_temperature_test(curr_doc)
# print('Triangle test') #17.11
# test_cases.two_beam_triangle_load_middle(curr_doc)
# print('Temperature clamping') #18.11
# test_cases.single_clamping_left_side_temperature(curr_doc)
# print('ss13') #17.11
# test_cases.example_ss13(curr_doc)
# print('ss12') #17.11
# test_cases.example_ss12(curr_doc)
#
# print('ss12_vereinfacht') #17.11
# test_cases.example_ss12_vereinfacht(curr_doc)
# print('ss11') #17.11
# test_cases.example_ss11(curr_doc)
# print('ss14') #19.11
# test_cases.example_ss14(curr_doc)
# print('schraeg') #17.11
# test_cases.single_beam_schraeg(curr_doc)
# print('vertical') #17.11
# test_cases.single_beam_lineload_vertical_test(curr_doc)
# print('vertical single load') #17.11
# test_cases.single_beam_single_load_vertical_test(curr_doc)
# print('Test Ecke') #17.11
# test_cases.two_beam_corner_line_load(curr_doc)
# print('triangle_not_symmetric') #17.11
# test_cases.two_beam_triangle_load_middle_not_symmetrical(curr_doc)
# print('Test example Quirin') #19.11
# test_cases.example_unterlagen_test(curr_doc)
# print('Test Quirin vereinfacht') #19.11
# test_cases.example_unterlagen_test_vereinfacht(curr_doc)
# print('test cos') #18.11
# test_cases.single_beam_cos_test(curr_doc)
# print('test multiple elements') #19.11
# test_cases.multiple_elements(curr_doc)
# print('test case spring') #24.11
# test_cases.example_2_3_neu(curr_doc)
# print('Test case ss 15') #24.11
# test_cases.example_ss15(curr_doc)
# print('Test case ss 16') #24.11
# test_cases.example_SS_16(curr_doc)
# test_cases.single_beam_lineload_test_infinity(curr_doc)
# test_cases.final_structure_software_lab(curr_doc)
# test_cases.final_structure_software_lab(curr_doc)
| 44.70922 | 101 | 0.521415 |
91e4eba8b9d04faea73b94fa9d178eeceb497dbb
| 472 |
py
|
Python
|
output/models/nist_data/list_pkg/token/schema_instance/nistschema_sv_iv_list_token_max_length_1_xsd/nistschema_sv_iv_list_token_max_length_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1 |
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/token/schema_instance/nistschema_sv_iv_list_token_max_length_1_xsd/nistschema_sv_iv_list_token_max_length_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4 |
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/token/schema_instance/nistschema_sv_iv_list_token_max_length_1_xsd/nistschema_sv_iv_list_token_max_length_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "NISTSchema-SV-IV-list-token-maxLength-1-NS"
@dataclass
class NistschemaSvIvListTokenMaxLength1:
class Meta:
name = "NISTSchema-SV-IV-list-token-maxLength-1"
namespace = "NISTSchema-SV-IV-list-token-maxLength-1-NS"
value: List[str] = field(
default_factory=list,
metadata={
"max_length": 5,
"tokens": True,
}
)
| 23.6 | 64 | 0.646186 |
a2ae7e356ff7a79831a1c7eb71a09948031d45d7
| 694 |
py
|
Python
|
angrytux/config/Config.py
|
Wilson194/Angry-tux
|
62b7457948675071fe328b69ba5d85aab6b39ed1
|
[
"CC0-1.0"
] | null | null | null |
angrytux/config/Config.py
|
Wilson194/Angry-tux
|
62b7457948675071fe328b69ba5d85aab6b39ed1
|
[
"CC0-1.0"
] | 1 |
2019-02-05T04:50:27.000Z
|
2019-02-05T04:50:27.000Z
|
angrytux/config/Config.py
|
Wilson194/Angry-tux
|
62b7457948675071fe328b69ba5d85aab6b39ed1
|
[
"CC0-1.0"
] | null | null | null |
import os
from angrytux.model.Singleton import Singleton
class Config(metaclass=Singleton):
"""
Class for global game config
Has getitem from getting variables
Singleton class
"""
def __init__(self):
variables = {}
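        # Data.py is executed into this dict and is expected to define a top-level 'config' dictionary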
exec(open(os.path.join(os.path.dirname(__file__), 'Data.py'), 'r').read(), variables)
self.__config = variables['config']
self.__config['root_dir'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
def __getitem__(self, item):
if item in self.__config:
return self.__config.get(item)
else:
raise AttributeError('Item {} not found in config!'.format(item))
| 25.703704 | 98 | 0.634006 |
195e035980eaa21dead07d02e22de8b925c5ad88
| 944 |
py
|
Python
|
src/layers/infrastructure/providers/chapi_provider.py
|
v-martel/CrakTextSummarizer
|
888c261f8b86b6f0fbb38025ed2d4bca873e84a4
|
[
"MIT"
] | null | null | null |
src/layers/infrastructure/providers/chapi_provider.py
|
v-martel/CrakTextSummarizer
|
888c261f8b86b6f0fbb38025ed2d4bca873e84a4
|
[
"MIT"
] | null | null | null |
src/layers/infrastructure/providers/chapi_provider.py
|
v-martel/CrakTextSummarizer
|
888c261f8b86b6f0fbb38025ed2d4bca873e84a4
|
[
"MIT"
] | 2 |
2020-09-15T20:01:51.000Z
|
2020-09-15T20:12:40.000Z
|
from src.shared.config.configuration import configuration
from src.layers.infrastructure.providers.temporary.queries import queryMfcTagsAndTitles, queryMfcTotal
import requests
import json
class ChapiProvider:
def __init__(self):
self.config = configuration.layers.infrastructure.providers.chapi_provider
def get_total_vids_number(self):
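        # POST the GraphQL query; returns the parsed 'data' payload, or None on a non-200 response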
response = requests.post(
url=self.config.chapi_url,
json={'query': queryMfcTotal(self.config.chapi_token)}
)
return json.loads(response.content.decode('utf-8'))['data'] if response.status_code == 200 else None
def get_vids_at_page(self, page: str) -> dict:
response = requests.post(
url=self.config.chapi_url,
json={'query': queryMfcTagsAndTitles(self.config.chapi_token, page)}
)
return json.loads(response.content.decode('utf-8'))['data'] if response.status_code == 200 else None
| 34.962963 | 108 | 0.701271 |
b12c1233a779fedc10dde49845e8162e5ffb6a46
| 479 |
py
|
Python
|
nomadgram/users/migrations/0016_auto_20180603_1125.py
|
yoosojeong/Nomadgram.
|
cd27f552e92f703ac925c8ef2266f89286f326d5
|
[
"MIT"
] | null | null | null |
nomadgram/users/migrations/0016_auto_20180603_1125.py
|
yoosojeong/Nomadgram.
|
cd27f552e92f703ac925c8ef2266f89286f326d5
|
[
"MIT"
] | null | null | null |
nomadgram/users/migrations/0016_auto_20180603_1125.py
|
yoosojeong/Nomadgram.
|
cd27f552e92f703ac925c8ef2266f89286f326d5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-06-03 02:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0015_auto_20180603_1022'),
]
operations = [
migrations.AlterField(
model_name='user',
name='gender',
field=models.CharField(choices=[('male', 'Male'), ('female', 'Female'), ('not-specified', 'Not specified')], max_length=80, null=True),
),
]
| 25.210526 | 147 | 0.597077 |
a3caa22dca604c3accf528d829986c4e84bcf5c0
| 948 |
py
|
Python
|
DAppVotingSystem/utility/migrations/0005_alter_booth_manager_phone_no_and_more.py
|
vidyakore/Blockchain-based-E-VotingSystem
|
a86ad14d9699b5430f0556b15261506e4f5c46a6
|
[
"MIT"
] | 1 |
2022-03-23T06:50:05.000Z
|
2022-03-23T06:50:05.000Z
|
DAppVotingSystem/utility/migrations/0005_alter_booth_manager_phone_no_and_more.py
|
vidyakore/Blockchain-based-E-VotingSystem
|
a86ad14d9699b5430f0556b15261506e4f5c46a6
|
[
"MIT"
] | null | null | null |
DAppVotingSystem/utility/migrations/0005_alter_booth_manager_phone_no_and_more.py
|
vidyakore/Blockchain-based-E-VotingSystem
|
a86ad14d9699b5430f0556b15261506e4f5c46a6
|
[
"MIT"
] | 1 |
2022-03-31T15:20:23.000Z
|
2022-03-31T15:20:23.000Z
|
# Generated by Django 4.0.3 on 2022-03-31 06:53
from django.db import migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('utility', '0004_booth_manager_candidate_candidate_constituency_and_more'),
]
operations = [
migrations.AlterField(
model_name='booth_manager',
name='phone_no',
field=phonenumber_field.modelfields.PhoneNumberField(max_length=13, region=None, unique=True),
),
migrations.AlterField(
model_name='contract_manager',
name='phone_no',
field=phonenumber_field.modelfields.PhoneNumberField(max_length=13, region=None, unique=True),
),
migrations.AlterField(
model_name='voter',
name='phone_no',
field=phonenumber_field.modelfields.PhoneNumberField(max_length=13, region=None, unique=True),
),
]
| 31.6 | 106 | 0.655063 |
239f6ff8e5ba1d247a8bd624f6c016f2c64a9d39
| 3,155 |
py
|
Python
|
Django/django_app/django_app/settings.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
Django/django_app/django_app/settings.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | 8 |
2020-06-06T01:02:06.000Z
|
2022-03-12T00:24:13.000Z
|
Django/django_app/django_app/settings.py
|
justnclrk/Python
|
0922961cbd94694a69ae8132a5c33baf552d8d89
|
[
"MIT"
] | null | null | null |
"""
Django settings for django_app project.
Generated by 'django-admin startproject' using Django 1.11.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5-%eixdb*5j9pc*qhx%av5-xa7$x&&=2a1yprc=_zfia@#k(54'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.time_display',
'apps.blog_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.650407 | 91 | 0.696038 |